-rw-r--r-- | kernel/trace/Kconfig             |   12 |
-rw-r--r-- | kernel/trace/Makefile            |    1 |
-rw-r--r-- | kernel/trace/trace.h             |   30 |
-rw-r--r-- | kernel/trace/trace_event_types.h |   18 |
-rw-r--r-- | kernel/trace/trace_kprobe.c      | 1202 |
5 files changed, 1263 insertions, 0 deletions
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 06be85a7ef8c..fb5fbf75f279 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -411,6 +411,18 @@ config BLK_DEV_IO_TRACE
411 | 411 | ||
412 | If unsure, say N. | 412 | If unsure, say N. |
413 | 413 | ||
414 | config KPROBE_TRACER | ||
415 | depends on KPROBES | ||
416 | depends on X86 | ||
417 | bool "Trace kprobes" | ||
418 | select TRACING | ||
419 | select GENERIC_TRACER | ||
420 | help | ||
421 | This tracer can probe anywhere kprobes can, and records the | ||
422 | registers and memory values specified by the user. | ||
423 | It also lets you trace kprobe probe points as dynamically | ||
424 | defined events, with a per-probe event filtering interface. | ||
425 | |||
414 | config DYNAMIC_FTRACE | 426 | config DYNAMIC_FTRACE |
415 | bool "enable/disable ftrace tracepoints dynamically" | 427 | bool "enable/disable ftrace tracepoints dynamically" |
416 | depends on FUNCTION_TRACER | 428 | depends on FUNCTION_TRACER |
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 844164dca90a..7c00a1ec1496 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -54,5 +54,6 @@ obj-$(CONFIG_EVENT_TRACING) += trace_export.o
54 | obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o | 54 | obj-$(CONFIG_FTRACE_SYSCALLS) += trace_syscalls.o |
55 | obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o | 55 | obj-$(CONFIG_EVENT_PROFILE) += trace_event_profile.o |
56 | obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o | 56 | obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o |
57 | obj-$(CONFIG_KPROBE_TRACER) += trace_kprobe.o | ||
57 | 58 | ||
58 | libftrace-y := ftrace.o | 59 | libftrace-y := ftrace.o |
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 654fd657bd03..667f832d16b7 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -38,6 +38,8 @@ enum trace_type {
38 | TRACE_KMEM_FREE, | 38 | TRACE_KMEM_FREE, |
39 | TRACE_POWER, | 39 | TRACE_POWER, |
40 | TRACE_BLK, | 40 | TRACE_BLK, |
41 | TRACE_KPROBE, | ||
42 | TRACE_KRETPROBE, | ||
41 | 43 | ||
42 | __TRACE_LAST_TYPE, | 44 | __TRACE_LAST_TYPE, |
43 | }; | 45 | }; |
@@ -205,6 +207,30 @@ struct syscall_trace_exit { | |||
205 | unsigned long ret; | 207 | unsigned long ret; |
206 | }; | 208 | }; |
207 | 209 | ||
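/*
 * kprobe/kretprobe trace entries are variable-sized: 'nargs' fetched
 * values follow the fixed part of each entry.
 */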
210 | struct kprobe_trace_entry { | ||
211 | struct trace_entry ent; | ||
212 | unsigned long ip; | ||
213 | int nargs; | ||
214 | unsigned long args[]; | ||
215 | }; | ||
216 | |||
217 | #define SIZEOF_KPROBE_TRACE_ENTRY(n) \ | ||
218 | (offsetof(struct kprobe_trace_entry, args) + \ | ||
219 | (sizeof(unsigned long) * (n))) | ||
220 | |||
221 | struct kretprobe_trace_entry { | ||
222 | struct trace_entry ent; | ||
223 | unsigned long func; | ||
224 | unsigned long ret_ip; | ||
225 | int nargs; | ||
226 | unsigned long args[]; | ||
227 | }; | ||
228 | |||
229 | #define SIZEOF_KRETPROBE_TRACE_ENTRY(n) \ | ||
230 | (offsetof(struct kretprobe_trace_entry, args) + \ | ||
231 | (sizeof(unsigned long) * (n))) | ||
232 | |||
233 | |||
208 | 234 | ||
209 | /* | 235 | /* |
210 | * trace_flag_type is an enumeration that holds different | 236 | * trace_flag_type is an enumeration that holds different |
@@ -317,6 +343,10 @@ extern void __ftrace_bad_type(void); | |||
317 | TRACE_KMEM_ALLOC); \ | 343 | TRACE_KMEM_ALLOC); \ |
318 | IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \ | 344 | IF_ASSIGN(var, ent, struct kmemtrace_free_entry, \ |
319 | TRACE_KMEM_FREE); \ | 345 | TRACE_KMEM_FREE); \ |
346 | IF_ASSIGN(var, ent, struct kprobe_trace_entry, \ | ||
347 | TRACE_KPROBE); \ | ||
348 | IF_ASSIGN(var, ent, struct kretprobe_trace_entry, \ | ||
349 | TRACE_KRETPROBE); \ | ||
320 | __ftrace_bad_type(); \ | 350 | __ftrace_bad_type(); \ |
321 | } while (0) | 351 | } while (0) |
322 | 352 | ||
diff --git a/kernel/trace/trace_event_types.h b/kernel/trace/trace_event_types.h
index e74f0906ab1a..186b598a1f11 100644
--- a/kernel/trace/trace_event_types.h
+++ b/kernel/trace/trace_event_types.h
@@ -175,4 +175,22 @@ TRACE_EVENT_FORMAT(kmem_free, TRACE_KMEM_FREE, kmemtrace_free_entry, ignore,
175 | TP_RAW_FMT("type:%u call_site:%lx ptr:%p") | 175 | TP_RAW_FMT("type:%u call_site:%lx ptr:%p") |
176 | ); | 176 | ); |
177 | 177 | ||
178 | TRACE_EVENT_FORMAT(kprobe, TRACE_KPROBE, kprobe_trace_entry, ignore, | ||
179 | TRACE_STRUCT( | ||
180 | TRACE_FIELD(unsigned long, ip, ip) | ||
181 | TRACE_FIELD(int, nargs, nargs) | ||
182 | TRACE_FIELD_ZERO(unsigned long, args) | ||
183 | ), | ||
184 | TP_RAW_FMT("%08lx: args:0x%lx ...") | ||
185 | ); | ||
186 | |||
187 | TRACE_EVENT_FORMAT(kretprobe, TRACE_KRETPROBE, kretprobe_trace_entry, ignore, | ||
188 | TRACE_STRUCT( | ||
189 | TRACE_FIELD(unsigned long, func, func) | ||
190 | TRACE_FIELD(unsigned long, ret_ip, ret_ip) | ||
191 | TRACE_FIELD(int, nargs, nargs) | ||
192 | TRACE_FIELD_ZERO(unsigned long, args) | ||
193 | ), | ||
194 | TP_RAW_FMT("%08lx <- %08lx: args:0x%lx ...") | ||
195 | ); | ||
178 | #undef TRACE_SYSTEM | 196 | #undef TRACE_SYSTEM |
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
new file mode 100644
index 000000000000..0c4f00aafb92
--- /dev/null
+++ b/kernel/trace/trace_kprobe.c
@@ -0,0 +1,1202 @@
1 | /* | ||
2 | * kprobe based kernel tracer | ||
3 | * | ||
4 | * Created by Masami Hiramatsu <mhiramat@redhat.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | * | ||
10 | * This program is distributed in the hope that it will be useful, | ||
11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | * GNU General Public License for more details. | ||
14 | * | ||
15 | * You should have received a copy of the GNU General Public License | ||
16 | * along with this program; if not, write to the Free Software | ||
17 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
18 | */ | ||
19 | |||
20 | #include <linux/module.h> | ||
21 | #include <linux/uaccess.h> | ||
22 | #include <linux/kprobes.h> | ||
23 | #include <linux/seq_file.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/smp.h> | ||
26 | #include <linux/debugfs.h> | ||
27 | #include <linux/types.h> | ||
28 | #include <linux/string.h> | ||
29 | #include <linux/ctype.h> | ||
30 | #include <linux/ptrace.h> | ||
31 | |||
32 | #include "trace.h" | ||
33 | #include "trace_output.h" | ||
34 | |||
35 | #define TRACE_KPROBE_ARGS 6 | ||
36 | #define MAX_ARGSTR_LEN 63 | ||
37 | |||
38 | /* currently, trace_kprobe only supports X86. */ | ||
39 | |||
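/*
 * Each probe argument is described by a fetch_func: calling func(regs, data)
 * returns the value to record (a register, stack slot, memory word, etc.).
 */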
40 | struct fetch_func { | ||
41 | unsigned long (*func)(struct pt_regs *, void *); | ||
42 | void *data; | ||
43 | }; | ||
44 | |||
45 | static __kprobes unsigned long call_fetch(struct fetch_func *f, | ||
46 | struct pt_regs *regs) | ||
47 | { | ||
48 | return f->func(regs, f->data); | ||
49 | } | ||
50 | |||
51 | /* fetch handlers */ | ||
52 | static __kprobes unsigned long fetch_register(struct pt_regs *regs, | ||
53 | void *offset) | ||
54 | { | ||
55 | return regs_get_register(regs, (unsigned int)((unsigned long)offset)); | ||
56 | } | ||
57 | |||
58 | static __kprobes unsigned long fetch_stack(struct pt_regs *regs, | ||
59 | void *num) | ||
60 | { | ||
61 | return regs_get_kernel_stack_nth(regs, | ||
62 | (unsigned int)((unsigned long)num)); | ||
63 | } | ||
64 | |||
65 | static __kprobes unsigned long fetch_memory(struct pt_regs *regs, void *addr) | ||
66 | { | ||
67 | unsigned long retval; | ||
68 | |||
69 | if (probe_kernel_address(addr, retval)) | ||
70 | return 0; | ||
71 | return retval; | ||
72 | } | ||
73 | |||
74 | static __kprobes unsigned long fetch_argument(struct pt_regs *regs, void *num) | ||
75 | { | ||
76 | return regs_get_argument_nth(regs, (unsigned int)((unsigned long)num)); | ||
77 | } | ||
78 | |||
79 | static __kprobes unsigned long fetch_retvalue(struct pt_regs *regs, | ||
80 | void *dummy) | ||
81 | { | ||
82 | return regs_return_value(regs); | ||
83 | } | ||
84 | |||
85 | static __kprobes unsigned long fetch_ip(struct pt_regs *regs, void *dummy) | ||
86 | { | ||
87 | return instruction_pointer(regs); | ||
88 | } | ||
89 | |||
90 | static __kprobes unsigned long fetch_stack_address(struct pt_regs *regs, | ||
91 | void *dummy) | ||
92 | { | ||
93 | return kernel_stack_pointer(regs); | ||
94 | } | ||
95 | |||
96 | /* Memory fetching by symbol */ | ||
97 | struct symbol_cache { | ||
98 | char *symbol; | ||
99 | long offset; | ||
100 | unsigned long addr; | ||
101 | }; | ||
102 | |||
103 | static unsigned long update_symbol_cache(struct symbol_cache *sc) | ||
104 | { | ||
105 | sc->addr = (unsigned long)kallsyms_lookup_name(sc->symbol); | ||
106 | if (sc->addr) | ||
107 | sc->addr += sc->offset; | ||
108 | return sc->addr; | ||
109 | } | ||
110 | |||
111 | static void free_symbol_cache(struct symbol_cache *sc) | ||
112 | { | ||
113 | kfree(sc->symbol); | ||
114 | kfree(sc); | ||
115 | } | ||
116 | |||
117 | static struct symbol_cache *alloc_symbol_cache(const char *sym, long offset) | ||
118 | { | ||
119 | struct symbol_cache *sc; | ||
120 | |||
121 | if (!sym || strlen(sym) == 0) | ||
122 | return NULL; | ||
123 | sc = kzalloc(sizeof(struct symbol_cache), GFP_KERNEL); | ||
124 | if (!sc) | ||
125 | return NULL; | ||
126 | |||
127 | sc->symbol = kstrdup(sym, GFP_KERNEL); | ||
128 | if (!sc->symbol) { | ||
129 | kfree(sc); | ||
130 | return NULL; | ||
131 | } | ||
132 | sc->offset = offset; | ||
133 | |||
134 | update_symbol_cache(sc); | ||
135 | return sc; | ||
136 | } | ||
137 | |||
138 | static __kprobes unsigned long fetch_symbol(struct pt_regs *regs, void *data) | ||
139 | { | ||
140 | struct symbol_cache *sc = data; | ||
141 | |||
142 | if (sc->addr) | ||
143 | return fetch_memory(regs, (void *)sc->addr); | ||
144 | else | ||
145 | return 0; | ||
146 | } | ||
147 | |||
148 | /* Special indirect memory access interface */ | ||
149 | struct indirect_fetch_data { | ||
150 | struct fetch_func orig; | ||
151 | long offset; | ||
152 | }; | ||
153 | |||
154 | static __kprobes unsigned long fetch_indirect(struct pt_regs *regs, void *data) | ||
155 | { | ||
156 | struct indirect_fetch_data *ind = data; | ||
157 | unsigned long addr; | ||
158 | |||
159 | addr = call_fetch(&ind->orig, regs); | ||
160 | if (addr) { | ||
161 | addr += ind->offset; | ||
162 | return fetch_memory(regs, (void *)addr); | ||
163 | } else | ||
164 | return 0; | ||
165 | } | ||
166 | |||
167 | static __kprobes void free_indirect_fetch_data(struct indirect_fetch_data *data) | ||
168 | { | ||
169 | if (data->orig.func == fetch_indirect) | ||
170 | free_indirect_fetch_data(data->orig.data); | ||
171 | else if (data->orig.func == fetch_symbol) | ||
172 | free_symbol_cache(data->orig.data); | ||
173 | kfree(data); | ||
174 | } | ||
175 | |||
176 | /* | ||
177 | * Kprobe event core: probe definition, registration and handlers | ||
178 | */ | ||
179 | |||
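/*
 * A trace_probe ties a kprobe or kretprobe to a dynamically defined
 * ftrace event and the fetch functions for its recorded arguments.
 */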
180 | struct trace_probe { | ||
181 | struct list_head list; | ||
182 | union { | ||
183 | struct kprobe kp; | ||
184 | struct kretprobe rp; | ||
185 | }; | ||
186 | const char *symbol; /* symbol name */ | ||
187 | unsigned int nr_args; | ||
188 | struct fetch_func args[TRACE_KPROBE_ARGS]; | ||
189 | struct ftrace_event_call call; | ||
190 | }; | ||
191 | |||
192 | static int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs); | ||
193 | static int kretprobe_trace_func(struct kretprobe_instance *ri, | ||
194 | struct pt_regs *regs); | ||
195 | |||
196 | static __kprobes int probe_is_return(struct trace_probe *tp) | ||
197 | { | ||
198 | return (tp->rp.handler == kretprobe_trace_func); | ||
199 | } | ||
200 | |||
201 | static __kprobes const char *probe_symbol(struct trace_probe *tp) | ||
202 | { | ||
203 | return tp->symbol ? tp->symbol : "unknown"; | ||
204 | } | ||
205 | |||
206 | static __kprobes long probe_offset(struct trace_probe *tp) | ||
207 | { | ||
208 | return (probe_is_return(tp)) ? tp->rp.kp.offset : tp->kp.offset; | ||
209 | } | ||
210 | |||
211 | static __kprobes void *probe_address(struct trace_probe *tp) | ||
212 | { | ||
213 | return (probe_is_return(tp)) ? tp->rp.kp.addr : tp->kp.addr; | ||
214 | } | ||
215 | |||
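/*
 * Decode a fetch_func back into its textual argument form (used when
 * listing probes and when printing event formats).
 */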
216 | static int trace_arg_string(char *buf, size_t n, struct fetch_func *ff) | ||
217 | { | ||
218 | int ret = -EINVAL; | ||
219 | |||
220 | if (ff->func == fetch_argument) | ||
221 | ret = snprintf(buf, n, "a%lu", (unsigned long)ff->data); | ||
222 | else if (ff->func == fetch_register) { | ||
223 | const char *name; | ||
224 | name = regs_query_register_name((unsigned int)((long)ff->data)); | ||
225 | ret = snprintf(buf, n, "%%%s", name); | ||
226 | } else if (ff->func == fetch_stack) | ||
227 | ret = snprintf(buf, n, "s%lu", (unsigned long)ff->data); | ||
228 | else if (ff->func == fetch_memory) | ||
229 | ret = snprintf(buf, n, "@0x%p", ff->data); | ||
230 | else if (ff->func == fetch_symbol) { | ||
231 | struct symbol_cache *sc = ff->data; | ||
232 | ret = snprintf(buf, n, "@%s%+ld", sc->symbol, sc->offset); | ||
233 | } else if (ff->func == fetch_retvalue) | ||
234 | ret = snprintf(buf, n, "rv"); | ||
235 | else if (ff->func == fetch_ip) | ||
236 | ret = snprintf(buf, n, "ra"); | ||
237 | else if (ff->func == fetch_stack_address) | ||
238 | ret = snprintf(buf, n, "sa"); | ||
239 | else if (ff->func == fetch_indirect) { | ||
240 | struct indirect_fetch_data *id = ff->data; | ||
241 | size_t l = 0; | ||
242 | ret = snprintf(buf, n, "%+ld(", id->offset); | ||
243 | if (ret >= n) | ||
244 | goto end; | ||
245 | l += ret; | ||
246 | ret = trace_arg_string(buf + l, n - l, &id->orig); | ||
247 | if (ret < 0) | ||
248 | goto end; | ||
249 | l += ret; | ||
250 | ret = snprintf(buf + l, n - l, ")"); | ||
251 | ret += l; | ||
252 | } | ||
253 | end: | ||
254 | if (ret >= n) | ||
255 | return -ENOSPC; | ||
256 | return ret; | ||
257 | } | ||
258 | |||
259 | static int register_probe_event(struct trace_probe *tp); | ||
260 | static void unregister_probe_event(struct trace_probe *tp); | ||
261 | |||
262 | static DEFINE_MUTEX(probe_lock); | ||
263 | static LIST_HEAD(probe_list); | ||
264 | |||
265 | static struct trace_probe *alloc_trace_probe(const char *symbol, | ||
266 | const char *event) | ||
267 | { | ||
268 | struct trace_probe *tp; | ||
269 | |||
270 | tp = kzalloc(sizeof(struct trace_probe), GFP_KERNEL); | ||
271 | if (!tp) | ||
272 | return ERR_PTR(-ENOMEM); | ||
273 | |||
274 | if (symbol) { | ||
275 | tp->symbol = kstrdup(symbol, GFP_KERNEL); | ||
276 | if (!tp->symbol) | ||
277 | goto error; | ||
278 | } | ||
279 | if (event) { | ||
280 | tp->call.name = kstrdup(event, GFP_KERNEL); | ||
281 | if (!tp->call.name) | ||
282 | goto error; | ||
283 | } | ||
284 | |||
285 | INIT_LIST_HEAD(&tp->list); | ||
286 | return tp; | ||
287 | error: | ||
288 | kfree(tp->symbol); | ||
289 | kfree(tp); | ||
290 | return ERR_PTR(-ENOMEM); | ||
291 | } | ||
292 | |||
293 | static void free_trace_probe(struct trace_probe *tp) | ||
294 | { | ||
295 | int i; | ||
296 | |||
297 | for (i = 0; i < tp->nr_args; i++) | ||
298 | if (tp->args[i].func == fetch_symbol) | ||
299 | free_symbol_cache(tp->args[i].data); | ||
300 | else if (tp->args[i].func == fetch_indirect) | ||
301 | free_indirect_fetch_data(tp->args[i].data); | ||
302 | |||
303 | kfree(tp->call.name); | ||
304 | kfree(tp->symbol); | ||
305 | kfree(tp); | ||
306 | } | ||
307 | |||
308 | static struct trace_probe *find_probe_event(const char *event) | ||
309 | { | ||
310 | struct trace_probe *tp; | ||
311 | |||
312 | list_for_each_entry(tp, &probe_list, list) | ||
313 | if (tp->call.name && !strcmp(tp->call.name, event)) | ||
314 | return tp; | ||
315 | return NULL; | ||
316 | } | ||
317 | |||
318 | static void __unregister_trace_probe(struct trace_probe *tp) | ||
319 | { | ||
320 | if (probe_is_return(tp)) | ||
321 | unregister_kretprobe(&tp->rp); | ||
322 | else | ||
323 | unregister_kprobe(&tp->kp); | ||
324 | } | ||
325 | |||
326 | /* Unregister a trace_probe and probe_event: must be called with probe_lock held */ | ||
327 | static void unregister_trace_probe(struct trace_probe *tp) | ||
328 | { | ||
329 | if (tp->call.name) | ||
330 | unregister_probe_event(tp); | ||
331 | __unregister_trace_probe(tp); | ||
332 | list_del(&tp->list); | ||
333 | } | ||
334 | |||
335 | /* Register a trace_probe and probe_event */ | ||
336 | static int register_trace_probe(struct trace_probe *tp) | ||
337 | { | ||
338 | struct trace_probe *old_tp; | ||
339 | int ret; | ||
340 | |||
341 | mutex_lock(&probe_lock); | ||
342 | |||
343 | if (probe_is_return(tp)) | ||
344 | ret = register_kretprobe(&tp->rp); | ||
345 | else | ||
346 | ret = register_kprobe(&tp->kp); | ||
347 | |||
348 | if (ret) { | ||
349 | pr_warning("Could not insert probe(%d)\n", ret); | ||
350 | if (ret == -EILSEQ) { | ||
351 | pr_warning("Probing address(0x%p) is not an " | ||
352 | "instruction boundary.\n", | ||
353 | probe_address(tp)); | ||
354 | ret = -EINVAL; | ||
355 | } | ||
356 | goto end; | ||
357 | } | ||
358 | /* register as an event */ | ||
359 | if (tp->call.name) { | ||
360 | old_tp = find_probe_event(tp->call.name); | ||
361 | if (old_tp) { | ||
362 | /* delete old event */ | ||
363 | unregister_trace_probe(old_tp); | ||
364 | free_trace_probe(old_tp); | ||
365 | } | ||
366 | ret = register_probe_event(tp); | ||
367 | if (ret) { | ||
368 | pr_warning("Faild to register probe event(%d)\n", ret); | ||
369 | __unregister_trace_probe(tp); | ||
370 | } | ||
371 | } | ||
372 | list_add_tail(&tp->list, &probe_list); | ||
373 | end: | ||
374 | mutex_unlock(&probe_lock); | ||
375 | return ret; | ||
376 | } | ||
377 | |||
378 | /* Split symbol and offset. */ | ||
379 | static int split_symbol_offset(char *symbol, long *offset) | ||
380 | { | ||
381 | char *tmp; | ||
382 | int ret; | ||
383 | |||
384 | if (!offset) | ||
385 | return -EINVAL; | ||
386 | |||
387 | tmp = strchr(symbol, '+'); | ||
388 | if (!tmp) | ||
389 | tmp = strchr(symbol, '-'); | ||
390 | |||
391 | if (tmp) { | ||
392 | /* skip sign because strict_strtol doesn't accept '+' */ | ||
393 | ret = strict_strtol(tmp + 1, 0, offset); | ||
394 | if (ret) | ||
395 | return ret; | ||
396 | if (*tmp == '-') | ||
397 | *offset = -(*offset); | ||
398 | *tmp = '\0'; | ||
399 | } else | ||
400 | *offset = 0; | ||
401 | return 0; | ||
402 | } | ||
403 | |||
404 | #define PARAM_MAX_ARGS 16 | ||
405 | #define PARAM_MAX_STACK (THREAD_SIZE / sizeof(unsigned long)) | ||
406 | |||
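/*
 * Parse one fetch-argument token into a fetch_func; the accepted syntax is
 * documented in create_trace_probe() below.
 */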
407 | static int parse_trace_arg(char *arg, struct fetch_func *ff, int is_return) | ||
408 | { | ||
409 | int ret = 0; | ||
410 | unsigned long param; | ||
411 | long offset; | ||
412 | char *tmp; | ||
413 | |||
414 | switch (arg[0]) { | ||
415 | case 'a': /* argument */ | ||
416 | ret = strict_strtoul(arg + 1, 10, ¶m); | ||
417 | if (ret || param > PARAM_MAX_ARGS) | ||
418 | ret = -EINVAL; | ||
419 | else { | ||
420 | ff->func = fetch_argument; | ||
421 | ff->data = (void *)param; | ||
422 | } | ||
423 | break; | ||
424 | case 'r': /* retval or retaddr */ | ||
425 | if (is_return && arg[1] == 'v') { | ||
426 | ff->func = fetch_retvalue; | ||
427 | ff->data = NULL; | ||
428 | } else if (is_return && arg[1] == 'a') { | ||
429 | ff->func = fetch_ip; | ||
430 | ff->data = NULL; | ||
431 | } else | ||
432 | ret = -EINVAL; | ||
433 | break; | ||
434 | case '%': /* named register */ | ||
435 | ret = regs_query_register_offset(arg + 1); | ||
436 | if (ret >= 0) { | ||
437 | ff->func = fetch_register; | ||
438 | ff->data = (void *)(unsigned long)ret; | ||
439 | ret = 0; | ||
440 | } | ||
441 | break; | ||
442 | case 's': /* stack */ | ||
443 | if (arg[1] == 'a') { | ||
444 | ff->func = fetch_stack_address; | ||
445 | ff->data = NULL; | ||
446 | } else { | ||
447 | ret = strict_strtoul(arg + 1, 10, ¶m); | ||
448 | if (ret || param > PARAM_MAX_STACK) | ||
449 | ret = -EINVAL; | ||
450 | else { | ||
451 | ff->func = fetch_stack; | ||
452 | ff->data = (void *)param; | ||
453 | } | ||
454 | } | ||
455 | break; | ||
456 | case '@': /* memory or symbol */ | ||
457 | if (isdigit(arg[1])) { | ||
458 | ret = strict_strtoul(arg + 1, 0, ¶m); | ||
459 | if (ret) | ||
460 | break; | ||
461 | ff->func = fetch_memory; | ||
462 | ff->data = (void *)param; | ||
463 | } else { | ||
464 | ret = split_symbol_offset(arg + 1, &offset); | ||
465 | if (ret) | ||
466 | break; | ||
467 | ff->data = alloc_symbol_cache(arg + 1, | ||
468 | offset); | ||
469 | if (ff->data) | ||
470 | ff->func = fetch_symbol; | ||
471 | else | ||
472 | ret = -EINVAL; | ||
473 | } | ||
474 | break; | ||
475 | case '+': /* indirect memory */ | ||
476 | case '-': | ||
477 | tmp = strchr(arg, '('); | ||
478 | if (!tmp) { | ||
479 | ret = -EINVAL; | ||
480 | break; | ||
481 | } | ||
482 | *tmp = '\0'; | ||
483 | ret = strict_strtol(arg + 1, 0, &offset); | ||
484 | if (ret) | ||
485 | break; | ||
486 | if (arg[0] == '-') | ||
487 | offset = -offset; | ||
488 | arg = tmp + 1; | ||
489 | tmp = strrchr(arg, ')'); | ||
490 | if (tmp) { | ||
491 | struct indirect_fetch_data *id; | ||
492 | *tmp = '\0'; | ||
493 | id = kzalloc(sizeof(struct indirect_fetch_data), | ||
494 | GFP_KERNEL); | ||
495 | if (!id) | ||
496 | return -ENOMEM; | ||
497 | id->offset = offset; | ||
498 | ret = parse_trace_arg(arg, &id->orig, is_return); | ||
499 | if (ret) | ||
500 | kfree(id); | ||
501 | else { | ||
502 | ff->func = fetch_indirect; | ||
503 | ff->data = (void *)id; | ||
504 | } | ||
505 | } else | ||
506 | ret = -EINVAL; | ||
507 | break; | ||
508 | default: | ||
509 | /* TODO: support custom handler */ | ||
510 | ret = -EINVAL; | ||
511 | } | ||
512 | return ret; | ||
513 | } | ||
514 | |||
515 | static int create_trace_probe(int argc, char **argv) | ||
516 | { | ||
517 | /* | ||
518 | * Argument syntax: | ||
519 | * - Add kprobe: p[:EVENT] SYMBOL[+OFFS|-OFFS]|ADDRESS [FETCHARGS] | ||
520 | * - Add kretprobe: r[:EVENT] SYMBOL[+0] [FETCHARGS] | ||
521 | * Fetch args: | ||
522 | * aN : fetch Nth of function argument. (N:0-) | ||
523 | * rv : fetch return value | ||
524 | * ra : fetch return address | ||
525 | * sa : fetch stack address | ||
526 | * sN : fetch Nth of stack (N:0-) | ||
527 | * @ADDR : fetch memory at ADDR (ADDR should be in kernel) | ||
528 | * @SYM[+|-offs] : fetch memory at SYM +|- offs (SYM is a data symbol) | ||
529 | * %REG : fetch register REG | ||
530 | * Indirect memory fetch: | ||
531 | * +|-offs(ARG) : fetch memory at ARG +|- offs address. | ||
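 *
 * Examples (event names below are only illustrative):
 *  p:myopen do_sys_open a0 a1 : probe do_sys_open() entry, record its 1st and 2nd args
 *  r:myret do_sys_open rv     : probe do_sys_open() return, record the return value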
532 | */ | ||
533 | struct trace_probe *tp; | ||
534 | struct kprobe *kp; | ||
535 | int i, ret = 0; | ||
536 | int is_return = 0; | ||
537 | char *symbol = NULL, *event = NULL; | ||
538 | long offset = 0; | ||
539 | void *addr = NULL; | ||
540 | |||
541 | if (argc < 2) | ||
542 | return -EINVAL; | ||
543 | |||
544 | if (argv[0][0] == 'p') | ||
545 | is_return = 0; | ||
546 | else if (argv[0][0] == 'r') | ||
547 | is_return = 1; | ||
548 | else | ||
549 | return -EINVAL; | ||
550 | |||
551 | if (argv[0][1] == ':') { | ||
552 | event = &argv[0][2]; | ||
553 | if (strlen(event) == 0) { | ||
554 | pr_info("Event name is not specifiled\n"); | ||
555 | return -EINVAL; | ||
556 | } | ||
557 | } | ||
558 | |||
559 | if (isdigit(argv[1][0])) { | ||
560 | if (is_return) | ||
561 | return -EINVAL; | ||
562 | /* an address specified */ | ||
563 | ret = strict_strtoul(argv[1], 0, (unsigned long *)&addr); | ||
564 | if (ret) | ||
565 | return ret; | ||
566 | } else { | ||
567 | /* a symbol specified */ | ||
568 | symbol = argv[1]; | ||
569 | /* TODO: support .init module functions */ | ||
570 | ret = split_symbol_offset(symbol, &offset); | ||
571 | if (ret) | ||
572 | return ret; | ||
573 | if (offset && is_return) | ||
574 | return -EINVAL; | ||
575 | } | ||
576 | |||
577 | /* setup a probe */ | ||
578 | tp = alloc_trace_probe(symbol, event); | ||
579 | if (IS_ERR(tp)) | ||
580 | return PTR_ERR(tp); | ||
581 | |||
582 | if (is_return) { | ||
583 | kp = &tp->rp.kp; | ||
584 | tp->rp.handler = kretprobe_trace_func; | ||
585 | } else { | ||
586 | kp = &tp->kp; | ||
587 | tp->kp.pre_handler = kprobe_trace_func; | ||
588 | } | ||
589 | |||
590 | if (tp->symbol) { | ||
591 | kp->symbol_name = tp->symbol; | ||
592 | kp->offset = offset; | ||
593 | } else | ||
594 | kp->addr = addr; | ||
595 | |||
596 | /* parse arguments */ | ||
597 | argc -= 2; argv += 2; ret = 0; | ||
598 | for (i = 0; i < argc && i < TRACE_KPROBE_ARGS; i++) { | ||
599 | if (strlen(argv[i]) > MAX_ARGSTR_LEN) { | ||
600 | pr_info("Argument%d(%s) is too long.\n", i, argv[i]); | ||
601 | ret = -ENOSPC; | ||
602 | goto error; | ||
603 | } | ||
604 | ret = parse_trace_arg(argv[i], &tp->args[i], is_return); | ||
605 | if (ret) | ||
606 | goto error; | ||
607 | } | ||
608 | tp->nr_args = i; | ||
609 | |||
610 | ret = register_trace_probe(tp); | ||
611 | if (ret) | ||
612 | goto error; | ||
613 | return 0; | ||
614 | |||
615 | error: | ||
616 | free_trace_probe(tp); | ||
617 | return ret; | ||
618 | } | ||
619 | |||
620 | static void cleanup_all_probes(void) | ||
621 | { | ||
622 | struct trace_probe *tp; | ||
623 | |||
624 | mutex_lock(&probe_lock); | ||
625 | /* TODO: Use batch unregistration */ | ||
626 | while (!list_empty(&probe_list)) { | ||
627 | tp = list_entry(probe_list.next, struct trace_probe, list); | ||
628 | unregister_trace_probe(tp); | ||
629 | free_trace_probe(tp); | ||
630 | } | ||
631 | mutex_unlock(&probe_lock); | ||
632 | } | ||
633 | |||
634 | |||
635 | /* Probes listing interfaces */ | ||
636 | static void *probes_seq_start(struct seq_file *m, loff_t *pos) | ||
637 | { | ||
638 | mutex_lock(&probe_lock); | ||
639 | return seq_list_start(&probe_list, *pos); | ||
640 | } | ||
641 | |||
642 | static void *probes_seq_next(struct seq_file *m, void *v, loff_t *pos) | ||
643 | { | ||
644 | return seq_list_next(v, &probe_list, pos); | ||
645 | } | ||
646 | |||
647 | static void probes_seq_stop(struct seq_file *m, void *v) | ||
648 | { | ||
649 | mutex_unlock(&probe_lock); | ||
650 | } | ||
651 | |||
652 | static int probes_seq_show(struct seq_file *m, void *v) | ||
653 | { | ||
654 | struct trace_probe *tp = v; | ||
655 | int i, ret; | ||
656 | char buf[MAX_ARGSTR_LEN + 1]; | ||
657 | |||
658 | seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p'); | ||
659 | if (tp->call.name) | ||
660 | seq_printf(m, ":%s", tp->call.name); | ||
661 | |||
662 | if (tp->symbol) | ||
663 | seq_printf(m, " %s%+ld", probe_symbol(tp), probe_offset(tp)); | ||
664 | else | ||
665 | seq_printf(m, " 0x%p", probe_address(tp)); | ||
666 | |||
667 | for (i = 0; i < tp->nr_args; i++) { | ||
668 | ret = trace_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i]); | ||
669 | if (ret < 0) { | ||
670 | pr_warning("Argument%d decoding error(%d).\n", i, ret); | ||
671 | return ret; | ||
672 | } | ||
673 | seq_printf(m, " %s", buf); | ||
674 | } | ||
675 | seq_printf(m, "\n"); | ||
676 | return 0; | ||
677 | } | ||
678 | |||
679 | static const struct seq_operations probes_seq_op = { | ||
680 | .start = probes_seq_start, | ||
681 | .next = probes_seq_next, | ||
682 | .stop = probes_seq_stop, | ||
683 | .show = probes_seq_show | ||
684 | }; | ||
685 | |||
686 | static int probes_open(struct inode *inode, struct file *file) | ||
687 | { | ||
688 | if ((file->f_mode & FMODE_WRITE) && | ||
689 | (file->f_flags & O_TRUNC)) | ||
690 | cleanup_all_probes(); | ||
691 | |||
692 | return seq_open(file, &probes_seq_op); | ||
693 | } | ||
694 | |||
695 | static int command_trace_probe(const char *buf) | ||
696 | { | ||
697 | char **argv; | ||
698 | int argc = 0, ret = 0; | ||
699 | |||
700 | argv = argv_split(GFP_KERNEL, buf, &argc); | ||
701 | if (!argv) | ||
702 | return -ENOMEM; | ||
703 | |||
704 | if (argc) | ||
705 | ret = create_trace_probe(argc, argv); | ||
706 | |||
707 | argv_free(argv); | ||
708 | return ret; | ||
709 | } | ||
710 | |||
711 | #define WRITE_BUFSIZE 128 | ||
712 | |||
713 | static ssize_t probes_write(struct file *file, const char __user *buffer, | ||
714 | size_t count, loff_t *ppos) | ||
715 | { | ||
716 | char *kbuf, *tmp; | ||
717 | int ret; | ||
718 | size_t done; | ||
719 | size_t size; | ||
720 | |||
721 | kbuf = kmalloc(WRITE_BUFSIZE, GFP_KERNEL); | ||
722 | if (!kbuf) | ||
723 | return -ENOMEM; | ||
724 | |||
725 | ret = done = 0; | ||
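/* Process the input line by line; each command must fit in WRITE_BUFSIZE. */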
726 | while (done < count) { | ||
727 | size = count - done; | ||
728 | if (size >= WRITE_BUFSIZE) | ||
729 | size = WRITE_BUFSIZE - 1; | ||
730 | if (copy_from_user(kbuf, buffer + done, size)) { | ||
731 | ret = -EFAULT; | ||
732 | goto out; | ||
733 | } | ||
734 | kbuf[size] = '\0'; | ||
735 | tmp = strchr(kbuf, '\n'); | ||
736 | if (tmp) { | ||
737 | *tmp = '\0'; | ||
738 | size = tmp - kbuf + 1; | ||
739 | } else if (done + size < count) { | ||
740 | pr_warning("Line length is too long: " | ||
741 | "Should be less than %d.", WRITE_BUFSIZE); | ||
742 | ret = -EINVAL; | ||
743 | goto out; | ||
744 | } | ||
745 | done += size; | ||
746 | /* Remove comments */ | ||
747 | tmp = strchr(kbuf, '#'); | ||
748 | if (tmp) | ||
749 | *tmp = '\0'; | ||
750 | |||
751 | ret = command_trace_probe(kbuf); | ||
752 | if (ret) | ||
753 | goto out; | ||
754 | } | ||
755 | ret = done; | ||
756 | out: | ||
757 | kfree(kbuf); | ||
758 | return ret; | ||
759 | } | ||
760 | |||
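/*
 * debugfs 'kprobe_events' file: writing probe definitions adds probes, e.g.
 *   echo 'p:myopen do_sys_open a0' > <debugfs>/tracing/kprobe_events
 * ('myopen' is only an illustrative event name); reading lists the current
 * probes, and opening for write with O_TRUNC clears them all.
 */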
761 | static const struct file_operations kprobe_events_ops = { | ||
762 | .owner = THIS_MODULE, | ||
763 | .open = probes_open, | ||
764 | .read = seq_read, | ||
765 | .llseek = seq_lseek, | ||
766 | .release = seq_release, | ||
767 | .write = probes_write, | ||
768 | }; | ||
769 | |||
770 | /* Kprobe handler */ | ||
771 | static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) | ||
772 | { | ||
773 | struct trace_probe *tp = container_of(kp, struct trace_probe, kp); | ||
774 | struct kprobe_trace_entry *entry; | ||
775 | struct ring_buffer_event *event; | ||
776 | int size, i, pc; | ||
777 | unsigned long irq_flags; | ||
778 | struct ftrace_event_call *call = &event_kprobe; | ||
779 | |||
780 | if (tp->call.name) | ||
781 | call = &tp->call; | ||
782 | |||
783 | local_save_flags(irq_flags); | ||
784 | pc = preempt_count(); | ||
785 | |||
786 | size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args); | ||
787 | |||
788 | event = trace_current_buffer_lock_reserve(TRACE_KPROBE, size, | ||
789 | irq_flags, pc); | ||
790 | if (!event) | ||
791 | return 0; | ||
792 | |||
793 | entry = ring_buffer_event_data(event); | ||
794 | entry->nargs = tp->nr_args; | ||
795 | entry->ip = (unsigned long)kp->addr; | ||
796 | for (i = 0; i < tp->nr_args; i++) | ||
797 | entry->args[i] = call_fetch(&tp->args[i], regs); | ||
798 | |||
799 | if (!filter_current_check_discard(call, entry, event)) | ||
800 | trace_nowake_buffer_unlock_commit(event, irq_flags, pc); | ||
801 | return 0; | ||
802 | } | ||
803 | |||
804 | /* Kretprobe handler */ | ||
805 | static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri, | ||
806 | struct pt_regs *regs) | ||
807 | { | ||
808 | struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp); | ||
809 | struct kretprobe_trace_entry *entry; | ||
810 | struct ring_buffer_event *event; | ||
811 | int size, i, pc; | ||
812 | unsigned long irq_flags; | ||
813 | struct ftrace_event_call *call = &event_kretprobe; | ||
814 | |||
815 | if (tp->call.name) | ||
816 | call = &tp->call; | ||
817 | |||
818 | local_save_flags(irq_flags); | ||
819 | pc = preempt_count(); | ||
820 | |||
821 | size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args); | ||
822 | |||
823 | event = trace_current_buffer_lock_reserve(TRACE_KRETPROBE, size, | ||
824 | irq_flags, pc); | ||
825 | if (!event) | ||
826 | return 0; | ||
827 | |||
828 | entry = ring_buffer_event_data(event); | ||
829 | entry->nargs = tp->nr_args; | ||
830 | entry->func = (unsigned long)probe_address(tp); | ||
831 | entry->ret_ip = (unsigned long)ri->ret_addr; | ||
832 | for (i = 0; i < tp->nr_args; i++) | ||
833 | entry->args[i] = call_fetch(&tp->args[i], regs); | ||
834 | |||
835 | if (!filter_current_check_discard(call, entry, event)) | ||
836 | trace_nowake_buffer_unlock_commit(event, irq_flags, pc); | ||
837 | |||
838 | return 0; | ||
839 | } | ||
840 | |||
841 | /* Event entry printers */ | ||
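/*
 * Formatted output is "SYMBOL+offs: 0xARG0 0xARG1 ..." for kprobe hits and
 * "RET_SYMBOL+offs <- FUNC: 0xARG0 ..." for kretprobe hits.
 */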
842 | enum print_line_t | ||
843 | print_kprobe_event(struct trace_iterator *iter, int flags) | ||
844 | { | ||
845 | struct kprobe_trace_entry *field; | ||
846 | struct trace_seq *s = &iter->seq; | ||
847 | int i; | ||
848 | |||
849 | trace_assign_type(field, iter->ent); | ||
850 | |||
851 | if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) | ||
852 | goto partial; | ||
853 | |||
854 | if (!trace_seq_puts(s, ":")) | ||
855 | goto partial; | ||
856 | |||
857 | for (i = 0; i < field->nargs; i++) | ||
858 | if (!trace_seq_printf(s, " 0x%lx", field->args[i])) | ||
859 | goto partial; | ||
860 | |||
861 | if (!trace_seq_puts(s, "\n")) | ||
862 | goto partial; | ||
863 | |||
864 | return TRACE_TYPE_HANDLED; | ||
865 | partial: | ||
866 | return TRACE_TYPE_PARTIAL_LINE; | ||
867 | } | ||
868 | |||
869 | enum print_line_t | ||
870 | print_kretprobe_event(struct trace_iterator *iter, int flags) | ||
871 | { | ||
872 | struct kretprobe_trace_entry *field; | ||
873 | struct trace_seq *s = &iter->seq; | ||
874 | int i; | ||
875 | |||
876 | trace_assign_type(field, iter->ent); | ||
877 | |||
878 | if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) | ||
879 | goto partial; | ||
880 | |||
881 | if (!trace_seq_puts(s, " <- ")) | ||
882 | goto partial; | ||
883 | |||
884 | if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET)) | ||
885 | goto partial; | ||
886 | |||
887 | if (!trace_seq_puts(s, ":")) | ||
888 | goto partial; | ||
889 | |||
890 | for (i = 0; i < field->nargs; i++) | ||
891 | if (!trace_seq_printf(s, " 0x%lx", field->args[i])) | ||
892 | goto partial; | ||
893 | |||
894 | if (!trace_seq_puts(s, "\n")) | ||
895 | goto partial; | ||
896 | |||
897 | return TRACE_TYPE_HANDLED; | ||
898 | partial: | ||
899 | return TRACE_TYPE_PARTIAL_LINE; | ||
900 | } | ||
901 | |||
902 | static struct trace_event kprobe_trace_event = { | ||
903 | .type = TRACE_KPROBE, | ||
904 | .trace = print_kprobe_event, | ||
905 | }; | ||
906 | |||
907 | static struct trace_event kretprobe_trace_event = { | ||
908 | .type = TRACE_KRETPROBE, | ||
909 | .trace = print_kretprobe_event, | ||
910 | }; | ||
911 | |||
912 | static int probe_event_enable(struct ftrace_event_call *call) | ||
913 | { | ||
914 | struct trace_probe *tp = (struct trace_probe *)call->data; | ||
915 | |||
916 | if (probe_is_return(tp)) | ||
917 | return enable_kretprobe(&tp->rp); | ||
918 | else | ||
919 | return enable_kprobe(&tp->kp); | ||
920 | } | ||
921 | |||
922 | static void probe_event_disable(struct ftrace_event_call *call) | ||
923 | { | ||
924 | struct trace_probe *tp = (struct trace_probe *)call->data; | ||
925 | |||
926 | if (probe_is_return(tp)) | ||
927 | disable_kretprobe(&tp->rp); | ||
928 | else | ||
929 | disable_kprobe(&tp->kp); | ||
930 | } | ||
931 | |||
932 | static int probe_event_raw_init(struct ftrace_event_call *event_call) | ||
933 | { | ||
934 | INIT_LIST_HEAD(&event_call->fields); | ||
935 | init_preds(event_call); | ||
936 | return 0; | ||
937 | } | ||
938 | |||
939 | #undef DEFINE_FIELD | ||
940 | #define DEFINE_FIELD(type, item, name, is_signed) \ | ||
941 | do { \ | ||
942 | ret = trace_define_field(event_call, #type, name, \ | ||
943 | offsetof(typeof(field), item), \ | ||
944 | sizeof(field.item), is_signed, \ | ||
945 | FILTER_OTHER); \ | ||
946 | if (ret) \ | ||
947 | return ret; \ | ||
948 | } while (0) | ||
949 | |||
950 | static int kprobe_event_define_fields(struct ftrace_event_call *event_call) | ||
951 | { | ||
952 | int ret, i; | ||
953 | struct kprobe_trace_entry field; | ||
954 | char buf[MAX_ARGSTR_LEN + 1]; | ||
955 | struct trace_probe *tp = (struct trace_probe *)event_call->data; | ||
956 | |||
957 | ret = trace_define_common_fields(event_call); | ||
958 | if (ret) | ||
959 | return ret; | ||
960 | |||
961 | DEFINE_FIELD(unsigned long, ip, "ip", 0); | ||
962 | DEFINE_FIELD(int, nargs, "nargs", 1); | ||
963 | for (i = 0; i < tp->nr_args; i++) { | ||
964 | /* Set argN as a field */ | ||
965 | sprintf(buf, "arg%d", i); | ||
966 | DEFINE_FIELD(unsigned long, args[i], buf, 0); | ||
967 | /* Set argument string as an alias field */ | ||
968 | ret = trace_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i]); | ||
969 | if (ret < 0) | ||
970 | return ret; | ||
971 | DEFINE_FIELD(unsigned long, args[i], buf, 0); | ||
972 | } | ||
973 | return 0; | ||
974 | } | ||
975 | |||
976 | static int kretprobe_event_define_fields(struct ftrace_event_call *event_call) | ||
977 | { | ||
978 | int ret, i; | ||
979 | struct kretprobe_trace_entry field; | ||
980 | char buf[MAX_ARGSTR_LEN + 1]; | ||
981 | struct trace_probe *tp = (struct trace_probe *)event_call->data; | ||
982 | |||
983 | ret = trace_define_common_fields(event_call); | ||
984 | if (ret) | ||
985 | return ret; | ||
986 | |||
987 | DEFINE_FIELD(unsigned long, func, "func", 0); | ||
988 | DEFINE_FIELD(unsigned long, ret_ip, "ret_ip", 0); | ||
989 | DEFINE_FIELD(int, nargs, "nargs", 1); | ||
990 | for (i = 0; i < tp->nr_args; i++) { | ||
991 | /* Set argN as a field */ | ||
992 | sprintf(buf, "arg%d", i); | ||
993 | DEFINE_FIELD(unsigned long, args[i], buf, 0); | ||
994 | /* Set argument string as an alias field */ | ||
995 | ret = trace_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i]); | ||
996 | if (ret < 0) | ||
997 | return ret; | ||
998 | DEFINE_FIELD(unsigned long, args[i], buf, 0); | ||
999 | } | ||
1000 | return 0; | ||
1001 | } | ||
1002 | |||
1003 | static int __probe_event_show_format(struct trace_seq *s, | ||
1004 | struct trace_probe *tp, const char *fmt, | ||
1005 | const char *arg) | ||
1006 | { | ||
1007 | int i, ret; | ||
1008 | char buf[MAX_ARGSTR_LEN + 1]; | ||
1009 | |||
1010 | /* Show aliases */ | ||
1011 | for (i = 0; i < tp->nr_args; i++) { | ||
1012 | ret = trace_arg_string(buf, MAX_ARGSTR_LEN, &tp->args[i]); | ||
1013 | if (ret < 0) | ||
1014 | return ret; | ||
1015 | if (!trace_seq_printf(s, "\talias: %s;\toriginal: arg%d;\n", | ||
1016 | buf, i)) | ||
1017 | return 0; | ||
1018 | } | ||
1019 | /* Show format */ | ||
1020 | if (!trace_seq_printf(s, "\nprint fmt: \"%s", fmt)) | ||
1021 | return 0; | ||
1022 | |||
1023 | for (i = 0; i < tp->nr_args; i++) | ||
1024 | if (!trace_seq_puts(s, " 0x%lx")) | ||
1025 | return 0; | ||
1026 | |||
1027 | if (!trace_seq_printf(s, "\", %s", arg)) | ||
1028 | return 0; | ||
1029 | |||
1030 | for (i = 0; i < tp->nr_args; i++) | ||
1031 | if (!trace_seq_printf(s, ", arg%d", i)) | ||
1032 | return 0; | ||
1033 | |||
1034 | return trace_seq_puts(s, "\n"); | ||
1035 | } | ||
1036 | |||
1037 | #undef SHOW_FIELD | ||
1038 | #define SHOW_FIELD(type, item, name) \ | ||
1039 | do { \ | ||
1040 | ret = trace_seq_printf(s, "\tfield: " #type " %s;\t" \ | ||
1041 | "offset:%u;tsize:%u;\n", name, \ | ||
1042 | (unsigned int)offsetof(typeof(field), item),\ | ||
1043 | (unsigned int)sizeof(type)); \ | ||
1044 | if (!ret) \ | ||
1045 | return 0; \ | ||
1046 | } while (0) | ||
1047 | |||
1048 | static int kprobe_event_show_format(struct ftrace_event_call *call, | ||
1049 | struct trace_seq *s) | ||
1050 | { | ||
1051 | struct kprobe_trace_entry field __attribute__((unused)); | ||
1052 | int ret, i; | ||
1053 | char buf[8]; | ||
1054 | struct trace_probe *tp = (struct trace_probe *)call->data; | ||
1055 | |||
1056 | SHOW_FIELD(unsigned long, ip, "ip"); | ||
1057 | SHOW_FIELD(int, nargs, "nargs"); | ||
1058 | |||
1059 | /* Show fields */ | ||
1060 | for (i = 0; i < tp->nr_args; i++) { | ||
1061 | sprintf(buf, "arg%d", i); | ||
1062 | SHOW_FIELD(unsigned long, args[i], buf); | ||
1063 | } | ||
1064 | trace_seq_puts(s, "\n"); | ||
1065 | |||
1066 | return __probe_event_show_format(s, tp, "%lx:", "ip"); | ||
1067 | } | ||
1068 | |||
1069 | static int kretprobe_event_show_format(struct ftrace_event_call *call, | ||
1070 | struct trace_seq *s) | ||
1071 | { | ||
1072 | struct kretprobe_trace_entry field __attribute__((unused)); | ||
1073 | int ret, i; | ||
1074 | char buf[8]; | ||
1075 | struct trace_probe *tp = (struct trace_probe *)call->data; | ||
1076 | |||
1077 | SHOW_FIELD(unsigned long, func, "func"); | ||
1078 | SHOW_FIELD(unsigned long, ret_ip, "ret_ip"); | ||
1079 | SHOW_FIELD(int, nargs, "nargs"); | ||
1080 | |||
1081 | /* Show fields */ | ||
1082 | for (i = 0; i < tp->nr_args; i++) { | ||
1083 | sprintf(buf, "arg%d", i); | ||
1084 | SHOW_FIELD(unsigned long, args[i], buf); | ||
1085 | } | ||
1086 | trace_seq_puts(s, "\n"); | ||
1087 | |||
1088 | return __probe_event_show_format(s, tp, "%lx <- %lx:", | ||
1089 | "func, ret_ip"); | ||
1090 | } | ||
1091 | |||
1092 | static int register_probe_event(struct trace_probe *tp) | ||
1093 | { | ||
1094 | struct ftrace_event_call *call = &tp->call; | ||
1095 | int ret; | ||
1096 | |||
1097 | /* Initialize ftrace_event_call */ | ||
1098 | call->system = "kprobes"; | ||
1099 | if (probe_is_return(tp)) { | ||
1100 | call->event = &kretprobe_trace_event; | ||
1101 | call->id = TRACE_KRETPROBE; | ||
1102 | call->raw_init = probe_event_raw_init; | ||
1103 | call->show_format = kretprobe_event_show_format; | ||
1104 | call->define_fields = kretprobe_event_define_fields; | ||
1105 | } else { | ||
1106 | call->event = &kprobe_trace_event; | ||
1107 | call->id = TRACE_KPROBE; | ||
1108 | call->raw_init = probe_event_raw_init; | ||
1109 | call->show_format = kprobe_event_show_format; | ||
1110 | call->define_fields = kprobe_event_define_fields; | ||
1111 | } | ||
1112 | call->enabled = 1; | ||
1113 | call->regfunc = probe_event_enable; | ||
1114 | call->unregfunc = probe_event_disable; | ||
1115 | call->data = tp; | ||
1116 | ret = trace_add_event_call(call); | ||
1117 | if (ret) | ||
1118 | pr_info("Failed to register kprobe event: %s\n", call->name); | ||
1119 | return ret; | ||
1120 | } | ||
1121 | |||
1122 | static void unregister_probe_event(struct trace_probe *tp) | ||
1123 | { | ||
1124 | /* | ||
1125 | * The underlying trace_event is shared among all probes, so clear it | ||
1126 | * here to keep trace_remove_event_call() from unregistering it. | ||
1127 | */ | ||
1128 | tp->call.event = NULL; | ||
1129 | trace_remove_event_call(&tp->call); | ||
1130 | } | ||
1131 | |||
1132 | /* Make a debugfs interface for controlling probe points */ | ||
1133 | static __init int init_kprobe_trace(void) | ||
1134 | { | ||
1135 | struct dentry *d_tracer; | ||
1136 | struct dentry *entry; | ||
1137 | int ret; | ||
1138 | |||
1139 | ret = register_ftrace_event(&kprobe_trace_event); | ||
1140 | if (!ret) { | ||
1141 | pr_warning("Could not register kprobe_trace_event type.\n"); | ||
1142 | return 0; | ||
1143 | } | ||
1144 | ret = register_ftrace_event(&kretprobe_trace_event); | ||
1145 | if (!ret) { | ||
1146 | pr_warning("Could not register kretprobe_trace_event type.\n"); | ||
1147 | return 0; | ||
1148 | } | ||
1149 | |||
1150 | d_tracer = tracing_init_dentry(); | ||
1151 | if (!d_tracer) | ||
1152 | return 0; | ||
1153 | |||
1154 | entry = debugfs_create_file("kprobe_events", 0644, d_tracer, | ||
1155 | NULL, &kprobe_events_ops); | ||
1156 | |||
1157 | if (!entry) | ||
1158 | pr_warning("Could not create debugfs " | ||
1159 | "'kprobe_events' entry\n"); | ||
1160 | return 0; | ||
1161 | } | ||
1162 | fs_initcall(init_kprobe_trace); | ||
1163 | |||
1164 | |||
1165 | #ifdef CONFIG_FTRACE_STARTUP_TEST | ||
1166 | |||
1167 | static int kprobe_trace_selftest_target(int a1, int a2, int a3, | ||
1168 | int a4, int a5, int a6) | ||
1169 | { | ||
1170 | return a1 + a2 + a3 + a4 + a5 + a6; | ||
1171 | } | ||
1172 | |||
1173 | static __init int kprobe_trace_self_tests_init(void) | ||
1174 | { | ||
1175 | int ret; | ||
1176 | int (*target)(int, int, int, int, int, int); | ||
1177 | |||
1178 | target = kprobe_trace_selftest_target; | ||
1179 | |||
1180 | pr_info("Testing kprobe tracing: "); | ||
1181 | |||
1182 | ret = command_trace_probe("p:testprobe kprobe_trace_selftest_target " | ||
1183 | "a1 a2 a3 a4 a5 a6"); | ||
1184 | if (WARN_ON_ONCE(ret)) | ||
1185 | pr_warning("error enabling function entry\n"); | ||
1186 | |||
1187 | ret = command_trace_probe("r:testprobe2 kprobe_trace_selftest_target " | ||
1188 | "ra rv"); | ||
1189 | if (WARN_ON_ONCE(ret)) | ||
1190 | pr_warning("error enabling function return\n"); | ||
1191 | |||
1192 | ret = target(1, 2, 3, 4, 5, 6); | ||
1193 | |||
1194 | cleanup_all_probes(); | ||
1195 | |||
1196 | pr_cont("OK\n"); | ||
1197 | return 0; | ||
1198 | } | ||
1199 | |||
1200 | late_initcall(kprobe_trace_self_tests_init); | ||
1201 | |||
1202 | #endif | ||