author     Masami Hiramatsu <mhiramat@kernel.org>          2019-06-19 11:07:20 -0400
committer  Steven Rostedt (VMware) <rostedt@goodmis.org>   2019-08-31 12:19:38 -0400
commit     60d53e2c3b75e79c83970fe73db79123d9462c7c (patch)
tree       5bc6054b21e2b2c39e43018b1c4bd1f9871f32b2 /kernel/trace
parent     17e262e9954bc3b6c857670f60b5b015f53509a7 (diff)
tracing/probe: Split trace_event related data from trace_probe
Split the trace_event related data from the trace_probe data structure
and introduce a trace_probe_event data structure as its holder.
A single trace_probe_event can hold multiple trace_probes.
Link: http://lkml.kernel.org/r/156095683995.28024.7552150340561557873.stgit@devnote2
Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
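For illustration only, here is a condensed, self-contained userspace sketch of the layout this change introduces; it is not kernel code, the real definitions are in the trace_probe.h hunk below, and names such as "myevent", "vfs_read"/"vfs_write" and the symbol member are made up for the example. The point it models is that the trace_event_call now lives in the shared trace_probe_event, each trace_probe links itself onto event->probes, and the "primary" probe is recovered from a call pointer with container_of() plus the first list entry, the same lookup trace_probe_primary_from_call() performs in the patch.

/*
 * Reading aid, not kernel code: a minimal userspace model of the split.
 * The shared trace_probe_event owns the trace_event_call and a list of
 * trace_probe instances; the primary probe is found from the call with
 * container_of() and the first list entry.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
        n->prev = h->prev;
        n->next = h;
        h->prev->next = n;
        h->prev = n;
}

struct trace_event_call { const char *name; };

struct trace_probe_event {              /* shared, one per ftrace event */
        struct trace_event_call call;
        struct list_head probes;        /* every trace_probe behind the event */
};

struct trace_probe {                    /* one per probe definition */
        struct list_head list;          /* linked into event->probes */
        struct trace_probe_event *event;
        const char *symbol;             /* illustrative only */
};

static struct trace_probe *
trace_probe_primary_from_call(struct trace_event_call *call)
{
        struct trace_probe_event *tpe =
                container_of(call, struct trace_probe_event, call);

        /* the primary probe is simply the first entry on the list */
        return container_of(tpe->probes.next, struct trace_probe, list);
}

int main(void)
{
        struct trace_probe_event ev = { .call = { .name = "myevent" } };
        struct trace_probe p1 = { .event = &ev, .symbol = "vfs_read" };
        struct trace_probe p2 = { .event = &ev, .symbol = "vfs_write" };

        list_init(&ev.probes);
        list_add_tail(&p1.list, &ev.probes);
        list_add_tail(&p2.list, &ev.probes);

        printf("%s -> %s\n", ev.call.name,
               trace_probe_primary_from_call(&ev.call)->symbol);
        return 0;
}

Built with a plain C compiler (for example "cc sketch.c && ./a.out") it prints "myevent -> vfs_read": the call pointer handed to the trace core is enough to get back to the event holder and from there to its first (primary) probe, and further probes can be attached to the same event just by linking onto the probes list.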
Diffstat (limited to 'kernel/trace')
-rw-r--r--   kernel/trace/trace_kprobe.c   157
-rw-r--r--   kernel/trace/trace_probe.c     54
-rw-r--r--   kernel/trace/trace_probe.h     48
-rw-r--r--   kernel/trace/trace_uprobe.c   165
4 files changed, 311 insertions, 113 deletions
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 9d483ad9bb6c..eac6344a2e7c 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -180,20 +180,33 @@ unsigned long trace_kprobe_address(struct trace_kprobe *tk)
         return addr;
 }
 
+static nokprobe_inline struct trace_kprobe *
+trace_kprobe_primary_from_call(struct trace_event_call *call)
+{
+        struct trace_probe *tp;
+
+        tp = trace_probe_primary_from_call(call);
+        if (WARN_ON_ONCE(!tp))
+                return NULL;
+
+        return container_of(tp, struct trace_kprobe, tp);
+}
+
 bool trace_kprobe_on_func_entry(struct trace_event_call *call)
 {
-        struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
+        struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
 
-        return kprobe_on_func_entry(tk->rp.kp.addr,
+        return tk ? kprobe_on_func_entry(tk->rp.kp.addr,
                         tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
-                        tk->rp.kp.addr ? 0 : tk->rp.kp.offset);
+                        tk->rp.kp.addr ? 0 : tk->rp.kp.offset) : false;
 }
 
 bool trace_kprobe_error_injectable(struct trace_event_call *call)
 {
-        struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
+        struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
 
-        return within_error_injection_list(trace_kprobe_address(tk));
+        return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
+                    false;
 }
 
 static int register_kprobe_event(struct trace_kprobe *tk);
@@ -291,32 +304,75 @@ static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
         return ret;
 }
 
+static void __disable_trace_kprobe(struct trace_probe *tp)
+{
+        struct trace_probe *pos;
+        struct trace_kprobe *tk;
+
+        list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+                tk = container_of(pos, struct trace_kprobe, tp);
+                if (!trace_kprobe_is_registered(tk))
+                        continue;
+                if (trace_kprobe_is_return(tk))
+                        disable_kretprobe(&tk->rp);
+                else
+                        disable_kprobe(&tk->rp.kp);
+        }
+}
+
 /*
  * Enable trace_probe
  * if the file is NULL, enable "perf" handler, or enable "trace" handler.
  */
-static int
-enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
+static int enable_trace_kprobe(struct trace_event_call *call,
+                               struct trace_event_file *file)
 {
-        bool enabled = trace_probe_is_enabled(&tk->tp);
+        struct trace_probe *pos, *tp;
+        struct trace_kprobe *tk;
+        bool enabled;
         int ret = 0;
 
+        tp = trace_probe_primary_from_call(call);
+        if (WARN_ON_ONCE(!tp))
+                return -ENODEV;
+        enabled = trace_probe_is_enabled(tp);
+
+        /* This also changes "enabled" state */
         if (file) {
-                ret = trace_probe_add_file(&tk->tp, file);
+                ret = trace_probe_add_file(tp, file);
                 if (ret)
                         return ret;
         } else
-                trace_probe_set_flag(&tk->tp, TP_FLAG_PROFILE);
+                trace_probe_set_flag(tp, TP_FLAG_PROFILE);
 
         if (enabled)
                 return 0;
 
-        ret = __enable_trace_kprobe(tk);
-        if (ret) {
+        enabled = false;
+        list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+                tk = container_of(pos, struct trace_kprobe, tp);
+                if (trace_kprobe_has_gone(tk))
+                        continue;
+                ret = __enable_trace_kprobe(tk);
+                if (ret) {
+                        if (enabled) {
+                                __disable_trace_kprobe(tp);
+                                enabled = false;
+                        }
+                        break;
+                }
+                enabled = true;
+        }
+
+        if (!enabled) {
+                /* No probe is enabled. Roll back */
                 if (file)
-                        trace_probe_remove_file(&tk->tp, file);
+                        trace_probe_remove_file(tp, file);
                 else
-                        trace_probe_clear_flag(&tk->tp, TP_FLAG_PROFILE);
+                        trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
+                if (!ret)
+                        /* Since all probes are gone, this is not available */
+                        ret = -EADDRNOTAVAIL;
         }
 
         return ret;
@@ -326,11 +382,14 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
  * Disable trace_probe
  * if the file is NULL, disable "perf" handler, or disable "trace" handler.
  */
-static int
-disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
+static int disable_trace_kprobe(struct trace_event_call *call,
+                                struct trace_event_file *file)
 {
-        struct trace_probe *tp = &tk->tp;
-        int ret = 0;
+        struct trace_probe *tp;
+
+        tp = trace_probe_primary_from_call(call);
+        if (WARN_ON_ONCE(!tp))
+                return -ENODEV;
 
         if (file) {
                 if (!trace_probe_get_file_link(tp, file))
@@ -341,12 +400,8 @@ disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
         } else
                 trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
 
-        if (!trace_probe_is_enabled(tp) && trace_kprobe_is_registered(tk)) {
-                if (trace_kprobe_is_return(tk))
-                        disable_kretprobe(&tk->rp);
-                else
-                        disable_kprobe(&tk->rp.kp);
-        }
+        if (!trace_probe_is_enabled(tp))
+                __disable_trace_kprobe(tp);
 
  out:
         if (file)
@@ -358,7 +413,7 @@ disable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
          */
         trace_probe_remove_file(tp, file);
 
-        return ret;
+        return 0;
 }
 
 #if defined(CONFIG_KPROBES_ON_FTRACE) && \
@@ -1089,7 +1144,10 @@ print_kprobe_event(struct trace_iterator *iter, int flags,
         struct trace_probe *tp;
 
         field = (struct kprobe_trace_entry_head *)iter->ent;
-        tp = container_of(event, struct trace_probe, call.event);
+        tp = trace_probe_primary_from_call(
+                container_of(event, struct trace_event_call, event));
+        if (WARN_ON_ONCE(!tp))
+                goto out;
 
         trace_seq_printf(s, "%s: (", trace_probe_name(tp));
 
@@ -1116,7 +1174,10 @@ print_kretprobe_event(struct trace_iterator *iter, int flags,
         struct trace_probe *tp;
 
         field = (struct kretprobe_trace_entry_head *)iter->ent;
-        tp = container_of(event, struct trace_probe, call.event);
+        tp = trace_probe_primary_from_call(
+                container_of(event, struct trace_event_call, event));
+        if (WARN_ON_ONCE(!tp))
+                goto out;
 
         trace_seq_printf(s, "%s: (", trace_probe_name(tp));
 
@@ -1145,23 +1206,31 @@ static int kprobe_event_define_fields(struct trace_event_call *event_call)
 {
         int ret;
         struct kprobe_trace_entry_head field;
-        struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
+        struct trace_probe *tp;
+
+        tp = trace_probe_primary_from_call(event_call);
+        if (WARN_ON_ONCE(!tp))
+                return -ENOENT;
 
         DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0);
 
-        return traceprobe_define_arg_fields(event_call, sizeof(field), &tk->tp);
+        return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
 }
 
 static int kretprobe_event_define_fields(struct trace_event_call *event_call)
 {
         int ret;
         struct kretprobe_trace_entry_head field;
-        struct trace_kprobe *tk = (struct trace_kprobe *)event_call->data;
+        struct trace_probe *tp;
+
+        tp = trace_probe_primary_from_call(event_call);
+        if (WARN_ON_ONCE(!tp))
+                return -ENOENT;
 
         DEFINE_FIELD(unsigned long, func, FIELD_STRING_FUNC, 0);
         DEFINE_FIELD(unsigned long, ret_ip, FIELD_STRING_RETIP, 0);
 
-        return traceprobe_define_arg_fields(event_call, sizeof(field), &tk->tp);
+        return traceprobe_define_arg_fields(event_call, sizeof(field), tp);
 }
 
 #ifdef CONFIG_PERF_EVENTS
@@ -1289,20 +1358,19 @@ int bpf_get_kprobe_info(const struct perf_event *event, u32 *fd_type,
 static int kprobe_register(struct trace_event_call *event,
                            enum trace_reg type, void *data)
 {
-        struct trace_kprobe *tk = (struct trace_kprobe *)event->data;
         struct trace_event_file *file = data;
 
         switch (type) {
         case TRACE_REG_REGISTER:
-                return enable_trace_kprobe(tk, file);
+                return enable_trace_kprobe(event, file);
         case TRACE_REG_UNREGISTER:
-                return disable_trace_kprobe(tk, file);
+                return disable_trace_kprobe(event, file);
 
 #ifdef CONFIG_PERF_EVENTS
         case TRACE_REG_PERF_REGISTER:
-                return enable_trace_kprobe(tk, NULL);
+                return enable_trace_kprobe(event, NULL);
         case TRACE_REG_PERF_UNREGISTER:
-                return disable_trace_kprobe(tk, NULL);
+                return disable_trace_kprobe(event, NULL);
         case TRACE_REG_PERF_OPEN:
         case TRACE_REG_PERF_CLOSE:
         case TRACE_REG_PERF_ADD:
@@ -1369,7 +1437,6 @@ static inline void init_trace_event_call(struct trace_kprobe *tk)
 
         call->flags = TRACE_EVENT_FL_KPROBE;
         call->class->reg = kprobe_register;
-        call->data = tk;
 }
 
 static int register_kprobe_event(struct trace_kprobe *tk)
@@ -1432,7 +1499,9 @@ void destroy_local_trace_kprobe(struct trace_event_call *event_call)
 {
         struct trace_kprobe *tk;
 
-        tk = container_of(event_call, struct trace_kprobe, tp.call);
+        tk = trace_kprobe_primary_from_call(event_call);
+        if (unlikely(!tk))
+                return;
 
         if (trace_probe_is_enabled(&tk->tp)) {
                 WARN_ON(1);
@@ -1577,7 +1646,8 @@ static __init int kprobe_trace_self_tests_init(void)
                         pr_warn("error on getting probe file.\n");
                         warn++;
                 } else
-                        enable_trace_kprobe(tk, file);
+                        enable_trace_kprobe(
+                                trace_probe_event_call(&tk->tp), file);
                 }
         }
 
@@ -1598,7 +1668,8 @@ static __init int kprobe_trace_self_tests_init(void)
                         pr_warn("error on getting probe file.\n");
                         warn++;
                 } else
-                        enable_trace_kprobe(tk, file);
+                        enable_trace_kprobe(
+                                trace_probe_event_call(&tk->tp), file);
                 }
         }
 
@@ -1631,7 +1702,8 @@ static __init int kprobe_trace_self_tests_init(void)
                         pr_warn("error on getting probe file.\n");
                         warn++;
                 } else
-                        disable_trace_kprobe(tk, file);
+                        disable_trace_kprobe(
+                                trace_probe_event_call(&tk->tp), file);
         }
 
         tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
@@ -1649,7 +1721,8 @@ static __init int kprobe_trace_self_tests_init(void)
                         pr_warn("error on getting probe file.\n");
                         warn++;
                 } else
-                        disable_trace_kprobe(tk, file);
+                        disable_trace_kprobe(
+                                trace_probe_event_call(&tk->tp), file);
         }
 
         ret = trace_run_command("-:testprobe", create_or_delete_trace_kprobe);
diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c
index fb6bfbc5bf86..28733bd6b607 100644
--- a/kernel/trace/trace_probe.c
+++ b/kernel/trace/trace_probe.c
@@ -889,41 +889,59 @@ int traceprobe_define_arg_fields(struct trace_event_call *event_call,
 
 void trace_probe_cleanup(struct trace_probe *tp)
 {
-        struct trace_event_call *call = trace_probe_event_call(tp);
         int i;
 
         for (i = 0; i < tp->nr_args; i++)
                 traceprobe_free_probe_arg(&tp->args[i]);
 
-        if (call->class)
-                kfree(call->class->system);
-        kfree(call->name);
-        kfree(call->print_fmt);
+        if (tp->event) {
+                struct trace_event_call *call = trace_probe_event_call(tp);
+
+                kfree(tp->event->class.system);
+                kfree(call->name);
+                kfree(call->print_fmt);
+                kfree(tp->event);
+                tp->event = NULL;
+        }
 }
 
 int trace_probe_init(struct trace_probe *tp, const char *event,
                      const char *group)
 {
-        struct trace_event_call *call = trace_probe_event_call(tp);
+        struct trace_event_call *call;
+        int ret = 0;
 
         if (!event || !group)
                 return -EINVAL;
 
-        call->class = &tp->class;
-        call->name = kstrdup(event, GFP_KERNEL);
-        if (!call->name)
+        tp->event = kzalloc(sizeof(struct trace_probe_event), GFP_KERNEL);
+        if (!tp->event)
                 return -ENOMEM;
 
-        tp->class.system = kstrdup(group, GFP_KERNEL);
-        if (!tp->class.system) {
-                kfree(call->name);
-                call->name = NULL;
-                return -ENOMEM;
+        call = trace_probe_event_call(tp);
+        call->class = &tp->event->class;
+        call->name = kstrdup(event, GFP_KERNEL);
+        if (!call->name) {
+                ret = -ENOMEM;
+                goto error;
+        }
+
+        tp->event->class.system = kstrdup(group, GFP_KERNEL);
+        if (!tp->event->class.system) {
+                ret = -ENOMEM;
+                goto error;
         }
-        INIT_LIST_HEAD(&tp->files);
-        INIT_LIST_HEAD(&tp->class.fields);
+        INIT_LIST_HEAD(&tp->event->files);
+        INIT_LIST_HEAD(&tp->event->class.fields);
+        INIT_LIST_HEAD(&tp->event->probes);
+        INIT_LIST_HEAD(&tp->list);
+        list_add(&tp->event->probes, &tp->list);
 
         return 0;
+
+error:
+        trace_probe_cleanup(tp);
+        return ret;
 }
 
 int trace_probe_register_event_call(struct trace_probe *tp)
@@ -952,7 +970,7 @@ int trace_probe_add_file(struct trace_probe *tp, struct trace_event_file *file)
 
         link->file = file;
         INIT_LIST_HEAD(&link->list);
-        list_add_tail_rcu(&link->list, &tp->files);
+        list_add_tail_rcu(&link->list, &tp->event->files);
         trace_probe_set_flag(tp, TP_FLAG_TRACE);
         return 0;
 }
@@ -983,7 +1001,7 @@ int trace_probe_remove_file(struct trace_probe *tp,
         synchronize_rcu();
         kfree(link);
 
-        if (list_empty(&tp->files))
+        if (list_empty(&tp->event->files))
                 trace_probe_clear_flag(tp, TP_FLAG_TRACE);
 
         return 0;
diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h
index d1714820efe1..0b84abb884c2 100644
--- a/kernel/trace/trace_probe.h
+++ b/kernel/trace/trace_probe.h
@@ -222,11 +222,18 @@ struct probe_arg {
         const struct fetch_type *type;  /* Type of this argument */
 };
 
-struct trace_probe {
+/* Event call and class holder */
+struct trace_probe_event {
         unsigned int                    flags;  /* For TP_FLAG_* */
         struct trace_event_class        class;
         struct trace_event_call         call;
         struct list_head                files;
+        struct list_head                probes;
+};
+
+struct trace_probe {
+        struct list_head                list;
+        struct trace_probe_event        *event;
         ssize_t                         size;   /* trace entry size */
         unsigned int                    nr_args;
         struct probe_arg                args[];
@@ -240,19 +247,19 @@ struct event_file_link {
 static inline bool trace_probe_test_flag(struct trace_probe *tp,
                                          unsigned int flag)
 {
-        return !!(tp->flags & flag);
+        return !!(tp->event->flags & flag);
 }
 
 static inline void trace_probe_set_flag(struct trace_probe *tp,
                                         unsigned int flag)
 {
-        tp->flags |= flag;
+        tp->event->flags |= flag;
 }
 
 static inline void trace_probe_clear_flag(struct trace_probe *tp,
                                           unsigned int flag)
 {
-        tp->flags &= ~flag;
+        tp->event->flags &= ~flag;
 }
 
 static inline bool trace_probe_is_enabled(struct trace_probe *tp)
@@ -262,29 +269,48 @@ static inline bool trace_probe_is_enabled(struct trace_probe *tp)
 
 static inline const char *trace_probe_name(struct trace_probe *tp)
 {
-        return trace_event_name(&tp->call);
+        return trace_event_name(&tp->event->call);
 }
 
 static inline const char *trace_probe_group_name(struct trace_probe *tp)
 {
-        return tp->call.class->system;
+        return tp->event->call.class->system;
 }
 
 static inline struct trace_event_call *
 trace_probe_event_call(struct trace_probe *tp)
 {
-        return &tp->call;
+        return &tp->event->call;
+}
+
+static inline struct trace_probe_event *
+trace_probe_event_from_call(struct trace_event_call *event_call)
+{
+        return container_of(event_call, struct trace_probe_event, call);
+}
+
+static inline struct trace_probe *
+trace_probe_primary_from_call(struct trace_event_call *call)
+{
+        struct trace_probe_event *tpe = trace_probe_event_from_call(call);
+
+        return list_first_entry(&tpe->probes, struct trace_probe, list);
+}
+
+static inline struct list_head *trace_probe_probe_list(struct trace_probe *tp)
+{
+        return &tp->event->probes;
 }
 
 static inline int trace_probe_unregister_event_call(struct trace_probe *tp)
 {
         /* tp->event is unregistered in trace_remove_event_call() */
-        return trace_remove_event_call(&tp->call);
+        return trace_remove_event_call(&tp->event->call);
 }
 
 static inline bool trace_probe_has_single_file(struct trace_probe *tp)
 {
-        return !!list_is_singular(&tp->files);
+        return !!list_is_singular(&tp->event->files);
 }
 
 int trace_probe_init(struct trace_probe *tp, const char *event,
@@ -298,9 +324,9 @@ struct event_file_link *trace_probe_get_file_link(struct trace_probe *tp,
                                           struct trace_event_file *file);
 
 #define trace_probe_for_each_link(pos, tp)      \
-        list_for_each_entry(pos, &(tp)->files, list)
+        list_for_each_entry(pos, &(tp)->event->files, list)
 #define trace_probe_for_each_link_rcu(pos, tp)  \
-        list_for_each_entry_rcu(pos, &(tp)->files, list)
+        list_for_each_entry_rcu(pos, &(tp)->event->files, list)
 
 /* Check the name is good for event/group/fields */
 static inline bool is_good_name(const char *name)
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 1ceedb9146b1..ac799abb7da9 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -293,6 +293,18 @@ static bool trace_uprobe_match(const char *system, const char *event,
                (!system || strcmp(trace_probe_group_name(&tu->tp), system) == 0);
 }
 
+static nokprobe_inline struct trace_uprobe *
+trace_uprobe_primary_from_call(struct trace_event_call *call)
+{
+        struct trace_probe *tp;
+
+        tp = trace_probe_primary_from_call(call);
+        if (WARN_ON_ONCE(!tp))
+                return NULL;
+
+        return container_of(tp, struct trace_uprobe, tp);
+}
+
 /*
  * Allocate new trace_uprobe and initialize it (including uprobes).
  */
@@ -897,7 +909,10 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e
         u8 *data;
 
         entry = (struct uprobe_trace_entry_head *)iter->ent;
-        tu = container_of(event, struct trace_uprobe, tp.call.event);
+        tu = trace_uprobe_primary_from_call(
+                container_of(event, struct trace_event_call, event));
+        if (unlikely(!tu))
+                goto out;
 
         if (is_ret_probe(tu)) {
                 trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
@@ -924,27 +939,71 @@ typedef bool (*filter_func_t)(struct uprobe_consumer *self,
                               enum uprobe_filter_ctx ctx,
                               struct mm_struct *mm);
 
-static int
-probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
-                   filter_func_t filter)
+static int trace_uprobe_enable(struct trace_uprobe *tu, filter_func_t filter)
 {
-        bool enabled = trace_probe_is_enabled(&tu->tp);
         int ret;
 
+        tu->consumer.filter = filter;
+        tu->inode = d_real_inode(tu->path.dentry);
+
+        if (tu->ref_ctr_offset)
+                ret = uprobe_register_refctr(tu->inode, tu->offset,
+                                tu->ref_ctr_offset, &tu->consumer);
+        else
+                ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
+
+        if (ret)
+                tu->inode = NULL;
+
+        return ret;
+}
+
+static void __probe_event_disable(struct trace_probe *tp)
+{
+        struct trace_probe *pos;
+        struct trace_uprobe *tu;
+
+        list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+                tu = container_of(pos, struct trace_uprobe, tp);
+                if (!tu->inode)
+                        continue;
+
+                WARN_ON(!uprobe_filter_is_empty(&tu->filter));
+
+                uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
+                tu->inode = NULL;
+        }
+}
+
+static int probe_event_enable(struct trace_event_call *call,
+                        struct trace_event_file *file, filter_func_t filter)
+{
+        struct trace_probe *pos, *tp;
+        struct trace_uprobe *tu;
+        bool enabled;
+        int ret;
+
+        tp = trace_probe_primary_from_call(call);
+        if (WARN_ON_ONCE(!tp))
+                return -ENODEV;
+        enabled = trace_probe_is_enabled(tp);
+
+        /* This may also change "enabled" state */
         if (file) {
-                if (trace_probe_test_flag(&tu->tp, TP_FLAG_PROFILE))
+                if (trace_probe_test_flag(tp, TP_FLAG_PROFILE))
                         return -EINTR;
 
-                ret = trace_probe_add_file(&tu->tp, file);
+                ret = trace_probe_add_file(tp, file);
                 if (ret < 0)
                         return ret;
         } else {
-                if (trace_probe_test_flag(&tu->tp, TP_FLAG_TRACE))
+                if (trace_probe_test_flag(tp, TP_FLAG_TRACE))
                         return -EINTR;
 
-                trace_probe_set_flag(&tu->tp, TP_FLAG_PROFILE);
+                trace_probe_set_flag(tp, TP_FLAG_PROFILE);
         }
 
+        tu = container_of(tp, struct trace_uprobe, tp);
         WARN_ON(!uprobe_filter_is_empty(&tu->filter));
 
         if (enabled)
@@ -954,18 +1013,15 @@ probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
         if (ret)
                 goto err_flags;
 
-        tu->consumer.filter = filter;
-        tu->inode = d_real_inode(tu->path.dentry);
-        if (tu->ref_ctr_offset) {
-                ret = uprobe_register_refctr(tu->inode, tu->offset,
-                                             tu->ref_ctr_offset, &tu->consumer);
-        } else {
-                ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
-        }
+        list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+                tu = container_of(pos, struct trace_uprobe, tp);
+                ret = trace_uprobe_enable(tu, filter);
+                if (ret) {
+                        __probe_event_disable(tp);
+                        goto err_buffer;
+                }
+        }
 
-        if (ret)
-                goto err_buffer;
-
         return 0;
 
  err_buffer:
@@ -973,33 +1029,35 @@ probe_event_enable(struct trace_uprobe *tu, struct trace_event_file *file,
 
  err_flags:
         if (file)
-                trace_probe_remove_file(&tu->tp, file);
+                trace_probe_remove_file(tp, file);
         else
-                trace_probe_clear_flag(&tu->tp, TP_FLAG_PROFILE);
+                trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
 
         return ret;
 }
 
-static void
-probe_event_disable(struct trace_uprobe *tu, struct trace_event_file *file)
+static void probe_event_disable(struct trace_event_call *call,
+                                struct trace_event_file *file)
 {
-        if (!trace_probe_is_enabled(&tu->tp))
+        struct trace_probe *tp;
+
+        tp = trace_probe_primary_from_call(call);
+        if (WARN_ON_ONCE(!tp))
+                return;
+
+        if (!trace_probe_is_enabled(tp))
                 return;
 
         if (file) {
-                if (trace_probe_remove_file(&tu->tp, file) < 0)
+                if (trace_probe_remove_file(tp, file) < 0)
                         return;
 
-                if (trace_probe_is_enabled(&tu->tp))
+                if (trace_probe_is_enabled(tp))
                         return;
         } else
-                trace_probe_clear_flag(&tu->tp, TP_FLAG_PROFILE);
-
-        WARN_ON(!uprobe_filter_is_empty(&tu->filter));
-
-        uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
-        tu->inode = NULL;
+                trace_probe_clear_flag(tp, TP_FLAG_PROFILE);
 
+        __probe_event_disable(tp);
         uprobe_buffer_disable();
 }
 
@@ -1007,7 +1065,11 @@ static int uprobe_event_define_fields(struct trace_event_call *event_call)
 {
         int ret, size;
         struct uprobe_trace_entry_head field;
-        struct trace_uprobe *tu = event_call->data;
+        struct trace_uprobe *tu;
+
+        tu = trace_uprobe_primary_from_call(event_call);
+        if (unlikely(!tu))
+                return -ENODEV;
 
         if (is_ret_probe(tu)) {
                 DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0);
@@ -1100,6 +1162,27 @@ static int uprobe_perf_open(struct trace_uprobe *tu, struct perf_event *event)
         return err;
 }
 
+static int uprobe_perf_multi_call(struct trace_event_call *call,
+                                  struct perf_event *event,
+                int (*op)(struct trace_uprobe *tu, struct perf_event *event))
+{
+        struct trace_probe *pos, *tp;
+        struct trace_uprobe *tu;
+        int ret = 0;
+
+        tp = trace_probe_primary_from_call(call);
+        if (WARN_ON_ONCE(!tp))
+                return -ENODEV;
+
+        list_for_each_entry(pos, trace_probe_probe_list(tp), list) {
+                tu = container_of(pos, struct trace_uprobe, tp);
+                ret = op(tu, event);
+                if (ret)
+                        break;
+        }
+
+        return ret;
+}
 static bool uprobe_perf_filter(struct uprobe_consumer *uc,
                                 enum uprobe_filter_ctx ctx, struct mm_struct *mm)
 {
@@ -1213,30 +1296,29 @@ static int
 trace_uprobe_register(struct trace_event_call *event, enum trace_reg type,
                       void *data)
 {
-        struct trace_uprobe *tu = event->data;
         struct trace_event_file *file = data;
 
         switch (type) {
         case TRACE_REG_REGISTER:
-                return probe_event_enable(tu, file, NULL);
+                return probe_event_enable(event, file, NULL);
 
         case TRACE_REG_UNREGISTER:
-                probe_event_disable(tu, file);
+                probe_event_disable(event, file);
                 return 0;
 
 #ifdef CONFIG_PERF_EVENTS
         case TRACE_REG_PERF_REGISTER:
-                return probe_event_enable(tu, NULL, uprobe_perf_filter);
+                return probe_event_enable(event, NULL, uprobe_perf_filter);
 
         case TRACE_REG_PERF_UNREGISTER:
-                probe_event_disable(tu, NULL);
+                probe_event_disable(event, NULL);
                 return 0;
 
         case TRACE_REG_PERF_OPEN:
-                return uprobe_perf_open(tu, data);
+                return uprobe_perf_multi_call(event, data, uprobe_perf_open);
 
         case TRACE_REG_PERF_CLOSE:
-                return uprobe_perf_close(tu, data);
+                return uprobe_perf_multi_call(event, data, uprobe_perf_close);
 
 #endif
         default:
@@ -1330,7 +1412,6 @@ static inline void init_trace_event_call(struct trace_uprobe *tu)
 
         call->flags = TRACE_EVENT_FL_UPROBE | TRACE_EVENT_FL_CAP_ANY;
         call->class->reg = trace_uprobe_register;
-        call->data = tu;
 }
 
 static int register_uprobe_event(struct trace_uprobe *tu)
@@ -1399,7 +1480,7 @@ void destroy_local_trace_uprobe(struct trace_event_call *event_call)
 {
         struct trace_uprobe *tu;
 
-        tu = container_of(event_call, struct trace_uprobe, tp.call);
+        tu = trace_uprobe_primary_from_call(event_call);
 
         free_trace_uprobe(tu);
 }