Diffstat (limited to 'kernel/trace/trace_events.c')
-rw-r--r--	kernel/trace/trace_events.c	139
1 file changed, 88 insertions(+), 51 deletions(-)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index c697c7043349..53cffc0b0801 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -29,11 +29,23 @@ DEFINE_MUTEX(event_mutex);
 
 LIST_HEAD(ftrace_events);
 
+struct list_head *
+trace_get_fields(struct ftrace_event_call *event_call)
+{
+	if (!event_call->class->get_fields)
+		return &event_call->class->fields;
+	return event_call->class->get_fields(event_call);
+}
+
 int trace_define_field(struct ftrace_event_call *call, const char *type,
 		       const char *name, int offset, int size, int is_signed,
 		       int filter_type)
 {
 	struct ftrace_event_field *field;
+	struct list_head *head;
+
+	if (WARN_ON(!call->class))
+		return 0;
 
 	field = kzalloc(sizeof(*field), GFP_KERNEL);
 	if (!field)
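A minimal caller-side sketch of the new helper, assuming only what these hunks show (class->get_fields() is optional, class->fields is shared otherwise; field->type and field->name are strings, since trace_destroy_fields() frees them). The helper name dump_event_fields is hypothetical and not part of this patch:

/* Sketch only: walk an event's field list via trace_get_fields(). */
static void dump_event_fields(struct ftrace_event_call *call)
{
	struct list_head *head = trace_get_fields(call);
	struct ftrace_event_field *field;

	list_for_each_entry(field, head, link)
		pr_debug("%s: field %s %s\n", call->name, field->type, field->name);
}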
@@ -56,7 +68,8 @@ int trace_define_field(struct ftrace_event_call *call, const char *type,
 	field->size = size;
 	field->is_signed = is_signed;
 
-	list_add(&field->link, &call->fields);
+	head = trace_get_fields(call);
+	list_add(&field->link, head);
 
 	return 0;
 
@@ -94,8 +107,10 @@ static int trace_define_common_fields(struct ftrace_event_call *call)
 void trace_destroy_fields(struct ftrace_event_call *call)
 {
 	struct ftrace_event_field *field, *next;
+	struct list_head *head;
 
-	list_for_each_entry_safe(field, next, &call->fields, link) {
+	head = trace_get_fields(call);
+	list_for_each_entry_safe(field, next, head, link) {
 		list_del(&field->link);
 		kfree(field->type);
 		kfree(field->name);
@@ -107,11 +122,9 @@ int trace_event_raw_init(struct ftrace_event_call *call)
 {
 	int id;
 
-	id = register_ftrace_event(call->event);
+	id = register_ftrace_event(&call->event);
 	if (!id)
 		return -ENODEV;
-	call->id = id;
-	INIT_LIST_HEAD(&call->fields);
 
 	return 0;
 }
@@ -124,23 +137,33 @@ static int ftrace_event_enable_disable(struct ftrace_event_call *call,
 
 	switch (enable) {
 	case 0:
-		if (call->enabled) {
-			call->enabled = 0;
+		if (call->flags & TRACE_EVENT_FL_ENABLED) {
+			call->flags &= ~TRACE_EVENT_FL_ENABLED;
 			tracing_stop_cmdline_record();
-			call->unregfunc(call);
+			if (call->class->reg)
+				call->class->reg(call, TRACE_REG_UNREGISTER);
+			else
+				tracepoint_probe_unregister(call->name,
+							    call->class->probe,
+							    call);
 		}
 		break;
 	case 1:
-		if (!call->enabled) {
+		if (!(call->flags & TRACE_EVENT_FL_ENABLED)) {
 			tracing_start_cmdline_record();
-			ret = call->regfunc(call);
+			if (call->class->reg)
+				ret = call->class->reg(call, TRACE_REG_REGISTER);
+			else
+				ret = tracepoint_probe_register(call->name,
+								call->class->probe,
+								call);
 			if (ret) {
 				tracing_stop_cmdline_record();
 				pr_info("event trace: Could not enable event "
 					"%s\n", call->name);
 				break;
 			}
-			call->enabled = 1;
+			call->flags |= TRACE_EVENT_FL_ENABLED;
 		}
 		break;
 	}
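These hunks lean on the ftrace_event_class structure introduced earlier in this series. Below is a hedged sketch assembled only from the members this file dereferences (system, probe, perf_probe, reg, raw_init, define_fields, get_fields, fields); the probe pointer types and the enum trace_reg parameter are assumptions inferred from the call sites, and the authoritative definition lives in include/linux/ftrace_event.h:

/* Sketch of the class members used by this file; not the canonical definition. */
struct ftrace_event_class {
	char		*system;		/* subsystem name (strcmp'd above) */
	void		*probe;			/* default tracepoint probe */
#ifdef CONFIG_PERF_EVENTS
	void		*perf_probe;		/* perf probe, if any */
#endif
	int		(*reg)(struct ftrace_event_call *event,
			       enum trace_reg type);	/* overrides probe (un)registration */
	int		(*raw_init)(struct ftrace_event_call *event);
	int		(*define_fields)(struct ftrace_event_call *event);
	struct list_head *(*get_fields)(struct ftrace_event_call *event);
	struct list_head fields;		/* field list shared by events of this class */
};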
@@ -171,15 +194,16 @@ static int __ftrace_set_clr_event(const char *match, const char *sub,
 	mutex_lock(&event_mutex);
 	list_for_each_entry(call, &ftrace_events, list) {
 
-		if (!call->name || !call->regfunc)
+		if (!call->name || !call->class ||
+		    (!call->class->probe && !call->class->reg))
 			continue;
 
 		if (match &&
 		    strcmp(match, call->name) != 0 &&
-		    strcmp(match, call->system) != 0)
+		    strcmp(match, call->class->system) != 0)
 			continue;
 
-		if (sub && strcmp(sub, call->system) != 0)
+		if (sub && strcmp(sub, call->class->system) != 0)
 			continue;
 
 		if (event && strcmp(event, call->name) != 0)
@@ -297,7 +321,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 		 * The ftrace subsystem is for showing formats only.
 		 * They can not be enabled or disabled via the event files.
 		 */
-		if (call->regfunc)
+		if (call->class && (call->class->probe || call->class->reg))
 			return call;
 	}
 
@@ -328,7 +352,7 @@ s_next(struct seq_file *m, void *v, loff_t *pos)
 	(*pos)++;
 
 	list_for_each_entry_continue(call, &ftrace_events, list) {
-		if (call->enabled)
+		if (call->flags & TRACE_EVENT_FL_ENABLED)
 			return call;
 	}
 
@@ -355,8 +379,8 @@ static int t_show(struct seq_file *m, void *v)
 {
 	struct ftrace_event_call *call = v;
 
-	if (strcmp(call->system, TRACE_SYSTEM) != 0)
-		seq_printf(m, "%s:", call->system);
+	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
+		seq_printf(m, "%s:", call->class->system);
 	seq_printf(m, "%s\n", call->name);
 
 	return 0;
@@ -387,7 +411,7 @@ event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
 	struct ftrace_event_call *call = filp->private_data;
 	char *buf;
 
-	if (call->enabled)
+	if (call->flags & TRACE_EVENT_FL_ENABLED)
 		buf = "1\n";
 	else
 		buf = "0\n";
@@ -450,10 +474,11 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
 
 	mutex_lock(&event_mutex);
 	list_for_each_entry(call, &ftrace_events, list) {
-		if (!call->name || !call->regfunc)
+		if (!call->name || !call->class ||
+		    (!call->class->probe && !call->class->reg))
 			continue;
 
-		if (system && strcmp(call->system, system) != 0)
+		if (system && strcmp(call->class->system, system) != 0)
 			continue;
 
 		/*
@@ -461,7 +486,7 @@ system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
 		 * or if all events or cleared, or if we have
 		 * a mixture.
 		 */
-		set |= (1 << !!call->enabled);
+		set |= (1 << !!(call->flags & TRACE_EVENT_FL_ENABLED));
 
 		/*
 		 * If we have a mixture, no need to look further.
@@ -525,6 +550,7 @@ event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
 {
 	struct ftrace_event_call *call = filp->private_data;
 	struct ftrace_event_field *field;
+	struct list_head *head;
 	struct trace_seq *s;
 	int common_field_count = 5;
 	char *buf;
@@ -540,10 +566,11 @@ event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
 	trace_seq_init(s);
 
 	trace_seq_printf(s, "name: %s\n", call->name);
-	trace_seq_printf(s, "ID: %d\n", call->id);
+	trace_seq_printf(s, "ID: %d\n", call->event.type);
 	trace_seq_printf(s, "format:\n");
 
-	list_for_each_entry_reverse(field, &call->fields, link) {
+	head = trace_get_fields(call);
+	list_for_each_entry_reverse(field, head, link) {
 		/*
 		 * Smartly shows the array type(except dynamic array).
 		 * Normal:
@@ -613,7 +640,7 @@ event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 		return -ENOMEM;
 
 	trace_seq_init(s);
-	trace_seq_printf(s, "%d\n", call->id);
+	trace_seq_printf(s, "%d\n", call->event.type);
 
 	r = simple_read_from_buffer(ubuf, cnt, ppos,
 				    s->buffer, s->len);
@@ -919,14 +946,15 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
 		 const struct file_operations *filter,
 		 const struct file_operations *format)
 {
+	struct list_head *head;
 	int ret;
 
 	/*
 	 * If the trace point header did not define TRACE_SYSTEM
 	 * then the system would be called "TRACE_SYSTEM".
 	 */
-	if (strcmp(call->system, TRACE_SYSTEM) != 0)
-		d_events = event_subsystem_dir(call->system, d_events);
+	if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
+		d_events = event_subsystem_dir(call->class->system, d_events);
 
 	call->dir = debugfs_create_dir(call->name, d_events);
 	if (!call->dir) {
@@ -935,22 +963,31 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
 		return -1;
 	}
 
-	if (call->regfunc)
+	if (call->class->probe || call->class->reg)
 		trace_create_file("enable", 0644, call->dir, call,
 				  enable);
 
-	if (call->id && call->perf_event_enable)
+#ifdef CONFIG_PERF_EVENTS
+	if (call->event.type && (call->class->perf_probe || call->class->reg))
 		trace_create_file("id", 0444, call->dir, call,
 				  id);
+#endif
 
-	if (call->define_fields) {
-		ret = trace_define_common_fields(call);
-		if (!ret)
-			ret = call->define_fields(call);
-		if (ret < 0) {
-			pr_warning("Could not initialize trace point"
-				   " events/%s\n", call->name);
-			return ret;
+	if (call->class->define_fields) {
+		/*
+		 * Other events may have the same class. Only update
+		 * the fields if they are not already defined.
+		 */
+		head = trace_get_fields(call);
+		if (list_empty(head)) {
+			ret = trace_define_common_fields(call);
+			if (!ret)
+				ret = call->class->define_fields(call);
+			if (ret < 0) {
+				pr_warning("Could not initialize trace point"
+					   " events/%s\n", call->name);
+				return ret;
+			}
 		}
 		trace_create_file("filter", 0644, call->dir, call,
 				  filter);
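The list_empty() check above is what lets several events share one class, and therefore one field list: only the first event to reach event_create_dir() populates class->fields, and later events with the same class skip redefinition. A small illustration with hypothetical event names, not code from this patch:

/*
 * Illustration only: two hypothetical events sharing a class.  The first
 * event_create_dir() fills class->fields via trace_define_common_fields()
 * and class->define_fields(); the second finds the list from
 * trace_get_fields() non-empty and skips redefinition.
 */
static struct ftrace_event_class sched_event_class;

static struct ftrace_event_call sched_wakeup_call = {
	.name	= "sched_wakeup",
	.class	= &sched_event_class,
};

static struct ftrace_event_call sched_switch_call = {
	.name	= "sched_switch",
	.class	= &sched_event_class,
};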
@@ -970,8 +1007,8 @@ static int __trace_add_event_call(struct ftrace_event_call *call)
 	if (!call->name)
 		return -EINVAL;
 
-	if (call->raw_init) {
-		ret = call->raw_init(call);
+	if (call->class->raw_init) {
+		ret = call->class->raw_init(call);
 		if (ret < 0) {
 			if (ret != -ENOSYS)
 				pr_warning("Could not initialize trace "
@@ -1035,13 +1072,13 @@ static void remove_subsystem_dir(const char *name)
 static void __trace_remove_event_call(struct ftrace_event_call *call)
 {
 	ftrace_event_enable_disable(call, 0);
-	if (call->event)
-		__unregister_ftrace_event(call->event);
+	if (call->event.funcs)
+		__unregister_ftrace_event(&call->event);
 	debugfs_remove_recursive(call->dir);
 	list_del(&call->list);
 	trace_destroy_fields(call);
 	destroy_preds(call);
-	remove_subsystem_dir(call->system);
+	remove_subsystem_dir(call->class->system);
 }
 
 /* Remove an event_call */
@@ -1132,8 +1169,8 @@ static void trace_module_add_events(struct module *mod)
 		/* The linker may leave blanks */
 		if (!call->name)
 			continue;
-		if (call->raw_init) {
-			ret = call->raw_init(call);
+		if (call->class->raw_init) {
+			ret = call->class->raw_init(call);
 			if (ret < 0) {
 				if (ret != -ENOSYS)
 					pr_warning("Could not initialize trace "
@@ -1286,8 +1323,8 @@ static __init int event_trace_init(void)
 		/* The linker may leave blanks */
 		if (!call->name)
 			continue;
-		if (call->raw_init) {
-			ret = call->raw_init(call);
+		if (call->class->raw_init) {
+			ret = call->class->raw_init(call);
 			if (ret < 0) {
 				if (ret != -ENOSYS)
 					pr_warning("Could not initialize trace "
@@ -1388,8 +1425,8 @@ static __init void event_trace_self_tests(void)
 
 	list_for_each_entry(call, &ftrace_events, list) {
 
-		/* Only test those that have a regfunc */
-		if (!call->regfunc)
+		/* Only test those that have a probe */
+		if (!call->class || !call->class->probe)
 			continue;
 
 		/*
@@ -1399,8 +1436,8 @@ static __init void event_trace_self_tests(void)
 		 * syscalls as we test.
 		 */
 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
-		if (call->system &&
-		    strcmp(call->system, "syscalls") == 0)
+		if (call->class->system &&
+		    strcmp(call->class->system, "syscalls") == 0)
 			continue;
 #endif
 
@@ -1410,7 +1447,7 @@ static __init void event_trace_self_tests(void)
 		 * If an event is already enabled, someone is using
 		 * it and the self test should not be on.
 		 */
-		if (call->enabled) {
+		if (call->flags & TRACE_EVENT_FL_ENABLED) {
 			pr_warning("Enabled event during self test!\n");
 			WARN_ON_ONCE(1);
 			continue;