Diffstat (limited to 'kernel/trace/trace_events.c')
 -rw-r--r--  kernel/trace/trace_events.c | 283
 1 file changed, 171 insertions(+), 112 deletions(-)

diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index e75276a49cf5..d128f65778e6 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -17,16 +17,20 @@
 #include <linux/ctype.h>
 #include <linux/delay.h>
 
+#include <asm/setup.h>
+
 #include "trace_output.h"
 
+#undef TRACE_SYSTEM
 #define TRACE_SYSTEM "TRACE_SYSTEM"
 
 DEFINE_MUTEX(event_mutex);
 
 LIST_HEAD(ftrace_events);
 
-int trace_define_field(struct ftrace_event_call *call, char *type,
-                       char *name, int offset, int size, int is_signed)
+int trace_define_field(struct ftrace_event_call *call, const char *type,
+                       const char *name, int offset, int size, int is_signed,
+                       int filter_type)
 {
         struct ftrace_event_field *field;
 
@@ -42,9 +46,15 @@ int trace_define_field(struct ftrace_event_call *call, const char *type,
         if (!field->type)
                 goto err;
 
+        if (filter_type == FILTER_OTHER)
+                field->filter_type = filter_assign_type(type);
+        else
+                field->filter_type = filter_type;
+
         field->offset = offset;
         field->size = size;
         field->is_signed = is_signed;
+
         list_add(&field->link, &call->fields);
 
         return 0;
@@ -60,6 +70,29 @@ err:
 }
 EXPORT_SYMBOL_GPL(trace_define_field);
 
+#define __common_field(type, item)                                     \
+        ret = trace_define_field(call, #type, "common_" #item,         \
+                                 offsetof(typeof(ent), item),          \
+                                 sizeof(ent.item),                     \
+                                 is_signed_type(type), FILTER_OTHER);  \
+        if (ret)                                                       \
+                return ret;
+
+int trace_define_common_fields(struct ftrace_event_call *call)
+{
+        int ret;
+        struct trace_entry ent;
+
+        __common_field(unsigned short, type);
+        __common_field(unsigned char, flags);
+        __common_field(unsigned char, preempt_count);
+        __common_field(int, pid);
+        __common_field(int, lock_depth);
+
+        return ret;
+}
+EXPORT_SYMBOL_GPL(trace_define_common_fields);
+
 #ifdef CONFIG_MODULES
 
 static void trace_destroy_fields(struct ftrace_event_call *call)
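Note: trace_define_field() now records a filter type per field, and the new trace_define_common_fields() registers the header fields shared by every event. A hedged sketch of how an event's define_fields callback might use the widened API (the event and its entry struct are hypothetical; FILTER_OTHER asks the core to infer the filter type from the type string via filter_assign_type()):

        /* Sketch only, not part of this patch. */
        static int my_event_define_fields(struct ftrace_event_call *call)
        {
                int ret;

                /* common header fields (type, flags, pid, ...) first */
                ret = trace_define_common_fields(call);
                if (ret)
                        return ret;

                /* then the event's own payload field */
                return trace_define_field(call, "unsigned long", "addr",
                                          offsetof(struct my_event_entry, addr),
                                          sizeof(unsigned long),
                                          0 /* unsigned */, FILTER_OTHER);
        }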
@@ -84,14 +117,14 @@ static void ftrace_event_enable_disable(struct ftrace_event_call *call,
                 if (call->enabled) {
                         call->enabled = 0;
                         tracing_stop_cmdline_record();
-                        call->unregfunc();
+                        call->unregfunc(call->data);
                 }
                 break;
         case 1:
                 if (!call->enabled) {
                         call->enabled = 1;
                         tracing_start_cmdline_record();
-                        call->regfunc();
+                        call->regfunc(call->data);
                 }
                 break;
         }
@@ -198,73 +231,38 @@ static ssize_t
 ftrace_event_write(struct file *file, const char __user *ubuf,
                    size_t cnt, loff_t *ppos)
 {
-        size_t read = 0;
-        int i, set = 1;
-        ssize_t ret;
-        char *buf;
-        char ch;
+        struct trace_parser parser;
+        ssize_t read, ret;
 
-        if (!cnt || cnt < 0)
+        if (!cnt)
                 return 0;
 
         ret = tracing_update_buffers();
         if (ret < 0)
                 return ret;
 
-        ret = get_user(ch, ubuf++);
-        if (ret)
-                return ret;
-        read++;
-        cnt--;
-
-        /* skip white space */
-        while (cnt && isspace(ch)) {
-                ret = get_user(ch, ubuf++);
-                if (ret)
-                        return ret;
-                read++;
-                cnt--;
-        }
-
-        /* Only white space found? */
-        if (isspace(ch)) {
-                file->f_pos += read;
-                ret = read;
-                return ret;
-        }
-
-        buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
-        if (!buf)
+        if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
                 return -ENOMEM;
 
-        if (cnt > EVENT_BUF_SIZE)
-                cnt = EVENT_BUF_SIZE;
+        read = trace_get_user(&parser, ubuf, cnt, ppos);
 
-        i = 0;
-        while (cnt && !isspace(ch)) {
-                if (!i && ch == '!')
+        if (read >= 0 && trace_parser_loaded((&parser))) {
+                int set = 1;
+
+                if (*parser.buffer == '!')
                         set = 0;
-                else
-                        buf[i++] = ch;
 
-                ret = get_user(ch, ubuf++);
+                parser.buffer[parser.idx] = 0;
+
+                ret = ftrace_set_clr_event(parser.buffer + !set, set);
                 if (ret)
-                        goto out_free;
-                read++;
-                cnt--;
+                        goto out_put;
         }
-        buf[i] = 0;
-
-        file->f_pos += read;
-
-        ret = ftrace_set_clr_event(buf, set);
-        if (ret)
-                goto out_free;
 
         ret = read;
 
- out_free:
-        kfree(buf);
+ out_put:
+        trace_parser_put(&parser);
 
         return ret;
 }
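The rewrite above replaces the hand-rolled get_user() loop with the trace_parser helpers. The general pattern they factor out, as a sketch (my_write and my_handle_token are hypothetical stand-ins):

        /* Sketch of the trace_parser usage pattern, not from this patch. */
        static ssize_t my_write(struct file *file, const char __user *ubuf,
                                size_t cnt, loff_t *ppos)
        {
                struct trace_parser parser;
                ssize_t read, ret = 0;

                /* bounded kernel buffer for one whitespace-delimited token */
                if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
                        return -ENOMEM;

                /* copies from user space, skipping leading whitespace */
                read = trace_get_user(&parser, ubuf, cnt, ppos);

                if (read >= 0 && trace_parser_loaded(&parser)) {
                        parser.buffer[parser.idx] = 0;  /* NUL-terminate */
                        ret = my_handle_token(parser.buffer);
                }

                trace_parser_put(&parser);
                return ret ? ret : read;
        }

One subtlety visible in the new code: a leading '!' means "clear", so the token handed to ftrace_set_clr_event() starts at parser.buffer + !set, which skips the '!' exactly when set == 0.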
@@ -272,42 +270,32 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
-        struct list_head *list = m->private;
-        struct ftrace_event_call *call;
+        struct ftrace_event_call *call = v;
 
         (*pos)++;
 
-        for (;;) {
-                if (list == &ftrace_events)
-                        return NULL;
-
-                call = list_entry(list, struct ftrace_event_call, list);
-
+        list_for_each_entry_continue(call, &ftrace_events, list) {
                 /*
                  * The ftrace subsystem is for showing formats only.
                  * They can not be enabled or disabled via the event files.
                  */
                 if (call->regfunc)
-                        break;
-
-                list = list->next;
+                        return call;
         }
 
-        m->private = list->next;
-
-        return call;
+        return NULL;
 }
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
-        struct ftrace_event_call *call = NULL;
+        struct ftrace_event_call *call;
         loff_t l;
 
         mutex_lock(&event_mutex);
 
-        m->private = ftrace_events.next;
+        call = list_entry(&ftrace_events, struct ftrace_event_call, list);
         for (l = 0; l <= *pos; ) {
-                call = t_next(m, NULL, &l);
+                call = t_next(m, call, &l);
                 if (!call)
                         break;
         }
@@ -317,37 +305,28 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 static void *
 s_next(struct seq_file *m, void *v, loff_t *pos)
 {
-        struct list_head *list = m->private;
-        struct ftrace_event_call *call;
+        struct ftrace_event_call *call = v;
 
         (*pos)++;
 
- retry:
-        if (list == &ftrace_events)
-                return NULL;
-
-        call = list_entry(list, struct ftrace_event_call, list);
-
-        if (!call->enabled) {
-                list = list->next;
-                goto retry;
+        list_for_each_entry_continue(call, &ftrace_events, list) {
+                if (call->enabled)
+                        return call;
         }
 
-        m->private = list->next;
-
-        return call;
+        return NULL;
 }
 
 static void *s_start(struct seq_file *m, loff_t *pos)
 {
-        struct ftrace_event_call *call = NULL;
+        struct ftrace_event_call *call;
         loff_t l;
 
         mutex_lock(&event_mutex);
 
-        m->private = ftrace_events.next;
+        call = list_entry(&ftrace_events, struct ftrace_event_call, list);
         for (l = 0; l <= *pos; ) {
-                call = s_next(m, NULL, &l);
+                call = s_next(m, call, &l);
                 if (!call)
                         break;
         }
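Both iterator pairs now lean on the same trick: t_start()/s_start() fabricate a cursor from the list head itself, so list_for_each_entry_continue() begins at the first real entry and ->start can simply replay ->next. A minimal sketch of the idiom (the helper is hypothetical):

        /* Sketch only: find the first enabled event, the way
         * s_start()/s_next() walk the list above. list_entry() on the
         * head yields a fake entry whose ->list.next is the first node,
         * which is exactly where _continue resumes.
         */
        static struct ftrace_event_call *first_enabled_event(void)
        {
                struct ftrace_event_call *call;

                call = list_entry(&ftrace_events, struct ftrace_event_call, list);
                list_for_each_entry_continue(call, &ftrace_events, list) {
                        if (call->enabled)
                                return call;
                }
                return NULL;
        }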
@@ -546,7 +525,7 @@ static int trace_write_header(struct trace_seq *s)
                        FIELD(unsigned char, flags),
                        FIELD(unsigned char, preempt_count),
                        FIELD(int, pid),
-                       FIELD(int, tgid));
+                       FIELD(int, lock_depth));
 }
 
 static ssize_t
@@ -574,7 +553,7 @@ event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
         trace_seq_printf(s, "format:\n");
         trace_write_header(s);
 
-        r = call->show_format(s);
+        r = call->show_format(call, s);
         if (!r) {
                 /*
                  * ug! The format output is bigger than a PAGE!!
@@ -849,8 +828,10 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
 
         /* First see if we did not already create this dir */
         list_for_each_entry(system, &event_subsystems, list) {
-                if (strcmp(system->name, name) == 0)
+                if (strcmp(system->name, name) == 0) {
+                        system->nr_events++;
                         return system->entry;
+                }
         }
 
         /* need to create new entry */
@@ -869,6 +850,7 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
                 return d_events;
         }
 
+        system->nr_events = 1;
         system->name = kstrdup(name, GFP_KERNEL);
         if (!system->name) {
                 debugfs_remove(system->entry);
@@ -920,15 +902,6 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
         if (strcmp(call->system, TRACE_SYSTEM) != 0)
                 d_events = event_subsystem_dir(call->system, d_events);
 
-        if (call->raw_init) {
-                ret = call->raw_init();
-                if (ret < 0) {
-                        pr_warning("Could not initialize trace point"
-                                   " events/%s\n", call->name);
-                        return ret;
-                }
-        }
-
         call->dir = debugfs_create_dir(call->name, d_events);
         if (!call->dir) {
                 pr_warning("Could not create debugfs "
@@ -945,7 +918,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
                             id);
 
         if (call->define_fields) {
-                ret = call->define_fields();
+                ret = call->define_fields(call);
                 if (ret < 0) {
                         pr_warning("Could not initialize trace point"
                                    " events/%s\n", call->name);
@@ -987,6 +960,32 @@ struct ftrace_module_file_ops {
         struct file_operations filter;
 };
 
+static void remove_subsystem_dir(const char *name)
+{
+        struct event_subsystem *system;
+
+        if (strcmp(name, TRACE_SYSTEM) == 0)
+                return;
+
+        list_for_each_entry(system, &event_subsystems, list) {
+                if (strcmp(system->name, name) == 0) {
+                        if (!--system->nr_events) {
+                                struct event_filter *filter = system->filter;
+
+                                debugfs_remove_recursive(system->entry);
+                                list_del(&system->list);
+                                if (filter) {
+                                        kfree(filter->filter_string);
+                                        kfree(filter);
+                                }
+                                kfree(system->name);
+                                kfree(system);
+                        }
+                        break;
+                }
+        }
+}
+
 static struct ftrace_module_file_ops *
 trace_create_file_ops(struct module *mod)
 {
@@ -1027,6 +1026,7 @@ static void trace_module_add_events(struct module *mod)
         struct ftrace_module_file_ops *file_ops = NULL;
         struct ftrace_event_call *call, *start, *end;
         struct dentry *d_events;
+        int ret;
 
         start = mod->trace_events;
         end = mod->trace_events + mod->num_trace_events;
@@ -1042,7 +1042,15 @@ static void trace_module_add_events(struct module *mod)
                 /* The linker may leave blanks */
                 if (!call->name)
                         continue;
-
+                if (call->raw_init) {
+                        ret = call->raw_init();
+                        if (ret < 0) {
+                                if (ret != -ENOSYS)
+                                        pr_warning("Could not initialize trace "
+                                                   "point events/%s\n", call->name);
+                                continue;
+                        }
+                }
                 /*
                  * This module has events, create file ops for this module
                  * if not already done.
@@ -1077,6 +1085,7 @@ static void trace_module_remove_events(struct module *mod)
                         list_del(&call->list);
                         trace_destroy_fields(call);
                         destroy_preds(call);
+                        remove_subsystem_dir(call->system);
                 }
         }
 
@@ -1125,7 +1134,7 @@ static int trace_module_notify(struct notifier_block *self,
 }
 #endif /* CONFIG_MODULES */
 
-struct notifier_block trace_module_nb = {
+static struct notifier_block trace_module_nb = {
         .notifier_call = trace_module_notify,
         .priority = 0,
 };
@@ -1133,6 +1142,18 @@ static struct notifier_block trace_module_nb = {
 extern struct ftrace_event_call __start_ftrace_events[];
 extern struct ftrace_event_call __stop_ftrace_events[];
 
+static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
+
+static __init int setup_trace_event(char *str)
+{
+        strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
+        ring_buffer_expanded = 1;
+        tracing_selftest_disabled = 1;
+
+        return 1;
+}
+__setup("trace_event=", setup_trace_event);
+
 static __init int event_trace_init(void)
 {
         struct ftrace_event_call *call;
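The new trace_event= parameter lets events be enabled from the kernel command line, before user space is up. It also marks the ring buffer for full-size allocation up front (ring_buffer_expanded) and disables the startup self-tests, presumably so they do not disturb the just-enabled events. Usage is a comma-separated list, e.g. (event names illustrative):

        trace_event=sched_switch,irq_handler_entry

Each token is later handed to ftrace_set_clr_event() by the strsep() loop added to event_trace_init() below.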
@@ -1140,6 +1161,8 @@ static __init int event_trace_init(void)
         struct dentry *entry;
         struct dentry *d_events;
         int ret;
+        char *buf = bootup_event_buf;
+        char *token;
 
         d_tracer = tracing_init_dentry();
         if (!d_tracer)
@@ -1179,12 +1202,34 @@ static __init int event_trace_init(void)
                 /* The linker may leave blanks */
                 if (!call->name)
                         continue;
+                if (call->raw_init) {
+                        ret = call->raw_init();
+                        if (ret < 0) {
+                                if (ret != -ENOSYS)
+                                        pr_warning("Could not initialize trace "
+                                                   "point events/%s\n", call->name);
+                                continue;
+                        }
+                }
                 list_add(&call->list, &ftrace_events);
                 event_create_dir(call, d_events, &ftrace_event_id_fops,
                                  &ftrace_enable_fops, &ftrace_event_filter_fops,
                                  &ftrace_event_format_fops);
         }
 
+        while (true) {
+                token = strsep(&buf, ",");
+
+                if (!token)
+                        break;
+                if (!*token)
+                        continue;
+
+                ret = ftrace_set_clr_event(token, 1);
+                if (ret)
+                        pr_warning("Failed to enable trace event: %s\n", token);
+        }
+
         ret = register_module_notifier(&trace_module_nb);
         if (ret)
                 pr_warning("Failed to register trace events module notifier\n");
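The boot-time enabling loop is the standard strsep() idiom: strsep() returns empty strings for consecutive separators rather than skipping them, hence the !*token check. A standalone user-space analogue of the same parse (glibc/BSD strsep()):

        #include <stdio.h>
        #include <string.h>

        int main(void)
        {
                char buf[] = "sched_switch,,irq_handler_entry";
                char *p = buf, *token;

                while ((token = strsep(&p, ",")) != NULL) {
                        if (!*token)
                                continue;       /* ",," yields an empty token */
                        printf("enable: %s\n", token);
                }
                return 0;
        }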
@@ -1261,6 +1306,18 @@ static __init void event_trace_self_tests(void)
                 if (!call->regfunc)
                         continue;
 
+                /*
+                 * Testing syscall events here is pretty useless, but
+                 * we still do it if configured. But this is time consuming.
+                 * What we really need is a user thread to perform the
+                 * syscalls as we test.
+                 */
+#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
+                if (call->system &&
+                    strcmp(call->system, "syscalls") == 0)
+                        continue;
+#endif
+
                 pr_info("Testing event %s: ", call->name);
 
                 /*
@@ -1334,12 +1391,13 @@ static __init void event_trace_self_tests(void)
 
 #ifdef CONFIG_FUNCTION_TRACER
 
-static DEFINE_PER_CPU(atomic_t, test_event_disable);
+static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
 
 static void
 function_test_events_call(unsigned long ip, unsigned long parent_ip)
 {
         struct ring_buffer_event *event;
+        struct ring_buffer *buffer;
         struct ftrace_entry *entry;
         unsigned long flags;
         long disabled;
@@ -1350,14 +1408,15 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
         pc = preempt_count();
         resched = ftrace_preempt_disable();
         cpu = raw_smp_processor_id();
-        disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));
+        disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
 
         if (disabled != 1)
                 goto out;
 
         local_save_flags(flags);
 
-        event = trace_current_buffer_lock_reserve(TRACE_FN, sizeof(*entry),
+        event = trace_current_buffer_lock_reserve(&buffer,
+                                                  TRACE_FN, sizeof(*entry),
                                                   flags, pc);
         if (!event)
                 goto out;
@@ -1365,10 +1424,10 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
         entry->ip = ip;
         entry->parent_ip = parent_ip;
 
-        trace_nowake_buffer_unlock_commit(event, flags, pc);
+        trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
 
  out:
-        atomic_dec(&per_cpu(test_event_disable, cpu));
+        atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
         ftrace_preempt_enable(resched);
 }
 
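With the ring buffer now passed around explicitly, the reserve/commit pair must name the same buffer; the event alone no longer identifies it. In outline, against the signatures visible in this diff (a sketch, not the patch itself):

        static void emit_fn_entry(unsigned long ip, unsigned long parent_ip,
                                  unsigned long flags, int pc)
        {
                struct ring_buffer *buffer;
                struct ring_buffer_event *event;
                struct ftrace_entry *entry;

                /* the reserve reports which buffer it used via 'buffer' */
                event = trace_current_buffer_lock_reserve(&buffer, TRACE_FN,
                                                          sizeof(*entry),
                                                          flags, pc);
                if (!event)
                        return;

                entry = ring_buffer_event_data(event);
                entry->ip = ip;
                entry->parent_ip = parent_ip;

                /* the commit must hand back that same buffer */
                trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
        }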
@@ -1392,10 +1451,10 @@ static __init void event_trace_self_test_with_function(void)
 
 static __init int event_trace_self_tests_init(void)
 {
-
-        event_trace_self_tests();
-
-        event_trace_self_test_with_function();
+        if (!tracing_selftest_disabled) {
+                event_trace_self_tests();
+                event_trace_self_test_with_function();
+        }
 
         return 0;
 }
