Diffstat (limited to 'kernel/trace/trace_events.c')
-rw-r--r--	kernel/trace/trace_events.c | 184
1 file changed, 71 insertions(+), 113 deletions(-)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 78b1ed230177..7c18d154ea28 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -21,6 +21,7 @@
 
 #include "trace_output.h"
 
+#undef TRACE_SYSTEM
 #define TRACE_SYSTEM "TRACE_SYSTEM"
 
 DEFINE_MUTEX(event_mutex);
@@ -86,7 +87,7 @@ int trace_define_common_fields(struct ftrace_event_call *call)
 	__common_field(unsigned char, flags);
 	__common_field(unsigned char, preempt_count);
 	__common_field(int, pid);
-	__common_field(int, tgid);
+	__common_field(int, lock_depth);
 
 	return ret;
 }
@@ -230,73 +231,38 @@ static ssize_t
 ftrace_event_write(struct file *file, const char __user *ubuf,
 		   size_t cnt, loff_t *ppos)
 {
-	size_t read = 0;
-	int i, set = 1;
-	ssize_t ret;
-	char *buf;
-	char ch;
+	struct trace_parser parser;
+	ssize_t read, ret;
 
-	if (!cnt || cnt < 0)
+	if (!cnt)
 		return 0;
 
 	ret = tracing_update_buffers();
 	if (ret < 0)
 		return ret;
 
-	ret = get_user(ch, ubuf++);
-	if (ret)
-		return ret;
-	read++;
-	cnt--;
-
-	/* skip white space */
-	while (cnt && isspace(ch)) {
-		ret = get_user(ch, ubuf++);
-		if (ret)
-			return ret;
-		read++;
-		cnt--;
-	}
-
-	/* Only white space found? */
-	if (isspace(ch)) {
-		file->f_pos += read;
-		ret = read;
-		return ret;
-	}
-
-	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
-	if (!buf)
+	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
 		return -ENOMEM;
 
-	if (cnt > EVENT_BUF_SIZE)
-		cnt = EVENT_BUF_SIZE;
+	read = trace_get_user(&parser, ubuf, cnt, ppos);
+
+	if (read >= 0 && trace_parser_loaded((&parser))) {
+		int set = 1;
 
-	i = 0;
-	while (cnt && !isspace(ch)) {
-		if (!i && ch == '!')
+		if (*parser.buffer == '!')
 			set = 0;
-		else
-			buf[i++] = ch;
 
-		ret = get_user(ch, ubuf++);
+		parser.buffer[parser.idx] = 0;
+
+		ret = ftrace_set_clr_event(parser.buffer + !set, set);
 		if (ret)
-			goto out_free;
-		read++;
-		cnt--;
+			goto out_put;
 	}
-	buf[i] = 0;
-
-	file->f_pos += read;
-
-	ret = ftrace_set_clr_event(buf, set);
-	if (ret)
-		goto out_free;
 
 	ret = read;
 
- out_free:
-	kfree(buf);
+ out_put:
+	trace_parser_put(&parser);
 
 	return ret;
 }
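
The rewrite above replaces ftrace_event_write()'s hand-rolled get_user() loop with the shared trace_parser helpers. A minimal sketch of that idiom, using only the helpers the hunk itself calls (trace_parser_get_init(), trace_get_user(), trace_parser_loaded(), trace_parser_put()); the function name and the "act on the token" step are illustrative, not from this file:

/* Hypothetical write handler showing the trace_parser pattern. */
static ssize_t example_write(struct file *filp, const char __user *ubuf,
			     size_t cnt, loff_t *ppos)
{
	struct trace_parser parser;
	ssize_t read;

	/* One buffer sized for the largest token we accept. */
	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
		return -ENOMEM;

	/*
	 * Copies a single whitespace-delimited token from user space
	 * into parser.buffer, skipping leading whitespace and
	 * advancing *ppos for the caller.
	 */
	read = trace_get_user(&parser, ubuf, cnt, ppos);

	if (read >= 0 && trace_parser_loaded(&parser)) {
		parser.buffer[parser.idx] = 0;	/* NUL-terminate token */
		/* ... act on parser.buffer here ... */
	}

	trace_parser_put(&parser);	/* frees parser.buffer */
	return read;
}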
@@ -304,42 +270,32 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct list_head *list = m->private;
-	struct ftrace_event_call *call;
+	struct ftrace_event_call *call = v;
 
 	(*pos)++;
 
-	for (;;) {
-		if (list == &ftrace_events)
-			return NULL;
-
-		call = list_entry(list, struct ftrace_event_call, list);
-
+	list_for_each_entry_continue(call, &ftrace_events, list) {
 		/*
 		 * The ftrace subsystem is for showing formats only.
 		 * They can not be enabled or disabled via the event files.
 		 */
 		if (call->regfunc)
-			break;
-
-		list = list->next;
+			return call;
 	}
 
-	m->private = list->next;
-
-	return call;
+	return NULL;
 }
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
-	struct ftrace_event_call *call = NULL;
+	struct ftrace_event_call *call;
 	loff_t l;
 
 	mutex_lock(&event_mutex);
 
-	m->private = ftrace_events.next;
+	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
 	for (l = 0; l <= *pos; ) {
-		call = t_next(m, NULL, &l);
+		call = t_next(m, call, &l);
 		if (!call)
 			break;
 	}
@@ -349,37 +305,28 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 static void *
 s_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct list_head *list = m->private;
-	struct ftrace_event_call *call;
+	struct ftrace_event_call *call = v;
 
 	(*pos)++;
 
- retry:
-	if (list == &ftrace_events)
-		return NULL;
-
-	call = list_entry(list, struct ftrace_event_call, list);
-
-	if (!call->enabled) {
-		list = list->next;
-		goto retry;
+	list_for_each_entry_continue(call, &ftrace_events, list) {
+		if (call->enabled)
+			return call;
 	}
 
-	m->private = list->next;
-
-	return call;
+	return NULL;
 }
 
 static void *s_start(struct seq_file *m, loff_t *pos)
 {
-	struct ftrace_event_call *call = NULL;
+	struct ftrace_event_call *call;
 	loff_t l;
 
 	mutex_lock(&event_mutex);
 
-	m->private = ftrace_events.next;
+	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
 	for (l = 0; l <= *pos; ) {
-		call = s_next(m, NULL, &l);
+		call = s_next(m, call, &l);
 		if (!call)
 			break;
 	}
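
Both seq_file iterator pairs (t_start/t_next and s_start/s_next) now use the same trick: *_start seeds the walk with a fake "entry" computed by list_entry() on the list head itself. That pointer is never dereferenced as a real ftrace_event_call, because list_for_each_entry_continue() only reads its embedded list.next to find the first real node. A stripped-down sketch of the pattern; struct item, item_list, and the wanted flag are illustrative stand-ins, not names from this file:

struct item {
	struct list_head	list;
	bool			wanted;
};
static LIST_HEAD(item_list);

static void *example_next(struct seq_file *m, void *v, loff_t *pos)
{
	struct item *it = v;

	(*pos)++;
	/* Resume after 'it'; only it->list.next is read, so the fake
	 * head-entry passed in by example_start() is safe here. */
	list_for_each_entry_continue(it, &item_list, list) {
		if (it->wanted)
			return it;
	}
	return NULL;
}

static void *example_start(struct seq_file *m, loff_t *pos)
{
	/* Fake entry positioned "before" the first real node. */
	struct item *it = list_entry(&item_list, struct item, list);
	loff_t l;

	for (l = 0; l <= *pos; ) {
		it = example_next(m, it, &l);
		if (!it)
			break;
	}
	return it;
}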
@@ -560,7 +507,7 @@ extern char *__bad_type_size(void);
 #define FIELD(type, name)						\
 	sizeof(type) != sizeof(field.name) ? __bad_type_size() :	\
 	#type, "common_" #name, offsetof(typeof(field), name),		\
-	sizeof(field.name)
+	sizeof(field.name), is_signed_type(type)
 
 static int trace_write_header(struct trace_seq *s)
 {
@@ -568,17 +515,17 @@ static int trace_write_header(struct trace_seq *s)
 
 	/* struct trace_entry */
 	return trace_seq_printf(s,
-			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
-			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
-			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
-			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
-			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\n"
+			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
+			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
+			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
+			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
+			"\tfield:%s %s;\toffset:%zu;\tsize:%zu;\tsigned:%u;\n"
 			"\n",
 			FIELD(unsigned short, type),
 			FIELD(unsigned char, flags),
 			FIELD(unsigned char, preempt_count),
 			FIELD(int, pid),
-			FIELD(int, tgid));
+			FIELD(int, lock_depth));
 }
 
 static ssize_t
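
The two hunks above add a signed:%u column to the common-field header, fed by is_signed_type(). The kernel derives signedness from the type itself; the exact spelling has varied between trees, but it is essentially the following one-liner (shown here as a sketch, not a quote of this file):

/*
 * (type)-1 wraps to the maximum value for unsigned types, so it
 * compares below (type)1 only when 'type' is signed.
 */
#define is_signed_type(type)	(((type)(-1)) < (type)1)

So FIELD(int, pid) prints signed:1, while FIELD(unsigned short, type) prints signed:0.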
@@ -931,9 +878,9 @@ event_subsystem_dir(const char *name, struct dentry *d_events)
 		       "'%s/filter' entry\n", name);
 	}
 
-	entry = trace_create_file("enable", 0644, system->entry,
-				  (void *)system->name,
-				  &ftrace_system_enable_fops);
+	trace_create_file("enable", 0644, system->entry,
+			  (void *)system->name,
+			  &ftrace_system_enable_fops);
 
 	return system->entry;
 }
@@ -945,7 +892,6 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
 		 const struct file_operations *filter,
 		 const struct file_operations *format)
 {
-	struct dentry *entry;
 	int ret;
 
 	/*
@@ -963,12 +909,12 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
 	}
 
 	if (call->regfunc)
-		entry = trace_create_file("enable", 0644, call->dir, call,
-					  enable);
+		trace_create_file("enable", 0644, call->dir, call,
+				  enable);
 
 	if (call->id && call->profile_enable)
-		entry = trace_create_file("id", 0444, call->dir, call,
-					  id);
+		trace_create_file("id", 0444, call->dir, call,
+				  id);
 
 	if (call->define_fields) {
 		ret = call->define_fields(call);
@@ -977,16 +923,16 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
 			       " events/%s\n", call->name);
 			return ret;
 		}
-		entry = trace_create_file("filter", 0644, call->dir, call,
-					  filter);
+		trace_create_file("filter", 0644, call->dir, call,
+				  filter);
 	}
 
 	/* A trace may not want to export its format */
 	if (!call->show_format)
 		return 0;
 
-	entry = trace_create_file("format", 0444, call->dir, call,
-				  format);
+	trace_create_file("format", 0444, call->dir, call,
+			  format);
 
 	return 0;
 }
@@ -1187,7 +1133,7 @@ static int trace_module_notify(struct notifier_block *self,
 }
 #endif /* CONFIG_MODULES */
 
-struct notifier_block trace_module_nb = {
+static struct notifier_block trace_module_nb = {
 	.notifier_call = trace_module_notify,
 	.priority = 0,
 };
@@ -1359,6 +1305,18 @@ static __init void event_trace_self_tests(void)
 		if (!call->regfunc)
 			continue;
 
+/*
+ * Testing syscall events here is pretty useless, but
+ * we still do it if configured. But this is time consuming.
+ * What we really need is a user thread to perform the
+ * syscalls as we test.
+ */
+#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
+		if (call->system &&
+		    strcmp(call->system, "syscalls") == 0)
+			continue;
+#endif
+
 		pr_info("Testing event %s: ", call->name);
 
 		/*
@@ -1432,7 +1390,7 @@ static __init void event_trace_self_tests(void)
 
 #ifdef CONFIG_FUNCTION_TRACER
 
-static DEFINE_PER_CPU(atomic_t, test_event_disable);
+static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
 
 static void
 function_test_events_call(unsigned long ip, unsigned long parent_ip)
@@ -1449,7 +1407,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
 	pc = preempt_count();
 	resched = ftrace_preempt_disable();
 	cpu = raw_smp_processor_id();
-	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));
+	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
 
 	if (disabled != 1)
 		goto out;
@@ -1468,7 +1426,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
 	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
 
  out:
-	atomic_dec(&per_cpu(test_event_disable, cpu));
+	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
 	ftrace_preempt_enable(resched);
 }
 
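The per-CPU counter renamed in the last three hunks is the usual recursion guard for function-tracer callbacks: the first entry on a CPU sees the counter go to 1 and records the event; re-entrant calls (from functions the recording path itself executes) see a larger value and bail out. Schematically, with illustrative names; the real code additionally brackets this with ftrace_preempt_disable()/ftrace_preempt_enable() so the CPU cannot change between the increment and the decrement:

static DEFINE_PER_CPU(atomic_t, example_disable);

static void example_callback(unsigned long ip, unsigned long parent_ip)
{
	int cpu = raw_smp_processor_id();

	/* First increment on this CPU returns 1; nested calls bail. */
	if (atomic_inc_return(&per_cpu(example_disable, cpu)) != 1)
		goto out;

	/* ... record the event; anything traced in here re-enters
	 * example_callback() and is rejected by the check above ... */

 out:
	atomic_dec(&per_cpu(example_disable, cpu));
}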