Diffstat (limited to 'kernel/trace/trace_events.c')
-rw-r--r--	kernel/trace/trace_events.c	134
1 file changed, 47 insertions(+), 87 deletions(-)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 78b1ed230177..6f03c8a1105e 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -21,6 +21,7 @@
 
 #include "trace_output.h"
 
+#undef TRACE_SYSTEM
 #define TRACE_SYSTEM "TRACE_SYSTEM"
 
 DEFINE_MUTEX(event_mutex);
@@ -86,7 +87,7 @@ int trace_define_common_fields(struct ftrace_event_call *call)
 	__common_field(unsigned char, flags);
 	__common_field(unsigned char, preempt_count);
 	__common_field(int, pid);
-	__common_field(int, tgid);
+	__common_field(int, lock_depth);
 
 	return ret;
 }
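
The common fields map one-to-one onto struct trace_entry, the header prepended to every event record; this hunk swaps the rarely used tgid for the BKL lock_depth. A hedged sketch of the corresponding layout, assuming the include/linux/ftrace_event.h definition of this era (field order is an assumption):

	/* Sketch only: the common fields above mirror this struct. */
	struct trace_entry {
		unsigned short		type;
		unsigned char		flags;
		unsigned char		preempt_count;
		int			pid;
		int			lock_depth;	/* BKL depth, replaces tgid */
	};
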
@@ -230,11 +231,9 @@ static ssize_t
 ftrace_event_write(struct file *file, const char __user *ubuf,
 		   size_t cnt, loff_t *ppos)
 {
+	struct trace_parser parser;
 	size_t read = 0;
-	int i, set = 1;
 	ssize_t ret;
-	char *buf;
-	char ch;
 
 	if (!cnt || cnt < 0)
 		return 0;
@@ -243,60 +242,28 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
 	if (ret < 0)
 		return ret;
 
-	ret = get_user(ch, ubuf++);
-	if (ret)
-		return ret;
-	read++;
-	cnt--;
-
-	/* skip white space */
-	while (cnt && isspace(ch)) {
-		ret = get_user(ch, ubuf++);
-		if (ret)
-			return ret;
-		read++;
-		cnt--;
-	}
-
-	/* Only white space found? */
-	if (isspace(ch)) {
-		file->f_pos += read;
-		ret = read;
-		return ret;
-	}
-
-	buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL);
-	if (!buf)
+	if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
 		return -ENOMEM;
 
-	if (cnt > EVENT_BUF_SIZE)
-		cnt = EVENT_BUF_SIZE;
+	read = trace_get_user(&parser, ubuf, cnt, ppos);
+
+	if (trace_parser_loaded((&parser))) {
+		int set = 1;
 
-	i = 0;
-	while (cnt && !isspace(ch)) {
-		if (!i && ch == '!')
+		if (*parser.buffer == '!')
 			set = 0;
-		else
-			buf[i++] = ch;
 
-		ret = get_user(ch, ubuf++);
+		parser.buffer[parser.idx] = 0;
+
+		ret = ftrace_set_clr_event(parser.buffer + !set, set);
 		if (ret)
-			goto out_free;
-		read++;
-		cnt--;
+			goto out_put;
 	}
-	buf[i] = 0;
-
-	file->f_pos += read;
-
-	ret = ftrace_set_clr_event(buf, set);
-	if (ret)
-		goto out_free;
 
 	ret = read;
 
- out_free:
-	kfree(buf);
+ out_put:
+	trace_parser_put(&parser);
 
 	return ret;
 }
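
This rewrite replaces the open-coded get_user()/isspace() token scanner with the shared trace_parser helpers. A minimal sketch of the resulting pattern for a set_event-style write handler; the trace_parser_* calls and EVENT_BUF_SIZE are the real names used above, while example_event_write() and process_token() are hypothetical stand-ins (the latter for ftrace_set_clr_event()):

	static ssize_t example_event_write(struct file *file, const char __user *ubuf,
					   size_t cnt, loff_t *ppos)
	{
		struct trace_parser parser;
		size_t read;
		ssize_t ret;

		/* Allocate parser.buffer (EVENT_BUF_SIZE + 1 bytes above). */
		if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
			return -ENOMEM;

		/* Copy one whitespace-delimited token in; updates *ppos. */
		read = trace_get_user(&parser, ubuf, cnt, ppos);

		if (trace_parser_loaded(&parser)) {
			parser.buffer[parser.idx] = 0;		/* NUL-terminate */
			ret = process_token(parser.buffer);	/* hypothetical consumer */
			if (ret) {
				trace_parser_put(&parser);	/* free the buffer */
				return ret;
			}
		}

		trace_parser_put(&parser);
		return read;
	}
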
@@ -304,42 +271,32 @@ ftrace_event_write(struct file *file, const char __user *ubuf,
 static void *
 t_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct list_head *list = m->private;
-	struct ftrace_event_call *call;
+	struct ftrace_event_call *call = v;
 
 	(*pos)++;
 
-	for (;;) {
-		if (list == &ftrace_events)
-			return NULL;
-
-		call = list_entry(list, struct ftrace_event_call, list);
-
+	list_for_each_entry_continue(call, &ftrace_events, list) {
 		/*
 		 * The ftrace subsystem is for showing formats only.
 		 * They can not be enabled or disabled via the event files.
 		 */
 		if (call->regfunc)
-			break;
-
-		list = list->next;
+			return call;
 	}
 
-	m->private = list->next;
-
-	return call;
+	return NULL;
 }
 
 static void *t_start(struct seq_file *m, loff_t *pos)
 {
-	struct ftrace_event_call *call = NULL;
+	struct ftrace_event_call *call;
 	loff_t l;
 
 	mutex_lock(&event_mutex);
 
-	m->private = ftrace_events.next;
+	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
 	for (l = 0; l <= *pos; ) {
-		call = t_next(m, call, &l);
+		call = t_next(m, call, &l);
 		if (!call)
 			break;
 	}
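
Both iterators now ride on list_for_each_entry_continue(), which resumes after the entry it is handed. Seeding the cursor in t_start() with list_entry() applied to the list head itself yields a bogus "entry" whose ->list member is the head, so the first _continue() step lands on the first real element, and the bogus pointer is never dereferenced. A standalone sketch of the idiom; first_displayable() is a hypothetical name, while ftrace_events and ->regfunc come from this file:

	/* Sketch of the cursor-seeding trick used by t_start()/s_start(). */
	static struct ftrace_event_call *first_displayable(void)
	{
		struct ftrace_event_call *call;

		/* Fake entry computed from the list head; only ever advanced. */
		call = list_entry(&ftrace_events, struct ftrace_event_call, list);

		list_for_each_entry_continue(call, &ftrace_events, list) {
			if (call->regfunc)	/* same filter as t_next() */
				return call;
		}
		return NULL;	/* list exhausted */
	}
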
@@ -349,37 +306,28 @@ static void *t_start(struct seq_file *m, loff_t *pos)
 static void *
 s_next(struct seq_file *m, void *v, loff_t *pos)
 {
-	struct list_head *list = m->private;
-	struct ftrace_event_call *call;
+	struct ftrace_event_call *call = v;
 
 	(*pos)++;
 
- retry:
-	if (list == &ftrace_events)
-		return NULL;
-
-	call = list_entry(list, struct ftrace_event_call, list);
-
-	if (!call->enabled) {
-		list = list->next;
-		goto retry;
+	list_for_each_entry_continue(call, &ftrace_events, list) {
+		if (call->enabled)
+			return call;
 	}
 
-	m->private = list->next;
-
-	return call;
+	return NULL;
 }
 
 static void *s_start(struct seq_file *m, loff_t *pos)
 {
-	struct ftrace_event_call *call = NULL;
+	struct ftrace_event_call *call;
 	loff_t l;
 
 	mutex_lock(&event_mutex);
 
-	m->private = ftrace_events.next;
+	call = list_entry(&ftrace_events, struct ftrace_event_call, list);
 	for (l = 0; l <= *pos; ) {
-		call = s_next(m, NULL, &l);
+		call = s_next(m, call, &l);
 		if (!call)
 			break;
 	}
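
The same conversion applies to the set_event view, which filters on call->enabled instead of call->regfunc. With the current entry carried in v by the seq_file core rather than cached in m->private, ->start can simply replay ->next *pos times under event_mutex, which stays correct even if the set of enabled events changes between reads. A hedged sketch of how the callbacks hang together; the actual seq_operations struct lives elsewhere in this file, is untouched by this diff, and the shared show/stop callbacks are an assumption:

	static const struct seq_operations set_event_seq_ops_sketch = {
		.start = s_start,	/* takes event_mutex, replays s_next *pos times */
		.next  = s_next,	/* next enabled event, or NULL at end of list */
		.show  = t_show,	/* assumption: shared with the events view */
		.stop  = t_stop,	/* drops event_mutex */
	};
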
@@ -578,7 +526,7 @@ static int trace_write_header(struct trace_seq *s)
 		       FIELD(unsigned char, flags),
 		       FIELD(unsigned char, preempt_count),
 		       FIELD(int, pid),
-		       FIELD(int, tgid));
+		       FIELD(int, lock_depth));
 }
 
 static ssize_t
@@ -1187,7 +1135,7 @@ static int trace_module_notify(struct notifier_block *self,
 }
 #endif /* CONFIG_MODULES */
 
-struct notifier_block trace_module_nb = {
+static struct notifier_block trace_module_nb = {
 	.notifier_call = trace_module_notify,
 	.priority = 0,
 };
@@ -1359,6 +1307,18 @@ static __init void event_trace_self_tests(void)
 		if (!call->regfunc)
 			continue;
 
+/*
+ * Testing syscall events here is pretty useless, but
+ * we still do it if configured. But this is time consuming.
+ * What we really need is a user thread to perform the
+ * syscalls as we test.
+ */
+#ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
+		if (call->system &&
+		    strcmp(call->system, "syscalls") == 0)
+			continue;
+#endif
+
 		pr_info("Testing event %s: ", call->name);
 
 		/*
@@ -1432,7 +1392,7 @@ static __init void event_trace_self_tests(void)
 
 #ifdef CONFIG_FUNCTION_TRACER
 
-static DEFINE_PER_CPU(atomic_t, test_event_disable);
+static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
 
 static void
 function_test_events_call(unsigned long ip, unsigned long parent_ip)
@@ -1449,7 +1409,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
 	pc = preempt_count();
 	resched = ftrace_preempt_disable();
 	cpu = raw_smp_processor_id();
-	disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu));
+	disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
 
 	if (disabled != 1)
 		goto out;
@@ -1468,7 +1428,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
 	trace_nowake_buffer_unlock_commit(buffer, event, flags, pc);
 
  out:
-	atomic_dec(&per_cpu(test_event_disable, cpu));
+	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
 	ftrace_preempt_enable(resched);
 }
 
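
The rename from test_event_disable to ftrace_test_event_disable matters because per-CPU variables share a single global namespace, so a generic name risks colliding with a symbol from another subsystem. The surrounding code is the classic per-CPU recursion guard for a function-trace hook; a self-contained sketch of the pattern, with my_trace_hook and my_hook_disable as hypothetical names:

	static DEFINE_PER_CPU(atomic_t, my_hook_disable);

	static void my_trace_hook(unsigned long ip, unsigned long parent_ip)
	{
		int cpu, disabled;

		cpu = raw_smp_processor_id();
		disabled = atomic_inc_return(&per_cpu(my_hook_disable, cpu));

		/* First entry on this CPU sees 1; a reentrant call (the hook
		 * tracing itself) sees > 1 and backs out without recording. */
		if (disabled != 1)
			goto out;

		/* ... record the event into the ring buffer ... */

	 out:
		atomic_dec(&per_cpu(my_hook_disable, cpu));
	}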