diff options
author | Frederic Weisbecker <fweisbec@gmail.com> | 2009-09-23 17:08:43 -0400 |
---|---|---|
committer | Frederic Weisbecker <fweisbec@gmail.com> | 2009-09-23 17:08:43 -0400 |
commit | d7a4b414eed51f1653bb05ebe84122bf9a7ae18b (patch) | |
tree | bd6603a0c27de4c138a1767871897e9cd3e1a1d2 /kernel/trace/trace_events.c | |
parent | 1f0ab40976460bc4673fa204ce917a725185d8f2 (diff) | |
parent | a724eada8c2a7b62463b73ccf73fd0bb6e928aeb (diff) |
Merge commit 'linus/master' into tracing/kprobes
Conflicts:
kernel/trace/Makefile
kernel/trace/trace.h
kernel/trace/trace_event_types.h
kernel/trace/trace_export.c
Merge reason:
Sync with latest significant tracing core changes.
Diffstat (limited to 'kernel/trace/trace_events.c')
-rw-r--r-- | kernel/trace/trace_events.c | 134 |
1 file changed, 47 insertions, 87 deletions
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index f85b0f1cb942..a4b7c9a9130c 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -21,6 +21,7 @@ | |||
21 | 21 | ||
22 | #include "trace_output.h" | 22 | #include "trace_output.h" |
23 | 23 | ||
24 | #undef TRACE_SYSTEM | ||
24 | #define TRACE_SYSTEM "TRACE_SYSTEM" | 25 | #define TRACE_SYSTEM "TRACE_SYSTEM" |
25 | 26 | ||
26 | DEFINE_MUTEX(event_mutex); | 27 | DEFINE_MUTEX(event_mutex); |
@@ -86,7 +87,7 @@ int trace_define_common_fields(struct ftrace_event_call *call) | |||
86 | __common_field(unsigned char, flags); | 87 | __common_field(unsigned char, flags); |
87 | __common_field(unsigned char, preempt_count); | 88 | __common_field(unsigned char, preempt_count); |
88 | __common_field(int, pid); | 89 | __common_field(int, pid); |
89 | __common_field(int, tgid); | 90 | __common_field(int, lock_depth); |
90 | 91 | ||
91 | return ret; | 92 | return ret; |
92 | } | 93 | } |
@@ -226,11 +227,9 @@ static ssize_t | |||
226 | ftrace_event_write(struct file *file, const char __user *ubuf, | 227 | ftrace_event_write(struct file *file, const char __user *ubuf, |
227 | size_t cnt, loff_t *ppos) | 228 | size_t cnt, loff_t *ppos) |
228 | { | 229 | { |
230 | struct trace_parser parser; | ||
229 | size_t read = 0; | 231 | size_t read = 0; |
230 | int i, set = 1; | ||
231 | ssize_t ret; | 232 | ssize_t ret; |
232 | char *buf; | ||
233 | char ch; | ||
234 | 233 | ||
235 | if (!cnt || cnt < 0) | 234 | if (!cnt || cnt < 0) |
236 | return 0; | 235 | return 0; |
@@ -239,60 +238,28 @@ ftrace_event_write(struct file *file, const char __user *ubuf, | |||
239 | if (ret < 0) | 238 | if (ret < 0) |
240 | return ret; | 239 | return ret; |
241 | 240 | ||
242 | ret = get_user(ch, ubuf++); | 241 | if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1)) |
243 | if (ret) | ||
244 | return ret; | ||
245 | read++; | ||
246 | cnt--; | ||
247 | |||
248 | /* skip white space */ | ||
249 | while (cnt && isspace(ch)) { | ||
250 | ret = get_user(ch, ubuf++); | ||
251 | if (ret) | ||
252 | return ret; | ||
253 | read++; | ||
254 | cnt--; | ||
255 | } | ||
256 | |||
257 | /* Only white space found? */ | ||
258 | if (isspace(ch)) { | ||
259 | file->f_pos += read; | ||
260 | ret = read; | ||
261 | return ret; | ||
262 | } | ||
263 | |||
264 | buf = kmalloc(EVENT_BUF_SIZE+1, GFP_KERNEL); | ||
265 | if (!buf) | ||
266 | return -ENOMEM; | 242 | return -ENOMEM; |
267 | 243 | ||
268 | if (cnt > EVENT_BUF_SIZE) | 244 | read = trace_get_user(&parser, ubuf, cnt, ppos); |
269 | cnt = EVENT_BUF_SIZE; | 245 | |
246 | if (trace_parser_loaded((&parser))) { | ||
247 | int set = 1; | ||
270 | 248 | ||
271 | i = 0; | 249 | if (*parser.buffer == '!') |
272 | while (cnt && !isspace(ch)) { | ||
273 | if (!i && ch == '!') | ||
274 | set = 0; | 250 | set = 0; |
275 | else | ||
276 | buf[i++] = ch; | ||
277 | 251 | ||
278 | ret = get_user(ch, ubuf++); | 252 | parser.buffer[parser.idx] = 0; |
253 | |||
254 | ret = ftrace_set_clr_event(parser.buffer + !set, set); | ||
279 | if (ret) | 255 | if (ret) |
280 | goto out_free; | 256 | goto out_put; |
281 | read++; | ||
282 | cnt--; | ||
283 | } | 257 | } |
284 | buf[i] = 0; | ||
285 | |||
286 | file->f_pos += read; | ||
287 | |||
288 | ret = ftrace_set_clr_event(buf, set); | ||
289 | if (ret) | ||
290 | goto out_free; | ||
291 | 258 | ||
292 | ret = read; | 259 | ret = read; |
293 | 260 | ||
294 | out_free: | 261 | out_put: |
295 | kfree(buf); | 262 | trace_parser_put(&parser); |
296 | 263 | ||
297 | return ret; | 264 | return ret; |
298 | } | 265 | } |
@@ -300,42 +267,32 @@ ftrace_event_write(struct file *file, const char __user *ubuf, | |||
300 | static void * | 267 | static void * |
301 | t_next(struct seq_file *m, void *v, loff_t *pos) | 268 | t_next(struct seq_file *m, void *v, loff_t *pos) |
302 | { | 269 | { |
303 | struct list_head *list = m->private; | 270 | struct ftrace_event_call *call = v; |
304 | struct ftrace_event_call *call; | ||
305 | 271 | ||
306 | (*pos)++; | 272 | (*pos)++; |
307 | 273 | ||
308 | for (;;) { | 274 | list_for_each_entry_continue(call, &ftrace_events, list) { |
309 | if (list == &ftrace_events) | ||
310 | return NULL; | ||
311 | |||
312 | call = list_entry(list, struct ftrace_event_call, list); | ||
313 | |||
314 | /* | 275 | /* |
315 | * The ftrace subsystem is for showing formats only. | 276 | * The ftrace subsystem is for showing formats only. |
316 | * They can not be enabled or disabled via the event files. | 277 | * They can not be enabled or disabled via the event files. |
317 | */ | 278 | */ |
318 | if (call->regfunc) | 279 | if (call->regfunc) |
319 | break; | 280 | return call; |
320 | |||
321 | list = list->next; | ||
322 | } | 281 | } |
323 | 282 | ||
324 | m->private = list->next; | 283 | return NULL; |
325 | |||
326 | return call; | ||
327 | } | 284 | } |
328 | 285 | ||
329 | static void *t_start(struct seq_file *m, loff_t *pos) | 286 | static void *t_start(struct seq_file *m, loff_t *pos) |
330 | { | 287 | { |
331 | struct ftrace_event_call *call = NULL; | 288 | struct ftrace_event_call *call; |
332 | loff_t l; | 289 | loff_t l; |
333 | 290 | ||
334 | mutex_lock(&event_mutex); | 291 | mutex_lock(&event_mutex); |
335 | 292 | ||
336 | m->private = ftrace_events.next; | 293 | call = list_entry(&ftrace_events, struct ftrace_event_call, list); |
337 | for (l = 0; l <= *pos; ) { | 294 | for (l = 0; l <= *pos; ) { |
338 | call = t_next(m, NULL, &l); | 295 | call = t_next(m, call, &l); |
339 | if (!call) | 296 | if (!call) |
340 | break; | 297 | break; |
341 | } | 298 | } |
@@ -345,37 +302,28 @@ static void *t_start(struct seq_file *m, loff_t *pos) | |||
345 | static void * | 302 | static void * |
346 | s_next(struct seq_file *m, void *v, loff_t *pos) | 303 | s_next(struct seq_file *m, void *v, loff_t *pos) |
347 | { | 304 | { |
348 | struct list_head *list = m->private; | 305 | struct ftrace_event_call *call = v; |
349 | struct ftrace_event_call *call; | ||
350 | 306 | ||
351 | (*pos)++; | 307 | (*pos)++; |
352 | 308 | ||
353 | retry: | 309 | list_for_each_entry_continue(call, &ftrace_events, list) { |
354 | if (list == &ftrace_events) | 310 | if (call->enabled) |
355 | return NULL; | 311 | return call; |
356 | |||
357 | call = list_entry(list, struct ftrace_event_call, list); | ||
358 | |||
359 | if (!call->enabled) { | ||
360 | list = list->next; | ||
361 | goto retry; | ||
362 | } | 312 | } |
363 | 313 | ||
364 | m->private = list->next; | 314 | return NULL; |
365 | |||
366 | return call; | ||
367 | } | 315 | } |
368 | 316 | ||
369 | static void *s_start(struct seq_file *m, loff_t *pos) | 317 | static void *s_start(struct seq_file *m, loff_t *pos) |
370 | { | 318 | { |
371 | struct ftrace_event_call *call = NULL; | 319 | struct ftrace_event_call *call; |
372 | loff_t l; | 320 | loff_t l; |
373 | 321 | ||
374 | mutex_lock(&event_mutex); | 322 | mutex_lock(&event_mutex); |
375 | 323 | ||
376 | m->private = ftrace_events.next; | 324 | call = list_entry(&ftrace_events, struct ftrace_event_call, list); |
377 | for (l = 0; l <= *pos; ) { | 325 | for (l = 0; l <= *pos; ) { |
378 | call = s_next(m, NULL, &l); | 326 | call = s_next(m, call, &l); |
379 | if (!call) | 327 | if (!call) |
380 | break; | 328 | break; |
381 | } | 329 | } |
@@ -574,7 +522,7 @@ static int trace_write_header(struct trace_seq *s) | |||
574 | FIELD(unsigned char, flags), | 522 | FIELD(unsigned char, flags), |
575 | FIELD(unsigned char, preempt_count), | 523 | FIELD(unsigned char, preempt_count), |
576 | FIELD(int, pid), | 524 | FIELD(int, pid), |
577 | FIELD(int, tgid)); | 525 | FIELD(int, lock_depth)); |
578 | } | 526 | } |
579 | 527 | ||
580 | static ssize_t | 528 | static ssize_t |
@@ -1242,7 +1190,7 @@ static int trace_module_notify(struct notifier_block *self, | |||
1242 | } | 1190 | } |
1243 | #endif /* CONFIG_MODULES */ | 1191 | #endif /* CONFIG_MODULES */ |
1244 | 1192 | ||
1245 | struct notifier_block trace_module_nb = { | 1193 | static struct notifier_block trace_module_nb = { |
1246 | .notifier_call = trace_module_notify, | 1194 | .notifier_call = trace_module_notify, |
1247 | .priority = 0, | 1195 | .priority = 0, |
1248 | }; | 1196 | }; |
@@ -1414,6 +1362,18 @@ static __init void event_trace_self_tests(void) | |||
1414 | if (!call->regfunc) | 1362 | if (!call->regfunc) |
1415 | continue; | 1363 | continue; |
1416 | 1364 | ||
1365 | /* | ||
1366 | * Testing syscall events here is pretty useless, but | ||
1367 | * we still do it if configured. But this is time consuming. | ||
1368 | * What we really need is a user thread to perform the | ||
1369 | * syscalls as we test. | ||
1370 | */ | ||
1371 | #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS | ||
1372 | if (call->system && | ||
1373 | strcmp(call->system, "syscalls") == 0) | ||
1374 | continue; | ||
1375 | #endif | ||
1376 | |||
1417 | pr_info("Testing event %s: ", call->name); | 1377 | pr_info("Testing event %s: ", call->name); |
1418 | 1378 | ||
1419 | /* | 1379 | /* |
@@ -1487,7 +1447,7 @@ static __init void event_trace_self_tests(void) | |||
1487 | 1447 | ||
1488 | #ifdef CONFIG_FUNCTION_TRACER | 1448 | #ifdef CONFIG_FUNCTION_TRACER |
1489 | 1449 | ||
1490 | static DEFINE_PER_CPU(atomic_t, test_event_disable); | 1450 | static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable); |
1491 | 1451 | ||
1492 | static void | 1452 | static void |
1493 | function_test_events_call(unsigned long ip, unsigned long parent_ip) | 1453 | function_test_events_call(unsigned long ip, unsigned long parent_ip) |
@@ -1504,7 +1464,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip) | |||
1504 | pc = preempt_count(); | 1464 | pc = preempt_count(); |
1505 | resched = ftrace_preempt_disable(); | 1465 | resched = ftrace_preempt_disable(); |
1506 | cpu = raw_smp_processor_id(); | 1466 | cpu = raw_smp_processor_id(); |
1507 | disabled = atomic_inc_return(&per_cpu(test_event_disable, cpu)); | 1467 | disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu)); |
1508 | 1468 | ||
1509 | if (disabled != 1) | 1469 | if (disabled != 1) |
1510 | goto out; | 1470 | goto out; |
@@ -1523,7 +1483,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip) | |||
1523 | trace_nowake_buffer_unlock_commit(buffer, event, flags, pc); | 1483 | trace_nowake_buffer_unlock_commit(buffer, event, flags, pc); |
1524 | 1484 | ||
1525 | out: | 1485 | out: |
1526 | atomic_dec(&per_cpu(test_event_disable, cpu)); | 1486 | atomic_dec(&per_cpu(ftrace_test_event_disable, cpu)); |
1527 | ftrace_preempt_enable(resched); | 1487 | ftrace_preempt_enable(resched); |
1528 | } | 1488 | } |
1529 | 1489 | ||