author		Tom Zanussi <tom.zanussi@linux.intel.com>	2013-10-24 09:34:19 -0400
committer	Steven Rostedt <rostedt@goodmis.org>		2013-11-05 17:48:49 -0500
commit		d562aff93bfb530b0992141500a402d17081189d (patch)
tree		af541e2539c575932a6b0c13d69792472c3d26fa /kernel
parent		38de93abec8d8acd8d6dbbe9b0d92d6d5cdb3090 (diff)
tracing: Add support for SOFT_DISABLE to syscall events
The original SOFT_DISABLE patches didn't add support for soft disable
of syscall events; this adds it.
Add an array of ftrace_event_file pointers indexed by syscall number
to the trace array and remove the existing enabled bitmaps, which as a
result are now redundant. The ftrace_event_file structs in turn
contain the soft disable flags we need for per-syscall soft disable
accounting.
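
In the tracepoint handlers this turns the per-syscall check from a bitmap test
into an RCU-protected pointer lookup followed by a soft-disable flag test.
Below is a condensed, illustrative sketch of that pattern using only
identifiers that appear in the diff further down; the wrapper function name is
hypothetical and exists only to frame the excerpt.

/* Hypothetical wrapper; the real logic sits inline in ftrace_syscall_enter(). */
static void per_syscall_soft_disable_check(struct trace_array *tr, int syscall_nr)
{
	struct ftrace_event_file *ftrace_file;

	/* Tracepoint handlers run under rcu_read_lock_sched() (__DO_TRACE). */
	ftrace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
	if (!ftrace_file)
		return;		/* event not enabled for this trace_array */

	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
		return;		/* enabled but soft-disabled: record nothing */

	/* ... otherwise reserve, fill and commit the ring buffer event ... */
}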
Adding ftrace_event_files also means we can remove the USE_CALL_FILTER
bit, thus enabling multibuffer filter support for syscall events.
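
Concretely, both handlers switch from the call-level filter helper to the
file-level one; because each ftrace_event_file is tied to one trace_array,
each buffer instance can now carry its own filter for the event. A
side-by-side sketch condensed from the hunks below (enter path shown; the exit
path is analogous):

/* before: filter attached to the shared ftrace_event_call */
if (!call_filter_check_discard(sys_data->enter_event, entry, buffer, event))
	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);

/* after: per-instance filter reached through the ftrace_event_file */
if (!filter_check_discard(ftrace_file, entry, buffer, event))
	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);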
Link: http://lkml.kernel.org/r/6e72b566e85d8df8042f133efbc6c30e21fb017e.1382620672.git.tom.zanussi@linux.intel.com
Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/trace/trace.h		|  4
-rw-r--r--	kernel/trace/trace_syscalls.c	| 42
2 files changed, 34 insertions(+), 12 deletions(-)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 12d1a612a73e..9c27cdadd71f 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -192,8 +192,8 @@ struct trace_array {
 #ifdef CONFIG_FTRACE_SYSCALLS
 	int			sys_refcount_enter;
 	int			sys_refcount_exit;
-	DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
-	DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
+	struct ftrace_event_file *enter_syscall_files[NR_syscalls];
+	struct ftrace_event_file *exit_syscall_files[NR_syscalls];
 #endif
 	int			stop_count;
 	int			clock_id;
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 32644eece429..e4b6d11bdf78 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -302,6 +302,7 @@ static int __init syscall_exit_define_fields(struct ftrace_event_call *call)
 static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 {
 	struct trace_array *tr = data;
+	struct ftrace_event_file *ftrace_file;
 	struct syscall_trace_enter *entry;
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;
@@ -314,7 +315,13 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 	syscall_nr = trace_get_syscall_nr(current, regs);
 	if (syscall_nr < 0)
 		return;
-	if (!test_bit(syscall_nr, tr->enabled_enter_syscalls))
+
+	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
+	ftrace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
+	if (!ftrace_file)
+		return;
+
+	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
 		return;
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
@@ -336,8 +343,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 	entry->nr = syscall_nr;
 	syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);
 
-	if (!call_filter_check_discard(sys_data->enter_event, entry,
-				       buffer, event))
+	if (!filter_check_discard(ftrace_file, entry, buffer, event))
 		trace_current_buffer_unlock_commit(buffer, event,
 						   irq_flags, pc);
 }
@@ -345,6 +351,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
 static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 {
 	struct trace_array *tr = data;
+	struct ftrace_event_file *ftrace_file;
 	struct syscall_trace_exit *entry;
 	struct syscall_metadata *sys_data;
 	struct ring_buffer_event *event;
@@ -356,7 +363,13 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 	syscall_nr = trace_get_syscall_nr(current, regs);
 	if (syscall_nr < 0)
 		return;
-	if (!test_bit(syscall_nr, tr->enabled_exit_syscalls))
+
+	/* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
+	ftrace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
+	if (!ftrace_file)
+		return;
+
+	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, &ftrace_file->flags))
 		return;
 
 	sys_data = syscall_nr_to_meta(syscall_nr);
@@ -377,8 +390,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
 	entry->nr = syscall_nr;
 	entry->ret = syscall_get_return_value(current, regs);
 
-	if (!call_filter_check_discard(sys_data->exit_event, entry,
-				       buffer, event))
+	if (!filter_check_discard(ftrace_file, entry, buffer, event))
 		trace_current_buffer_unlock_commit(buffer, event,
 						   irq_flags, pc);
 }
@@ -397,7 +409,7 @@ static int reg_event_syscall_enter(struct ftrace_event_file *file,
 	if (!tr->sys_refcount_enter)
 		ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
 	if (!ret) {
-		set_bit(num, tr->enabled_enter_syscalls);
+		rcu_assign_pointer(tr->enter_syscall_files[num], file);
 		tr->sys_refcount_enter++;
 	}
 	mutex_unlock(&syscall_trace_lock);
@@ -415,10 +427,15 @@ static void unreg_event_syscall_enter(struct ftrace_event_file *file,
 		return;
 	mutex_lock(&syscall_trace_lock);
 	tr->sys_refcount_enter--;
-	clear_bit(num, tr->enabled_enter_syscalls);
+	rcu_assign_pointer(tr->enter_syscall_files[num], NULL);
 	if (!tr->sys_refcount_enter)
 		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
 	mutex_unlock(&syscall_trace_lock);
+	/*
+	 * Callers expect the event to be completely disabled on
+	 * return, so wait for current handlers to finish.
+	 */
+	synchronize_sched();
 }
 
 static int reg_event_syscall_exit(struct ftrace_event_file *file,
@@ -435,7 +452,7 @@ static int reg_event_syscall_exit(struct ftrace_event_file *file,
 	if (!tr->sys_refcount_exit)
 		ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
 	if (!ret) {
-		set_bit(num, tr->enabled_exit_syscalls);
+		rcu_assign_pointer(tr->exit_syscall_files[num], file);
 		tr->sys_refcount_exit++;
 	}
 	mutex_unlock(&syscall_trace_lock);
@@ -453,10 +470,15 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file,
 		return;
 	mutex_lock(&syscall_trace_lock);
 	tr->sys_refcount_exit--;
-	clear_bit(num, tr->enabled_exit_syscalls);
+	rcu_assign_pointer(tr->exit_syscall_files[num], NULL);
 	if (!tr->sys_refcount_exit)
 		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
 	mutex_unlock(&syscall_trace_lock);
+	/*
+	 * Callers expect the event to be completely disabled on
+	 * return, so wait for current handlers to finish.
+	 */
+	synchronize_sched();
 }
 
 static int __init init_syscall_trace(struct ftrace_event_call *call)