diff options
author | Steven Rostedt <srostedt@redhat.com> | 2012-08-08 14:48:20 -0400 |
---|---|---|
committer | Steven Rostedt <rostedt@goodmis.org> | 2013-03-15 00:34:44 -0400 |
commit | 12ab74ee00d154bc05ea2fc659b7ce6519e5d5a6 (patch) | |
tree | d123285199790583ca792a1c80cb75ba440425b9 | |
parent | a7603ff4b5f7e26e67af82a4c3d05eeeb8d7b160 (diff) |
tracing: Make syscall events suitable for multiple buffers
Currently the syscall events record into the global buffer. But if
multiple buffers are in place, then we need to have syscall events
record in the proper buffers.
By adding descriptors to pass to the syscall event functions, the
syscall events can now record into the buffers that have been assigned
to them (one event may be applied to multiple buffers).
This will allow tracing high volume syscalls along with seldom-occurring
syscalls without losing the seldom-occurring syscall events.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
-rw-r--r-- | kernel/trace/trace.h | 11 | ||||
-rw-r--r-- | kernel/trace/trace_syscalls.c | 80 |
2 files changed, 57 insertions, 34 deletions
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 15ccd7cd1560..68cad7a9e089 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -13,6 +13,11 @@ | |||
13 | #include <linux/trace_seq.h> | 13 | #include <linux/trace_seq.h> |
14 | #include <linux/ftrace_event.h> | 14 | #include <linux/ftrace_event.h> |
15 | 15 | ||
16 | #ifdef CONFIG_FTRACE_SYSCALLS | ||
17 | #include <asm/unistd.h> /* For NR_SYSCALLS */ | ||
18 | #include <asm/syscall.h> /* some archs define it here */ | ||
19 | #endif | ||
20 | |||
16 | enum trace_type { | 21 | enum trace_type { |
17 | __TRACE_FIRST_TYPE = 0, | 22 | __TRACE_FIRST_TYPE = 0, |
18 | 23 | ||
@@ -173,6 +178,12 @@ struct trace_array { | |||
173 | int cpu; | 178 | int cpu; |
174 | int buffer_disabled; | 179 | int buffer_disabled; |
175 | struct trace_cpu trace_cpu; /* place holder */ | 180 | struct trace_cpu trace_cpu; /* place holder */ |
181 | #ifdef CONFIG_FTRACE_SYSCALLS | ||
182 | int sys_refcount_enter; | ||
183 | int sys_refcount_exit; | ||
184 | DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls); | ||
185 | DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls); | ||
186 | #endif | ||
176 | int stop_count; | 187 | int stop_count; |
177 | int clock_id; | 188 | int clock_id; |
178 | struct tracer *current_trace; | 189 | struct tracer *current_trace; |
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 7a809e321058..a842783ad6be 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c | |||
@@ -12,10 +12,6 @@ | |||
12 | #include "trace.h" | 12 | #include "trace.h" |
13 | 13 | ||
14 | static DEFINE_MUTEX(syscall_trace_lock); | 14 | static DEFINE_MUTEX(syscall_trace_lock); |
15 | static int sys_refcount_enter; | ||
16 | static int sys_refcount_exit; | ||
17 | static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls); | ||
18 | static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls); | ||
19 | 15 | ||
20 | static int syscall_enter_register(struct ftrace_event_call *event, | 16 | static int syscall_enter_register(struct ftrace_event_call *event, |
21 | enum trace_reg type, void *data); | 17 | enum trace_reg type, void *data); |
@@ -303,8 +299,9 @@ static int syscall_exit_define_fields(struct ftrace_event_call *call) | |||
303 | return ret; | 299 | return ret; |
304 | } | 300 | } |
305 | 301 | ||
306 | static void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id) | 302 | static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) |
307 | { | 303 | { |
304 | struct trace_array *tr = data; | ||
308 | struct syscall_trace_enter *entry; | 305 | struct syscall_trace_enter *entry; |
309 | struct syscall_metadata *sys_data; | 306 | struct syscall_metadata *sys_data; |
310 | struct ring_buffer_event *event; | 307 | struct ring_buffer_event *event; |
@@ -315,7 +312,7 @@ static void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id) | |||
315 | syscall_nr = trace_get_syscall_nr(current, regs); | 312 | syscall_nr = trace_get_syscall_nr(current, regs); |
316 | if (syscall_nr < 0) | 313 | if (syscall_nr < 0) |
317 | return; | 314 | return; |
318 | if (!test_bit(syscall_nr, enabled_enter_syscalls)) | 315 | if (!test_bit(syscall_nr, tr->enabled_enter_syscalls)) |
319 | return; | 316 | return; |
320 | 317 | ||
321 | sys_data = syscall_nr_to_meta(syscall_nr); | 318 | sys_data = syscall_nr_to_meta(syscall_nr); |
@@ -324,7 +321,8 @@ static void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id) | |||
324 | 321 | ||
325 | size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args; | 322 | size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args; |
326 | 323 | ||
327 | event = trace_current_buffer_lock_reserve(&buffer, | 324 | buffer = tr->buffer; |
325 | event = trace_buffer_lock_reserve(buffer, | ||
328 | sys_data->enter_event->event.type, size, 0, 0); | 326 | sys_data->enter_event->event.type, size, 0, 0); |
329 | if (!event) | 327 | if (!event) |
330 | return; | 328 | return; |
@@ -338,8 +336,9 @@ static void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id) | |||
338 | trace_current_buffer_unlock_commit(buffer, event, 0, 0); | 336 | trace_current_buffer_unlock_commit(buffer, event, 0, 0); |
339 | } | 337 | } |
340 | 338 | ||
341 | static void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret) | 339 | static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) |
342 | { | 340 | { |
341 | struct trace_array *tr = data; | ||
343 | struct syscall_trace_exit *entry; | 342 | struct syscall_trace_exit *entry; |
344 | struct syscall_metadata *sys_data; | 343 | struct syscall_metadata *sys_data; |
345 | struct ring_buffer_event *event; | 344 | struct ring_buffer_event *event; |
@@ -349,14 +348,15 @@ static void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret) | |||
349 | syscall_nr = trace_get_syscall_nr(current, regs); | 348 | syscall_nr = trace_get_syscall_nr(current, regs); |
350 | if (syscall_nr < 0) | 349 | if (syscall_nr < 0) |
351 | return; | 350 | return; |
352 | if (!test_bit(syscall_nr, enabled_exit_syscalls)) | 351 | if (!test_bit(syscall_nr, tr->enabled_exit_syscalls)) |
353 | return; | 352 | return; |
354 | 353 | ||
355 | sys_data = syscall_nr_to_meta(syscall_nr); | 354 | sys_data = syscall_nr_to_meta(syscall_nr); |
356 | if (!sys_data) | 355 | if (!sys_data) |
357 | return; | 356 | return; |
358 | 357 | ||
359 | event = trace_current_buffer_lock_reserve(&buffer, | 358 | buffer = tr->buffer; |
359 | event = trace_buffer_lock_reserve(buffer, | ||
360 | sys_data->exit_event->event.type, sizeof(*entry), 0, 0); | 360 | sys_data->exit_event->event.type, sizeof(*entry), 0, 0); |
361 | if (!event) | 361 | if (!event) |
362 | return; | 362 | return; |
@@ -370,8 +370,10 @@ static void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret) | |||
370 | trace_current_buffer_unlock_commit(buffer, event, 0, 0); | 370 | trace_current_buffer_unlock_commit(buffer, event, 0, 0); |
371 | } | 371 | } |
372 | 372 | ||
373 | static int reg_event_syscall_enter(struct ftrace_event_call *call) | 373 | static int reg_event_syscall_enter(struct ftrace_event_file *file, |
374 | struct ftrace_event_call *call) | ||
374 | { | 375 | { |
376 | struct trace_array *tr = file->tr; | ||
375 | int ret = 0; | 377 | int ret = 0; |
376 | int num; | 378 | int num; |
377 | 379 | ||
@@ -379,33 +381,37 @@ static int reg_event_syscall_enter(struct ftrace_event_call *call) | |||
379 | if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls)) | 381 | if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls)) |
380 | return -ENOSYS; | 382 | return -ENOSYS; |
381 | mutex_lock(&syscall_trace_lock); | 383 | mutex_lock(&syscall_trace_lock); |
382 | if (!sys_refcount_enter) | 384 | if (!tr->sys_refcount_enter) |
383 | ret = register_trace_sys_enter(ftrace_syscall_enter, NULL); | 385 | ret = register_trace_sys_enter(ftrace_syscall_enter, tr); |
384 | if (!ret) { | 386 | if (!ret) { |
385 | set_bit(num, enabled_enter_syscalls); | 387 | set_bit(num, tr->enabled_enter_syscalls); |
386 | sys_refcount_enter++; | 388 | tr->sys_refcount_enter++; |
387 | } | 389 | } |
388 | mutex_unlock(&syscall_trace_lock); | 390 | mutex_unlock(&syscall_trace_lock); |
389 | return ret; | 391 | return ret; |
390 | } | 392 | } |
391 | 393 | ||
392 | static void unreg_event_syscall_enter(struct ftrace_event_call *call) | 394 | static void unreg_event_syscall_enter(struct ftrace_event_file *file, |
395 | struct ftrace_event_call *call) | ||
393 | { | 396 | { |
397 | struct trace_array *tr = file->tr; | ||
394 | int num; | 398 | int num; |
395 | 399 | ||
396 | num = ((struct syscall_metadata *)call->data)->syscall_nr; | 400 | num = ((struct syscall_metadata *)call->data)->syscall_nr; |
397 | if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls)) | 401 | if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls)) |
398 | return; | 402 | return; |
399 | mutex_lock(&syscall_trace_lock); | 403 | mutex_lock(&syscall_trace_lock); |
400 | sys_refcount_enter--; | 404 | tr->sys_refcount_enter--; |
401 | clear_bit(num, enabled_enter_syscalls); | 405 | clear_bit(num, tr->enabled_enter_syscalls); |
402 | if (!sys_refcount_enter) | 406 | if (!tr->sys_refcount_enter) |
403 | unregister_trace_sys_enter(ftrace_syscall_enter, NULL); | 407 | unregister_trace_sys_enter(ftrace_syscall_enter, tr); |
404 | mutex_unlock(&syscall_trace_lock); | 408 | mutex_unlock(&syscall_trace_lock); |
405 | } | 409 | } |
406 | 410 | ||
407 | static int reg_event_syscall_exit(struct ftrace_event_call *call) | 411 | static int reg_event_syscall_exit(struct ftrace_event_file *file, |
412 | struct ftrace_event_call *call) | ||
408 | { | 413 | { |
414 | struct trace_array *tr = file->tr; | ||
409 | int ret = 0; | 415 | int ret = 0; |
410 | int num; | 416 | int num; |
411 | 417 | ||
@@ -413,28 +419,30 @@ static int reg_event_syscall_exit(struct ftrace_event_call *call) | |||
413 | if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls)) | 419 | if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls)) |
414 | return -ENOSYS; | 420 | return -ENOSYS; |
415 | mutex_lock(&syscall_trace_lock); | 421 | mutex_lock(&syscall_trace_lock); |
416 | if (!sys_refcount_exit) | 422 | if (!tr->sys_refcount_exit) |
417 | ret = register_trace_sys_exit(ftrace_syscall_exit, NULL); | 423 | ret = register_trace_sys_exit(ftrace_syscall_exit, tr); |
418 | if (!ret) { | 424 | if (!ret) { |
419 | set_bit(num, enabled_exit_syscalls); | 425 | set_bit(num, tr->enabled_exit_syscalls); |
420 | sys_refcount_exit++; | 426 | tr->sys_refcount_exit++; |
421 | } | 427 | } |
422 | mutex_unlock(&syscall_trace_lock); | 428 | mutex_unlock(&syscall_trace_lock); |
423 | return ret; | 429 | return ret; |
424 | } | 430 | } |
425 | 431 | ||
426 | static void unreg_event_syscall_exit(struct ftrace_event_call *call) | 432 | static void unreg_event_syscall_exit(struct ftrace_event_file *file, |
433 | struct ftrace_event_call *call) | ||
427 | { | 434 | { |
435 | struct trace_array *tr = file->tr; | ||
428 | int num; | 436 | int num; |
429 | 437 | ||
430 | num = ((struct syscall_metadata *)call->data)->syscall_nr; | 438 | num = ((struct syscall_metadata *)call->data)->syscall_nr; |
431 | if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls)) | 439 | if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls)) |
432 | return; | 440 | return; |
433 | mutex_lock(&syscall_trace_lock); | 441 | mutex_lock(&syscall_trace_lock); |
434 | sys_refcount_exit--; | 442 | tr->sys_refcount_exit--; |
435 | clear_bit(num, enabled_exit_syscalls); | 443 | clear_bit(num, tr->enabled_exit_syscalls); |
436 | if (!sys_refcount_exit) | 444 | if (!tr->sys_refcount_exit) |
437 | unregister_trace_sys_exit(ftrace_syscall_exit, NULL); | 445 | unregister_trace_sys_exit(ftrace_syscall_exit, tr); |
438 | mutex_unlock(&syscall_trace_lock); | 446 | mutex_unlock(&syscall_trace_lock); |
439 | } | 447 | } |
440 | 448 | ||
@@ -685,11 +693,13 @@ static void perf_sysexit_disable(struct ftrace_event_call *call) | |||
685 | static int syscall_enter_register(struct ftrace_event_call *event, | 693 | static int syscall_enter_register(struct ftrace_event_call *event, |
686 | enum trace_reg type, void *data) | 694 | enum trace_reg type, void *data) |
687 | { | 695 | { |
696 | struct ftrace_event_file *file = data; | ||
697 | |||
688 | switch (type) { | 698 | switch (type) { |
689 | case TRACE_REG_REGISTER: | 699 | case TRACE_REG_REGISTER: |
690 | return reg_event_syscall_enter(event); | 700 | return reg_event_syscall_enter(file, event); |
691 | case TRACE_REG_UNREGISTER: | 701 | case TRACE_REG_UNREGISTER: |
692 | unreg_event_syscall_enter(event); | 702 | unreg_event_syscall_enter(file, event); |
693 | return 0; | 703 | return 0; |
694 | 704 | ||
695 | #ifdef CONFIG_PERF_EVENTS | 705 | #ifdef CONFIG_PERF_EVENTS |
@@ -711,11 +721,13 @@ static int syscall_enter_register(struct ftrace_event_call *event, | |||
711 | static int syscall_exit_register(struct ftrace_event_call *event, | 721 | static int syscall_exit_register(struct ftrace_event_call *event, |
712 | enum trace_reg type, void *data) | 722 | enum trace_reg type, void *data) |
713 | { | 723 | { |
724 | struct ftrace_event_file *file = data; | ||
725 | |||
714 | switch (type) { | 726 | switch (type) { |
715 | case TRACE_REG_REGISTER: | 727 | case TRACE_REG_REGISTER: |
716 | return reg_event_syscall_exit(event); | 728 | return reg_event_syscall_exit(file, event); |
717 | case TRACE_REG_UNREGISTER: | 729 | case TRACE_REG_UNREGISTER: |
718 | unreg_event_syscall_exit(event); | 730 | unreg_event_syscall_exit(file, event); |
719 | return 0; | 731 | return 0; |
720 | 732 | ||
721 | #ifdef CONFIG_PERF_EVENTS | 733 | #ifdef CONFIG_PERF_EVENTS |