author     Linus Torvalds <torvalds@linux-foundation.org>  2010-02-28 13:20:25 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-02-28 13:20:25 -0500
commit     6556a6743549defc32e5f90ee2cb1ecd833a44c3 (patch)
tree       622306583d4a3c13235a8bfc012854c125c597f1 /kernel/trace/trace_syscalls.c
parent     e0d272429a34ff143bfa04ee8e29dd4eed2964c7 (diff)
parent     1dd2980d990068e20045b90c424518cc7f3657ff (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (172 commits)
perf_event, amd: Fix spinlock initialization
perf_event: Fix preempt warning in perf_clock()
perf tools: Flush maps on COMM events
perf_events, x86: Split PMU definitions into separate files
perf annotate: Handle samples not at objdump output addr boundaries
perf_events, x86: Remove superfluous MSR writes
perf_events: Simplify code by removing cpu argument to hw_perf_group_sched_in()
perf_events, x86: AMD event scheduling
perf_events: Add new start/stop PMU callbacks
perf_events: Report the MMAP pgoff value in bytes
perf annotate: Defer allocating sym_priv->hist array
perf symbols: Improve debugging information about symtab origins
perf top: Use a macro instead of a constant variable
perf symbols: Check the right return variable
perf/scripts: Tag syscall_name helper as not yet available
perf/scripts: Add perf-trace-python Documentation
perf/scripts: Remove unnecessary PyTuple resizes
perf/scripts: Add syscall tracing scripts
perf/scripts: Add Python scripting engine
perf/scripts: Remove check-perf-trace from listed scripts
...
Fix trivial conflict in tools/perf/util/probe-event.c
Diffstat (limited to 'kernel/trace/trace_syscalls.c')
-rw-r--r--  kernel/trace/trace_syscalls.c  |  76
1 file changed, 12 insertions(+), 64 deletions(-)
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index a1834dda85f4..cba47d7935cc 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -426,7 +426,7 @@ int __init init_ftrace_syscalls(void)
 }
 core_initcall(init_ftrace_syscalls);
 
-#ifdef CONFIG_EVENT_PROFILE
+#ifdef CONFIG_PERF_EVENTS
 
 static DECLARE_BITMAP(enabled_prof_enter_syscalls, NR_syscalls);
 static DECLARE_BITMAP(enabled_prof_exit_syscalls, NR_syscalls);
@@ -438,12 +438,9 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	struct syscall_metadata *sys_data;
 	struct syscall_trace_enter *rec;
 	unsigned long flags;
-	char *trace_buf;
-	char *raw_data;
 	int syscall_nr;
 	int rctx;
 	int size;
-	int cpu;
 
 	syscall_nr = syscall_get_nr(current, regs);
 	if (!test_bit(syscall_nr, enabled_prof_enter_syscalls))
@@ -462,37 +459,15 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 		      "profile buffer not large enough"))
 		return;
 
-	/* Protect the per cpu buffer, begin the rcu read side */
-	local_irq_save(flags);
-
-	rctx = perf_swevent_get_recursion_context();
-	if (rctx < 0)
-		goto end_recursion;
-
-	cpu = smp_processor_id();
-
-	trace_buf = rcu_dereference(perf_trace_buf);
-
-	if (!trace_buf)
-		goto end;
-
-	raw_data = per_cpu_ptr(trace_buf, cpu);
-
-	/* zero the dead bytes from align to not leak stack to user */
-	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+	rec = (struct syscall_trace_enter *)ftrace_perf_buf_prepare(size,
+				sys_data->enter_event->id, &rctx, &flags);
+	if (!rec)
+		return;
 
-	rec = (struct syscall_trace_enter *) raw_data;
-	tracing_generic_entry_update(&rec->ent, 0, 0);
-	rec->ent.type = sys_data->enter_event->id;
 	rec->nr = syscall_nr;
 	syscall_get_arguments(current, regs, 0, sys_data->nb_args,
 			       (unsigned long *)&rec->args);
-	perf_tp_event(sys_data->enter_event->id, 0, 1, rec, size);
-
-end:
-	perf_swevent_put_recursion_context(rctx);
-end_recursion:
-	local_irq_restore(flags);
+	ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags);
 }
 
 int prof_sysenter_enable(struct ftrace_event_call *call)
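
This hunk is the heart of the cleanup: the open-coded sequence (disable interrupts, enter the perf recursion context, locate the per-CPU buffer, zero the alignment padding, fill in the common entry fields) moves behind ftrace_perf_buf_prepare(), and the perf_tp_event() call plus the unwind labels collapse into ftrace_perf_buf_submit(). What the prepare side now encapsulates can be reconstructed from the deleted lines; the following is a sketch assembled from this diff, not copied from the tree, so the in-tree helper may differ in detail:

	/*
	 * Sketch of the buffer-prepare step, rebuilt from the lines this
	 * hunk deletes.  perf_trace_buf, perf_swevent_*_recursion_context()
	 * and tracing_generic_entry_update() are tracing-core internals
	 * used exactly as the removed code used them.
	 */
	static void *perf_buf_prepare_sketch(int size, unsigned short type,
					     int *rctxp, unsigned long *irq_flags)
	{
		char *trace_buf, *raw_data;
		struct trace_entry *ent;
		int rctx, cpu;

		/* Protect the per-cpu buffer, begin the RCU read side. */
		local_irq_save(*irq_flags);

		rctx = perf_swevent_get_recursion_context();
		if (rctx < 0)
			goto fail_restore;
		*rctxp = rctx;

		cpu = smp_processor_id();
		trace_buf = rcu_dereference(perf_trace_buf);
		if (!trace_buf)
			goto fail_put;

		raw_data = per_cpu_ptr(trace_buf, cpu);

		/* Zero the dead bytes from alignment to not leak stack to user. */
		*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

		/* Fill in the common entry header, as the callers used to. */
		ent = (struct trace_entry *)raw_data;
		tracing_generic_entry_update(ent, 0, 0);
		ent->type = type;

		return raw_data;

	fail_put:
		perf_swevent_put_recursion_context(rctx);
	fail_restore:
		local_irq_restore(*irq_flags);
		return NULL;
	}

With that factored out, a NULL return covers every failure mode the callers previously handled with goto labels, which is why both handlers shrink to a single "if (!rec) return;".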
@@ -536,11 +511,8 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	struct syscall_trace_exit *rec;
 	unsigned long flags;
 	int syscall_nr;
-	char *trace_buf;
-	char *raw_data;
 	int rctx;
 	int size;
-	int cpu;
 
 	syscall_nr = syscall_get_nr(current, regs);
 	if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
@@ -562,38 +534,15 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 		"exit event has grown above profile buffer size"))
 		return;
 
-	/* Protect the per cpu buffer, begin the rcu read side */
-	local_irq_save(flags);
-
-	rctx = perf_swevent_get_recursion_context();
-	if (rctx < 0)
-		goto end_recursion;
-
-	cpu = smp_processor_id();
-
-	trace_buf = rcu_dereference(perf_trace_buf);
-
-	if (!trace_buf)
-		goto end;
-
-	raw_data = per_cpu_ptr(trace_buf, cpu);
-
-	/* zero the dead bytes from align to not leak stack to user */
-	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
-
-	rec = (struct syscall_trace_exit *)raw_data;
+	rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size,
+				sys_data->exit_event->id, &rctx, &flags);
+	if (!rec)
+		return;
 
-	tracing_generic_entry_update(&rec->ent, 0, 0);
-	rec->ent.type = sys_data->exit_event->id;
 	rec->nr = syscall_nr;
 	rec->ret = syscall_get_return_value(current, regs);
 
-	perf_tp_event(sys_data->exit_event->id, 0, 1, rec, size);
-
-end:
-	perf_swevent_put_recursion_context(rctx);
-end_recursion:
-	local_irq_restore(flags);
+	ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags);
 }
 
 int prof_sysexit_enable(struct ftrace_event_call *call)
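
The exit path gets the same treatment, and the deleted tail of this hunk shows exactly what ftrace_perf_buf_submit() takes over: reporting the record through perf_tp_event(), then releasing the recursion context and restoring interrupts. A minimal sketch of the submit side, again reconstructed from the deleted lines (the event id is recovered from the entry header that the prepare step filled in; the real helper may differ in detail):

	/*
	 * Sketch of the submit step, rebuilt from the unwind code this
	 * hunk deletes.  The call sites above pass addr=0, count=1, the
	 * same values previously given to perf_tp_event() directly.
	 */
	static void perf_buf_submit_sketch(void *raw_data, int size, int rctx,
					   u64 addr, u64 count,
					   unsigned long irq_flags)
	{
		struct trace_entry *ent = raw_data;

		/* Hand the filled record to perf. */
		perf_tp_event(ent->type, addr, count, raw_data, size);

		/* Release the recursion context and end the RCU read side. */
		perf_swevent_put_recursion_context(rctx);
		local_irq_restore(irq_flags);
	}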
@@ -631,6 +580,5 @@ void prof_sysexit_disable(struct ftrace_event_call *call)
 	mutex_unlock(&syscall_trace_lock);
 }
 
-#endif
-
+#endif /* CONFIG_PERF_EVENTS */
 
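
Taken together, the conversion leaves both handlers with the same short prepare/fill/submit shape. Assembled from the context and '+' lines above, with the sys_data lookup and the buffer-size check elided since this merge leaves them unchanged, prof_syscall_exit() now reads roughly:

	static void prof_syscall_exit(struct pt_regs *regs, long ret)
	{
		struct syscall_metadata *sys_data;
		struct syscall_trace_exit *rec;
		unsigned long flags;
		int syscall_nr;
		int rctx;
		int size;

		syscall_nr = syscall_get_nr(current, regs);
		if (!test_bit(syscall_nr, enabled_prof_exit_syscalls))
			return;

		/* ... look up sys_data for syscall_nr and compute/validate size ... */

		rec = (struct syscall_trace_exit *)ftrace_perf_buf_prepare(size,
				sys_data->exit_event->id, &rctx, &flags);
		if (!rec)
			return;

		rec->nr = syscall_nr;
		rec->ret = syscall_get_return_value(current, regs);

		ftrace_perf_buf_submit(rec, size, rctx, 0, 1, flags);
	}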