author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-11-23 05:37:29 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-11-23 05:49:57 -0500
commit		4ed7c92d68a5387ba5f7030dc76eab03558e27f5 (patch)
tree		38d758819c41261275c2dbd6f64976f824c5fd27 /kernel/trace/trace_syscalls.c
parent		f67218c3e93abaf0f480bb94b53d234853ffe4de (diff)
perf_events: Undo some recursion damage
Make perf_swevent_get_recursion_context return a context number
and disable preemption.
This could be used to remove the IRQ disable from the trace bit
and index the per-cpu buffer with.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Paul Mackerras <paulus@samba.org>
LKML-Reference: <20091123103819.993226816@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
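For reference, the prototype change implied by the call sites in the diff below is roughly the following. This is a sketch inferred from this file only; the actual declarations live elsewhere in the perf headers and are not part of this hunk:

	/* Before (inferred from the old call sites): the helper filled in a
	 * pointer to the recursion counter and returned non-zero when
	 * recursion was detected. */
	int  perf_swevent_get_recursion_context(int **recursion);
	void perf_swevent_put_recursion_context(int *recursion);

	/* After: the helper returns the context number directly (negative on
	 * recursion) and the caller hands that number back when done. */
	int  perf_swevent_get_recursion_context(void);
	void perf_swevent_put_recursion_context(int rctx);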
Diffstat (limited to 'kernel/trace/trace_syscalls.c')
-rw-r--r--	kernel/trace/trace_syscalls.c	14
1 files changed, 8 insertions, 6 deletions
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 41b6dd963daa..9189cbe86079 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -481,8 +481,8 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	unsigned long flags;
 	char *trace_buf;
 	char *raw_data;
-	int *recursion;
 	int syscall_nr;
+	int rctx;
 	int size;
 	int cpu;
 
@@ -506,7 +506,8 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	/* Protect the per cpu buffer, begin the rcu read side */
 	local_irq_save(flags);
 
-	if (perf_swevent_get_recursion_context(&recursion))
+	rctx = perf_swevent_get_recursion_context();
+	if (rctx < 0)
 		goto end_recursion;
 
 	cpu = smp_processor_id();
@@ -530,7 +531,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	perf_tp_event(sys_data->enter_id, 0, 1, rec, size);
 
 end:
-	perf_swevent_put_recursion_context(recursion);
+	perf_swevent_put_recursion_context(rctx);
 end_recursion:
 	local_irq_restore(flags);
 }
@@ -582,7 +583,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	int syscall_nr;
 	char *trace_buf;
 	char *raw_data;
-	int *recursion;
+	int rctx;
 	int size;
 	int cpu;
 
@@ -609,7 +610,8 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	/* Protect the per cpu buffer, begin the rcu read side */
 	local_irq_save(flags);
 
-	if (perf_swevent_get_recursion_context(&recursion))
+	rctx = perf_swevent_get_recursion_context();
+	if (rctx < 0)
 		goto end_recursion;
 
 	cpu = smp_processor_id();
@@ -634,7 +636,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	perf_tp_event(sys_data->exit_id, 0, 1, rec, size);
 
 end:
-	perf_swevent_put_recursion_context(recursion);
+	perf_swevent_put_recursion_context(rctx);
 end_recursion:
 	local_irq_restore(flags);
 }
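Both hunks converge on the same calling pattern. Below is a minimal sketch of that pattern, assuming the perf_swevent_{get,put}_recursion_context() prototypes introduced by this series; do_trace_event() is a hypothetical placeholder for the record setup and perf_tp_event() call performed by prof_syscall_enter()/prof_syscall_exit(), and the IRQ disabling around the per-cpu buffer is kept as in the code above:

	static void example_swevent_handler(void)
	{
		unsigned long flags;
		int rctx;

		/* Protect the per cpu buffer, begin the rcu read side */
		local_irq_save(flags);

		/* Negative return means recursion was detected: drop the event. */
		rctx = perf_swevent_get_recursion_context();
		if (rctx < 0)
			goto out;

		/* Hypothetical: build the trace record and submit it to perf. */
		do_trace_event();

		/* Hand the context number back once the buffer is no longer used. */
		perf_swevent_put_recursion_context(rctx);
	out:
		local_irq_restore(flags);
	}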