author	Frederic Weisbecker <fweisbec@gmail.com>	2009-11-21 23:26:55 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-11-22 03:03:42 -0500
commit	ce71b9df8893ec954e56c5979df6da274f20f65e
tree	76e8a5e33393c2f4fca4083628fc142dcbb55250	/kernel/trace/trace_syscalls.c
parent	e25613683bd5c46d3e8c8ae6416dccc9f357dcdc
tracing: Use the perf recursion protection from trace event
When we commit a trace to perf, we first check if we are recursing in the same buffer so that we don't mess up the buffer with a recursing trace. But later on, we do the same check from perf to avoid commit recursion. The recursion check is desired early, before we touch the buffer, but we want to do this check only once.

Then export the recursion protection from perf and use it from the trace events before submitting a trace.

v2: Put appropriate Reported-by tag

Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Jason Baron <jbaron@redhat.com>
LKML-Reference: <1258864015-10579-1-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
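The resulting call pattern in a tracepoint profile handler is roughly the following minimal sketch, based on the hunks below. Here my_event_id, my_trace_buf and the record size are illustrative placeholders, not real kernel symbols; the int *recursion cookie follows the perf_swevent_get_recursion_context()/perf_swevent_put_recursion_context() interface exactly as this patch uses it.

#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/irqflags.h>

static char *my_trace_buf;	/* hypothetical per-cpu buffer, allocated elsewhere */
static int my_event_id;		/* hypothetical trace event id */

static void prof_my_event(struct pt_regs *regs)
{
	unsigned long flags;
	char *raw_data;
	int *recursion;
	int size = 64;		/* illustrative record size */

	/* Keep the per-cpu buffer stable while we use it */
	local_irq_save(flags);

	/* Single recursion check, taken once before the buffer is touched */
	if (perf_swevent_get_recursion_context(&recursion))
		goto end_recursion;

	raw_data = per_cpu_ptr(my_trace_buf, smp_processor_id());

	/* ... fill raw_data with the trace record ... */

	perf_tp_event(my_event_id, 0, 1, raw_data, size);

	perf_swevent_put_recursion_context(recursion);
end_recursion:
	local_irq_restore(flags);
}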
Diffstat (limited to 'kernel/trace/trace_syscalls.c')
-rw-r--r--	kernel/trace/trace_syscalls.c	47
1 file changed, 17 insertions(+), 30 deletions(-)
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 51213b0aa81b..0bb934875263 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -477,10 +477,11 @@ static int sys_prof_refcount_exit;
 static void prof_syscall_enter(struct pt_regs *regs, long id)
 {
 	struct syscall_metadata *sys_data;
-	struct perf_trace_buf *trace_buf;
 	struct syscall_trace_enter *rec;
 	unsigned long flags;
+	char *trace_buf;
 	char *raw_data;
+	int *recursion;
 	int syscall_nr;
 	int size;
 	int cpu;
@@ -505,6 +506,9 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	/* Protect the per cpu buffer, begin the rcu read side */
 	local_irq_save(flags);
 
+	if (perf_swevent_get_recursion_context(&recursion))
+		goto end_recursion;
+
 	cpu = smp_processor_id();
 
 	if (in_nmi())
@@ -515,18 +519,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	if (!trace_buf)
 		goto end;
 
-	trace_buf = per_cpu_ptr(trace_buf, cpu);
-
-	if (trace_buf->recursion++)
-		goto end_recursion;
-
-	/*
-	 * Make recursion update visible before entering perf_tp_event
-	 * so that we protect from perf recursions.
-	 */
-	barrier();
-
-	raw_data = trace_buf->buf;
+	raw_data = per_cpu_ptr(trace_buf, cpu);
 
 	/* zero the dead bytes from align to not leak stack to user */
 	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
@@ -539,9 +532,9 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 			       (unsigned long *)&rec->args);
 	perf_tp_event(sys_data->enter_id, 0, 1, rec, size);
 
-end_recursion:
-	trace_buf->recursion--;
 end:
+	perf_swevent_put_recursion_context(recursion);
+end_recursion:
 	local_irq_restore(flags);
 }
 
@@ -588,10 +581,11 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 {
 	struct syscall_metadata *sys_data;
 	struct syscall_trace_exit *rec;
-	struct perf_trace_buf *trace_buf;
 	unsigned long flags;
 	int syscall_nr;
+	char *trace_buf;
 	char *raw_data;
+	int *recursion;
 	int size;
 	int cpu;
 
@@ -617,6 +611,10 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 
 	/* Protect the per cpu buffer, begin the rcu read side */
 	local_irq_save(flags);
+
+	if (perf_swevent_get_recursion_context(&recursion))
+		goto end_recursion;
+
 	cpu = smp_processor_id();
 
 	if (in_nmi())
@@ -627,18 +625,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	if (!trace_buf)
 		goto end;
 
-	trace_buf = per_cpu_ptr(trace_buf, cpu);
-
-	if (trace_buf->recursion++)
-		goto end_recursion;
-
-	/*
-	 * Make recursion update visible before entering perf_tp_event
-	 * so that we protect from perf recursions.
-	 */
-	barrier();
-
-	raw_data = trace_buf->buf;
+	raw_data = per_cpu_ptr(trace_buf, cpu);
 
 	/* zero the dead bytes from align to not leak stack to user */
 	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
@@ -652,9 +639,9 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 
 	perf_tp_event(sys_data->exit_id, 0, 1, rec, size);
 
-end_recursion:
-	trace_buf->recursion--;
 end:
+	perf_swevent_put_recursion_context(recursion);
+end_recursion:
 	local_irq_restore(flags);
 }
 