Diffstat (limited to 'include/trace/ftrace.h')
-rw-r--r--	include/trace/ftrace.h	| 39
1 file changed, 30 insertions, 9 deletions
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index a7f946094128..4945d1c99864 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -649,6 +649,7 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
  * struct ftrace_event_call *event_call = &event_<call>;
  * extern void perf_tp_event(int, u64, u64, void *, int);
  * struct ftrace_raw_##call *entry;
+ * struct perf_trace_buf *trace_buf;
  * u64 __addr = 0, __count = 1;
  * unsigned long irq_flags;
  * struct trace_entry *ent;
@@ -673,14 +674,25 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
  * __cpu = smp_processor_id();
  *
  * if (in_nmi())
- *	raw_data = rcu_dereference(trace_profile_buf_nmi);
+ *	trace_buf = rcu_dereference(perf_trace_buf_nmi);
  * else
- *	raw_data = rcu_dereference(trace_profile_buf);
+ *	trace_buf = rcu_dereference(perf_trace_buf);
  *
- * if (!raw_data)
+ * if (!trace_buf)
  *	goto end;
  *
- * raw_data = per_cpu_ptr(raw_data, __cpu);
+ * trace_buf = per_cpu_ptr(trace_buf, __cpu);
+ *
+ * // Avoid recursion from perf that could mess up the buffer
+ * if (trace_buf->recursion++)
+ *	goto end_recursion;
+ *
+ * raw_data = trace_buf->buf;
+ *
+ * // Make recursion update visible before entering perf_tp_event
+ * // so that we protect from perf recursions.
+ *
+ * barrier();
  *
  * //zero dead bytes from alignment to avoid stack leak to userspace:
  * *(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;
@@ -713,8 +725,9 @@ static void ftrace_profile_##call(proto)			\
 {									\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
 	struct ftrace_event_call *event_call = &event_##call;		\
 	extern void perf_tp_event(int, u64, u64, void *, int);		\
 	struct ftrace_raw_##call *entry;				\
+	struct perf_trace_buf *trace_buf;				\
 	u64 __addr = 0, __count = 1;					\
 	unsigned long irq_flags;					\
 	struct trace_entry *ent;					\
@@ -739,14 +752,20 @@ static void ftrace_profile_##call(proto)		\
 	__cpu = smp_processor_id();					\
 									\
 	if (in_nmi())							\
-		raw_data = rcu_dereference(trace_profile_buf_nmi);	\
+		trace_buf = rcu_dereference(perf_trace_buf_nmi);	\
 	else								\
-		raw_data = rcu_dereference(trace_profile_buf);		\
+		trace_buf = rcu_dereference(perf_trace_buf);		\
 									\
-	if (!raw_data)							\
+	if (!trace_buf)							\
 		goto end;						\
 									\
-	raw_data = per_cpu_ptr(raw_data, __cpu);			\
+	trace_buf = per_cpu_ptr(trace_buf, __cpu);			\
+	if (trace_buf->recursion++)					\
+		goto end_recursion;					\
+									\
+	barrier();							\
+									\
+	raw_data = trace_buf->buf;					\
 									\
 	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
 	entry = (struct ftrace_raw_##call *)raw_data;			\
@@ -761,6 +780,8 @@ static void ftrace_profile_##call(proto)		\
 	perf_tp_event(event_call->id, __addr, __count, entry,		\
 		       __entry_size);					\
 									\
+end_recursion:								\
+	trace_buf->recursion--;						\
 end:									\
 	local_irq_restore(irq_flags);					\
 									\
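
The recursion guard this patch adds is easier to see outside the macro expansion. The following is a minimal, self-contained C sketch of the same pattern: increment a per-buffer recursion counter, bail out if it was already non-zero, place a compiler barrier between the counter update and the buffer use, and rebalance the counter on every exit path. All names here (my_trace_buf, my_event_handler, BUF_SIZE) are illustrative, not kernel APIs; the real code is generated by the ftrace_profile_##call() macro and locates its per-CPU buffer with rcu_dereference() and per_cpu_ptr().

/*
 * Sketch of the recursion protection from the patch above,
 * assuming a single buffer instead of one per CPU.
 * Builds with gcc/clang (the barrier uses GNU inline asm).
 */
#include <stdio.h>

#define BUF_SIZE 256

struct my_trace_buf {
	int  recursion;		/* non-zero while buf is in use */
	char buf[BUF_SIZE];	/* scratch space for one event record */
};

static struct my_trace_buf trace_buf;	/* one instance per CPU in the kernel */

static void my_event_handler(const char *payload)
{
	/*
	 * If recursion was already non-zero, an event on this CPU is
	 * still building a record in buf; writing into it again would
	 * corrupt the half-built record, so drop the nested event.
	 */
	if (trace_buf.recursion++)
		goto end_recursion;

	/*
	 * Compiler barrier, like barrier() in the patch: keep the
	 * recursion update from being reordered past the buffer use.
	 */
	__asm__ __volatile__("" : : : "memory");

	snprintf(trace_buf.buf, sizeof(trace_buf.buf), "%s", payload);
	printf("handled event: %s\n", trace_buf.buf);

end_recursion:
	trace_buf.recursion--;	/* every path that incremented decrements */
}

int main(void)
{
	my_event_handler("sample event");
	return 0;
}

Note how this mirrors the two labels in the patch: the failure path where the buffer pointer is NULL jumps straight to end: (nothing to undo, since the counter was never touched), while both the normal path and the recursive path pass through end_recursion: so the counter is always rebalanced.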