author		Frederic Weisbecker <fweisbec@gmail.com>	2009-11-21 23:26:55 -0500
committer	Ingo Molnar <mingo@elte.hu>			2009-11-22 03:03:42 -0500
commit		ce71b9df8893ec954e56c5979df6da274f20f65e (patch)
tree		76e8a5e33393c2f4fca4083628fc142dcbb55250 /include
parent		e25613683bd5c46d3e8c8ae6416dccc9f357dcdc (diff)
tracing: Use the perf recursion protection from trace event
When we commit a trace to perf, we first check if we are recursing in the
same buffer so that we don't mess up the buffer with a recursing trace.
But later on, we do the same check from perf to avoid commit recursion.
The recursion check is desired early, before we touch the buffer, but we
want to do this check only once.

Then export the recursion protection from perf and use it from the trace
events before submitting a trace.

v2: Put appropriate Reported-by tag

Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Jason Baron <jbaron@redhat.com>
LKML-Reference: <1258864015-10579-1-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
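The exported protection follows a get/put pattern. As a minimal sketch of a
caller (not part of this commit; submit_trace() is a hypothetical event
writer), assuming the signatures declared in the diff below:

	static void example_profile_handler(void)
	{
		int *recursion;

		/*
		 * A non-zero return means we are already committing a trace
		 * in this context; drop the event rather than corrupt the
		 * buffer.
		 */
		if (perf_swevent_get_recursion_context(&recursion))
			return;

		submit_trace();	/* touch the buffer only while protected */

		perf_swevent_put_recursion_context(recursion);
	}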
Diffstat (limited to 'include')
-rw-r--r--	include/linux/ftrace_event.h	9
-rw-r--r--	include/linux/perf_event.h	4
-rw-r--r--	include/trace/ftrace.h		23
3 files changed, 18 insertions(+), 18 deletions(-)
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 43360c1d8f70..47bbdf9c38d0 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -137,13 +137,8 @@ struct ftrace_event_call {
 
 #define FTRACE_MAX_PROFILE_SIZE	2048
 
-struct perf_trace_buf {
-	char	buf[FTRACE_MAX_PROFILE_SIZE];
-	int	recursion;
-};
-
-extern struct perf_trace_buf	*perf_trace_buf;
-extern struct perf_trace_buf	*perf_trace_buf_nmi;
+extern char *perf_trace_buf;
+extern char *perf_trace_buf_nmi;
 
 #define MAX_FILTER_PRED		32
 #define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 36fe89f72641..74e98b1d3391 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -874,6 +874,8 @@ extern int perf_output_begin(struct perf_output_handle *handle,
 extern void perf_output_end(struct perf_output_handle *handle);
 extern void perf_output_copy(struct perf_output_handle *handle,
 			     const void *buf, unsigned int len);
+extern int perf_swevent_get_recursion_context(int **recursion);
+extern void perf_swevent_put_recursion_context(int *recursion);
 #else
 static inline void
 perf_event_task_sched_in(struct task_struct *task, int cpu)		{ }
@@ -902,6 +904,8 @@ static inline void perf_event_mmap(struct vm_area_struct *vma)	{ }
 static inline void perf_event_comm(struct task_struct *tsk)		{ }
 static inline void perf_event_fork(struct task_struct *tsk)		{ }
 static inline void perf_event_init(void)				{ }
+static int perf_swevent_get_recursion_context(int **recursion)		{ return -1; }
+static void perf_swevent_put_recursion_context(int *recursion)		{ }
 
 #endif
 
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 4945d1c99864..c222ef5238bf 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -724,16 +724,19 @@ __attribute__((section("_ftrace_events"))) event_##call = {	\
 static void ftrace_profile_##call(proto)				\
 {									\
 	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
+	extern int perf_swevent_get_recursion_context(int **recursion); \
+	extern void perf_swevent_put_recursion_context(int *recursion); \
 	struct ftrace_event_call *event_call = &event_##call;		\
 	extern void perf_tp_event(int, u64, u64, void *, int);		\
 	struct ftrace_raw_##call *entry;				\
-	struct perf_trace_buf *trace_buf;				\
 	u64 __addr = 0, __count = 1;					\
 	unsigned long irq_flags;					\
 	struct trace_entry *ent;					\
 	int __entry_size;						\
 	int __data_size;						\
+	char *trace_buf;						\
 	char *raw_data;							\
+	int *recursion;							\
 	int __cpu;							\
 	int pc;								\
 									\
@@ -749,6 +752,10 @@ static void ftrace_profile_##call(proto)		\
 		return;							\
 									\
 	local_irq_save(irq_flags);					\
+									\
+	if (perf_swevent_get_recursion_context(&recursion))		\
+		goto end_recursion;					\
+									\
 	__cpu = smp_processor_id();					\
 									\
 	if (in_nmi())							\
@@ -759,13 +766,7 @@ static void ftrace_profile_##call(proto)		\
 	if (!trace_buf)							\
 		goto end;						\
 									\
-	trace_buf = per_cpu_ptr(trace_buf, __cpu);			\
-	if (trace_buf->recursion++)					\
-		goto end_recursion;					\
-									\
-	barrier();							\
-									\
-	raw_data = trace_buf->buf;					\
+	raw_data = per_cpu_ptr(trace_buf, __cpu);			\
 									\
 	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
 	entry = (struct ftrace_raw_##call *)raw_data;			\
@@ -780,9 +781,9 @@ static void ftrace_profile_##call(proto)		\
 	perf_tp_event(event_call->id, __addr, __count, entry,		\
 		      __entry_size);					\
 									\
-end_recursion:								\
-	trace_buf->recursion--;						\
-end:									\
+end:									\
+	perf_swevent_put_recursion_context(recursion);			\
+end_recursion:								\
 	local_irq_restore(irq_flags);					\
 									\
 }
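For readability, the control flow generated by the rewritten
ftrace_profile_##call() macro reduces to the following sketch (names
simplified and the entry fill elided; this is not the literal expansion):

	static void ftrace_profile_example(void)
	{
		unsigned long irq_flags;
		char *trace_buf, *raw_data;
		int *recursion;

		local_irq_save(irq_flags);

		/* One early recursion check, before the buffer is touched. */
		if (perf_swevent_get_recursion_context(&recursion))
			goto end_recursion;

		trace_buf = in_nmi() ? perf_trace_buf_nmi : perf_trace_buf;
		if (!trace_buf)
			goto end;

		raw_data = per_cpu_ptr(trace_buf, smp_processor_id());

		/* ... build the entry in raw_data, call perf_tp_event() ... */

	end:
		perf_swevent_put_recursion_context(recursion);
	end_recursion:
		local_irq_restore(irq_flags);
	}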