diff options
author | Frederic Weisbecker <fweisbec@gmail.com> | 2009-11-21 23:26:55 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-11-22 03:03:42 -0500 |
commit | ce71b9df8893ec954e56c5979df6da274f20f65e (patch) | |
tree | 76e8a5e33393c2f4fca4083628fc142dcbb55250 /kernel/perf_event.c | |
parent | e25613683bd5c46d3e8c8ae6416dccc9f357dcdc (diff) |
tracing: Use the perf recursion protection from trace event
When we commit a trace to perf, we first check if we are
recursing in the same buffer so that we don't mess up the buffer
with a recursive trace. But later on, we do the same check from
perf to avoid commit recursion. The recursion check is desired
early, before we touch the buffer, but we want to do this check
only once.
Then export the recursion protection from perf and use it from
the trace events before submitting a trace.
v2: Put appropriate Reported-by tag
Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Jason Baron <jbaron@redhat.com>
LKML-Reference: <1258864015-10579-1-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r-- | kernel/perf_event.c | 68 |
1 files changed, 45 insertions, 23 deletions
diff --git a/kernel/perf_event.c b/kernel/perf_event.c index 718fa939b1a7..aba822722300 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c | |||
@@ -3880,34 +3880,42 @@ static void perf_swevent_ctx_event(struct perf_event_context *ctx, | |||
3880 | } | 3880 | } |
3881 | } | 3881 | } |
3882 | 3882 | ||
3883 | static int *perf_swevent_recursion_context(struct perf_cpu_context *cpuctx) | 3883 | /* |
3884 | * Must be called with preemption disabled | ||
3885 | */ | ||
3886 | int perf_swevent_get_recursion_context(int **recursion) | ||
3884 | { | 3887 | { |
3888 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
3889 | |||
3885 | if (in_nmi()) | 3890 | if (in_nmi()) |
3886 | return &cpuctx->recursion[3]; | 3891 | *recursion = &cpuctx->recursion[3]; |
3892 | else if (in_irq()) | ||
3893 | *recursion = &cpuctx->recursion[2]; | ||
3894 | else if (in_softirq()) | ||
3895 | *recursion = &cpuctx->recursion[1]; | ||
3896 | else | ||
3897 | *recursion = &cpuctx->recursion[0]; | ||
3887 | 3898 | ||
3888 | if (in_irq()) | 3899 | if (**recursion) |
3889 | return &cpuctx->recursion[2]; | 3900 | return -1; |
3890 | 3901 | ||
3891 | if (in_softirq()) | 3902 | (**recursion)++; |
3892 | return &cpuctx->recursion[1]; | ||
3893 | 3903 | ||
3894 | return &cpuctx->recursion[0]; | 3904 | return 0; |
3895 | } | 3905 | } |
3896 | 3906 | ||
3897 | static void do_perf_sw_event(enum perf_type_id type, u32 event_id, | 3907 | void perf_swevent_put_recursion_context(int *recursion) |
3898 | u64 nr, int nmi, | ||
3899 | struct perf_sample_data *data, | ||
3900 | struct pt_regs *regs) | ||
3901 | { | 3908 | { |
3902 | struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context); | 3909 | (*recursion)--; |
3903 | int *recursion = perf_swevent_recursion_context(cpuctx); | 3910 | } |
3904 | struct perf_event_context *ctx; | ||
3905 | |||
3906 | if (*recursion) | ||
3907 | goto out; | ||
3908 | 3911 | ||
3909 | (*recursion)++; | 3912 | static void __do_perf_sw_event(enum perf_type_id type, u32 event_id, |
3910 | barrier(); | 3913 | u64 nr, int nmi, |
3914 | struct perf_sample_data *data, | ||
3915 | struct pt_regs *regs) | ||
3916 | { | ||
3917 | struct perf_event_context *ctx; | ||
3918 | struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context); | ||
3911 | 3919 | ||
3912 | rcu_read_lock(); | 3920 | rcu_read_lock(); |
3913 | perf_swevent_ctx_event(&cpuctx->ctx, type, event_id, | 3921 | perf_swevent_ctx_event(&cpuctx->ctx, type, event_id, |
@@ -3920,12 +3928,25 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id, | |||
3920 | if (ctx) | 3928 | if (ctx) |
3921 | perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs); | 3929 | perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs); |
3922 | rcu_read_unlock(); | 3930 | rcu_read_unlock(); |
3931 | } | ||
3923 | 3932 | ||
3924 | barrier(); | 3933 | static void do_perf_sw_event(enum perf_type_id type, u32 event_id, |
3925 | (*recursion)--; | 3934 | u64 nr, int nmi, |
3935 | struct perf_sample_data *data, | ||
3936 | struct pt_regs *regs) | ||
3937 | { | ||
3938 | int *recursion; | ||
3939 | |||
3940 | preempt_disable(); | ||
3941 | |||
3942 | if (perf_swevent_get_recursion_context(&recursion)) | ||
3943 | goto out; | ||
3944 | |||
3945 | __do_perf_sw_event(type, event_id, nr, nmi, data, regs); | ||
3926 | 3946 | ||
3947 | perf_swevent_put_recursion_context(recursion); | ||
3927 | out: | 3948 | out: |
3928 | put_cpu_var(perf_cpu_context); | 3949 | preempt_enable(); |
3929 | } | 3950 | } |
3930 | 3951 | ||
3931 | void __perf_sw_event(u32 event_id, u64 nr, int nmi, | 3952 | void __perf_sw_event(u32 event_id, u64 nr, int nmi, |
@@ -4159,7 +4180,8 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record, | |||
4159 | if (!regs) | 4180 | if (!regs) |
4160 | regs = task_pt_regs(current); | 4181 | regs = task_pt_regs(current); |
4161 | 4182 | ||
4162 | do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, | 4183 | /* Trace events already protected against recursion */ |
4184 | __do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, | ||
4163 | &data, regs); | 4185 | &data, regs); |
4164 | } | 4186 | } |
4165 | EXPORT_SYMBOL_GPL(perf_tp_event); | 4187 | EXPORT_SYMBOL_GPL(perf_tp_event); |