author		Frederic Weisbecker <fweisbec@gmail.com>	2009-11-21 23:26:55 -0500
committer	Ingo Molnar <mingo@elte.hu>			2009-11-22 03:03:42 -0500
commit		ce71b9df8893ec954e56c5979df6da274f20f65e (patch)
tree		76e8a5e33393c2f4fca4083628fc142dcbb55250
parent		e25613683bd5c46d3e8c8ae6416dccc9f357dcdc (diff)
tracing: Use the perf recursion protection from trace event
When we commit a trace to perf, we first check if we are recursing in the same buffer so that we don't mess up the buffer with a recursing trace. But later on, we do the same check from perf to avoid commit recursion. The recursion check is desired early, before we touch the buffer, but we want to do this check only once.

Then export the recursion protection from perf and use it from the trace events before submitting a trace.

v2: Put appropriate Reported-by tag

Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Jason Baron <jbaron@redhat.com>
LKML-Reference: <1258864015-10579-1-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
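For orientation, here is a minimal sketch of the calling pattern the profile handlers below converge on. It is a simplification of the hunks in this patch, not code from the tree: the function name is made up, the rcu handling around perf_trace_buf and the construction of the trace entry are elided, and the usual kernel headers plus the declarations touched by this patch (perf_trace_buf, perf_trace_buf_nmi, perf_swevent_get/put_recursion_context, perf_tp_event) are assumed.

static void profile_handler_sketch(int event_id, int size)
{
	unsigned long irq_flags;
	char *trace_buf, *raw_data;
	int *recursion;

	local_irq_save(irq_flags);

	/* Refuse to nest: bail out if this context level is already committing. */
	if (perf_swevent_get_recursion_context(&recursion))
		goto end_recursion;

	/* Pick the regular or NMI per-cpu scratch buffer (now a flat char array). */
	trace_buf = in_nmi() ? perf_trace_buf_nmi : perf_trace_buf;
	if (!trace_buf)
		goto end;

	raw_data = per_cpu_ptr(trace_buf, smp_processor_id());

	/* ... build the raw trace record in raw_data (size bytes) ... */

	perf_tp_event(event_id, 0, 1, raw_data, size);

end:
	perf_swevent_put_recursion_context(recursion);
end_recursion:
	local_irq_restore(irq_flags);
}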
-rw-r--r--	include/linux/ftrace_event.h		|  9
-rw-r--r--	include/linux/perf_event.h		|  4
-rw-r--r--	include/trace/ftrace.h			| 23
-rw-r--r--	kernel/perf_event.c			| 68
-rw-r--r--	kernel/trace/trace_event_profile.c	| 14
-rw-r--r--	kernel/trace/trace_kprobe.c		| 48
-rw-r--r--	kernel/trace/trace_syscalls.c		| 47
7 files changed, 106 insertions, 107 deletions
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 43360c1d8f70..47bbdf9c38d0 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -137,13 +137,8 @@ struct ftrace_event_call {
 
 #define FTRACE_MAX_PROFILE_SIZE	2048
 
-struct perf_trace_buf {
-	char	buf[FTRACE_MAX_PROFILE_SIZE];
-	int	recursion;
-};
-
-extern struct perf_trace_buf *perf_trace_buf;
-extern struct perf_trace_buf *perf_trace_buf_nmi;
+extern char *perf_trace_buf;
+extern char *perf_trace_buf_nmi;
 
 #define MAX_FILTER_PRED		32
 #define MAX_FILTER_STR_VAL	256	/* Should handle KSYM_SYMBOL_LEN */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 36fe89f72641..74e98b1d3391 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -874,6 +874,8 @@ extern int perf_output_begin(struct perf_output_handle *handle,
 extern void perf_output_end(struct perf_output_handle *handle);
 extern void perf_output_copy(struct perf_output_handle *handle,
			      const void *buf, unsigned int len);
+extern int perf_swevent_get_recursion_context(int **recursion);
+extern void perf_swevent_put_recursion_context(int *recursion);
 #else
 static inline void
 perf_event_task_sched_in(struct task_struct *task, int cpu)		{ }
@@ -902,6 +904,8 @@ static inline void perf_event_mmap(struct vm_area_struct *vma) { }
 static inline void perf_event_comm(struct task_struct *tsk)		{ }
 static inline void perf_event_fork(struct task_struct *tsk)		{ }
 static inline void perf_event_init(void)				{ }
+static int  perf_swevent_get_recursion_context(int **recursion)	{ return -1; }
+static void perf_swevent_put_recursion_context(int *recursion)		{ }
 
 #endif
 
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 4945d1c99864..c222ef5238bf 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -724,16 +724,19 @@ __attribute__((section("_ftrace_events"))) event_##call = { \
 static void ftrace_profile_##call(proto)				\
 {									\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
+	extern int perf_swevent_get_recursion_context(int **recursion); \
+	extern void perf_swevent_put_recursion_context(int *recursion); \
	struct ftrace_event_call *event_call = &event_##call;		\
	extern void perf_tp_event(int, u64, u64, void *, int);		\
	struct ftrace_raw_##call *entry;				\
-	struct perf_trace_buf *trace_buf;				\
	u64 __addr = 0, __count = 1;					\
	unsigned long irq_flags;					\
	struct trace_entry *ent;					\
	int __entry_size;						\
	int __data_size;						\
+	char *trace_buf;						\
	char *raw_data;							\
+	int *recursion;							\
	int __cpu;							\
	int pc;								\
									\
@@ -749,6 +752,10 @@ static void ftrace_profile_##call(proto)			\
		return;							\
									\
	local_irq_save(irq_flags);					\
+									\
+	if (perf_swevent_get_recursion_context(&recursion))		\
+		goto end_recursion;					\
+									\
	__cpu = smp_processor_id();					\
									\
	if (in_nmi())							\
@@ -759,13 +766,7 @@ static void ftrace_profile_##call(proto)			\
	if (!trace_buf)							\
		goto end;						\
									\
-	trace_buf = per_cpu_ptr(trace_buf, __cpu);			\
-	if (trace_buf->recursion++)					\
-		goto end_recursion;					\
-									\
-	barrier();							\
-									\
-	raw_data = trace_buf->buf;					\
+	raw_data = per_cpu_ptr(trace_buf, __cpu);			\
									\
	*(u64 *)(&raw_data[__entry_size - sizeof(u64)]) = 0ULL;	\
	entry = (struct ftrace_raw_##call *)raw_data;			\
@@ -780,9 +781,9 @@ static void ftrace_profile_##call(proto)			\
	perf_tp_event(event_call->id, __addr, __count, entry,		\
			     __entry_size);				\
									\
-end_recursion:								\
-	trace_buf->recursion--;						\
-end:									\
+end:									\
+	perf_swevent_put_recursion_context(recursion);			\
+end_recursion:								\
	local_irq_restore(irq_flags);					\
									\
}
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 718fa939b1a7..aba822722300 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -3880,34 +3880,42 @@ static void perf_swevent_ctx_event(struct perf_event_context *ctx,
 	}
 }
 
-static int *perf_swevent_recursion_context(struct perf_cpu_context *cpuctx)
-{
-	if (in_nmi())
-		return &cpuctx->recursion[3];
-
-	if (in_irq())
-		return &cpuctx->recursion[2];
-
-	if (in_softirq())
-		return &cpuctx->recursion[1];
-
-	return &cpuctx->recursion[0];
-}
-
-static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
-				u64 nr, int nmi,
-				struct perf_sample_data *data,
-				struct pt_regs *regs)
-{
-	struct perf_cpu_context *cpuctx = &get_cpu_var(perf_cpu_context);
-	int *recursion = perf_swevent_recursion_context(cpuctx);
-	struct perf_event_context *ctx;
-
-	if (*recursion)
-		goto out;
-
-	(*recursion)++;
-	barrier();
-
+/*
+ * Must be called with preemption disabled
+ */
+int perf_swevent_get_recursion_context(int **recursion)
+{
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+
+	if (in_nmi())
+		*recursion = &cpuctx->recursion[3];
+	else if (in_irq())
+		*recursion = &cpuctx->recursion[2];
+	else if (in_softirq())
+		*recursion = &cpuctx->recursion[1];
+	else
+		*recursion = &cpuctx->recursion[0];
+
+	if (**recursion)
+		return -1;
+
+	(**recursion)++;
+
+	return 0;
+}
+
+void perf_swevent_put_recursion_context(int *recursion)
+{
+	(*recursion)--;
+}
+
+static void __do_perf_sw_event(enum perf_type_id type, u32 event_id,
+				u64 nr, int nmi,
+				struct perf_sample_data *data,
+				struct pt_regs *regs)
+{
+	struct perf_event_context *ctx;
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+
 	rcu_read_lock();
 	perf_swevent_ctx_event(&cpuctx->ctx, type, event_id,
@@ -3920,12 +3928,25 @@ static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
 	if (ctx)
 		perf_swevent_ctx_event(ctx, type, event_id, nr, nmi, data, regs);
 	rcu_read_unlock();
+}
 
-	barrier();
-	(*recursion)--;
+static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
+				u64 nr, int nmi,
+				struct perf_sample_data *data,
+				struct pt_regs *regs)
+{
+	int *recursion;
+
+	preempt_disable();
+
+	if (perf_swevent_get_recursion_context(&recursion))
+		goto out;
+
+	__do_perf_sw_event(type, event_id, nr, nmi, data, regs);
 
+	perf_swevent_put_recursion_context(recursion);
 out:
-	put_cpu_var(perf_cpu_context);
+	preempt_enable();
 }
 
 void __perf_sw_event(u32 event_id, u64 nr, int nmi,
@@ -4159,7 +4180,8 @@ void perf_tp_event(int event_id, u64 addr, u64 count, void *record,
 	if (!regs)
 		regs = task_pt_regs(current);
 
-	do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
+	/* Trace events already protected against recursion */
+	__do_perf_sw_event(PERF_TYPE_TRACEPOINT, event_id, count, 1,
 			&data, regs);
 }
 EXPORT_SYMBOL_GPL(perf_tp_event);
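The recursion counters behind these two helpers are per-cpu and per context level, so an interrupt or NMI that fires while a task-level software event is being recorded still gets its own slot instead of being dropped; only a true re-entry at the same level makes the get call fail. A standalone illustration of that bookkeeping (not kernel code; all names below are made up):

/* One counter per context level: task, softirq, hardirq, NMI. */
enum { CTX_TASK, CTX_SOFTIRQ, CTX_IRQ, CTX_NMI, CTX_LEVELS };

struct swevent_guard {
	int recursion[CTX_LEVELS];
};

/* Returns 0 and hands back the slot on success, -1 if that level is busy. */
static int guard_enter(struct swevent_guard *g, int level, int **slot)
{
	*slot = &g->recursion[level];
	if (**slot)
		return -1;	/* re-entered the same level: recursion */
	(**slot)++;
	return 0;
}

static void guard_exit(int *slot)
{
	(*slot)--;		/* reopen the level on the way out */
}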
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index e0d351b01f5a..d9c60f80aa0d 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -9,31 +9,33 @@
 #include "trace.h"
 
 
-struct perf_trace_buf *perf_trace_buf;
+char *perf_trace_buf;
 EXPORT_SYMBOL_GPL(perf_trace_buf);
 
-struct perf_trace_buf *perf_trace_buf_nmi;
+char *perf_trace_buf_nmi;
 EXPORT_SYMBOL_GPL(perf_trace_buf_nmi);
 
+typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t ;
+
 /* Count the events in use (per event id, not per instance) */
 static int total_profile_count;
 
 static int ftrace_profile_enable_event(struct ftrace_event_call *event)
 {
-	struct perf_trace_buf *buf;
+	char *buf;
 	int ret = -ENOMEM;
 
 	if (atomic_inc_return(&event->profile_count))
 		return 0;
 
 	if (!total_profile_count) {
-		buf = alloc_percpu(struct perf_trace_buf);
+		buf = (char *)alloc_percpu(perf_trace_t);
 		if (!buf)
 			goto fail_buf;
 
 		rcu_assign_pointer(perf_trace_buf, buf);
 
-		buf = alloc_percpu(struct perf_trace_buf);
+		buf = (char *)alloc_percpu(perf_trace_t);
 		if (!buf)
 			goto fail_buf_nmi;
 
@@ -79,7 +81,7 @@ int ftrace_profile_enable(int event_id)
 
 static void ftrace_profile_disable_event(struct ftrace_event_call *event)
 {
-	struct perf_trace_buf *buf, *nmi_buf;
+	char *buf, *nmi_buf;
 
 	if (!atomic_add_negative(-1, &event->profile_count))
 		return;
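The perf_trace_t typedef above exists only so alloc_percpu() can size the allocation from a type: a typedef of a fixed-size char array gives each CPU a flat FTRACE_MAX_PROFILE_SIZE scratch buffer while the exported pointer stays a plain char *. A hedged sketch of that allocation (kernel headers assumed; the helper name is illustrative):

#include <linux/percpu.h>

#define FTRACE_MAX_PROFILE_SIZE	2048

typedef typeof(char [FTRACE_MAX_PROFILE_SIZE]) perf_trace_t;

static char *alloc_trace_buf_sketch(void)
{
	/* One FTRACE_MAX_PROFILE_SIZE buffer per possible CPU, or NULL on failure. */
	return (char *)alloc_percpu(perf_trace_t);
}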
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 3696476f307d..22e6f68b05b3 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1208,11 +1208,12 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kprobe_trace_entry *entry;
-	struct perf_trace_buf *trace_buf;
 	struct trace_entry *ent;
 	int size, __size, i, pc, __cpu;
 	unsigned long irq_flags;
+	char *trace_buf;
 	char *raw_data;
+	int *recursion;
 
 	pc = preempt_count();
 	__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
@@ -1227,6 +1228,10 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
 	 * This also protects the rcu read side
 	 */
 	local_irq_save(irq_flags);
+
+	if (perf_swevent_get_recursion_context(&recursion))
+		goto end_recursion;
+
 	__cpu = smp_processor_id();
 
 	if (in_nmi())
@@ -1237,18 +1242,7 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
 	if (!trace_buf)
 		goto end;
 
-	trace_buf = per_cpu_ptr(trace_buf, __cpu);
-
-	if (trace_buf->recursion++)
-		goto end_recursion;
-
-	/*
-	 * Make recursion update visible before entering perf_tp_event
-	 * so that we protect from perf recursions.
-	 */
-	barrier();
-
-	raw_data = trace_buf->buf;
+	raw_data = per_cpu_ptr(trace_buf, __cpu);
 
 	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
 	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
@@ -1263,9 +1257,9 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
 	perf_tp_event(call->id, entry->ip, 1, entry, size);
 
-end_recursion:
-	trace_buf->recursion--;
-end:
+end:
+	perf_swevent_put_recursion_context(recursion);
+end_recursion:
 	local_irq_restore(irq_flags);
 
 	return 0;
@@ -1278,10 +1272,11 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kretprobe_trace_entry *entry;
-	struct perf_trace_buf *trace_buf;
 	struct trace_entry *ent;
 	int size, __size, i, pc, __cpu;
 	unsigned long irq_flags;
+	char *trace_buf;
+	int *recursion;
 	char *raw_data;
 
 	pc = preempt_count();
@@ -1297,6 +1292,10 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
 	 * This also protects the rcu read side
 	 */
 	local_irq_save(irq_flags);
+
+	if (perf_swevent_get_recursion_context(&recursion))
+		goto end_recursion;
+
 	__cpu = smp_processor_id();
 
 	if (in_nmi())
@@ -1307,18 +1306,7 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
 	if (!trace_buf)
 		goto end;
 
-	trace_buf = per_cpu_ptr(trace_buf, __cpu);
-
-	if (trace_buf->recursion++)
-		goto end_recursion;
-
-	/*
-	 * Make recursion update visible before entering perf_tp_event
-	 * so that we protect from perf recursions.
-	 */
-	barrier();
-
-	raw_data = trace_buf->buf;
+	raw_data = per_cpu_ptr(trace_buf, __cpu);
 
 	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
 	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
@@ -1334,9 +1322,9 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
 	perf_tp_event(call->id, entry->ret_ip, 1, entry, size);
 
-end_recursion:
-	trace_buf->recursion--;
-end:
+end:
+	perf_swevent_put_recursion_context(recursion);
+end_recursion:
 	local_irq_restore(irq_flags);
 
 	return 0;
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 51213b0aa81b..0bb934875263 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -477,10 +477,11 @@ static int sys_prof_refcount_exit;
 static void prof_syscall_enter(struct pt_regs *regs, long id)
 {
 	struct syscall_metadata *sys_data;
-	struct perf_trace_buf *trace_buf;
 	struct syscall_trace_enter *rec;
 	unsigned long flags;
+	char *trace_buf;
 	char *raw_data;
+	int *recursion;
 	int syscall_nr;
 	int size;
 	int cpu;
@@ -505,6 +506,9 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	/* Protect the per cpu buffer, begin the rcu read side */
 	local_irq_save(flags);
 
+	if (perf_swevent_get_recursion_context(&recursion))
+		goto end_recursion;
+
 	cpu = smp_processor_id();
 
 	if (in_nmi())
@@ -515,18 +519,7 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 	if (!trace_buf)
 		goto end;
 
-	trace_buf = per_cpu_ptr(trace_buf, cpu);
-
-	if (trace_buf->recursion++)
-		goto end_recursion;
-
-	/*
-	 * Make recursion update visible before entering perf_tp_event
-	 * so that we protect from perf recursions.
-	 */
-	barrier();
-
-	raw_data = trace_buf->buf;
+	raw_data = per_cpu_ptr(trace_buf, cpu);
 
 	/* zero the dead bytes from align to not leak stack to user */
 	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
@@ -539,9 +532,9 @@ static void prof_syscall_enter(struct pt_regs *regs, long id)
 					       (unsigned long *)&rec->args);
 	perf_tp_event(sys_data->enter_id, 0, 1, rec, size);
 
-end_recursion:
-	trace_buf->recursion--;
-end:
+end:
+	perf_swevent_put_recursion_context(recursion);
+end_recursion:
 	local_irq_restore(flags);
 }
 
@@ -588,10 +581,11 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 {
 	struct syscall_metadata *sys_data;
 	struct syscall_trace_exit *rec;
-	struct perf_trace_buf *trace_buf;
 	unsigned long flags;
 	int syscall_nr;
+	char *trace_buf;
 	char *raw_data;
+	int *recursion;
 	int size;
 	int cpu;
 
@@ -617,6 +611,10 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 
 	/* Protect the per cpu buffer, begin the rcu read side */
 	local_irq_save(flags);
+
+	if (perf_swevent_get_recursion_context(&recursion))
+		goto end_recursion;
+
 	cpu = smp_processor_id();
 
 	if (in_nmi())
@@ -627,18 +625,7 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 	if (!trace_buf)
 		goto end;
 
-	trace_buf = per_cpu_ptr(trace_buf, cpu);
-
-	if (trace_buf->recursion++)
-		goto end_recursion;
-
-	/*
-	 * Make recursion update visible before entering perf_tp_event
-	 * so that we protect from perf recursions.
-	 */
-	barrier();
-
-	raw_data = trace_buf->buf;
+	raw_data = per_cpu_ptr(trace_buf, cpu);
 
 	/* zero the dead bytes from align to not leak stack to user */
 	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
@@ -652,9 +639,9 @@ static void prof_syscall_exit(struct pt_regs *regs, long ret)
 
 	perf_tp_event(sys_data->exit_id, 0, 1, rec, size);
 
-end_recursion:
-	trace_buf->recursion--;
-end:
+end:
+	perf_swevent_put_recursion_context(recursion);
+end_recursion:
 	local_irq_restore(flags);
 }
 