author     Frederic Weisbecker <fweisbec@gmail.com>  2009-11-21 23:26:55 -0500
committer  Ingo Molnar <mingo@elte.hu>               2009-11-22 03:03:42 -0500
commit     ce71b9df8893ec954e56c5979df6da274f20f65e (patch)
tree       76e8a5e33393c2f4fca4083628fc142dcbb55250 /kernel/trace/trace_kprobe.c
parent     e25613683bd5c46d3e8c8ae6416dccc9f357dcdc (diff)
tracing: Use the perf recursion protection from trace event
When we commit a trace to perf, we first check if we are recursing in
the same buffer so that we don't mess up the buffer with a recursing
trace. But later on, we do the same check from perf to avoid commit
recursion. The recursion check is desired early, before we touch the
buffer, but we want to do this check only once.

Then export the recursion protection from perf and use it from the
trace events before submitting a trace.

v2: Put appropriate Reported-by tag

Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Jason Baron <jbaron@redhat.com>
LKML-Reference: <1258864015-10579-1-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
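For illustration, a minimal sketch of the pattern this patch adopts on the
trace-event side, using the perf_swevent_get/put_recursion_context() calls
exactly as they appear in the diff below (their signatures changed in later
kernels, so treat them as era-specific). The function name
sketch_profile_func() and the elided buffer handling are placeholders, not
part of the patch; the void return of the put helper is an assumption:

	/*
	 * Minimal sketch of the new recursion protection, assuming the
	 * signatures used by this patch:
	 *   int  perf_swevent_get_recursion_context(int **recursion);
	 *   void perf_swevent_put_recursion_context(int *recursion);
	 */
	static int sketch_profile_func(void)	/* hypothetical name */
	{
		unsigned long irq_flags;
		int *recursion;

		local_irq_save(irq_flags);

		/*
		 * Claim the recursion slot for the current context once,
		 * before touching any per-cpu trace buffer; bail out if a
		 * trace commit is already in flight in this context.
		 */
		if (perf_swevent_get_recursion_context(&recursion))
			goto end_recursion;

		/* ... fill the per-cpu buffer and call perf_tp_event() ... */

		perf_swevent_put_recursion_context(recursion);
	end_recursion:
		local_irq_restore(irq_flags);
		return 0;
	}

Note that the put only runs when the get succeeded: a failed get jumps past
it straight to the irq restore, which is why the patch swaps the order of
the end: and end_recursion: labels in both probe handlers.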
Diffstat (limited to 'kernel/trace/trace_kprobe.c')
-rw-r--r--  kernel/trace/trace_kprobe.c | 48
1 file changed, 18 insertions(+), 30 deletions(-)
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 3696476f307d..22e6f68b05b3 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1208,11 +1208,12 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kprobe_trace_entry *entry;
-	struct perf_trace_buf *trace_buf;
 	struct trace_entry *ent;
 	int size, __size, i, pc, __cpu;
 	unsigned long irq_flags;
+	char *trace_buf;
 	char *raw_data;
+	int *recursion;
 
 	pc = preempt_count();
 	__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
@@ -1227,6 +1228,10 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
 	 * This also protects the rcu read side
 	 */
 	local_irq_save(irq_flags);
+
+	if (perf_swevent_get_recursion_context(&recursion))
+		goto end_recursion;
+
 	__cpu = smp_processor_id();
 
 	if (in_nmi())
@@ -1237,18 +1242,7 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
 	if (!trace_buf)
 		goto end;
 
-	trace_buf = per_cpu_ptr(trace_buf, __cpu);
-
-	if (trace_buf->recursion++)
-		goto end_recursion;
-
-	/*
-	 * Make recursion update visible before entering perf_tp_event
-	 * so that we protect from perf recursions.
-	 */
-	barrier();
-
-	raw_data = trace_buf->buf;
+	raw_data = per_cpu_ptr(trace_buf, __cpu);
 
 	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
 	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
@@ -1263,9 +1257,9 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
 	perf_tp_event(call->id, entry->ip, 1, entry, size);
 
-end_recursion:
-	trace_buf->recursion--;
 end:
+	perf_swevent_put_recursion_context(recursion);
+end_recursion:
 	local_irq_restore(irq_flags);
 
 	return 0;
@@ -1278,10 +1272,11 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kretprobe_trace_entry *entry;
-	struct perf_trace_buf *trace_buf;
 	struct trace_entry *ent;
 	int size, __size, i, pc, __cpu;
 	unsigned long irq_flags;
+	char *trace_buf;
+	int *recursion;
 	char *raw_data;
 
 	pc = preempt_count();
@@ -1297,6 +1292,10 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
 	 * This also protects the rcu read side
 	 */
 	local_irq_save(irq_flags);
+
+	if (perf_swevent_get_recursion_context(&recursion))
+		goto end_recursion;
+
 	__cpu = smp_processor_id();
 
 	if (in_nmi())
@@ -1307,18 +1306,7 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
 	if (!trace_buf)
 		goto end;
 
-	trace_buf = per_cpu_ptr(trace_buf, __cpu);
-
-	if (trace_buf->recursion++)
-		goto end_recursion;
-
-	/*
-	 * Make recursion update visible before entering perf_tp_event
-	 * so that we protect from perf recursions.
-	 */
-	barrier();
-
-	raw_data = trace_buf->buf;
+	raw_data = per_cpu_ptr(trace_buf, __cpu);
 
 	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
 	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
@@ -1334,9 +1322,9 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
 	perf_tp_event(call->id, entry->ret_ip, 1, entry, size);
 
-end_recursion:
-	trace_buf->recursion--;
 end:
+	perf_swevent_put_recursion_context(recursion);
+end_recursion:
 	local_irq_restore(irq_flags);
 
 	return 0;