author    Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>  2010-01-27 20:32:29 -0500
committer Frederic Weisbecker <fweisbec@gmail.com>       2010-01-28 20:02:57 -0500
commit    430ad5a600a83956749307b13257c464c3826b55
tree      9cd3dd3f54e29397ff303478de9fe6902f675b9b /kernel/trace/trace_kprobe.c
parent    339ce1a4dc2ca26444c4f65c31b71a5056f3bb0b
perf: Factorize trace events raw sample buffer operations
Introduce ftrace_perf_buf_prepare() and ftrace_perf_buf_submit() to
gather the common code that operates on the raw event sampling buffer.
This cleans up redundant code between regular trace events, syscall
events and kprobe events.

Changelog v1->v2:
- Rename the functions as per Masami and Frederic's suggestion
- Add __kprobes for ftrace_perf_buf_prepare() and make
  ftrace_perf_buf_submit() inline as per Masami's suggestion
- Export ftrace_perf_buf_prepare since modules will use it

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Acked-by: Masami Hiramatsu <mhiramat@redhat.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Jason Baron <jbaron@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
LKML-Reference: <4B60E92D.9000808@cn.fujitsu.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
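The helper bodies land outside this file, so they do not appear in this view,
which is limited to trace_kprobe.c. What follows is a minimal sketch of the
pair, reconstructed from the open-coded sequence this patch deletes below;
treat the exact bodies, local names, and comments as an approximation rather
than a quotation of the patch:

/*
 * Reserve a slot in the per-cpu raw sample buffer: disable irqs, take
 * the swevent recursion context, pick the NMI or non-NMI buffer, zero
 * the alignment padding and fill in the common trace entry header.
 * Returns NULL, with all state unwound, if the buffer is unavailable.
 */
__kprobes void *ftrace_perf_buf_prepare(int size, unsigned short type,
					int *rctxp, unsigned long *irq_flags)
{
	struct trace_entry *entry;
	char *trace_buf, *raw_data;
	int pc, cpu;

	pc = preempt_count();

	/* Protect the non-NMI buffer; this also protects the RCU read side */
	local_irq_save(*irq_flags);

	*rctxp = perf_swevent_get_recursion_context();
	if (*rctxp < 0)
		goto err_recursion;

	cpu = smp_processor_id();

	if (in_nmi())
		trace_buf = rcu_dereference(perf_trace_buf_nmi);
	else
		trace_buf = rcu_dereference(perf_trace_buf);
	if (!trace_buf)
		goto err;

	raw_data = per_cpu_ptr(trace_buf, cpu);

	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;

	entry = (struct trace_entry *)raw_data;
	tracing_generic_entry_update(entry, *irq_flags, pc);
	entry->type = type;

	return raw_data;

err:
	perf_swevent_put_recursion_context(*rctxp);
err_recursion:
	local_irq_restore(*irq_flags);
	return NULL;
}

/*
 * Hand the filled record to perf and undo what prepare() set up.
 */
static inline void
ftrace_perf_buf_submit(void *raw_data, int size, int rctx, u64 addr,
		       u64 count, unsigned long irq_flags)
{
	struct trace_entry *entry = raw_data;

	perf_tp_event(entry->type, addr, count, raw_data, size);
	perf_swevent_put_recursion_context(rctx);
	local_irq_restore(irq_flags);
}

The split follows the changelog: prepare() carries all the heavyweight setup
and is marked __kprobes, since it runs from inside kprobe handlers and must
not itself be probed, and is exported for modular users; submit() is a thin
tail, kept inline per Masami's suggestion.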
Diffstat (limited to 'kernel/trace/trace_kprobe.c')
-rw-r--r--  kernel/trace/trace_kprobe.c  86
1 file changed, 10 insertions(+), 76 deletions(-)
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index d6266cad6953..2e28ee36646f 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1243,14 +1243,10 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kprobe_trace_entry *entry;
-	struct trace_entry *ent;
-	int size, __size, i, pc, __cpu;
+	int size, __size, i;
 	unsigned long irq_flags;
-	char *trace_buf;
-	char *raw_data;
 	int rctx;
 
-	pc = preempt_count();
 	__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
@@ -1258,45 +1254,16 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
 		     "profile buffer not large enough"))
 		return 0;
 
-	/*
-	 * Protect the non nmi buffer
-	 * This also protects the rcu read side
-	 */
-	local_irq_save(irq_flags);
-
-	rctx = perf_swevent_get_recursion_context();
-	if (rctx < 0)
-		goto end_recursion;
-
-	__cpu = smp_processor_id();
-
-	if (in_nmi())
-		trace_buf = rcu_dereference(perf_trace_buf_nmi);
-	else
-		trace_buf = rcu_dereference(perf_trace_buf);
-
-	if (!trace_buf)
-		goto end;
-
-	raw_data = per_cpu_ptr(trace_buf, __cpu);
-
-	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
-	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
-	entry = (struct kprobe_trace_entry *)raw_data;
-	ent = &entry->ent;
+	entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+	if (!entry)
+		return 0;
 
-	tracing_generic_entry_update(ent, irq_flags, pc);
-	ent->type = call->id;
 	entry->nargs = tp->nr_args;
 	entry->ip = (unsigned long)kp->addr;
 	for (i = 0; i < tp->nr_args; i++)
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
-	perf_tp_event(call->id, entry->ip, 1, entry, size);
 
-end:
-	perf_swevent_put_recursion_context(rctx);
-end_recursion:
-	local_irq_restore(irq_flags);
+	ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags);
 
 	return 0;
 }
@@ -1308,14 +1275,10 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kretprobe_trace_entry *entry;
-	struct trace_entry *ent;
-	int size, __size, i, pc, __cpu;
+	int size, __size, i;
 	unsigned long irq_flags;
-	char *trace_buf;
-	char *raw_data;
 	int rctx;
 
-	pc = preempt_count();
 	__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
@@ -1323,46 +1286,17 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
 		     "profile buffer not large enough"))
 		return 0;
 
-	/*
-	 * Protect the non nmi buffer
-	 * This also protects the rcu read side
-	 */
-	local_irq_save(irq_flags);
-
-	rctx = perf_swevent_get_recursion_context();
-	if (rctx < 0)
-		goto end_recursion;
-
-	__cpu = smp_processor_id();
-
-	if (in_nmi())
-		trace_buf = rcu_dereference(perf_trace_buf_nmi);
-	else
-		trace_buf = rcu_dereference(perf_trace_buf);
-
-	if (!trace_buf)
-		goto end;
-
-	raw_data = per_cpu_ptr(trace_buf, __cpu);
-
-	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
-	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
-	entry = (struct kretprobe_trace_entry *)raw_data;
-	ent = &entry->ent;
+	entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
+	if (!entry)
+		return 0;
 
-	tracing_generic_entry_update(ent, irq_flags, pc);
-	ent->type = call->id;
 	entry->nargs = tp->nr_args;
 	entry->func = (unsigned long)tp->rp.kp.addr;
 	entry->ret_ip = (unsigned long)ri->ret_addr;
 	for (i = 0; i < tp->nr_args; i++)
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
-	perf_tp_event(call->id, entry->ret_ip, 1, entry, size);
 
-end:
-	perf_swevent_put_recursion_context(rctx);
-end_recursion:
-	local_irq_restore(irq_flags);
+	ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, irq_flags);
 
 	return 0;
 }