author		Linus Torvalds <torvalds@linux-foundation.org>	2010-05-27 18:23:47 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-27 18:23:47 -0400
commit		c5617b200ac52e35f7e8cf05a17b0a2d50f6b3e9 (patch)
tree		40d5e99660c77c5791392d349a93113c044dbf14 /kernel/trace/trace_kprobe.c
parent		cad719d86e9dbd06634eaba6401e022c8101d6b2 (diff)
parent		49c177461bfbedeccbab22bf3905db2f9da7f1c3 (diff)
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (61 commits)
tracing: Add __used annotation to event variable
perf, trace: Fix !x86 build bug
perf report: Support multiple events on the TUI
perf annotate: Fix up usage of the build id cache
x86/mmiotrace: Remove redundant instruction prefix checks
perf annotate: Add TUI interface
perf tui: Remove annotate from popup menu after failure
perf report: Don't start the TUI if -D is used
perf: Fix getline undeclared
perf: Optimize perf_tp_event_match()
perf: Remove more code from the fastpath
perf: Optimize the !vmalloc backed buffer
perf: Optimize perf_output_copy()
perf: Fix wakeup storm for RO mmap()s
perf-record: Share per-cpu buffers
perf-record: Remove -M
perf: Ensure that IOC_OUTPUT isn't used to create multi-writer buffers
perf, trace: Optimize tracepoints by using per-tracepoint-per-cpu hlist to track events
perf, trace: Optimize tracepoints by removing IRQ-disable from perf/tracepoint interaction
perf tui: Allow disabling the TUI on a per command basis in ~/.perfconfig
...
Diffstat (limited to 'kernel/trace/trace_kprobe.c')
-rw-r--r--	kernel/trace/trace_kprobe.c	113
1 file changed, 67 insertions(+), 46 deletions(-)
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index a7514326052b..faf7cefd15da 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -324,8 +324,8 @@ struct trace_probe {
 	unsigned long		nhit;
 	unsigned int		flags;	/* For TP_FLAG_* */
 	const char		*symbol;	/* symbol name */
+	struct ftrace_event_class	class;
 	struct ftrace_event_call	call;
-	struct trace_event	event;
 	ssize_t			size;		/* trace entry size */
 	unsigned int		nr_args;
 	struct probe_arg	args[];
@@ -404,6 +404,7 @@ static struct trace_probe *alloc_trace_probe(const char *group,
 		goto error;
 	}
 
+	tp->call.class = &tp->class;
 	tp->call.name = kstrdup(event, GFP_KERNEL);
 	if (!tp->call.name)
 		goto error;
@@ -413,8 +414,8 @@ static struct trace_probe *alloc_trace_probe(const char *group,
 		goto error;
 	}
 
-	tp->call.system = kstrdup(group, GFP_KERNEL);
-	if (!tp->call.system)
+	tp->class.system = kstrdup(group, GFP_KERNEL);
+	if (!tp->class.system)
 		goto error;
 
 	INIT_LIST_HEAD(&tp->list);
@@ -443,7 +444,7 @@ static void free_trace_probe(struct trace_probe *tp)
 	for (i = 0; i < tp->nr_args; i++)
 		free_probe_arg(&tp->args[i]);
 
-	kfree(tp->call.system);
+	kfree(tp->call.class->system);
 	kfree(tp->call.name);
 	kfree(tp->symbol);
 	kfree(tp);
@@ -456,7 +457,7 @@ static struct trace_probe *find_probe_event(const char *event,
 
 	list_for_each_entry(tp, &probe_list, list)
 		if (strcmp(tp->call.name, event) == 0 &&
-		    strcmp(tp->call.system, group) == 0)
+		    strcmp(tp->call.class->system, group) == 0)
 			return tp;
 	return NULL;
 }
@@ -481,7 +482,7 @@ static int register_trace_probe(struct trace_probe *tp)
 	mutex_lock(&probe_lock);
 
 	/* register as an event */
-	old_tp = find_probe_event(tp->call.name, tp->call.system);
+	old_tp = find_probe_event(tp->call.name, tp->call.class->system);
 	if (old_tp) {
 		/* delete old event */
 		unregister_trace_probe(old_tp);
@@ -904,7 +905,7 @@ static int probes_seq_show(struct seq_file *m, void *v)
 	int i;
 
 	seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
-	seq_printf(m, ":%s/%s", tp->call.system, tp->call.name);
+	seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);
 
 	if (!tp->symbol)
 		seq_printf(m, " 0x%p", tp->rp.kp.addr);
@@ -1061,8 +1062,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
 
 	size = sizeof(*entry) + tp->size;
 
-	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
-						  irq_flags, pc);
+	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
+						  size, irq_flags, pc);
 	if (!event)
 		return;
 
@@ -1094,8 +1095,8 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
 
 	size = sizeof(*entry) + tp->size;
 
-	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
-						  irq_flags, pc);
+	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
+						  size, irq_flags, pc);
 	if (!event)
 		return;
 
@@ -1112,18 +1113,17 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
 
 /* Event entry printers */
 enum print_line_t
-print_kprobe_event(struct trace_iterator *iter, int flags)
+print_kprobe_event(struct trace_iterator *iter, int flags,
+		   struct trace_event *event)
 {
 	struct kprobe_trace_entry_head *field;
 	struct trace_seq *s = &iter->seq;
-	struct trace_event *event;
 	struct trace_probe *tp;
 	u8 *data;
 	int i;
 
 	field = (struct kprobe_trace_entry_head *)iter->ent;
-	event = ftrace_find_event(field->ent.type);
-	tp = container_of(event, struct trace_probe, event);
+	tp = container_of(event, struct trace_probe, call.event);
 
 	if (!trace_seq_printf(s, "%s: (", tp->call.name))
 		goto partial;
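
The hunk above (together with the struct trace_probe hunk at the top of the diff) changes how the printer finds its probe: struct trace_event no longer sits directly in trace_probe but is embedded in the ftrace_event_call, and the core now passes the trace_event pointer straight into the callback instead of making the printer look it up with ftrace_find_event(). The following is a minimal userspace sketch of why container_of(event, struct trace_probe, call.event) recovers the probe; the struct bodies are simplified stand-ins for the kernel types, not their real definitions:

#include <stdio.h>
#include <stddef.h>

/* simplified stand-ins: only the fields relevant to the layout */
struct ftrace_event_class { const char *system; };
struct trace_event        { int type; };
struct ftrace_event_call  {
	struct ftrace_event_class *class;
	struct trace_event event;	/* the event now lives inside the call */
};
struct trace_probe {
	struct ftrace_event_class class;	/* added by this series */
	struct ftrace_event_call  call;		/* trace_event member removed */
};

/* same pointer arithmetic as the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct trace_probe tp = { .class = { .system = "kprobes" } };

	tp.call.class = &tp.class;	/* mirrors alloc_trace_probe() */

	/* the print callback now receives the trace_event directly,
	 * so it can walk back to the enclosing probe in one step */
	struct trace_event *event = &tp.call.event;
	struct trace_probe *back =
		container_of(event, struct trace_probe, call.event);

	printf("recovered %s probe: system=%s\n",
	       back == &tp ? "the right" : "the WRONG",
	       back->call.class->system);
	return 0;
}

Passing the trace_event into the callback removes the by-type lookup from the output path; the probe is recovered with plain pointer arithmetic instead.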
@@ -1149,18 +1149,17 @@ partial:
 }
 
 enum print_line_t
-print_kretprobe_event(struct trace_iterator *iter, int flags)
+print_kretprobe_event(struct trace_iterator *iter, int flags,
+		      struct trace_event *event)
 {
 	struct kretprobe_trace_entry_head *field;
 	struct trace_seq *s = &iter->seq;
-	struct trace_event *event;
 	struct trace_probe *tp;
 	u8 *data;
 	int i;
 
 	field = (struct kretprobe_trace_entry_head *)iter->ent;
-	event = ftrace_find_event(field->ent.type);
-	tp = container_of(event, struct trace_probe, event);
+	tp = container_of(event, struct trace_probe, call.event);
 
 	if (!trace_seq_printf(s, "%s: (", tp->call.name))
 		goto partial;
@@ -1217,8 +1216,6 @@ static void probe_event_disable(struct ftrace_event_call *call)
 
 static int probe_event_raw_init(struct ftrace_event_call *event_call)
 {
-	INIT_LIST_HEAD(&event_call->fields);
-
 	return 0;
 }
 
@@ -1341,9 +1338,9 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kprobe_trace_entry_head *entry;
+	struct hlist_head *head;
 	u8 *data;
 	int size, __size, i;
-	unsigned long irq_flags;
 	int rctx;
 
 	__size = sizeof(*entry) + tp->size;
@@ -1353,7 +1350,7 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
 		     "profile buffer not large enough"))
 		return;
 
-	entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
+	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
 	if (!entry)
 		return;
 
@@ -1362,7 +1359,8 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
 	for (i = 0; i < tp->nr_args; i++)
 		call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset);
 
-	perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags, regs);
+	head = per_cpu_ptr(call->perf_events, smp_processor_id());
+	perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head);
 }
 
 /* Kretprobe profile handler */
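
Per the shortlog entries "Optimize tracepoints by using per-tracepoint-per-cpu hlist to track events" and "Optimize tracepoints by removing IRQ-disable from perf/tracepoint interaction", the perf handler no longer carries irq_flags and now submits into the list of perf events attached to this tracepoint on the current CPU, so the hot path only ever walks a short local list. A standalone C analogy of the per-CPU list idea (toy types and array instead of alloc_percpu(), not kernel code):

#include <stdio.h>

#define NR_CPUS 4	/* toy value for the sketch */

struct consumer {
	const char *name;
	struct consumer *next;
};

/* one list head per tracepoint per CPU -- the moral equivalent of the
 * call->perf_events per-cpu pointer used in the hunk above */
static struct consumer *perf_events[NR_CPUS];

static void fire_event(int cpu)
{
	/* mirrors: head = per_cpu_ptr(call->perf_events, smp_processor_id());
	 * only the current CPU's short list is walked on the hot path */
	for (struct consumer *c = perf_events[cpu]; c; c = c->next)
		printf("deliver to %s on cpu%d\n", c->name, cpu);
}

int main(void)
{
	struct consumer a = { "counter-A", NULL };
	struct consumer b = { "counter-B", NULL };

	perf_events[0] = &a;	/* counter bound to CPU 0 */
	perf_events[1] = &b;	/* counter bound to CPU 1 */

	fire_event(0);		/* only counter-A sees the event */
	return 0;
}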
@@ -1372,9 +1370,9 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kretprobe_trace_entry_head *entry;
+	struct hlist_head *head;
 	u8 *data;
 	int size, __size, i;
-	unsigned long irq_flags;
 	int rctx;
 
 	__size = sizeof(*entry) + tp->size;
@@ -1384,7 +1382,7 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
 		     "profile buffer not large enough"))
 		return;
 
-	entry = perf_trace_buf_prepare(size, call->id, &rctx, &irq_flags);
+	entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
 	if (!entry)
 		return;
 
@@ -1394,8 +1392,8 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
 	for (i = 0; i < tp->nr_args; i++)
 		call_fetch(&tp->args[i].fetch, regs, data + tp->args[i].offset);
 
-	perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1,
-			      irq_flags, regs);
+	head = per_cpu_ptr(call->perf_events, smp_processor_id());
+	perf_trace_buf_submit(entry, size, rctx, entry->ret_ip, 1, regs, head);
 }
 
1401 | static int probe_perf_enable(struct ftrace_event_call *call) | 1399 | static int probe_perf_enable(struct ftrace_event_call *call) |
@@ -1425,6 +1423,26 @@ static void probe_perf_disable(struct ftrace_event_call *call) | |||
1425 | } | 1423 | } |
1426 | #endif /* CONFIG_PERF_EVENTS */ | 1424 | #endif /* CONFIG_PERF_EVENTS */ |
1427 | 1425 | ||
1426 | static __kprobes | ||
1427 | int kprobe_register(struct ftrace_event_call *event, enum trace_reg type) | ||
1428 | { | ||
1429 | switch (type) { | ||
1430 | case TRACE_REG_REGISTER: | ||
1431 | return probe_event_enable(event); | ||
1432 | case TRACE_REG_UNREGISTER: | ||
1433 | probe_event_disable(event); | ||
1434 | return 0; | ||
1435 | |||
1436 | #ifdef CONFIG_PERF_EVENTS | ||
1437 | case TRACE_REG_PERF_REGISTER: | ||
1438 | return probe_perf_enable(event); | ||
1439 | case TRACE_REG_PERF_UNREGISTER: | ||
1440 | probe_perf_disable(event); | ||
1441 | return 0; | ||
1442 | #endif | ||
1443 | } | ||
1444 | return 0; | ||
1445 | } | ||
1428 | 1446 | ||
1429 | static __kprobes | 1447 | static __kprobes |
1430 | int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) | 1448 | int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) |
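
The new kprobe_register() folds the four previous callbacks (regfunc/unregfunc and the two perf variants, dropped from register_probe_event() further down) into the single class->reg(call, type) operation. A compilable sketch of that dispatch shape, with toy types standing in for the kernel's; the caller side shown in main() is an assumption about how the event core uses the op, not part of this diff:

#include <stdio.h>

/* values mirror the enum trace_reg cases handled above */
enum trace_reg {
	TRACE_REG_REGISTER,
	TRACE_REG_UNREGISTER,
	TRACE_REG_PERF_REGISTER,
	TRACE_REG_PERF_UNREGISTER,
};

struct event_class_sketch {
	/* one op replaces regfunc/unregfunc/perf_event_enable/disable */
	int (*reg)(void *call, enum trace_reg type);
};

static int kprobe_register_sketch(void *call, enum trace_reg type)
{
	(void)call;
	switch (type) {
	case TRACE_REG_REGISTER:	printf("ftrace enable\n");  return 0;
	case TRACE_REG_UNREGISTER:	printf("ftrace disable\n"); return 0;
	case TRACE_REG_PERF_REGISTER:	printf("perf enable\n");    return 0;
	case TRACE_REG_PERF_UNREGISTER:	printf("perf disable\n");   return 0;
	}
	return 0;
}

int main(void)
{
	struct event_class_sketch class = { .reg = kprobe_register_sketch };

	/* assumed caller side: the core needs only one function pointer
	 * for all four enable/disable transitions */
	class.reg(NULL, TRACE_REG_REGISTER);
	class.reg(NULL, TRACE_REG_PERF_REGISTER);
	return 0;
}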
@@ -1454,6 +1472,14 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
 	return 0;	/* We don't tweek kernel, so just return 0 */
 }
 
+static struct trace_event_functions kretprobe_funcs = {
+	.trace		= print_kretprobe_event
+};
+
+static struct trace_event_functions kprobe_funcs = {
+	.trace		= print_kprobe_event
+};
+
 static int register_probe_event(struct trace_probe *tp)
 {
 	struct ftrace_event_call *call = &tp->call;
@@ -1461,36 +1487,31 @@ static int register_probe_event(struct trace_probe *tp)
 
 	/* Initialize ftrace_event_call */
 	if (probe_is_return(tp)) {
-		tp->event.trace = print_kretprobe_event;
-		call->raw_init = probe_event_raw_init;
-		call->define_fields = kretprobe_event_define_fields;
+		INIT_LIST_HEAD(&call->class->fields);
+		call->event.funcs = &kretprobe_funcs;
+		call->class->raw_init = probe_event_raw_init;
+		call->class->define_fields = kretprobe_event_define_fields;
 	} else {
-		tp->event.trace = print_kprobe_event;
-		call->raw_init = probe_event_raw_init;
-		call->define_fields = kprobe_event_define_fields;
+		INIT_LIST_HEAD(&call->class->fields);
+		call->event.funcs = &kprobe_funcs;
+		call->class->raw_init = probe_event_raw_init;
+		call->class->define_fields = kprobe_event_define_fields;
 	}
 	if (set_print_fmt(tp) < 0)
 		return -ENOMEM;
-	call->event = &tp->event;
-	call->id = register_ftrace_event(&tp->event);
-	if (!call->id) {
+	ret = register_ftrace_event(&call->event);
+	if (!ret) {
 		kfree(call->print_fmt);
 		return -ENODEV;
 	}
-	call->enabled = 0;
-	call->regfunc = probe_event_enable;
-	call->unregfunc = probe_event_disable;
-
-#ifdef CONFIG_PERF_EVENTS
-	call->perf_event_enable = probe_perf_enable;
-	call->perf_event_disable = probe_perf_disable;
-#endif
+	call->flags = 0;
+	call->class->reg = kprobe_register;
 	call->data = tp;
 	ret = trace_add_event_call(call);
 	if (ret) {
 		pr_info("Failed to register kprobe event: %s\n", call->name);
 		kfree(call->print_fmt);
-		unregister_ftrace_event(&tp->event);
+		unregister_ftrace_event(&call->event);
 	}
 	return ret;
 }
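
Note the changed error check in the hunk above: the old code stored the return of register_ftrace_event() in call->id and treated 0 as failure; the id now lives in call->event.type, which is exactly what the earlier trace_current_buffer_lock_reserve() and perf_trace_buf_prepare() hunks read back. A toy stand-in illustrating that return convention, on the assumption (drawn from this diff, not verified against the core) that a nonzero return is the assigned type id and 0 means failure:

#include <stdio.h>

struct trace_event_sketch { int type; };

static int next_type = 1;

/* toy stand-in for register_ftrace_event(): assigns the next free type
 * id, stores it in the event, and returns it (0 would mean failure) */
static int register_event_sketch(struct trace_event_sketch *ev)
{
	ev->type = next_type++;
	return ev->type;
}

int main(void)
{
	struct trace_event_sketch ev = { 0 };
	int ret = register_event_sketch(&ev);

	if (!ret)	/* same failure check as the hunk above */
		return 1;

	/* the id is later read back as call->event.type, e.g. when
	 * reserving ring-buffer space in kprobe_trace_func() */
	printf("assigned type id %d\n", ev.type);
	return 0;
}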