diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2013-04-30 10:41:01 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-04-30 10:41:01 -0400 |
| commit | e0972916e8fe943f342b0dd1c9d43dbf5bc261c2 (patch) | |
| tree | 690c436f1f9b839c4ba34d17ab3efa63b97a2dce /kernel/trace | |
| parent | 1f889ec62c3f0d8913f3c32f9aff2a1e15099346 (diff) | |
| parent | 5ac2b5c2721501a8f5c5e1cd4116cbc31ace6886 (diff) | |
Merge branch 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Ingo Molnar:
"Features:
- Add "uretprobes" - an optimization to uprobes, like kretprobes are
an optimization to kprobes. "perf probe -x file sym%return" now
works like kretprobes. By Oleg Nesterov.
- Introduce per core aggregation in 'perf stat', from Stephane
Eranian.
- Add memory profiling via PEBS, from Stephane Eranian.
- Event group view for 'annotate' in --stdio, --tui and --gtk, from
Namhyung Kim.
- Add support for AMD NB and L2I "uncore" counters, by Jacob Shin.
- Add Ivy Bridge-EP uncore support, by Zheng Yan
- IBM zEnterprise EC12 oprofile support patchlet from Robert Richter.
- Add perf test entries for checking breakpoint overflow signal
handler issues, from Jiri Olsa.
- Add perf test entry for checking number of EXIT events, from
Namhyung Kim.
- Add perf test entries for checking --cpu in record and stat, from
Jiri Olsa.
- Introduce perf stat --repeat forever, from Frederik Deweerdt.
- Add --no-demangle to report/top, from Namhyung Kim.
- PowerPC fixes plus a couple of cleanups/optimizations in uprobes
and trace_uprobes, by Oleg Nesterov.
Various fixes and refactorings:
- Fix dependency of the python binding wrt libtraceevent, from
Naohiro Aota.
- Simplify some perf_evlist methods and to allow 'stat' to share code
with 'record' and 'trace', by Arnaldo Carvalho de Melo.
- Remove dead code related to libtraceevent integration, from
Namhyung Kim.
- Revert "perf sched: Handle PERF_RECORD_EXIT events" to get 'perf
sched lat' back working, by Arnaldo Carvalho de Melo.
- We don't use Newt anymore, just plain libslang, by Arnaldo Carvalho
de Melo.
- Kill a bunch of die() calls, from Namhyung Kim.
- Fix build on non-glibc systems due to libio.h absence, from Cody P
Schafer.
- Remove some perf_session and tracing dead code, from David Ahern.
- Honor parallel jobs, fix from Borislav Petkov.
- Introduce tools/lib/lk library, initially just removing duplication
among tools/perf and tools/vm, from Borislav Petkov.
... and many more I missed to list, see the shortlog and git log for
more details."
* 'perf-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (136 commits)
perf/x86/intel/P4: Robistify P4 PMU types
perf/x86/amd: Fix AMD NB and L2I "uncore" support
perf/x86/amd: Remove old-style NB counter support from perf_event_amd.c
perf/x86: Check all MSRs before passing hw check
perf/x86/amd: Add support for AMD NB and L2I "uncore" counters
perf/x86/intel: Add Ivy Bridge-EP uncore support
perf/x86/intel: Fix SNB-EP CBO and PCU uncore PMU filter management
perf/x86: Avoid kfree() in CPU_{STARTING,DYING}
uprobes/perf: Avoid perf_trace_buf_prepare/submit if ->perf_events is empty
uprobes/tracing: Don't pass addr=ip to perf_trace_buf_submit()
uprobes/tracing: Change create_trace_uprobe() to support uretprobes
uprobes/tracing: Make seq_printf() code uretprobe-friendly
uprobes/tracing: Make register_uprobe_event() paths uretprobe-friendly
uprobes/tracing: Make uprobe_{trace,perf}_print() uretprobe-friendly
uprobes/tracing: Introduce is_ret_probe() and uretprobe_dispatcher()
uprobes/tracing: Introduce uprobe_{trace,perf}_print() helpers
uprobes/tracing: Generalize struct uprobe_trace_entry_head
uprobes/tracing: Kill the pointless local_save_flags/preempt_count calls
uprobes/tracing: Kill the pointless seq_print_ip_sym() call
uprobes/tracing: Kill the pointless task_pt_regs() calls
...
Diffstat (limited to 'kernel/trace')
| -rw-r--r-- | kernel/trace/trace.h | 5 | ||||
| -rw-r--r-- | kernel/trace/trace_uprobe.c | 203 |
2 files changed, 146 insertions, 62 deletions
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 9e014582e763..711ca7d3e7f1 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
| @@ -109,11 +109,6 @@ struct kretprobe_trace_entry_head { | |||
| 109 | unsigned long ret_ip; | 109 | unsigned long ret_ip; |
| 110 | }; | 110 | }; |
| 111 | 111 | ||
| 112 | struct uprobe_trace_entry_head { | ||
| 113 | struct trace_entry ent; | ||
| 114 | unsigned long ip; | ||
| 115 | }; | ||
| 116 | |||
| 117 | /* | 112 | /* |
| 118 | * trace_flag_type is an enumeration that holds different | 113 | * trace_flag_type is an enumeration that holds different |
| 119 | * states when a trace occurs. These are: | 114 | * states when a trace occurs. These are: |
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 8dad2a92dee9..32494fb0ee64 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c | |||
| @@ -28,6 +28,18 @@ | |||
| 28 | 28 | ||
| 29 | #define UPROBE_EVENT_SYSTEM "uprobes" | 29 | #define UPROBE_EVENT_SYSTEM "uprobes" |
| 30 | 30 | ||
| 31 | struct uprobe_trace_entry_head { | ||
| 32 | struct trace_entry ent; | ||
| 33 | unsigned long vaddr[]; | ||
| 34 | }; | ||
| 35 | |||
| 36 | #define SIZEOF_TRACE_ENTRY(is_return) \ | ||
| 37 | (sizeof(struct uprobe_trace_entry_head) + \ | ||
| 38 | sizeof(unsigned long) * (is_return ? 2 : 1)) | ||
| 39 | |||
| 40 | #define DATAOF_TRACE_ENTRY(entry, is_return) \ | ||
| 41 | ((void*)(entry) + SIZEOF_TRACE_ENTRY(is_return)) | ||
| 42 | |||
| 31 | struct trace_uprobe_filter { | 43 | struct trace_uprobe_filter { |
| 32 | rwlock_t rwlock; | 44 | rwlock_t rwlock; |
| 33 | int nr_systemwide; | 45 | int nr_systemwide; |
| @@ -64,6 +76,8 @@ static DEFINE_MUTEX(uprobe_lock); | |||
| 64 | static LIST_HEAD(uprobe_list); | 76 | static LIST_HEAD(uprobe_list); |
| 65 | 77 | ||
| 66 | static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs); | 78 | static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs); |
| 79 | static int uretprobe_dispatcher(struct uprobe_consumer *con, | ||
| 80 | unsigned long func, struct pt_regs *regs); | ||
| 67 | 81 | ||
| 68 | static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter) | 82 | static inline void init_trace_uprobe_filter(struct trace_uprobe_filter *filter) |
| 69 | { | 83 | { |
| @@ -77,11 +91,16 @@ static inline bool uprobe_filter_is_empty(struct trace_uprobe_filter *filter) | |||
| 77 | return !filter->nr_systemwide && list_empty(&filter->perf_events); | 91 | return !filter->nr_systemwide && list_empty(&filter->perf_events); |
| 78 | } | 92 | } |
| 79 | 93 | ||
| 94 | static inline bool is_ret_probe(struct trace_uprobe *tu) | ||
| 95 | { | ||
| 96 | return tu->consumer.ret_handler != NULL; | ||
| 97 | } | ||
| 98 | |||
| 80 | /* | 99 | /* |
| 81 | * Allocate new trace_uprobe and initialize it (including uprobes). | 100 | * Allocate new trace_uprobe and initialize it (including uprobes). |
| 82 | */ | 101 | */ |
| 83 | static struct trace_uprobe * | 102 | static struct trace_uprobe * |
| 84 | alloc_trace_uprobe(const char *group, const char *event, int nargs) | 103 | alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret) |
| 85 | { | 104 | { |
| 86 | struct trace_uprobe *tu; | 105 | struct trace_uprobe *tu; |
| 87 | 106 | ||
| @@ -106,6 +125,8 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs) | |||
| 106 | 125 | ||
| 107 | INIT_LIST_HEAD(&tu->list); | 126 | INIT_LIST_HEAD(&tu->list); |
| 108 | tu->consumer.handler = uprobe_dispatcher; | 127 | tu->consumer.handler = uprobe_dispatcher; |
| 128 | if (is_ret) | ||
| 129 | tu->consumer.ret_handler = uretprobe_dispatcher; | ||
| 109 | init_trace_uprobe_filter(&tu->filter); | 130 | init_trace_uprobe_filter(&tu->filter); |
| 110 | return tu; | 131 | return tu; |
| 111 | 132 | ||
| @@ -180,7 +201,7 @@ end: | |||
| 180 | 201 | ||
| 181 | /* | 202 | /* |
| 182 | * Argument syntax: | 203 | * Argument syntax: |
| 183 | * - Add uprobe: p[:[GRP/]EVENT] PATH:SYMBOL[+offs] [FETCHARGS] | 204 | * - Add uprobe: p|r[:[GRP/]EVENT] PATH:SYMBOL [FETCHARGS] |
| 184 | * | 205 | * |
| 185 | * - Remove uprobe: -:[GRP/]EVENT | 206 | * - Remove uprobe: -:[GRP/]EVENT |
| 186 | */ | 207 | */ |
| @@ -192,20 +213,23 @@ static int create_trace_uprobe(int argc, char **argv) | |||
| 192 | char buf[MAX_EVENT_NAME_LEN]; | 213 | char buf[MAX_EVENT_NAME_LEN]; |
| 193 | struct path path; | 214 | struct path path; |
| 194 | unsigned long offset; | 215 | unsigned long offset; |
| 195 | bool is_delete; | 216 | bool is_delete, is_return; |
| 196 | int i, ret; | 217 | int i, ret; |
| 197 | 218 | ||
| 198 | inode = NULL; | 219 | inode = NULL; |
| 199 | ret = 0; | 220 | ret = 0; |
| 200 | is_delete = false; | 221 | is_delete = false; |
| 222 | is_return = false; | ||
| 201 | event = NULL; | 223 | event = NULL; |
| 202 | group = NULL; | 224 | group = NULL; |
| 203 | 225 | ||
| 204 | /* argc must be >= 1 */ | 226 | /* argc must be >= 1 */ |
| 205 | if (argv[0][0] == '-') | 227 | if (argv[0][0] == '-') |
| 206 | is_delete = true; | 228 | is_delete = true; |
| 229 | else if (argv[0][0] == 'r') | ||
| 230 | is_return = true; | ||
| 207 | else if (argv[0][0] != 'p') { | 231 | else if (argv[0][0] != 'p') { |
| 208 | pr_info("Probe definition must be started with 'p' or '-'.\n"); | 232 | pr_info("Probe definition must be started with 'p', 'r' or '-'.\n"); |
| 209 | return -EINVAL; | 233 | return -EINVAL; |
| 210 | } | 234 | } |
| 211 | 235 | ||
| @@ -303,7 +327,7 @@ static int create_trace_uprobe(int argc, char **argv) | |||
| 303 | kfree(tail); | 327 | kfree(tail); |
| 304 | } | 328 | } |
| 305 | 329 | ||
| 306 | tu = alloc_trace_uprobe(group, event, argc); | 330 | tu = alloc_trace_uprobe(group, event, argc, is_return); |
| 307 | if (IS_ERR(tu)) { | 331 | if (IS_ERR(tu)) { |
| 308 | pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu)); | 332 | pr_info("Failed to allocate trace_uprobe.(%d)\n", (int)PTR_ERR(tu)); |
| 309 | ret = PTR_ERR(tu); | 333 | ret = PTR_ERR(tu); |
| @@ -414,9 +438,10 @@ static void probes_seq_stop(struct seq_file *m, void *v) | |||
| 414 | static int probes_seq_show(struct seq_file *m, void *v) | 438 | static int probes_seq_show(struct seq_file *m, void *v) |
| 415 | { | 439 | { |
| 416 | struct trace_uprobe *tu = v; | 440 | struct trace_uprobe *tu = v; |
| 441 | char c = is_ret_probe(tu) ? 'r' : 'p'; | ||
| 417 | int i; | 442 | int i; |
| 418 | 443 | ||
| 419 | seq_printf(m, "p:%s/%s", tu->call.class->system, tu->call.name); | 444 | seq_printf(m, "%c:%s/%s", c, tu->call.class->system, tu->call.name); |
| 420 | seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset); | 445 | seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset); |
| 421 | 446 | ||
| 422 | for (i = 0; i < tu->nr_args; i++) | 447 | for (i = 0; i < tu->nr_args; i++) |
| @@ -485,65 +510,81 @@ static const struct file_operations uprobe_profile_ops = { | |||
| 485 | .release = seq_release, | 510 | .release = seq_release, |
| 486 | }; | 511 | }; |
| 487 | 512 | ||
| 488 | /* uprobe handler */ | 513 | static void uprobe_trace_print(struct trace_uprobe *tu, |
| 489 | static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs) | 514 | unsigned long func, struct pt_regs *regs) |
| 490 | { | 515 | { |
| 491 | struct uprobe_trace_entry_head *entry; | 516 | struct uprobe_trace_entry_head *entry; |
| 492 | struct ring_buffer_event *event; | 517 | struct ring_buffer_event *event; |
| 493 | struct ring_buffer *buffer; | 518 | struct ring_buffer *buffer; |
| 494 | u8 *data; | 519 | void *data; |
| 495 | int size, i, pc; | 520 | int size, i; |
| 496 | unsigned long irq_flags; | ||
| 497 | struct ftrace_event_call *call = &tu->call; | 521 | struct ftrace_event_call *call = &tu->call; |
| 498 | 522 | ||
| 499 | local_save_flags(irq_flags); | 523 | size = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); |
| 500 | pc = preempt_count(); | ||
| 501 | |||
| 502 | size = sizeof(*entry) + tu->size; | ||
| 503 | |||
| 504 | event = trace_current_buffer_lock_reserve(&buffer, call->event.type, | 524 | event = trace_current_buffer_lock_reserve(&buffer, call->event.type, |
| 505 | size, irq_flags, pc); | 525 | size + tu->size, 0, 0); |
| 506 | if (!event) | 526 | if (!event) |
| 507 | return 0; | 527 | return; |
| 508 | 528 | ||
| 509 | entry = ring_buffer_event_data(event); | 529 | entry = ring_buffer_event_data(event); |
| 510 | entry->ip = instruction_pointer(task_pt_regs(current)); | 530 | if (is_ret_probe(tu)) { |
| 511 | data = (u8 *)&entry[1]; | 531 | entry->vaddr[0] = func; |
| 532 | entry->vaddr[1] = instruction_pointer(regs); | ||
| 533 | data = DATAOF_TRACE_ENTRY(entry, true); | ||
| 534 | } else { | ||
| 535 | entry->vaddr[0] = instruction_pointer(regs); | ||
| 536 | data = DATAOF_TRACE_ENTRY(entry, false); | ||
| 537 | } | ||
| 538 | |||
| 512 | for (i = 0; i < tu->nr_args; i++) | 539 | for (i = 0; i < tu->nr_args; i++) |
| 513 | call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset); | 540 | call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset); |
| 514 | 541 | ||
| 515 | if (!filter_current_check_discard(buffer, call, entry, event)) | 542 | if (!filter_current_check_discard(buffer, call, entry, event)) |
| 516 | trace_buffer_unlock_commit(buffer, event, irq_flags, pc); | 543 | trace_buffer_unlock_commit(buffer, event, 0, 0); |
| 544 | } | ||
| 517 | 545 | ||
| 546 | /* uprobe handler */ | ||
| 547 | static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs) | ||
| 548 | { | ||
| 549 | if (!is_ret_probe(tu)) | ||
| 550 | uprobe_trace_print(tu, 0, regs); | ||
| 518 | return 0; | 551 | return 0; |
| 519 | } | 552 | } |
| 520 | 553 | ||
| 554 | static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func, | ||
| 555 | struct pt_regs *regs) | ||
| 556 | { | ||
| 557 | uprobe_trace_print(tu, func, regs); | ||
| 558 | } | ||
| 559 | |||
| 521 | /* Event entry printers */ | 560 | /* Event entry printers */ |
| 522 | static enum print_line_t | 561 | static enum print_line_t |
| 523 | print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event) | 562 | print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *event) |
| 524 | { | 563 | { |
| 525 | struct uprobe_trace_entry_head *field; | 564 | struct uprobe_trace_entry_head *entry; |
| 526 | struct trace_seq *s = &iter->seq; | 565 | struct trace_seq *s = &iter->seq; |
| 527 | struct trace_uprobe *tu; | 566 | struct trace_uprobe *tu; |
| 528 | u8 *data; | 567 | u8 *data; |
| 529 | int i; | 568 | int i; |
| 530 | 569 | ||
| 531 | field = (struct uprobe_trace_entry_head *)iter->ent; | 570 | entry = (struct uprobe_trace_entry_head *)iter->ent; |
| 532 | tu = container_of(event, struct trace_uprobe, call.event); | 571 | tu = container_of(event, struct trace_uprobe, call.event); |
| 533 | 572 | ||
| 534 | if (!trace_seq_printf(s, "%s: (", tu->call.name)) | 573 | if (is_ret_probe(tu)) { |
| 535 | goto partial; | 574 | if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", tu->call.name, |
| 536 | 575 | entry->vaddr[1], entry->vaddr[0])) | |
| 537 | if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) | 576 | goto partial; |
| 538 | goto partial; | 577 | data = DATAOF_TRACE_ENTRY(entry, true); |
| 539 | 578 | } else { | |
| 540 | if (!trace_seq_puts(s, ")")) | 579 | if (!trace_seq_printf(s, "%s: (0x%lx)", tu->call.name, |
| 541 | goto partial; | 580 | entry->vaddr[0])) |
| 581 | goto partial; | ||
| 582 | data = DATAOF_TRACE_ENTRY(entry, false); | ||
| 583 | } | ||
| 542 | 584 | ||
| 543 | data = (u8 *)&field[1]; | ||
| 544 | for (i = 0; i < tu->nr_args; i++) { | 585 | for (i = 0; i < tu->nr_args; i++) { |
| 545 | if (!tu->args[i].type->print(s, tu->args[i].name, | 586 | if (!tu->args[i].type->print(s, tu->args[i].name, |
| 546 | data + tu->args[i].offset, field)) | 587 | data + tu->args[i].offset, entry)) |
| 547 | goto partial; | 588 | goto partial; |
| 548 | } | 589 | } |
| 549 | 590 | ||
| @@ -595,16 +636,23 @@ static void probe_event_disable(struct trace_uprobe *tu, int flag) | |||
| 595 | 636 | ||
| 596 | static int uprobe_event_define_fields(struct ftrace_event_call *event_call) | 637 | static int uprobe_event_define_fields(struct ftrace_event_call *event_call) |
| 597 | { | 638 | { |
| 598 | int ret, i; | 639 | int ret, i, size; |
| 599 | struct uprobe_trace_entry_head field; | 640 | struct uprobe_trace_entry_head field; |
| 600 | struct trace_uprobe *tu = (struct trace_uprobe *)event_call->data; | 641 | struct trace_uprobe *tu = event_call->data; |
| 601 | 642 | ||
| 602 | DEFINE_FIELD(unsigned long, ip, FIELD_STRING_IP, 0); | 643 | if (is_ret_probe(tu)) { |
| 644 | DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_FUNC, 0); | ||
| 645 | DEFINE_FIELD(unsigned long, vaddr[1], FIELD_STRING_RETIP, 0); | ||
| 646 | size = SIZEOF_TRACE_ENTRY(true); | ||
| 647 | } else { | ||
| 648 | DEFINE_FIELD(unsigned long, vaddr[0], FIELD_STRING_IP, 0); | ||
| 649 | size = SIZEOF_TRACE_ENTRY(false); | ||
| 650 | } | ||
| 603 | /* Set argument names as fields */ | 651 | /* Set argument names as fields */ |
| 604 | for (i = 0; i < tu->nr_args; i++) { | 652 | for (i = 0; i < tu->nr_args; i++) { |
| 605 | ret = trace_define_field(event_call, tu->args[i].type->fmttype, | 653 | ret = trace_define_field(event_call, tu->args[i].type->fmttype, |
| 606 | tu->args[i].name, | 654 | tu->args[i].name, |
| 607 | sizeof(field) + tu->args[i].offset, | 655 | size + tu->args[i].offset, |
| 608 | tu->args[i].type->size, | 656 | tu->args[i].type->size, |
| 609 | tu->args[i].type->is_signed, | 657 | tu->args[i].type->is_signed, |
| 610 | FILTER_OTHER); | 658 | FILTER_OTHER); |
| @@ -622,8 +670,13 @@ static int __set_print_fmt(struct trace_uprobe *tu, char *buf, int len) | |||
| 622 | int i; | 670 | int i; |
| 623 | int pos = 0; | 671 | int pos = 0; |
| 624 | 672 | ||
| 625 | fmt = "(%lx)"; | 673 | if (is_ret_probe(tu)) { |
| 626 | arg = "REC->" FIELD_STRING_IP; | 674 | fmt = "(%lx <- %lx)"; |
| 675 | arg = "REC->" FIELD_STRING_FUNC ", REC->" FIELD_STRING_RETIP; | ||
| 676 | } else { | ||
| 677 | fmt = "(%lx)"; | ||
| 678 | arg = "REC->" FIELD_STRING_IP; | ||
| 679 | } | ||
| 627 | 680 | ||
| 628 | /* When len=0, we just calculate the needed length */ | 681 | /* When len=0, we just calculate the needed length */ |
| 629 | 682 | ||
| @@ -752,49 +805,68 @@ static bool uprobe_perf_filter(struct uprobe_consumer *uc, | |||
| 752 | return ret; | 805 | return ret; |
| 753 | } | 806 | } |
| 754 | 807 | ||
| 755 | /* uprobe profile handler */ | 808 | static void uprobe_perf_print(struct trace_uprobe *tu, |
| 756 | static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs) | 809 | unsigned long func, struct pt_regs *regs) |
| 757 | { | 810 | { |
| 758 | struct ftrace_event_call *call = &tu->call; | 811 | struct ftrace_event_call *call = &tu->call; |
| 759 | struct uprobe_trace_entry_head *entry; | 812 | struct uprobe_trace_entry_head *entry; |
| 760 | struct hlist_head *head; | 813 | struct hlist_head *head; |
| 761 | u8 *data; | 814 | void *data; |
| 762 | int size, __size, i; | 815 | int size, rctx, i; |
| 763 | int rctx; | ||
| 764 | 816 | ||
| 765 | if (!uprobe_perf_filter(&tu->consumer, 0, current->mm)) | 817 | size = SIZEOF_TRACE_ENTRY(is_ret_probe(tu)); |
| 766 | return UPROBE_HANDLER_REMOVE; | 818 | size = ALIGN(size + tu->size + sizeof(u32), sizeof(u64)) - sizeof(u32); |
| 767 | |||
| 768 | __size = sizeof(*entry) + tu->size; | ||
| 769 | size = ALIGN(__size + sizeof(u32), sizeof(u64)); | ||
| 770 | size -= sizeof(u32); | ||
| 771 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) | 819 | if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough")) |
| 772 | return 0; | 820 | return; |
| 773 | 821 | ||
| 774 | preempt_disable(); | 822 | preempt_disable(); |
| 823 | head = this_cpu_ptr(call->perf_events); | ||
| 824 | if (hlist_empty(head)) | ||
| 825 | goto out; | ||
| 775 | 826 | ||
| 776 | entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); | 827 | entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx); |
| 777 | if (!entry) | 828 | if (!entry) |
| 778 | goto out; | 829 | goto out; |
| 779 | 830 | ||
| 780 | entry->ip = instruction_pointer(task_pt_regs(current)); | 831 | if (is_ret_probe(tu)) { |
| 781 | data = (u8 *)&entry[1]; | 832 | entry->vaddr[0] = func; |
| 833 | entry->vaddr[1] = instruction_pointer(regs); | ||
| 834 | data = DATAOF_TRACE_ENTRY(entry, true); | ||
| 835 | } else { | ||
| 836 | entry->vaddr[0] = instruction_pointer(regs); | ||
| 837 | data = DATAOF_TRACE_ENTRY(entry, false); | ||
| 838 | } | ||
| 839 | |||
| 782 | for (i = 0; i < tu->nr_args; i++) | 840 | for (i = 0; i < tu->nr_args; i++) |
| 783 | call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset); | 841 | call_fetch(&tu->args[i].fetch, regs, data + tu->args[i].offset); |
| 784 | 842 | ||
| 785 | head = this_cpu_ptr(call->perf_events); | 843 | perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL); |
| 786 | perf_trace_buf_submit(entry, size, rctx, entry->ip, 1, regs, head, NULL); | ||
| 787 | |||
| 788 | out: | 844 | out: |
| 789 | preempt_enable(); | 845 | preempt_enable(); |
| 846 | } | ||
| 847 | |||
| 848 | /* uprobe profile handler */ | ||
| 849 | static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs) | ||
| 850 | { | ||
| 851 | if (!uprobe_perf_filter(&tu->consumer, 0, current->mm)) | ||
| 852 | return UPROBE_HANDLER_REMOVE; | ||
| 853 | |||
| 854 | if (!is_ret_probe(tu)) | ||
| 855 | uprobe_perf_print(tu, 0, regs); | ||
| 790 | return 0; | 856 | return 0; |
| 791 | } | 857 | } |
| 858 | |||
| 859 | static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func, | ||
| 860 | struct pt_regs *regs) | ||
| 861 | { | ||
| 862 | uprobe_perf_print(tu, func, regs); | ||
| 863 | } | ||
| 792 | #endif /* CONFIG_PERF_EVENTS */ | 864 | #endif /* CONFIG_PERF_EVENTS */ |
| 793 | 865 | ||
| 794 | static | 866 | static |
| 795 | int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, void *data) | 867 | int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, void *data) |
| 796 | { | 868 | { |
| 797 | struct trace_uprobe *tu = (struct trace_uprobe *)event->data; | 869 | struct trace_uprobe *tu = event->data; |
| 798 | 870 | ||
| 799 | switch (type) { | 871 | switch (type) { |
| 800 | case TRACE_REG_REGISTER: | 872 | case TRACE_REG_REGISTER: |
| @@ -843,6 +915,23 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs) | |||
| 843 | return ret; | 915 | return ret; |
| 844 | } | 916 | } |
| 845 | 917 | ||
| 918 | static int uretprobe_dispatcher(struct uprobe_consumer *con, | ||
| 919 | unsigned long func, struct pt_regs *regs) | ||
| 920 | { | ||
| 921 | struct trace_uprobe *tu; | ||
| 922 | |||
| 923 | tu = container_of(con, struct trace_uprobe, consumer); | ||
| 924 | |||
| 925 | if (tu->flags & TP_FLAG_TRACE) | ||
| 926 | uretprobe_trace_func(tu, func, regs); | ||
| 927 | |||
| 928 | #ifdef CONFIG_PERF_EVENTS | ||
| 929 | if (tu->flags & TP_FLAG_PROFILE) | ||
| 930 | uretprobe_perf_func(tu, func, regs); | ||
| 931 | #endif | ||
| 932 | return 0; | ||
| 933 | } | ||
| 934 | |||
| 846 | static struct trace_event_functions uprobe_funcs = { | 935 | static struct trace_event_functions uprobe_funcs = { |
| 847 | .trace = print_uprobe_event | 936 | .trace = print_uprobe_event |
| 848 | }; | 937 | }; |
