Diffstat (limited to 'kernel/trace/trace_uprobe.c')
-rw-r--r--	kernel/trace/trace_uprobe.c	211
1 files changed, 147 insertions, 64 deletions

diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 79e52d93860b..930e51462dc8 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -260,6 +260,7 @@ alloc_trace_uprobe(const char *group, const char *event, int nargs, bool is_ret)
 		goto error;
 
 	INIT_LIST_HEAD(&tu->list);
+	INIT_LIST_HEAD(&tu->tp.files);
 	tu->consumer.handler = uprobe_dispatcher;
 	if (is_ret)
 		tu->consumer.ret_handler = uretprobe_dispatcher;
@@ -293,7 +294,7 @@ static struct trace_uprobe *find_probe_event(const char *event, const char *grou
 	struct trace_uprobe *tu;
 
 	list_for_each_entry(tu, &uprobe_list, list)
-		if (strcmp(tu->tp.call.name, event) == 0 &&
+		if (strcmp(ftrace_event_name(&tu->tp.call), event) == 0 &&
 		    strcmp(tu->tp.call.class->system, group) == 0)
 			return tu;
 
@@ -323,7 +324,8 @@ static int register_trace_uprobe(struct trace_uprobe *tu)
 	mutex_lock(&uprobe_lock);
 
 	/* register as an event */
-	old_tu = find_probe_event(tu->tp.call.name, tu->tp.call.class->system);
+	old_tu = find_probe_event(ftrace_event_name(&tu->tp.call),
+				  tu->tp.call.class->system);
 	if (old_tu) {
 		/* delete old event */
 		ret = unregister_trace_uprobe(old_tu);
@@ -598,7 +600,8 @@ static int probes_seq_show(struct seq_file *m, void *v)
 	char c = is_ret_probe(tu) ? 'r' : 'p';
 	int i;
 
-	seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system, tu->tp.call.name);
+	seq_printf(m, "%c:%s/%s", c, tu->tp.call.class->system,
+			ftrace_event_name(&tu->tp.call));
 	seq_printf(m, " %s:0x%p", tu->filename, (void *)tu->offset);
 
 	for (i = 0; i < tu->tp.nr_args; i++)
@@ -648,7 +651,8 @@ static int probes_profile_seq_show(struct seq_file *m, void *v)
 {
 	struct trace_uprobe *tu = v;
 
-	seq_printf(m, " %s %-44s %15lu\n", tu->filename, tu->tp.call.name, tu->nhit);
+	seq_printf(m, " %s %-44s %15lu\n", tu->filename,
+			ftrace_event_name(&tu->tp.call), tu->nhit);
 	return 0;
 }
 
@@ -758,31 +762,32 @@ static void uprobe_buffer_put(struct uprobe_cpu_buffer *ucb)
 	mutex_unlock(&ucb->mutex);
 }
 
-static void uprobe_trace_print(struct trace_uprobe *tu,
-				unsigned long func, struct pt_regs *regs)
+static void __uprobe_trace_func(struct trace_uprobe *tu,
+				unsigned long func, struct pt_regs *regs,
+				struct uprobe_cpu_buffer *ucb, int dsize,
+				struct ftrace_event_file *ftrace_file)
 {
 	struct uprobe_trace_entry_head *entry;
 	struct ring_buffer_event *event;
 	struct ring_buffer *buffer;
-	struct uprobe_cpu_buffer *ucb;
 	void *data;
-	int size, dsize, esize;
+	int size, esize;
 	struct ftrace_event_call *call = &tu->tp.call;
 
-	dsize = __get_data_size(&tu->tp, regs);
-	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
+	WARN_ON(call != ftrace_file->event_call);
 
-	if (WARN_ON_ONCE(!uprobe_cpu_buffer || tu->tp.size + dsize > PAGE_SIZE))
+	if (WARN_ON_ONCE(tu->tp.size + dsize > PAGE_SIZE))
 		return;
 
-	ucb = uprobe_buffer_get();
-	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
+	if (ftrace_trigger_soft_disabled(ftrace_file))
+		return;
 
+	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
 	size = esize + tu->tp.size + dsize;
-	event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
-						  size, 0, 0);
+	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
+						call->event.type, size, 0, 0);
 	if (!event)
-		goto out;
+		return;
 
 	entry = ring_buffer_event_data(event);
 	if (is_ret_probe(tu)) {
@@ -796,25 +801,36 @@ static void uprobe_trace_print(struct trace_uprobe *tu,
 
 	memcpy(data, ucb->buf, tu->tp.size + dsize);
 
-	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(buffer, event, 0, 0);
-
-out:
-	uprobe_buffer_put(ucb);
+	event_trigger_unlock_commit(ftrace_file, buffer, event, entry, 0, 0);
 }
 
 /* uprobe handler */
-static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs)
+static int uprobe_trace_func(struct trace_uprobe *tu, struct pt_regs *regs,
+			     struct uprobe_cpu_buffer *ucb, int dsize)
 {
-	if (!is_ret_probe(tu))
-		uprobe_trace_print(tu, 0, regs);
+	struct event_file_link *link;
+
+	if (is_ret_probe(tu))
+		return 0;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(link, &tu->tp.files, list)
+		__uprobe_trace_func(tu, 0, regs, ucb, dsize, link->file);
+	rcu_read_unlock();
+
 	return 0;
 }
 
 static void uretprobe_trace_func(struct trace_uprobe *tu, unsigned long func,
-				struct pt_regs *regs)
+				 struct pt_regs *regs,
+				 struct uprobe_cpu_buffer *ucb, int dsize)
 {
-	uprobe_trace_print(tu, func, regs);
+	struct event_file_link *link;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(link, &tu->tp.files, list)
+		__uprobe_trace_func(tu, func, regs, ucb, dsize, link->file);
+	rcu_read_unlock();
 }
 
 /* Event entry printers */
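
The hunk above is the central behavioural change: instead of committing each hit into the single global trace buffer, uprobe_trace_func() and uretprobe_trace_func() now walk the probe's tu->tp.files list under rcu_read_lock() and hand the already-filled argument buffer to __uprobe_trace_func() once per attached ftrace_event_file, so every trace instance that enabled the event gets its own copy. A rough user-space sketch of that fan-out follows; the names (struct probe, file_link, record_hit, and so on) are invented for illustration, and a plain singly linked list stands in for the kernel's RCU-protected list.

/*
 * Simplified model of the per-instance fan-out, not the kernel code itself.
 */
#include <stdio.h>
#include <stdlib.h>

struct trace_instance {
	const char *name;
};

struct file_link {
	struct trace_instance *inst;
	struct file_link *next;
};

struct probe {
	struct file_link *files;	/* list of attached trace instances */
};

static void attach(struct probe *p, struct trace_instance *inst)
{
	struct file_link *link = malloc(sizeof(*link));

	if (!link)
		abort();
	link->inst = inst;
	link->next = p->files;
	p->files = link;
}

/* Rough counterpart of __uprobe_trace_func(): record one hit in one instance. */
static void record_hit(struct trace_instance *inst, unsigned long addr)
{
	printf("%s: hit at 0x%lx\n", inst->name, addr);
}

/* Rough counterpart of uprobe_trace_func(): fan the hit out to every instance. */
static void probe_hit(struct probe *p, unsigned long addr)
{
	struct file_link *link;

	for (link = p->files; link; link = link->next)
		record_hit(link->inst, addr);
}

int main(void)
{
	struct trace_instance top = { "top-level" }, inst1 = { "instance-1" };
	struct probe p = { .files = NULL };

	attach(&p, &top);
	attach(&p, &inst1);
	probe_hit(&p, 0x4005d0);	/* both instances see the event */
	return 0;
}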
@@ -831,12 +847,14 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e
 	tu = container_of(event, struct trace_uprobe, tp.call.event);
 
 	if (is_ret_probe(tu)) {
-		if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", tu->tp.call.name,
+		if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
+				      ftrace_event_name(&tu->tp.call),
 				      entry->vaddr[1], entry->vaddr[0]))
 			goto partial;
 		data = DATAOF_TRACE_ENTRY(entry, true);
 	} else {
-		if (!trace_seq_printf(s, "%s: (0x%lx)", tu->tp.call.name,
+		if (!trace_seq_printf(s, "%s: (0x%lx)",
+				      ftrace_event_name(&tu->tp.call),
 				      entry->vaddr[0]))
 			goto partial;
 		data = DATAOF_TRACE_ENTRY(entry, false);
@@ -861,12 +879,24 @@ typedef bool (*filter_func_t)(struct uprobe_consumer *self,
 				struct mm_struct *mm);
 
 static int
-probe_event_enable(struct trace_uprobe *tu, int flag, filter_func_t filter)
+probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file,
+		   filter_func_t filter)
 {
-	int ret = 0;
+	bool enabled = trace_probe_is_enabled(&tu->tp);
+	struct event_file_link *link = NULL;
+	int ret;
+
+	if (file) {
+		link = kmalloc(sizeof(*link), GFP_KERNEL);
+		if (!link)
+			return -ENOMEM;
 
-	if (trace_probe_is_enabled(&tu->tp))
-		return -EINTR;
+		link->file = file;
+		list_add_tail_rcu(&link->list, &tu->tp.files);
+
+		tu->tp.flags |= TP_FLAG_TRACE;
+	} else
+		tu->tp.flags |= TP_FLAG_PROFILE;
 
 	ret = uprobe_buffer_enable();
 	if (ret < 0)
@@ -874,24 +904,49 @@ probe_event_enable(struct trace_uprobe *tu, int flag, filter_func_t filter)
 
 	WARN_ON(!uprobe_filter_is_empty(&tu->filter));
 
-	tu->tp.flags |= flag;
+	if (enabled)
+		return 0;
+
 	tu->consumer.filter = filter;
 	ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
-	if (ret)
-		tu->tp.flags &= ~flag;
+	if (ret) {
+		if (file) {
+			list_del(&link->list);
+			kfree(link);
+			tu->tp.flags &= ~TP_FLAG_TRACE;
+		} else
+			tu->tp.flags &= ~TP_FLAG_PROFILE;
+	}
 
 	return ret;
 }
 
-static void probe_event_disable(struct trace_uprobe *tu, int flag)
+static void
+probe_event_disable(struct trace_uprobe *tu, struct ftrace_event_file *file)
 {
 	if (!trace_probe_is_enabled(&tu->tp))
 		return;
 
+	if (file) {
+		struct event_file_link *link;
+
+		link = find_event_file_link(&tu->tp, file);
+		if (!link)
+			return;
+
+		list_del_rcu(&link->list);
+		/* synchronize with u{,ret}probe_trace_func */
+		synchronize_sched();
+		kfree(link);
+
+		if (!list_empty(&tu->tp.files))
+			return;
+	}
+
 	WARN_ON(!uprobe_filter_is_empty(&tu->filter));
 
 	uprobe_unregister(tu->inode, tu->offset, &tu->consumer);
-	tu->tp.flags &= ~flag;
+	tu->tp.flags &= file ? ~TP_FLAG_TRACE : ~TP_FLAG_PROFILE;
 
 	uprobe_buffer_disable();
 }
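
probe_event_enable()/probe_event_disable() above move from a bare flag to per-file bookkeeping: enabling from an ftrace instance allocates an event_file_link and appends it to tu->tp.files, the underlying uprobe is registered only for the first user and rolled back if registration fails, and disabling removes one link, waits for readers with synchronize_sched(), and only unregisters once the list is empty. The following is a loose user-space model of that "reference counting by list" idea; the toy_* names are invented, RCU is omitted, and the trace/perf flag interplay is simplified.

/*
 * Toy model of enable/disable link bookkeeping (illustrative only).
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define FLAG_TRACE	0x1
#define FLAG_PROFILE	0x2

struct toy_link {
	int file;			/* stands in for struct ftrace_event_file * */
	struct toy_link *next;
};

struct toy_probe {
	unsigned int flags;
	struct toy_link *files;
	bool registered;		/* stands in for uprobe_register() state */
};

static int toy_enable(struct toy_probe *p, int file)
{
	bool enabled = p->flags & (FLAG_TRACE | FLAG_PROFILE);

	if (file) {
		struct toy_link *link = malloc(sizeof(*link));

		if (!link)
			return -1;
		link->file = file;
		link->next = p->files;
		p->files = link;
		p->flags |= FLAG_TRACE;
	} else {
		p->flags |= FLAG_PROFILE;
	}

	if (!enabled) {
		p->registered = true;	/* kernel: uprobe_register() */
		printf("probe registered\n");
	}
	return 0;
}

static void toy_disable(struct toy_probe *p, int file)
{
	if (file) {
		struct toy_link **pp = &p->files;
		struct toy_link *link;

		while (*pp && (*pp)->file != file)
			pp = &(*pp)->next;
		if (!*pp)
			return;
		link = *pp;
		*pp = link->next;
		free(link);

		if (p->files)		/* other instances still attached */
			return;
		p->flags &= ~FLAG_TRACE;
	} else {
		p->flags &= ~FLAG_PROFILE;
	}

	if (!(p->flags & (FLAG_TRACE | FLAG_PROFILE)) && p->registered) {
		p->registered = false;	/* kernel: uprobe_unregister() */
		printf("probe unregistered\n");
	}
}

int main(void)
{
	struct toy_probe p = { 0 };

	toy_enable(&p, 1);	/* first ftrace instance: registers the probe */
	toy_enable(&p, 2);	/* second instance: just adds a link */
	toy_disable(&p, 1);	/* one user still left, probe stays registered */
	toy_disable(&p, 2);	/* last user gone: unregisters */
	return 0;
}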
@@ -1014,31 +1069,24 @@ static bool uprobe_perf_filter(struct uprobe_consumer *uc,
 	return ret;
 }
 
-static void uprobe_perf_print(struct trace_uprobe *tu,
-				unsigned long func, struct pt_regs *regs)
+static void __uprobe_perf_func(struct trace_uprobe *tu,
+			       unsigned long func, struct pt_regs *regs,
+			       struct uprobe_cpu_buffer *ucb, int dsize)
 {
 	struct ftrace_event_call *call = &tu->tp.call;
 	struct uprobe_trace_entry_head *entry;
 	struct hlist_head *head;
-	struct uprobe_cpu_buffer *ucb;
 	void *data;
-	int size, dsize, esize;
+	int size, esize;
 	int rctx;
 
-	dsize = __get_data_size(&tu->tp, regs);
 	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
 
-	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
-		return;
-
 	size = esize + tu->tp.size + dsize;
 	size = ALIGN(size + sizeof(u32), sizeof(u64)) - sizeof(u32);
 	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE, "profile buffer not large enough"))
 		return;
 
-	ucb = uprobe_buffer_get();
-	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
-
 	preempt_disable();
 	head = this_cpu_ptr(call->perf_events);
 	if (hlist_empty(head))
@@ -1068,46 +1116,49 @@ static void uprobe_perf_print(struct trace_uprobe *tu,
 	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
 out:
 	preempt_enable();
-	uprobe_buffer_put(ucb);
 }
 
 /* uprobe profile handler */
-static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs)
+static int uprobe_perf_func(struct trace_uprobe *tu, struct pt_regs *regs,
+			    struct uprobe_cpu_buffer *ucb, int dsize)
 {
 	if (!uprobe_perf_filter(&tu->consumer, 0, current->mm))
 		return UPROBE_HANDLER_REMOVE;
 
 	if (!is_ret_probe(tu))
-		uprobe_perf_print(tu, 0, regs);
+		__uprobe_perf_func(tu, 0, regs, ucb, dsize);
 	return 0;
 }
 
 static void uretprobe_perf_func(struct trace_uprobe *tu, unsigned long func,
-				struct pt_regs *regs)
+				struct pt_regs *regs,
+				struct uprobe_cpu_buffer *ucb, int dsize)
 {
-	uprobe_perf_print(tu, func, regs);
+	__uprobe_perf_func(tu, func, regs, ucb, dsize);
 }
 #endif /* CONFIG_PERF_EVENTS */
 
-static
-int trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type, void *data)
+static int
+trace_uprobe_register(struct ftrace_event_call *event, enum trace_reg type,
+		      void *data)
 {
 	struct trace_uprobe *tu = event->data;
+	struct ftrace_event_file *file = data;
 
 	switch (type) {
 	case TRACE_REG_REGISTER:
-		return probe_event_enable(tu, TP_FLAG_TRACE, NULL);
+		return probe_event_enable(tu, file, NULL);
 
 	case TRACE_REG_UNREGISTER:
-		probe_event_disable(tu, TP_FLAG_TRACE);
+		probe_event_disable(tu, file);
 		return 0;
 
 #ifdef CONFIG_PERF_EVENTS
 	case TRACE_REG_PERF_REGISTER:
-		return probe_event_enable(tu, TP_FLAG_PROFILE, uprobe_perf_filter);
+		return probe_event_enable(tu, NULL, uprobe_perf_filter);
 
 	case TRACE_REG_PERF_UNREGISTER:
-		probe_event_disable(tu, TP_FLAG_PROFILE);
+		probe_event_disable(tu, NULL);
 		return 0;
 
 	case TRACE_REG_PERF_OPEN:
@@ -1127,8 +1178,11 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
 {
 	struct trace_uprobe *tu;
 	struct uprobe_dispatch_data udd;
+	struct uprobe_cpu_buffer *ucb;
+	int dsize, esize;
 	int ret = 0;
 
+
 	tu = container_of(con, struct trace_uprobe, consumer);
 	tu->nhit++;
 
@@ -1137,13 +1191,29 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
 
 	current->utask->vaddr = (unsigned long) &udd;
 
+#ifdef CONFIG_PERF_EVENTS
+	if ((tu->tp.flags & TP_FLAG_TRACE) == 0 &&
+	    !uprobe_perf_filter(&tu->consumer, 0, current->mm))
+		return UPROBE_HANDLER_REMOVE;
+#endif
+
+	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
+		return 0;
+
+	dsize = __get_data_size(&tu->tp, regs);
+	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
+
+	ucb = uprobe_buffer_get();
+	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
+
 	if (tu->tp.flags & TP_FLAG_TRACE)
-		ret |= uprobe_trace_func(tu, regs);
+		ret |= uprobe_trace_func(tu, regs, ucb, dsize);
 
 #ifdef CONFIG_PERF_EVENTS
 	if (tu->tp.flags & TP_FLAG_PROFILE)
-		ret |= uprobe_perf_func(tu, regs);
+		ret |= uprobe_perf_func(tu, regs, ucb, dsize);
 #endif
+	uprobe_buffer_put(ucb);
 	return ret;
 }
 
@@ -1152,6 +1222,8 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con,
 {
 	struct trace_uprobe *tu;
 	struct uprobe_dispatch_data udd;
+	struct uprobe_cpu_buffer *ucb;
+	int dsize, esize;
 
 	tu = container_of(con, struct trace_uprobe, consumer);
 
@@ -1160,13 +1232,23 @@ static int uretprobe_dispatcher(struct uprobe_consumer *con,
 
 	current->utask->vaddr = (unsigned long) &udd;
 
+	if (WARN_ON_ONCE(!uprobe_cpu_buffer))
+		return 0;
+
+	dsize = __get_data_size(&tu->tp, regs);
+	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
+
+	ucb = uprobe_buffer_get();
+	store_trace_args(esize, &tu->tp, regs, ucb->buf, dsize);
+
 	if (tu->tp.flags & TP_FLAG_TRACE)
-		uretprobe_trace_func(tu, func, regs);
+		uretprobe_trace_func(tu, func, regs, ucb, dsize);
 
 #ifdef CONFIG_PERF_EVENTS
 	if (tu->tp.flags & TP_FLAG_PROFILE)
-		uretprobe_perf_func(tu, func, regs);
+		uretprobe_perf_func(tu, func, regs, ucb, dsize);
 #endif
+	uprobe_buffer_put(ucb);
 	return 0;
 }
 
@@ -1198,7 +1280,8 @@ static int register_uprobe_event(struct trace_uprobe *tu)
 	ret = trace_add_event_call(call);
 
 	if (ret) {
-		pr_info("Failed to register uprobe event: %s\n", call->name);
+		pr_info("Failed to register uprobe event: %s\n",
+			ftrace_event_name(call));
 		kfree(call->print_fmt);
 		unregister_ftrace_event(&call->event);
 	}
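
Taken together, the dispatcher hunks move the expensive per-hit work into one place: uprobe_dispatcher() and uretprobe_dispatcher() now compute the dynamic data size, take the per-CPU buffer with uprobe_buffer_get(), store the probe arguments once, hand the same buffer to both the ftrace and the perf paths, and release it with uprobe_buffer_put() on the way out, instead of each handler fetching and filling its own copy. A minimal user-space sketch of that pattern follows; every name in it is invented for illustration.

/*
 * Sketch of the "fill once, consume twice" dispatcher pattern.
 */
#include <stdio.h>
#include <string.h>

#define FLAG_TRACE	0x1
#define FLAG_PROFILE	0x2

struct buf { char data[64]; };

static struct buf scratch;		/* stands in for the per-CPU buffer pool */

static struct buf *buffer_get(void) { return &scratch; }
static void buffer_put(struct buf *b) { (void)b; }

static void trace_handler(struct buf *b, int dsize)
{
	printf("ftrace: %.*s\n", dsize, b->data);
}

static void perf_handler(struct buf *b, int dsize)
{
	printf("perf:   %.*s\n", dsize, b->data);
}

/* One probe hit: store the arguments once, then fan out to the consumers. */
static void dispatcher(unsigned int flags, const char *args)
{
	int dsize = (int)strlen(args);
	struct buf *b = buffer_get();

	memcpy(b->data, args, dsize);	/* rough counterpart of store_trace_args() */

	if (flags & FLAG_TRACE)
		trace_handler(b, dsize);
	if (flags & FLAG_PROFILE)
		perf_handler(b, dsize);

	buffer_put(b);
}

int main(void)
{
	dispatcher(FLAG_TRACE | FLAG_PROFILE, "arg1=1 arg2=2");
	return 0;
}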
