 kernel/trace/trace_kprobe.c | 27 ++++++++++-----------------
 1 file changed, 10 insertions(+), 17 deletions(-)
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 2e28ee36646f..6178abf3637e 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -942,7 +942,7 @@ static const struct file_operations kprobe_profile_ops = {
 };
 
 /* Kprobe handler */
-static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
+static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 	struct kprobe_trace_entry *entry;
@@ -962,7 +962,7 @@ static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
 	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
 						  irq_flags, pc);
 	if (!event)
-		return 0;
+		return;
 
 	entry = ring_buffer_event_data(event);
 	entry->nargs = tp->nr_args;
@@ -972,11 +972,10 @@ static __kprobes int kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
 
 	if (!filter_current_check_discard(buffer, call, entry, event))
 		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
-	return 0;
 }
 
 /* Kretprobe handler */
-static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
+static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
 					  struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
@@ -995,7 +994,7 @@ static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
 	event = trace_current_buffer_lock_reserve(&buffer, call->id, size,
 						  irq_flags, pc);
 	if (!event)
-		return 0;
+		return;
 
 	entry = ring_buffer_event_data(event);
 	entry->nargs = tp->nr_args;
@@ -1006,8 +1005,6 @@ static __kprobes int kretprobe_trace_func(struct kretprobe_instance *ri,
 
 	if (!filter_current_check_discard(buffer, call, entry, event))
 		trace_nowake_buffer_unlock_commit(buffer, event, irq_flags, pc);
-
-	return 0;
 }
 
 /* Event entry printers */
@@ -1237,7 +1234,7 @@ static int kretprobe_event_show_format(struct ftrace_event_call *call,
 #ifdef CONFIG_PERF_EVENTS
 
 /* Kprobe profile handler */
-static __kprobes int kprobe_profile_func(struct kprobe *kp,
+static __kprobes void kprobe_profile_func(struct kprobe *kp,
 					 struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
@@ -1252,11 +1249,11 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
 	size -= sizeof(u32);
 	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
 		     "profile buffer not large enough"))
-		return 0;
+		return;
 
 	entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
 	if (!entry)
-		return 0;
+		return;
 
 	entry->nargs = tp->nr_args;
 	entry->ip = (unsigned long)kp->addr;
@@ -1264,12 +1261,10 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
 
 	ftrace_perf_buf_submit(entry, size, rctx, entry->ip, 1, irq_flags);
-
-	return 0;
 }
 
 /* Kretprobe profile handler */
-static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
+static __kprobes void kretprobe_profile_func(struct kretprobe_instance *ri,
 					    struct pt_regs *regs)
 {
 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
@@ -1284,11 +1279,11 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
 	size -= sizeof(u32);
 	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
 		     "profile buffer not large enough"))
-		return 0;
+		return;
 
 	entry = ftrace_perf_buf_prepare(size, call->id, &rctx, &irq_flags);
 	if (!entry)
-		return 0;
+		return;
 
 	entry->nargs = tp->nr_args;
 	entry->func = (unsigned long)tp->rp.kp.addr;
@@ -1297,8 +1292,6 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
 		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
 
 	ftrace_perf_buf_submit(entry, size, rctx, entry->ret_ip, 1, irq_flags);
-
-	return 0;
 }
 
 static int probe_profile_enable(struct ftrace_event_call *call)
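
For context, a minimal sketch of how the now-void handlers could be invoked from the probe dispatcher in the same file; the dispatcher body below is an assumption based on the surrounding code (it is not part of this patch), and it illustrates why dropping the int return type is safe: the callers never used the handlers' return values, and only the dispatcher's own return value matters to the kprobes core.

/*
 * Hypothetical sketch only: the dispatcher shape and the TP_FLAG_* checks
 * are assumptions about code not shown in this diff.  The patch itself
 * merely changes the trace/profile handlers to return void.
 */
static __kprobes int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
{
	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);

	if (tp->flags & TP_FLAG_TRACE)
		kprobe_trace_func(kp, regs);	/* return value was always discarded */
#ifdef CONFIG_PERF_EVENTS
	if (tp->flags & TP_FLAG_PROFILE)
		kprobe_profile_func(kp, regs);
#endif
	return 0;	/* 0 tells the kprobes core to continue normal execution */
}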