author     Yonghong Song <yhs@fb.com>           2018-04-29 01:28:08 -0400
committer  Alexei Starovoitov <ast@kernel.org>  2018-04-29 11:45:53 -0400
commit     c195651e565ae7f41a68acb7d4aa7390ad215de1
tree       b11a7c5682239958cc2089fbbb95f626df7b5b7c /kernel/bpf/stackmap.c
parent     5f4126327494c189767ca64b222abadb07c55e3d
bpf: add bpf_get_stack helper
Currently, the stackmap and the bpf_get_stackid helper are provided for a bpf program to get the stack trace. This approach has a limitation, though: if two stack traces have the same hash, only one is stored in the stackmap table, so some stack traces are missing from the user's perspective.

This patch implements a new helper, bpf_get_stack, which sends stack traces directly to the bpf program. The bpf program can then see all stack traces, and either process them in the kernel or send them to user space through a shared map or bpf_perf_event_output.

Acked-by: Alexei Starovoitov <ast@fb.com>
Signed-off-by: Yonghong Song <yhs@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'kernel/bpf/stackmap.c')
-rw-r--r--	kernel/bpf/stackmap.c	67
1 file changed, 67 insertions(+), 0 deletions(-)
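For context, a minimal sketch of how a bpf program might call the new helper. This is illustrative only and not part of the patch: the attach point (kprobe/do_sys_open), the program name dump_stack, the stack depth, and the libbpf-style includes are all assumptions. Passing flags = 0 requests a kernel stack; BPF_F_USER_STACK would request the user stack instead, and the low BPF_F_SKIP_FIELD_MASK bits skip the topmost frames.

/* Hypothetical example; names, attach point, and depth are illustrative. */
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define MAX_DEPTH 32

SEC("kprobe/do_sys_open")
int dump_stack(struct pt_regs *ctx)
{
	__u64 ips[MAX_DEPTH] = {};
	long ret;

	/* Fill ips[] with up to MAX_DEPTH kernel return addresses.
	 * The helper returns the number of bytes written (a multiple of
	 * sizeof(u64) here) or a negative error, and zero-fills any
	 * unused tail of the buffer.
	 */
	ret = bpf_get_stack(ctx, ips, sizeof(ips), 0);
	if (ret < 0)
		return 0;

	/* The program can now inspect ips[] in the kernel, or push the
	 * raw trace to user space via a shared map or
	 * bpf_perf_event_output().
	 */
	return 0;
}

char _license[] SEC("license") = "GPL";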
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 04f6ec1679f0..3ba102b41512 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -402,6 +402,73 @@ const struct bpf_func_proto bpf_get_stackid_proto = {
 	.arg3_type	= ARG_ANYTHING,
 };
 
+BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
+	   u64, flags)
+{
+	u32 init_nr, trace_nr, copy_len, elem_size, num_elem;
+	bool user_build_id = flags & BPF_F_USER_BUILD_ID;
+	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
+	bool user = flags & BPF_F_USER_STACK;
+	struct perf_callchain_entry *trace;
+	bool kernel = !user;
+	int err = -EINVAL;
+	u64 *ips;
+
+	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
+			       BPF_F_USER_BUILD_ID)))
+		goto clear;
+	if (kernel && user_build_id)
+		goto clear;
+
+	elem_size = (user && user_build_id) ? sizeof(struct bpf_stack_build_id)
+					    : sizeof(u64);
+	if (unlikely(size % elem_size))
+		goto clear;
+
+	num_elem = size / elem_size;
+	if (sysctl_perf_event_max_stack < num_elem)
+		init_nr = 0;
+	else
+		init_nr = sysctl_perf_event_max_stack - num_elem;
+	trace = get_perf_callchain(regs, init_nr, kernel, user,
+				   sysctl_perf_event_max_stack, false, false);
+	if (unlikely(!trace))
+		goto err_fault;
+
+	trace_nr = trace->nr - init_nr;
+	if (trace_nr < skip)
+		goto err_fault;
+
+	trace_nr -= skip;
+	trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
+	copy_len = trace_nr * elem_size;
+	ips = trace->ip + skip + init_nr;
+	if (user && user_build_id)
+		stack_map_get_build_id_offset(buf, ips, trace_nr, user);
+	else
+		memcpy(buf, ips, copy_len);
+
+	if (size > copy_len)
+		memset(buf + copy_len, 0, size - copy_len);
+	return copy_len;
+
+err_fault:
+	err = -EFAULT;
+clear:
+	memset(buf, 0, size);
+	return err;
+}
+
+const struct bpf_func_proto bpf_get_stack_proto = {
+	.func		= bpf_get_stack,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
+	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
+	.arg4_type	= ARG_ANYTHING,
+};
+
 /* Called from eBPF program */
 static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
 {
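A further hypothetical sketch, with the same caveats as the example above: the BPF_F_USER_STACK | BPF_F_USER_BUILD_ID path added by this hunk returns an array of struct bpf_stack_build_id entries instead of raw addresses, so the buffer must be sized in multiples of that struct, and the helper rejects BPF_F_USER_BUILD_ID without BPF_F_USER_STACK.

/* Hypothetical example; attach point and depth are illustrative. */
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define MAX_USER_DEPTH 8

SEC("kprobe/do_sys_open")
int dump_user_stack(struct pt_regs *ctx)
{
	struct bpf_stack_build_id ids[MAX_USER_DEPTH] = {};
	long ret;

	/* Request the user-space stack as build-id + offset entries.
	 * size must be a multiple of sizeof(struct bpf_stack_build_id);
	 * otherwise the helper returns -EINVAL and clears the buffer.
	 */
	ret = bpf_get_stack(ctx, ids, sizeof(ids),
			    BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
	if (ret < 0)
		return 0;

	/* The first ret / sizeof(ids[0]) entries are now valid. */
	return 0;
}

char _license[] SEC("license") = "GPL";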