Diffstat (limited to 'kernel/bpf/stackmap.c')
 -rw-r--r--  kernel/bpf/stackmap.c | 67
 1 file changed, 67 insertions, 0 deletions
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 04f6ec1679f0..3ba102b41512 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -402,6 +402,73 @@ const struct bpf_func_proto bpf_get_stackid_proto = {
 	.arg3_type	= ARG_ANYTHING,
 };
 
+BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
+	   u64, flags)
+{
+	u32 init_nr, trace_nr, copy_len, elem_size, num_elem;
+	bool user_build_id = flags & BPF_F_USER_BUILD_ID;
+	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
+	bool user = flags & BPF_F_USER_STACK;
+	struct perf_callchain_entry *trace;
+	bool kernel = !user;
+	int err = -EINVAL;
+	u64 *ips;
+
+	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
+			       BPF_F_USER_BUILD_ID)))
+		goto clear;
+	if (kernel && user_build_id)
+		goto clear;
+
+	elem_size = (user && user_build_id) ? sizeof(struct bpf_stack_build_id)
+					    : sizeof(u64);
+	if (unlikely(size % elem_size))
+		goto clear;
+
+	num_elem = size / elem_size;
+	if (sysctl_perf_event_max_stack < num_elem)
+		init_nr = 0;
+	else
+		init_nr = sysctl_perf_event_max_stack - num_elem;
+	trace = get_perf_callchain(regs, init_nr, kernel, user,
+				   sysctl_perf_event_max_stack, false, false);
+	if (unlikely(!trace))
+		goto err_fault;
+
+	trace_nr = trace->nr - init_nr;
+	if (trace_nr < skip)
+		goto err_fault;
+
+	trace_nr -= skip;
+	trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
+	copy_len = trace_nr * elem_size;
+	ips = trace->ip + skip + init_nr;
+	if (user && user_build_id)
+		stack_map_get_build_id_offset(buf, ips, trace_nr, user);
+	else
+		memcpy(buf, ips, copy_len);
+
+	if (size > copy_len)
+		memset(buf + copy_len, 0, size - copy_len);
+	return copy_len;
+
+err_fault:
+	err = -EFAULT;
+clear:
+	memset(buf, 0, size);
+	return err;
+}
+
+const struct bpf_func_proto bpf_get_stack_proto = {
+	.func		= bpf_get_stack,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
+	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
+	.arg4_type	= ARG_ANYTHING,
+};
+
 /* Called from eBPF program */
 static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
 {
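
For context, a minimal sketch of how a BPF program could call the helper added above. The attach point, program name, and stack depth are illustrative assumptions, not part of this patch; it assumes libbpf's <bpf/bpf_helpers.h>:

// SPDX-License-Identifier: GPL-2.0
/* Illustrative only: attach point, names, and depth are assumptions,
 * not part of the patch. Assumes libbpf's bpf_helpers.h. */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define STACK_DEPTH 32	/* 32 * sizeof(__u64) = 256 bytes, within the 512-byte BPF stack */

SEC("kprobe/do_sys_openat2")	/* hypothetical attach point */
int dump_user_stack(void *ctx)
{
	__u64 addrs[STACK_DEPTH];	/* ARG_PTR_TO_UNINIT_MEM: no pre-init needed */
	long n;

	/* Copy the user-space call chain into addrs; the helper returns
	 * the number of bytes written, or a negative error. */
	n = bpf_get_stack(ctx, addrs, sizeof(addrs), BPF_F_USER_STACK);
	if (n < 0)
		return 0;

	/* The first n / sizeof(__u64) entries of addrs are valid return
	 * addresses; a real program would push them to a map or ring buffer. */
	return 0;
}

char _license[] SEC("license") = "GPL";	/* required: the proto is gpl_only */

Because bpf_get_stack_proto marks the helper gpl_only, the program must declare a GPL-compatible license, and since arg2 is ARG_PTR_TO_UNINIT_MEM the buffer needs no zeroing up front; the helper itself memsets any unused tail of the buffer.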