author		David S. Miller <davem@davemloft.net>	2018-05-07 23:35:08 -0400
committer	David S. Miller <davem@davemloft.net>	2018-05-07 23:35:08 -0400
commit		01adc4851a8090b46c7a5ed9cfc4b97e65abfbf4 (patch)
tree		2ae02593d7139962648dff203f3f9701e34ccbc3 /kernel/bpf/stackmap.c
parent		18b338f5f9539512e76fd9ebd4c6ca1a0e159e2b (diff)
parent		e94fa1d93117e7f1eb783dc9cae6c70650944449 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Minor conflict: a CHECK was placed into an if() statement
in net-next, whilst a newline was added to that CHECK
call in 'net'. Thanks to Daniel for the merge resolution.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel/bpf/stackmap.c')
-rw-r--r--	kernel/bpf/stackmap.c | 80
1 file changed, 72 insertions(+), 8 deletions(-)
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 57eeb1234b67..3ba102b41512 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -262,16 +262,11 @@ out:
 	return ret;
 }
 
-static void stack_map_get_build_id_offset(struct bpf_map *map,
-					  struct stack_map_bucket *bucket,
+static void stack_map_get_build_id_offset(struct bpf_stack_build_id *id_offs,
 					  u64 *ips, u32 trace_nr, bool user)
 {
 	int i;
 	struct vm_area_struct *vma;
-	struct bpf_stack_build_id *id_offs;
-
-	bucket->nr = trace_nr;
-	id_offs = (struct bpf_stack_build_id *)bucket->data;
 
 	/*
 	 * We cannot do up_read() in nmi context, so build_id lookup is
@@ -361,8 +356,10 @@ BPF_CALL_3(bpf_get_stackid, struct pt_regs *, regs, struct bpf_map *, map,
 			pcpu_freelist_pop(&smap->freelist);
 		if (unlikely(!new_bucket))
 			return -ENOMEM;
-		stack_map_get_build_id_offset(map, new_bucket, ips,
-					      trace_nr, user);
+		new_bucket->nr = trace_nr;
+		stack_map_get_build_id_offset(
+			(struct bpf_stack_build_id *)new_bucket->data,
+			ips, trace_nr, user);
 		trace_len = trace_nr * sizeof(struct bpf_stack_build_id);
 		if (hash_matches && bucket->nr == trace_nr &&
 		    memcmp(bucket->data, new_bucket->data, trace_len) == 0) {
@@ -405,6 +402,73 @@ const struct bpf_func_proto bpf_get_stackid_proto = {
 	.arg3_type	= ARG_ANYTHING,
 };
 
+BPF_CALL_4(bpf_get_stack, struct pt_regs *, regs, void *, buf, u32, size,
+	   u64, flags)
+{
+	u32 init_nr, trace_nr, copy_len, elem_size, num_elem;
+	bool user_build_id = flags & BPF_F_USER_BUILD_ID;
+	u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
+	bool user = flags & BPF_F_USER_STACK;
+	struct perf_callchain_entry *trace;
+	bool kernel = !user;
+	int err = -EINVAL;
+	u64 *ips;
+
+	if (unlikely(flags & ~(BPF_F_SKIP_FIELD_MASK | BPF_F_USER_STACK |
+			       BPF_F_USER_BUILD_ID)))
+		goto clear;
+	if (kernel && user_build_id)
+		goto clear;
+
+	elem_size = (user && user_build_id) ? sizeof(struct bpf_stack_build_id)
+					    : sizeof(u64);
+	if (unlikely(size % elem_size))
+		goto clear;
+
+	num_elem = size / elem_size;
+	if (sysctl_perf_event_max_stack < num_elem)
+		init_nr = 0;
+	else
+		init_nr = sysctl_perf_event_max_stack - num_elem;
+	trace = get_perf_callchain(regs, init_nr, kernel, user,
+				   sysctl_perf_event_max_stack, false, false);
+	if (unlikely(!trace))
+		goto err_fault;
+
+	trace_nr = trace->nr - init_nr;
+	if (trace_nr < skip)
+		goto err_fault;
+
+	trace_nr -= skip;
+	trace_nr = (trace_nr <= num_elem) ? trace_nr : num_elem;
+	copy_len = trace_nr * elem_size;
+	ips = trace->ip + skip + init_nr;
+	if (user && user_build_id)
+		stack_map_get_build_id_offset(buf, ips, trace_nr, user);
+	else
+		memcpy(buf, ips, copy_len);
+
+	if (size > copy_len)
+		memset(buf + copy_len, 0, size - copy_len);
+	return copy_len;
+
+err_fault:
+	err = -EFAULT;
+clear:
+	memset(buf, 0, size);
+	return err;
+}
+
+const struct bpf_func_proto bpf_get_stack_proto = {
+	.func		= bpf_get_stack,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
+	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
+	.arg4_type	= ARG_ANYTHING,
+};
+
 /* Called from eBPF program */
 static void *stack_map_lookup_elem(struct bpf_map *map, void *key)
 {
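For context only (this is not part of the commit): unlike bpf_get_stackid, which returns a slot id in a stack map, the new helper copies the stack directly into a caller-supplied buffer and returns the number of bytes written, or a negative error. The skip count lives in the low byte of flags (BPF_F_SKIP_FIELD_MASK). A minimal sketch of a program that might call it, assuming a clang + libbpf-style build; the probe target, section name, and depth below are hypothetical:

/* Sketch of a caller; probe target, depth, and section name are
 * illustrative, not taken from the commit.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

char _license[] SEC("license") = "GPL";	/* bpf_get_stack is gpl_only */

#define MAX_DEPTH	32	/* 32 * 8 = 256 bytes, within the 512-byte BPF stack */

SEC("kprobe/sys_write")
int trace_write(struct pt_regs *ctx)
{
	__u64 ips[MAX_DEPTH];
	int ret;

	/* flags = 2 skips the two innermost frames. Adding
	 * BPF_F_USER_STACK would capture the user stack instead, and
	 * BPF_F_USER_BUILD_ID (valid for user stacks only) makes each
	 * element a struct bpf_stack_build_id rather than a u64 address.
	 */
	ret = bpf_get_stack(ctx, ips, sizeof(ips), 2);
	if (ret < 0)
		return 0;	/* on error the helper zeroes the buffer */

	/* ret bytes were filled in, i.e. ret / sizeof(__u64) frames;
	 * any tail beyond ret bytes has been zeroed by the helper.
	 */
	return 0;
}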