author     Alexei Starovoitov <ast@fb.com>         2016-04-06 21:43:27 -0400
committer  David S. Miller <davem@davemloft.net>   2016-04-07 21:04:26 -0400
commit     9940d67c93b5bb7ddcf862b41b1847cb728186c4 (patch)
tree       e5b9a36df5bc8bde9b7435cda796d2cefe686e45
parent     9fd82b610ba3351f05a59c3e9117cfefe82f7751 (diff)
bpf: support bpf_get_stackid() and bpf_perf_event_output() in tracepoint programs
Tracepoint programs need two wrapper functions that fetch 'struct pt_regs *'
from the tracepoint bpf context, converting it into the kprobe bpf context so
that the existing helper functions can be reused.
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
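
As a usage illustration (not part of this patch), here is a minimal sketch of a
BPF_PROG_TYPE_TRACEPOINT program that calls the two helpers this commit enables,
written in the samples/bpf style of this tree. The map names, the chosen
tracepoint, the event layout and the map sizes are illustrative assumptions,
not taken from the patch:

/*
 * Sketch only: a tracepoint bpf program using bpf_get_stackid() and
 * bpf_perf_event_output(), which this commit makes available to
 * BPF_PROG_TYPE_TRACEPOINT programs.
 */
#include <uapi/linux/bpf.h>
#include <uapi/linux/perf_event.h>
#include "bpf_helpers.h"        /* samples/bpf: SEC(), struct bpf_map_def, helper stubs */

struct bpf_map_def SEC("maps") stackmap = {
        .type = BPF_MAP_TYPE_STACK_TRACE,
        .key_size = sizeof(u32),
        .value_size = PERF_MAX_STACK_DEPTH * sizeof(u64),
        .max_entries = 1024,
};

struct bpf_map_def SEC("maps") events = {
        .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
        .key_size = sizeof(int),
        .value_size = sizeof(u32),
        .max_entries = 64,              /* >= number of possible cpus */
};

/* sample pushed to user space via the perf event array (illustrative layout) */
struct event {
        u32 pid;
        int kern_stack_id;
};

SEC("tracepoint/sched/sched_switch")
int trace_sched_switch(void *ctx)
{
        struct event e = {};

        e.pid = bpf_get_current_pid_tgid();     /* lower 32 bits: thread pid */
        /* same helper kprobe programs already use; a tracepoint ctx now works too */
        e.kern_stack_id = bpf_get_stackid(ctx, &stackmap, 0);

        /* push the sample to user space; index selects the perf event array slot,
         * which must hold an event bound to the current cpu */
        bpf_perf_event_output(ctx, &events, bpf_get_smp_processor_id(),
                              &e, sizeof(e));
        return 0;
}

char _license[] SEC("license") = "GPL";

Attaching works the same way as for kprobe programs: the tracepoint is opened
with perf_event_open(PERF_TYPE_TRACEPOINT) and the program is attached with the
PERF_EVENT_IOC_SET_BPF ioctl; only the program type passed to BPF_PROG_LOAD
differs.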
-rw-r--r--  include/linux/bpf.h      |  1
-rw-r--r--  kernel/bpf/stackmap.c    |  2
-rw-r--r--  kernel/trace/bpf_trace.c | 42
3 files changed, 43 insertions(+), 2 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 21ee41b92e8a..198f6ace70ec 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -160,6 +160,7 @@ struct bpf_array {
 #define MAX_TAIL_CALL_CNT 32
 
 u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
+u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 void bpf_fd_array_map_clear(struct bpf_map *map);
 bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 499d9e933f8e..35114725cf30 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -116,7 +116,7 @@ free_smap:
         return ERR_PTR(err);
 }
 
-static u64 bpf_get_stackid(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
+u64 bpf_get_stackid(u64 r1, u64 r2, u64 flags, u64 r4, u64 r5)
 {
         struct pt_regs *regs = (struct pt_regs *) (long) r1;
         struct bpf_map *map = (struct bpf_map *) (long) r2;
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 3e5ebe3254d2..413ec5614180 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -340,12 +340,52 @@ static struct bpf_prog_type_list kprobe_tl = {
         .type = BPF_PROG_TYPE_KPROBE,
 };
 
+static u64 bpf_perf_event_output_tp(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
+{
+        /*
+         * r1 points to perf tracepoint buffer where first 8 bytes are hidden
+         * from bpf program and contain a pointer to 'struct pt_regs'. Fetch it
+         * from there and call the same bpf_perf_event_output() helper
+         */
+        u64 ctx = *(long *)r1;
+
+        return bpf_perf_event_output(ctx, r2, index, r4, size);
+}
+
+static const struct bpf_func_proto bpf_perf_event_output_proto_tp = {
+        .func = bpf_perf_event_output_tp,
+        .gpl_only = true,
+        .ret_type = RET_INTEGER,
+        .arg1_type = ARG_PTR_TO_CTX,
+        .arg2_type = ARG_CONST_MAP_PTR,
+        .arg3_type = ARG_ANYTHING,
+        .arg4_type = ARG_PTR_TO_STACK,
+        .arg5_type = ARG_CONST_STACK_SIZE,
+};
+
+static u64 bpf_get_stackid_tp(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+        u64 ctx = *(long *)r1;
+
+        return bpf_get_stackid(ctx, r2, r3, r4, r5);
+}
+
+static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
+        .func = bpf_get_stackid_tp,
+        .gpl_only = true,
+        .ret_type = RET_INTEGER,
+        .arg1_type = ARG_PTR_TO_CTX,
+        .arg2_type = ARG_CONST_MAP_PTR,
+        .arg3_type = ARG_ANYTHING,
+};
+
 static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
 {
         switch (func_id) {
         case BPF_FUNC_perf_event_output:
+                return &bpf_perf_event_output_proto_tp;
         case BPF_FUNC_get_stackid:
-                return NULL;
+                return &bpf_get_stackid_proto_tp;
         default:
                 return tracing_func_proto(func_id);
         }
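
To make the two *_tp wrappers above easier to follow, here is a sketch of the
buffer that the tracepoint bpf context (r1) points at, restating the in-code
comment; the struct and field names are purely illustrative, not kernel
identifiers, and the "hidden" property comes from the context-access check
added by the parent commit:

/* Illustrative layout of the tracepoint bpf context (not a real kernel type) */
struct tp_ctx_sketch {
        struct pt_regs *regs;   /* first 8 bytes: filled in by the tracepoint
                                 * glue code and hidden from the bpf program */
        char fields[];          /* tracepoint record fields the program may read */
};

/*
 * Both wrappers simply load 'regs' from offset 0 and pass it on as ctx, which
 * is exactly what the existing kprobe-oriented bpf_get_stackid() and
 * bpf_perf_event_output() helpers expect, so the helpers themselves stay
 * unchanged.
 */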