about summary refs log tree commit diff stats
path: root/kernel/trace/bpf_trace.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/trace/bpf_trace.c')
-rw-r--r--	kernel/trace/bpf_trace.c	52
1 files changed, 49 insertions, 3 deletions
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 56ba0f2a01db..ce2cbbff27e4 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -20,6 +20,7 @@
 #include "trace.h"
 
 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
+u64 bpf_get_stack(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
 
 /**
  * trace_call_bpf - invoke BPF program
@@ -474,8 +475,6 @@ BPF_CALL_2(bpf_current_task_under_cgroup, struct bpf_map *, map, u32, idx)
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
 	struct cgroup *cgrp;
 
-	if (unlikely(in_interrupt()))
-		return -EINVAL;
 	if (unlikely(idx >= array->map.max_entries))
 		return -E2BIG;
 
@@ -577,6 +576,8 @@ kprobe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_perf_event_output_proto;
 	case BPF_FUNC_get_stackid:
 		return &bpf_get_stackid_proto;
+	case BPF_FUNC_get_stack:
+		return &bpf_get_stack_proto;
 	case BPF_FUNC_perf_event_read_value:
 		return &bpf_perf_event_read_value_proto;
 #ifdef CONFIG_BPF_KPROBE_OVERRIDE
@@ -664,6 +665,25 @@ static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
 	.arg3_type	= ARG_ANYTHING,
 };
 
+BPF_CALL_4(bpf_get_stack_tp, void *, tp_buff, void *, buf, u32, size,
+	   u64, flags)
+{
+	struct pt_regs *regs = *(struct pt_regs **)tp_buff;
+
+	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
+			     (unsigned long) size, flags, 0);
+}
+
+static const struct bpf_func_proto bpf_get_stack_proto_tp = {
+	.func		= bpf_get_stack_tp,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_PTR_TO_UNINIT_MEM,
+	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
+	.arg4_type	= ARG_ANYTHING,
+};
+
 static const struct bpf_func_proto *
 tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
@@ -672,6 +692,8 @@ tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_perf_event_output_proto_tp;
 	case BPF_FUNC_get_stackid:
 		return &bpf_get_stackid_proto_tp;
+	case BPF_FUNC_get_stack:
+		return &bpf_get_stack_proto_tp;
 	default:
 		return tracing_func_proto(func_id, prog);
 	}
@@ -734,6 +756,8 @@ pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_perf_event_output_proto_tp;
 	case BPF_FUNC_get_stackid:
 		return &bpf_get_stackid_proto_tp;
+	case BPF_FUNC_get_stack:
+		return &bpf_get_stack_proto_tp;
 	case BPF_FUNC_perf_prog_read_value:
 		return &bpf_perf_prog_read_value_proto;
 	default:
@@ -744,7 +768,7 @@ pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 /*
  * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
  * to avoid potential recursive reuse issue when/if tracepoints are added
- * inside bpf_*_event_output and/or bpf_get_stack_id
+ * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack
  */
 static DEFINE_PER_CPU(struct pt_regs, bpf_raw_tp_regs);
 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
@@ -787,6 +811,26 @@ static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
 	.arg3_type	= ARG_ANYTHING,
 };
 
+BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
+	   void *, buf, u32, size, u64, flags)
+{
+	struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);
+
+	perf_fetch_caller_regs(regs);
+	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
+			     (unsigned long) size, flags, 0);
+}
+
+static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
+	.func		= bpf_get_stack_raw_tp,
+	.gpl_only	= true,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_PTR_TO_CTX,
+	.arg2_type	= ARG_PTR_TO_MEM,
+	.arg3_type	= ARG_CONST_SIZE_OR_ZERO,
+	.arg4_type	= ARG_ANYTHING,
+};
+
 static const struct bpf_func_proto *
 raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 {
@@ -795,6 +839,8 @@ raw_tp_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 		return &bpf_perf_event_output_proto_raw_tp;
 	case BPF_FUNC_get_stackid:
 		return &bpf_get_stackid_proto_raw_tp;
+	case BPF_FUNC_get_stack:
+		return &bpf_get_stack_proto_raw_tp;
 	default:
 		return tracing_func_proto(func_id, prog);
 	}