author	Alexei Starovoitov <ast@plumgrid.com>	2015-03-25 15:49:20 -0400
committer	Ingo Molnar <mingo@kernel.org>	2015-04-02 07:25:49 -0400
commit	2541517c32be2531e0da59dfd7efc1ce844644f5 (patch)
tree	a69f215a0bbc2f5db1a5d7aff83a465940e40e01 /kernel/trace
parent	72cbbc8994242b5b43753738c01bf07bf29cb70d (diff)
tracing, perf: Implement BPF programs attached to kprobes
BPF programs, attached to kprobes, provide a safe way to execute
user-defined BPF byte-code programs without being able to crash or
hang the kernel in any way. The BPF engine makes sure that such
programs have a finite execution time and that they cannot break
out of their sandbox.
The user interface is to attach to a kprobe via the perf syscall:
  struct perf_event_attr attr = {
	.type	= PERF_TYPE_TRACEPOINT,
	.config	= event_id,
	...
  };
  event_fd = perf_event_open(&attr,...);
  ioctl(event_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
'prog_fd' is a file descriptor associated with a previously loaded BPF
program.
'event_id' is the ID of the kprobe created.
Closing 'event_fd':
close(event_fd);
... automatically detaches the BPF program from it.
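
As an illustration only (not part of this patch), here is a minimal
user-space sketch of the sequence above. It assumes the kprobe was created
beforehand through the kprobe_events tracefs interface, e.g.
'echo p:myprobe sys_write > /sys/kernel/debug/tracing/kprobe_events', and
that 'prog_fd' came from a prior BPF_PROG_LOAD; the tracefs path, the probe
name 'myprobe' and the helper name attach_bpf_to_kprobe are made up for the
example:

#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int attach_bpf_to_kprobe(int prog_fd)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_TRACEPOINT,
		.size = sizeof(struct perf_event_attr),
		.sample_period = 1, /* report every hit */
	};
	FILE *f;
	int event_id, ret, event_fd;

	/* read the event id of the previously created kprobe */
	f = fopen("/sys/kernel/debug/tracing/events/kprobes/myprobe/id", "r");
	if (!f)
		return -1;
	ret = fscanf(f, "%d", &event_id);
	fclose(f);
	if (ret != 1)
		return -1;
	attr.config = event_id;

	/* open a perf event for that kprobe (all tasks, CPU 0 here) */
	event_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			   0 /* cpu */, -1 /* group_fd */, 0 /* flags */);
	if (event_fd < 0)
		return -1;

	/* attach the BPF program; closing event_fd later detaches it */
	if (ioctl(event_fd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
		close(event_fd);
		return -1;
	}
	return event_fd;
}
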
BPF programs can call in-kernel helper functions to:
- lookup/update/delete elements in maps
- probe_read - wrapper of probe_kernel_read() used to access any
kernel data structures
BPF programs receive 'struct pt_regs *' as an input ('struct pt_regs' is
architecture dependent) and return 0 to ignore the event and 1 to store
kprobe event into the ring buffer.
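
For illustration, a sketch of what such a program might look like in the
restricted C that LLVM compiles to BPF byte-code. The helper-declaration,
license and section-name conventions mirror the samples/bpf loaders and are
not defined by this patch; the probed function kfree_skb, the program name
bpf_prog1 and the use of ctx->di (x86_64 only) are illustrative:

#include <linux/ptrace.h>
#include <linux/skbuff.h>
#include <uapi/linux/bpf.h>

/* helper declared as a function pointer whose value is the helper id */
static int (*bpf_probe_read)(void *dst, int size, void *unsafe_ptr) =
	(void *) BPF_FUNC_probe_read;

/* a GPL-compatible license is required because bpf_probe_read is gpl_only */
char _license[] __attribute__((section("license"), used)) = "GPL";

/* attached to a kprobe on kfree_skb(); 'ctx' is the arch-dependent pt_regs */
__attribute__((section("kprobe/kfree_skb"), used))
int bpf_prog1(struct pt_regs *ctx)
{
	struct sk_buff *skb;
	unsigned int len = 0;

	/* x86_64 only: the first function argument is in %rdi; non-portable,
	 * since 'struct pt_regs' layout is architecture dependent */
	skb = (struct sk_buff *) ctx->di;

	/* safely read a field of a kernel data structure */
	bpf_probe_read(&len, sizeof(len), &skb->len);

	/* 0 - filter the event out, 1 - store the kprobe event */
	return len > 128;
}
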
Note, kprobes are fundamentally _not_ a stable kernel ABI,
so BPF programs attached to kprobes must be recompiled for
every kernel version, and the user must supply the correct
LINUX_VERSION_CODE in attr.kern_version during the bpf_prog_load() call.
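
For illustration, a sketch of that load step (the helper name
load_kprobe_prog is made up, and error handling and the instruction array
are omitted); the point is that attr.kern_version must carry the
LINUX_VERSION_CODE the program was built against:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>
#include <linux/version.h>

static int load_kprobe_prog(const struct bpf_insn *insns, int insn_cnt,
			    const char *license)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_KPROBE;
	attr.insns = (__u64) (unsigned long) insns;
	attr.insn_cnt = insn_cnt;
	attr.license = (__u64) (unsigned long) license;
	/* must match the kernel the program was compiled for */
	attr.kern_version = LINUX_VERSION_CODE;

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}
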
Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Reviewed-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: David S. Miller <davem@davemloft.net>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1427312966-8434-4-git-send-email-ast@plumgrid.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/Makefile		|   1
-rw-r--r--	kernel/trace/bpf_trace.c	| 130
-rw-r--r--	kernel/trace/trace_kprobe.c	|   8
3 files changed, 139 insertions, 0 deletions
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 98f26588255e..c575a300103b 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -53,6 +53,7 @@ obj-$(CONFIG_EVENT_TRACING) += trace_event_perf.o
 endif
 obj-$(CONFIG_EVENT_TRACING) += trace_events_filter.o
 obj-$(CONFIG_EVENT_TRACING) += trace_events_trigger.o
+obj-$(CONFIG_BPF_SYSCALL) += bpf_trace.o
 obj-$(CONFIG_KPROBE_EVENT) += trace_kprobe.o
 obj-$(CONFIG_TRACEPOINTS) += power-traces.o
 ifeq ($(CONFIG_PM),y)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
new file mode 100644
index 000000000000..f1e87da91da3
--- /dev/null
+++ b/kernel/trace/bpf_trace.c
@@ -0,0 +1,130 @@
+/* Copyright (c) 2011-2015 PLUMgrid, http://plumgrid.com
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of version 2 of the GNU General Public
+ * License as published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <linux/uaccess.h>
+#include "trace.h"
+
+static DEFINE_PER_CPU(int, bpf_prog_active);
+
+/**
+ * trace_call_bpf - invoke BPF program
+ * @prog: BPF program
+ * @ctx: opaque context pointer
+ *
+ * kprobe handlers execute BPF programs via this helper.
+ * Can be used from static tracepoints in the future.
+ *
+ * Return: BPF programs always return an integer which is interpreted by
+ * kprobe handler as:
+ * 0 - return from kprobe (event is filtered out)
+ * 1 - store kprobe event into ring buffer
+ * Other values are reserved and currently alias to 1
+ */
+unsigned int trace_call_bpf(struct bpf_prog *prog, void *ctx)
+{
+	unsigned int ret;
+
+	if (in_nmi()) /* not supported yet */
+		return 1;
+
+	preempt_disable();
+
+	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
+		/*
+		 * since some bpf program is already running on this cpu,
+		 * don't call into another bpf program (same or different)
+		 * and don't send kprobe event into ring-buffer,
+		 * so return zero here
+		 */
+		ret = 0;
+		goto out;
+	}
+
+	rcu_read_lock();
+	ret = BPF_PROG_RUN(prog, ctx);
+	rcu_read_unlock();
+
+ out:
+	__this_cpu_dec(bpf_prog_active);
+	preempt_enable();
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(trace_call_bpf);
+
+static u64 bpf_probe_read(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+	void *dst = (void *) (long) r1;
+	int size = (int) r2;
+	void *unsafe_ptr = (void *) (long) r3;
+
+	return probe_kernel_read(dst, unsafe_ptr, size);
+}
+
+static const struct bpf_func_proto bpf_probe_read_proto = {
+	.func = bpf_probe_read,
+	.gpl_only = true,
+	.ret_type = RET_INTEGER,
+	.arg1_type = ARG_PTR_TO_STACK,
+	.arg2_type = ARG_CONST_STACK_SIZE,
+	.arg3_type = ARG_ANYTHING,
+};
+
+static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
+{
+	switch (func_id) {
+	case BPF_FUNC_map_lookup_elem:
+		return &bpf_map_lookup_elem_proto;
+	case BPF_FUNC_map_update_elem:
+		return &bpf_map_update_elem_proto;
+	case BPF_FUNC_map_delete_elem:
+		return &bpf_map_delete_elem_proto;
+	case BPF_FUNC_probe_read:
+		return &bpf_probe_read_proto;
+	default:
+		return NULL;
+	}
+}
+
+/* bpf+kprobe programs can access fields of 'struct pt_regs' */
+static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type type)
+{
+	/* check bounds */
+	if (off < 0 || off >= sizeof(struct pt_regs))
+		return false;
+
+	/* only read is allowed */
+	if (type != BPF_READ)
+		return false;
+
+	/* disallow misaligned access */
+	if (off % size != 0)
+		return false;
+
+	return true;
+}
+
+static struct bpf_verifier_ops kprobe_prog_ops = {
+	.get_func_proto = kprobe_prog_func_proto,
+	.is_valid_access = kprobe_prog_is_valid_access,
+};
+
+static struct bpf_prog_type_list kprobe_tl = {
+	.ops = &kprobe_prog_ops,
+	.type = BPF_PROG_TYPE_KPROBE,
+};
+
+static int __init register_kprobe_prog_ops(void)
+{
+	bpf_register_prog_type(&kprobe_tl);
+	return 0;
+}
+late_initcall(register_kprobe_prog_ops);
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 8fa549f6f528..dc3462507d7c 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1134,11 +1134,15 @@ static void
 kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 {
 	struct ftrace_event_call *call = &tk->tp.call;
+	struct bpf_prog *prog = call->prog;
 	struct kprobe_trace_entry_head *entry;
 	struct hlist_head *head;
 	int size, __size, dsize;
 	int rctx;
 
+	if (prog && !trace_call_bpf(prog, regs))
+		return;
+
 	head = this_cpu_ptr(call->perf_events);
 	if (hlist_empty(head))
 		return;
@@ -1165,11 +1169,15 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
 		    struct pt_regs *regs)
 {
 	struct ftrace_event_call *call = &tk->tp.call;
+	struct bpf_prog *prog = call->prog;
 	struct kretprobe_trace_entry_head *entry;
 	struct hlist_head *head;
 	int size, __size, dsize;
 	int rctx;
 
+	if (prog && !trace_call_bpf(prog, regs))
+		return;
+
 	head = this_cpu_ptr(call->perf_events);
 	if (hlist_empty(head))
 		return;