diff options
author | Alexei Starovoitov <ast@plumgrid.com> | 2015-03-25 15:49:22 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2015-04-02 07:25:50 -0400 |
commit | 9c959c863f8217a2ff3d7c296e8223654d240569 (patch) | |
tree | 3e5367b2cb1c54fbe7028f554808b7359f053e19 | |
parent | d9847d310ab4003725e6ed1822682e24bd406908 (diff) |
tracing: Allow BPF programs to call bpf_trace_printk()
Debugging of BPF programs needs some form of printk from the
program, so let programs call a limited trace_printk() that accepts
only the %d %u %x conversion specifiers, their %l/%ll-prefixed
variants, and %p.
Similar to kernel modules, during program load the verifier checks
whether the program calls bpf_trace_printk() and, if so, the kernel
allocates the trace_printk buffers and emits the big 'this is debug
only' banner.
Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Reviewed-by: Steven Rostedt <rostedt@goodmis.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: David S. Miller <davem@davemloft.net>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1427312966-8434-6-git-send-email-ast@plumgrid.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- | include/uapi/linux/bpf.h | 1 | ||||
-rw-r--r-- | kernel/trace/bpf_trace.c | 78 |
2 files changed, 79 insertions, 0 deletions
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 238c6883877b..cc47ef41076a 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h | |||
@@ -166,6 +166,7 @@ enum bpf_func_id { | |||
166 | BPF_FUNC_map_delete_elem, /* int map_delete_elem(&map, &key) */ | 166 | BPF_FUNC_map_delete_elem, /* int map_delete_elem(&map, &key) */ |
167 | BPF_FUNC_probe_read, /* int bpf_probe_read(void *dst, int size, void *src) */ | 167 | BPF_FUNC_probe_read, /* int bpf_probe_read(void *dst, int size, void *src) */ |
168 | BPF_FUNC_ktime_get_ns, /* u64 bpf_ktime_get_ns(void) */ | 168 | BPF_FUNC_ktime_get_ns, /* u64 bpf_ktime_get_ns(void) */ |
169 | BPF_FUNC_trace_printk, /* int bpf_trace_printk(const char *fmt, int fmt_size, ...) */ | ||
169 | __BPF_FUNC_MAX_ID, | 170 | __BPF_FUNC_MAX_ID, |
170 | }; | 171 | }; |
171 | 172 | ||
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 8f5787294971..2d56ce501632 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/bpf.h> | 10 | #include <linux/bpf.h> |
11 | #include <linux/filter.h> | 11 | #include <linux/filter.h> |
12 | #include <linux/uaccess.h> | 12 | #include <linux/uaccess.h> |
13 | #include <linux/ctype.h> | ||
13 | #include "trace.h" | 14 | #include "trace.h" |
14 | 15 | ||
15 | static DEFINE_PER_CPU(int, bpf_prog_active); | 16 | static DEFINE_PER_CPU(int, bpf_prog_active); |
@@ -90,6 +91,74 @@ static const struct bpf_func_proto bpf_ktime_get_ns_proto = { | |||
90 | .ret_type = RET_INTEGER, | 91 | .ret_type = RET_INTEGER, |
91 | }; | 92 | }; |
92 | 93 | ||
94 | /* | ||
95 | * limited trace_printk() | ||
96 | * only %d %u %x %ld %lu %lx %lld %llu %llx %p conversion specifiers allowed | ||
97 | */ | ||
98 | static u64 bpf_trace_printk(u64 r1, u64 fmt_size, u64 r3, u64 r4, u64 r5) | ||
99 | { | ||
100 | char *fmt = (char *) (long) r1; | ||
101 | int mod[3] = {}; | ||
102 | int fmt_cnt = 0; | ||
103 | int i; | ||
104 | |||
105 | /* | ||
106 | * bpf_check()->check_func_arg()->check_stack_boundary() | ||
107 | * guarantees that fmt points to bpf program stack, | ||
108 | * fmt_size bytes of it were initialized and fmt_size > 0 | ||
109 | */ | ||
110 | if (fmt[--fmt_size] != 0) | ||
111 | return -EINVAL; | ||
112 | |||
113 | /* check format string for allowed specifiers */ | ||
114 | for (i = 0; i < fmt_size; i++) { | ||
115 | if ((!isprint(fmt[i]) && !isspace(fmt[i])) || !isascii(fmt[i])) | ||
116 | return -EINVAL; | ||
117 | |||
118 | if (fmt[i] != '%') | ||
119 | continue; | ||
120 | |||
121 | if (fmt_cnt >= 3) | ||
122 | return -EINVAL; | ||
123 | |||
124 | /* fmt[i] != 0 && fmt[last] == 0, so we can access fmt[i + 1] */ | ||
125 | i++; | ||
126 | if (fmt[i] == 'l') { | ||
127 | mod[fmt_cnt]++; | ||
128 | i++; | ||
129 | } else if (fmt[i] == 'p') { | ||
130 | mod[fmt_cnt]++; | ||
131 | i++; | ||
132 | if (!isspace(fmt[i]) && !ispunct(fmt[i]) && fmt[i] != 0) | ||
133 | return -EINVAL; | ||
134 | fmt_cnt++; | ||
135 | continue; | ||
136 | } | ||
137 | |||
138 | if (fmt[i] == 'l') { | ||
139 | mod[fmt_cnt]++; | ||
140 | i++; | ||
141 | } | ||
142 | |||
143 | if (fmt[i] != 'd' && fmt[i] != 'u' && fmt[i] != 'x') | ||
144 | return -EINVAL; | ||
145 | fmt_cnt++; | ||
146 | } | ||
147 | |||
148 | return __trace_printk(1/* fake ip will not be printed */, fmt, | ||
149 | mod[0] == 2 ? r3 : mod[0] == 1 ? (long) r3 : (u32) r3, | ||
150 | mod[1] == 2 ? r4 : mod[1] == 1 ? (long) r4 : (u32) r4, | ||
151 | mod[2] == 2 ? r5 : mod[2] == 1 ? (long) r5 : (u32) r5); | ||
152 | } | ||
153 | |||
154 | static const struct bpf_func_proto bpf_trace_printk_proto = { | ||
155 | .func = bpf_trace_printk, | ||
156 | .gpl_only = true, | ||
157 | .ret_type = RET_INTEGER, | ||
158 | .arg1_type = ARG_PTR_TO_STACK, | ||
159 | .arg2_type = ARG_CONST_STACK_SIZE, | ||
160 | }; | ||
161 | |||
93 | static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id) | 162 | static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id) |
94 | { | 163 | { |
95 | switch (func_id) { | 164 | switch (func_id) { |
@@ -103,6 +172,15 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func | |||
103 | return &bpf_probe_read_proto; | 172 | return &bpf_probe_read_proto; |
104 | case BPF_FUNC_ktime_get_ns: | 173 | case BPF_FUNC_ktime_get_ns: |
105 | return &bpf_ktime_get_ns_proto; | 174 | return &bpf_ktime_get_ns_proto; |
175 | |||
176 | case BPF_FUNC_trace_printk: | ||
177 | /* | ||
178 | * this program might be calling bpf_trace_printk, | ||
179 | * so allocate per-cpu printk buffers | ||
180 | */ | ||
181 | trace_printk_init_buffers(); | ||
182 | |||
183 | return &bpf_trace_printk_proto; | ||
106 | default: | 184 | default: |
107 | return NULL; | 185 | return NULL; |
108 | } | 186 | } |