about summary refs log tree commit diff stats
path: root/kernel/trace/bpf_trace.c
diff options
context:
space:
mode:
authorAlexei Starovoitov <ast@plumgrid.com>2015-10-20 23:02:34 -0400
committerDavid S. Miller <davem@davemloft.net>2015-10-22 09:42:15 -0400
commita43eec304259a6c637f4014a6d4767159b6a3aa3 (patch)
treeaecaeb92ff5263f446b002793d89a2a211dc246b /kernel/trace/bpf_trace.c
parentfa128e6a148a0a58355bd6814c6283515bbd028a (diff)
bpf: introduce bpf_perf_event_output() helper
This helper is used to send raw data from an eBPF program into a special PERF_TYPE_SOFTWARE/PERF_COUNT_SW_BPF_OUTPUT perf_event. User space needs to perf_event_open() it (either for one or all cpus) and store the FD into a perf_event_array (similar to the bpf_perf_event_read() helper) before the eBPF program can send data into it. Today the programs triggered by kprobe collect the data and either store it into maps or print it via bpf_trace_printk(), where the latter is a debug facility and not suitable for streaming data. This new helper replaces such bpf_trace_printk() usage and allows programs to have a dedicated channel into user space for post-processing of the raw data collected. Signed-off-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel/trace/bpf_trace.c')
-rw-r--r--kernel/trace/bpf_trace.c46
1 files changed, 46 insertions, 0 deletions
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 0fe96c7c8803..47febbe7998e 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -215,6 +215,50 @@ const struct bpf_func_proto bpf_perf_event_read_proto = {
215 .arg2_type = ARG_ANYTHING, 215 .arg2_type = ARG_ANYTHING,
216}; 216};
217 217
218static u64 bpf_perf_event_output(u64 r1, u64 r2, u64 index, u64 r4, u64 size)
219{
220 struct pt_regs *regs = (struct pt_regs *) (long) r1;
221 struct bpf_map *map = (struct bpf_map *) (long) r2;
222 struct bpf_array *array = container_of(map, struct bpf_array, map);
223 void *data = (void *) (long) r4;
224 struct perf_sample_data sample_data;
225 struct perf_event *event;
226 struct perf_raw_record raw = {
227 .size = size,
228 .data = data,
229 };
230
231 if (unlikely(index >= array->map.max_entries))
232 return -E2BIG;
233
234 event = (struct perf_event *)array->ptrs[index];
235 if (unlikely(!event))
236 return -ENOENT;
237
238 if (unlikely(event->attr.type != PERF_TYPE_SOFTWARE ||
239 event->attr.config != PERF_COUNT_SW_BPF_OUTPUT))
240 return -EINVAL;
241
242 if (unlikely(event->oncpu != smp_processor_id()))
243 return -EOPNOTSUPP;
244
245 perf_sample_data_init(&sample_data, 0, 0);
246 sample_data.raw = &raw;
247 perf_event_output(event, &sample_data, regs);
248 return 0;
249}
250
251static const struct bpf_func_proto bpf_perf_event_output_proto = {
252 .func = bpf_perf_event_output,
253 .gpl_only = false,
254 .ret_type = RET_INTEGER,
255 .arg1_type = ARG_PTR_TO_CTX,
256 .arg2_type = ARG_CONST_MAP_PTR,
257 .arg3_type = ARG_ANYTHING,
258 .arg4_type = ARG_PTR_TO_STACK,
259 .arg5_type = ARG_CONST_STACK_SIZE,
260};
261
218static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id) 262static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func_id)
219{ 263{
220 switch (func_id) { 264 switch (func_id) {
@@ -242,6 +286,8 @@ static const struct bpf_func_proto *kprobe_prog_func_proto(enum bpf_func_id func
242 return &bpf_get_smp_processor_id_proto; 286 return &bpf_get_smp_processor_id_proto;
243 case BPF_FUNC_perf_event_read: 287 case BPF_FUNC_perf_event_read:
244 return &bpf_perf_event_read_proto; 288 return &bpf_perf_event_read_proto;
289 case BPF_FUNC_perf_event_output:
290 return &bpf_perf_event_output_proto;
245 default: 291 default:
246 return NULL; 292 return NULL;
247 } 293 }