Diffstat (limited to 'kernel/trace/bpf_trace.c')
-rw-r--r--	kernel/trace/bpf_trace.c	19
1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 0ce99c379c30..40207c2a4113 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -343,14 +343,13 @@ static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
 	.arg4_type	= ARG_CONST_SIZE,
 };
 
-static DEFINE_PER_CPU(struct perf_sample_data, bpf_sd);
+static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd);
 
 static __always_inline u64
 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
-			u64 flags, struct perf_raw_record *raw)
+			u64 flags, struct perf_sample_data *sd)
 {
 	struct bpf_array *array = container_of(map, struct bpf_array, map);
-	struct perf_sample_data *sd = this_cpu_ptr(&bpf_sd);
 	unsigned int cpu = smp_processor_id();
 	u64 index = flags & BPF_F_INDEX_MASK;
 	struct bpf_event_entry *ee;
@@ -373,8 +372,6 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
 	if (unlikely(event->oncpu != cpu))
 		return -EOPNOTSUPP;
 
-	perf_sample_data_init(sd, 0, 0);
-	sd->raw = raw;
 	perf_event_output(event, sd, regs);
 	return 0;
 }
@@ -382,6 +379,7 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
 	   u64, flags, void *, data, u64, size)
 {
+	struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd);
 	struct perf_raw_record raw = {
 		.frag = {
 			.size = size,
@@ -392,7 +390,10 @@ BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
 	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
 		return -EINVAL;
 
-	return __bpf_perf_event_output(regs, map, flags, &raw);
+	perf_sample_data_init(sd, 0, 0);
+	sd->raw = &raw;
+
+	return __bpf_perf_event_output(regs, map, flags, sd);
 }
 
 static const struct bpf_func_proto bpf_perf_event_output_proto = {
@@ -407,10 +408,12 @@ static const struct bpf_func_proto bpf_perf_event_output_proto = {
 };
 
 static DEFINE_PER_CPU(struct pt_regs, bpf_pt_regs);
+static DEFINE_PER_CPU(struct perf_sample_data, bpf_misc_sd);
 
 u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 		     void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy)
 {
+	struct perf_sample_data *sd = this_cpu_ptr(&bpf_misc_sd);
 	struct pt_regs *regs = this_cpu_ptr(&bpf_pt_regs);
 	struct perf_raw_frag frag = {
 		.copy		= ctx_copy,
@@ -428,8 +431,10 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
 	};
 
 	perf_fetch_caller_regs(regs);
+	perf_sample_data_init(sd, 0, 0);
+	sd->raw = &raw;
 
-	return __bpf_perf_event_output(regs, map, flags, &raw);
+	return __bpf_perf_event_output(regs, map, flags, sd);
 }
 
 BPF_CALL_0(bpf_get_current_task)
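
The net effect of the diff: the single shared per-CPU scratch buffer bpf_sd is split into bpf_trace_sd (used by the bpf_perf_event_output() helper) and bpf_misc_sd (used by bpf_event_output()), with perf_sample_data_init() and the raw-record assignment hoisted into each caller. bpf_event_output() is reachable from non-tracing programs that do not run under the per-CPU bpf_prog_active protection serializing tracing programs, so with one shared buffer one path could re-initialize the perf_sample_data while the other was still using it.

For context, a minimal sketch (not part of this commit) of how a BPF program drives the bpf_perf_event_output() helper touched above. It assumes libbpf's bpf/bpf_helpers.h; the map, program, and probe names (events, trace_open, do_sys_open) are illustrative only:

#include <linux/bpf.h>
#include <linux/ptrace.h>
#include <bpf/bpf_helpers.h>

/* Per-CPU perf event array that __bpf_perf_event_output() indexes. */
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} events SEC(".maps");

struct event {
	__u32 pid;
};

SEC("kprobe/do_sys_open")
int trace_open(struct pt_regs *ctx)
{
	struct event e = {
		/* upper 32 bits of the helper's return value hold the tgid */
		.pid = bpf_get_current_pid_tgid() >> 32,
	};

	/*
	 * BPF_F_CURRENT_CPU tells the kernel to use smp_processor_id() as
	 * the array index, matching the event->oncpu check in
	 * __bpf_perf_event_output().
	 */
	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

Because BPF_F_CURRENT_CPU resolves to smp_processor_id(), the record lands in the perf ring buffer of the CPU the program ran on; that is also why the kernel side returns -EOPNOTSUPP when the selected event's oncpu does not match the current CPU.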