about summary refs log tree commit diff stats
path: root/tools/perf/examples
diff options
context:
space:
mode:
Diffstat (limited to 'tools/perf/examples')
-rw-r--r--tools/perf/examples/bpf/augmented_raw_syscalls.c131
1 file changed, 131 insertions, 0 deletions
diff --git a/tools/perf/examples/bpf/augmented_raw_syscalls.c b/tools/perf/examples/bpf/augmented_raw_syscalls.c
new file mode 100644
index 000000000000..90a19336310b
--- /dev/null
+++ b/tools/perf/examples/bpf/augmented_raw_syscalls.c
@@ -0,0 +1,131 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Augment the raw_syscalls tracepoints with the contents of the pointer arguments.
4 *
5 * Test it with:
6 *
7 * perf trace -e tools/perf/examples/bpf/augmented_raw_syscalls.c cat /etc/passwd > /dev/null
8 *
9 * This exactly matches what is marshalled into the raw_syscall:sys_enter
10 * payload expected by the 'perf trace' beautifiers.
11 *
12 * For now it just uses the existing tracepoint augmentation code in 'perf
13 * trace', in the next csets we'll hook up these with the sys_enter/sys_exit
14 * code that will combine entry/exit in a strace like way.
15 */
16
17#include <stdio.h>
18#include <linux/socket.h>
19
/*
 * bpf-output associated map: a BPF_MAP_TYPE_PERF_EVENT_ARRAY through which
 * sys_enter() below streams its augmented records to userspace (see the
 * perf_event_output() call), to be consumed by the 'perf trace' beautifiers.
 */
struct bpf_map SEC("maps") __augmented_syscalls__ = {
	.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size = sizeof(int),
	.value_size = sizeof(u32),
	.max_entries = __NR_CPUS__,	/* one ring per possible CPU */
};
27
/*
 * Shape of the raw_syscalls:sys_enter tracepoint payload, copied wholesale
 * into the output record by sys_enter() via probe_read().
 *
 * NOTE(review): per the file header this must exactly match what is
 * marshalled into the raw_syscalls:sys_enter payload expected by the
 * 'perf trace' beautifiers — do not reorder or resize fields.
 */
struct syscall_enter_args {
	unsigned long long common_tp_fields;
	long		   syscall_nr;
	unsigned long	   args[6];	/* the six raw syscall arguments */
};
33
/*
 * Shape of the raw_syscalls:sys_exit tracepoint payload; currently only
 * used as the context type of the (placeholder) sys_exit() handler below.
 */
struct syscall_exit_args {
	unsigned long long common_tp_fields;
	long		   syscall_nr;
	long		   ret;		/* syscall return value */
};
39
/*
 * Variable-size filename blob appended after the raw syscall args in the
 * output record: 'size' holds the length reported by probe_read_str(),
 * 'value' the (possibly truncated) NUL-terminated string.
 *
 * NOTE(review): 'reserved' is always zeroed before output in sys_enter();
 * presumably it exists for 8-byte alignment of 'value' — confirm against
 * the 'perf trace' consumer.
 */
struct augmented_filename {
	unsigned int	size;
	int		reserved;
	char		value[256];
};
45
/*
 * Syscall numbers for the two filename-carrying syscalls augmented in
 * sys_enter() below.
 *
 * NOTE(review): 2 and 257 are the x86_64 values — confirm before relying
 * on this example on other architectures.
 */
#define SYS_OPEN 2
#define SYS_OPENAT 257
48
/*
 * Attached to the raw_syscalls:sys_enter tracepoint: copy the raw payload
 * and, for open/openat, append the string the filename argument points to,
 * then ship the combined record to userspace through the
 * __augmented_syscalls__ perf event map.
 *
 * Return value: 0 (NOTE(review): semantics of the return value depend on
 * perf's BPF glue — compare with sys_exit() below, which returns 1).
 */
SEC("raw_syscalls:sys_enter")
int sys_enter(struct syscall_enter_args *args)
{
	/* Output record: raw tracepoint fields + optional filename payload */
	struct {
		struct syscall_enter_args args;
		struct augmented_filename filename;
	} augmented_args;
	/* Default: ship everything; trimmed below when the filename is short */
	unsigned int len = sizeof(augmented_args);
	const void *filename_arg = NULL;

	probe_read(&augmented_args.args, sizeof(augmented_args.args), args);
	/*
	 * Yonghong and Edward Cree sayz:
	 *
	 * https://www.spinics.net/lists/netdev/msg531645.html
	 *
	 * >> R0=inv(id=0) R1=inv2 R6=ctx(id=0,off=0,imm=0) R7=inv64 R10=fp0,call_-1
	 * >> 10: (bf) r1 = r6
	 * >> 11: (07) r1 += 16
	 * >> 12: (05) goto pc+2
	 * >> 15: (79) r3 = *(u64 *)(r1 +0)
	 * >> dereference of modified ctx ptr R1 off=16 disallowed
	 * > Aha, we at least got a different error message this time.
	 * > And indeed llvm has done that optimisation, rather than the more obvious
	 * > 11: r3 = *(u64 *)(r1 +16)
	 * > because it wants to have lots of reads share a single insn. You may be able
	 * > to defeat that optimisation by adding compiler barriers, idk. Maybe someone
	 * > with llvm knowledge can figure out how to stop it (ideally, llvm would know
	 * > when it's generating for bpf backend and not do that). -O0? ¯\_(ツ)_/¯
	 *
	 * The optimization mostly likes below:
	 *
	 *	br1:
	 *	...
	 *	r1 += 16
	 *	goto merge
	 *	br2:
	 *	...
	 *	r1 += 20
	 *	goto merge
	 *	merge:
	 *	*(u64 *)(r1 + 0)
	 *
	 * The compiler tries to merge common loads. There is no easy way to
	 * stop this compiler optimization without turning off a lot of other
	 * optimizations. The easiest way is to add barriers:
	 *
	 *	 __asm__ __volatile__("": : :"memory")
	 *
	 * after the ctx memory access to prevent their down stream merging.
	 */
	switch (augmented_args.args.syscall_nr) {
	case SYS_OPEN: filename_arg = (const void *)args->args[0];
			/* barrier defeats the ctx-load merging described above */
			__asm__ __volatile__("": : :"memory");
			 break;
	case SYS_OPENAT: filename_arg = (const void *)args->args[1];
			 break;
	}

	if (filename_arg != NULL) {
		augmented_args.filename.reserved = 0;
		/*
		 * NOTE(review): probe_read_str() can return a negative error;
		 * stored into the unsigned 'size' it becomes a huge value and
		 * fails the '< sizeof' check below, so the full-size record is
		 * sent — presumably acceptable for this example; confirm.
		 */
		augmented_args.filename.size = probe_read_str(&augmented_args.filename.value,
						      sizeof(augmented_args.filename.value),
						      filename_arg);
		if (augmented_args.filename.size < sizeof(augmented_args.filename.value)) {
			/* Drop the unused tail of 'value' from the output */
			len -= sizeof(augmented_args.filename.value) - augmented_args.filename.size;
			/*
			 * NOTE(review): masking keeps 'len' provably bounded
			 * for the BPF verifier, but it also clears high bits
			 * whenever the computed len reaches 256, truncating
			 * the record for long filenames — confirm this is the
			 * intended verifier workaround.
			 */
			len &= sizeof(augmented_args.filename.value) - 1;
		}
	} else {
		/* Syscall we don't augment: send just the raw payload */
		len = sizeof(augmented_args.args);
	}

	perf_event_output(args, &__augmented_syscalls__, BPF_F_CURRENT_CPU, &augmented_args, len);
	return 0;
}
124
/*
 * Placeholder raw_syscalls:sys_exit handler: per the file header, entry/exit
 * will be combined strace-style in later csets; for now nothing is copied on
 * exit.
 */
SEC("raw_syscalls:sys_exit")
int sys_exit(struct syscall_exit_args *args)
{
	return 1; /* 0 as soon as we start copying data returned by the kernel, e.g. 'read' */
}
130
/*
 * Declares the object's license so the kernel will load it; presumably a
 * macro from perf's bpf helper header expanding to the "license" section —
 * confirm against tools/perf's bpf.h.
 */
license(GPL);