34 files changed, 4353 insertions, 391 deletions
diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt
index 01603bc2eff1..b5e060edfc38 100644
--- a/Documentation/networking/filter.txt
+++ b/Documentation/networking/filter.txt
@@ -464,10 +464,11 @@ breakpoints: 0 1
464 | JIT compiler | 464 | JIT compiler |
465 | ------------ | 465 | ------------ |
466 | 466 | ||
467 | The Linux kernel has a built-in BPF JIT compiler for x86_64, SPARC, PowerPC, | 467 | The Linux kernel has a built-in BPF JIT compiler for x86_64, SPARC, |
468 | ARM, ARM64, MIPS and s390 and can be enabled through CONFIG_BPF_JIT. The JIT | 468 | PowerPC, ARM, ARM64, MIPS, RISC-V and s390 and can be enabled through |
469 | compiler is transparently invoked for each attached filter from user space | 469 | CONFIG_BPF_JIT. The JIT compiler is transparently invoked for each |
470 | or for internal kernel users if it has been previously enabled by root: | 470 | attached filter from user space or for internal kernel users if it has |
471 | been previously enabled by root: | ||
471 | 472 | ||
472 | echo 1 > /proc/sys/net/core/bpf_jit_enable | 473 | echo 1 > /proc/sys/net/core/bpf_jit_enable |
473 | 474 | ||
@@ -603,9 +604,10 @@ got from bpf_prog_create(), and 'ctx' the given context (e.g.
603 | skb pointer). All constraints and restrictions from bpf_check_classic() apply | 604 | skb pointer). All constraints and restrictions from bpf_check_classic() apply |
604 | before a conversion to the new layout is being done behind the scenes! | 605 | before a conversion to the new layout is being done behind the scenes! |
605 | 606 | ||
606 | Currently, the classic BPF format is being used for JITing on most 32-bit | 607 | Currently, the classic BPF format is being used for JITing on most |
607 | architectures, whereas x86-64, aarch64, s390x, powerpc64, sparc64, arm32 perform | 608 | 32-bit architectures, whereas x86-64, aarch64, s390x, powerpc64, |
608 | JIT compilation from eBPF instruction set. | 609 | sparc64, arm32, riscv (RV64G) perform JIT compilation from eBPF |
610 | instruction set. | ||
609 | 611 | ||
610 | Some core changes of the new internal format: | 612 | Some core changes of the new internal format: |
611 | 613 | ||
diff --git a/Documentation/sysctl/net.txt b/Documentation/sysctl/net.txt
index bc0680706870..2ae91d3873bb 100644
--- a/Documentation/sysctl/net.txt
+++ b/Documentation/sysctl/net.txt
@@ -52,6 +52,7 @@ two flavors of JITs, the newer eBPF JIT currently supported on:
52 | - sparc64 | 52 | - sparc64 |
53 | - mips64 | 53 | - mips64 |
54 | - s390x | 54 | - s390x |
55 | - riscv | ||
55 | 56 | ||
56 | And the older cBPF JIT supported on the following archs: | 57 | And the older cBPF JIT supported on the following archs: |
57 | - mips | 58 | - mips |
diff --git a/MAINTAINERS b/MAINTAINERS
index 019a2bcfbd09..b4491132b9ce 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2907,6 +2907,12 @@ L: netdev@vger.kernel.org
2907 | S: Maintained | 2907 | S: Maintained |
2908 | F: arch/powerpc/net/ | 2908 | F: arch/powerpc/net/ |
2909 | 2909 | ||
2910 | BPF JIT for RISC-V (RV64G) | ||
2911 | M: Björn Töpel <bjorn.topel@gmail.com> | ||
2912 | L: netdev@vger.kernel.org | ||
2913 | S: Maintained | ||
2914 | F: arch/riscv/net/ | ||
2915 | |||
2910 | BPF JIT for S390 | 2916 | BPF JIT for S390 |
2911 | M: Martin Schwidefsky <schwidefsky@de.ibm.com> | 2917 | M: Martin Schwidefsky <schwidefsky@de.ibm.com> |
2912 | M: Heiko Carstens <heiko.carstens@de.ibm.com> | 2918 | M: Heiko Carstens <heiko.carstens@de.ibm.com> |
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index feeeaa60697c..e64c657060bb 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -49,6 +49,7 @@ config RISCV
49 | select RISCV_TIMER | 49 | select RISCV_TIMER |
50 | select GENERIC_IRQ_MULTI_HANDLER | 50 | select GENERIC_IRQ_MULTI_HANDLER |
51 | select ARCH_HAS_PTE_SPECIAL | 51 | select ARCH_HAS_PTE_SPECIAL |
52 | select HAVE_EBPF_JIT if 64BIT | ||
52 | 53 | ||
53 | config MMU | 54 | config MMU |
54 | def_bool y | 55 | def_bool y |
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index 4b594f2e4f7e..c6342e638ef7 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -77,7 +77,7 @@ KBUILD_IMAGE := $(boot)/Image.gz
77 | 77 | ||
78 | head-y := arch/riscv/kernel/head.o | 78 | head-y := arch/riscv/kernel/head.o |
79 | 79 | ||
80 | core-y += arch/riscv/kernel/ arch/riscv/mm/ | 80 | core-y += arch/riscv/kernel/ arch/riscv/mm/ arch/riscv/net/ |
81 | 81 | ||
82 | libs-y += arch/riscv/lib/ | 82 | libs-y += arch/riscv/lib/ |
83 | 83 | ||
diff --git a/arch/riscv/net/Makefile b/arch/riscv/net/Makefile
new file mode 100644
index 000000000000..a132220cc582
--- /dev/null
+++ b/arch/riscv/net/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_BPF_JIT) += bpf_jit_comp.o
diff --git a/arch/riscv/net/bpf_jit_comp.c b/arch/riscv/net/bpf_jit_comp.c
new file mode 100644
index 000000000000..80b12aa5e10d
--- /dev/null
+++ b/arch/riscv/net/bpf_jit_comp.c
@@ -0,0 +1,1602 @@
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* BPF JIT compiler for RV64G | ||
3 | * | ||
4 | * Copyright(c) 2019 Björn Töpel <bjorn.topel@gmail.com> | ||
5 | * | ||
6 | */ | ||
7 | |||
8 | #include <linux/bpf.h> | ||
9 | #include <linux/filter.h> | ||
10 | #include <asm/cacheflush.h> | ||
11 | |||
12 | enum { | ||
13 | RV_REG_ZERO = 0, /* The constant value 0 */ | ||
14 | RV_REG_RA = 1, /* Return address */ | ||
15 | RV_REG_SP = 2, /* Stack pointer */ | ||
16 | RV_REG_GP = 3, /* Global pointer */ | ||
17 | RV_REG_TP = 4, /* Thread pointer */ | ||
18 | RV_REG_T0 = 5, /* Temporaries */ | ||
19 | RV_REG_T1 = 6, | ||
20 | RV_REG_T2 = 7, | ||
21 | RV_REG_FP = 8, | ||
22 | RV_REG_S1 = 9, /* Saved registers */ | ||
23 | RV_REG_A0 = 10, /* Function argument/return values */ | ||
24 | RV_REG_A1 = 11, /* Function arguments */ | ||
25 | RV_REG_A2 = 12, | ||
26 | RV_REG_A3 = 13, | ||
27 | RV_REG_A4 = 14, | ||
28 | RV_REG_A5 = 15, | ||
29 | RV_REG_A6 = 16, | ||
30 | RV_REG_A7 = 17, | ||
31 | RV_REG_S2 = 18, /* Saved registers */ | ||
32 | RV_REG_S3 = 19, | ||
33 | RV_REG_S4 = 20, | ||
34 | RV_REG_S5 = 21, | ||
35 | RV_REG_S6 = 22, | ||
36 | RV_REG_S7 = 23, | ||
37 | RV_REG_S8 = 24, | ||
38 | RV_REG_S9 = 25, | ||
39 | RV_REG_S10 = 26, | ||
40 | RV_REG_S11 = 27, | ||
41 | RV_REG_T3 = 28, /* Temporaries */ | ||
42 | RV_REG_T4 = 29, | ||
43 | RV_REG_T5 = 30, | ||
44 | RV_REG_T6 = 31, | ||
45 | }; | ||
46 | |||
47 | #define RV_REG_TCC RV_REG_A6 | ||
48 | #define RV_REG_TCC_SAVED RV_REG_S6 /* Store A6 in S6 if the program does calls */ | ||
49 | |||
50 | static const int regmap[] = { | ||
51 | [BPF_REG_0] = RV_REG_A5, | ||
52 | [BPF_REG_1] = RV_REG_A0, | ||
53 | [BPF_REG_2] = RV_REG_A1, | ||
54 | [BPF_REG_3] = RV_REG_A2, | ||
55 | [BPF_REG_4] = RV_REG_A3, | ||
56 | [BPF_REG_5] = RV_REG_A4, | ||
57 | [BPF_REG_6] = RV_REG_S1, | ||
58 | [BPF_REG_7] = RV_REG_S2, | ||
59 | [BPF_REG_8] = RV_REG_S3, | ||
60 | [BPF_REG_9] = RV_REG_S4, | ||
61 | [BPF_REG_FP] = RV_REG_S5, | ||
62 | [BPF_REG_AX] = RV_REG_T0, | ||
63 | }; | ||
64 | |||
65 | enum { | ||
66 | RV_CTX_F_SEEN_TAIL_CALL = 0, | ||
67 | RV_CTX_F_SEEN_CALL = RV_REG_RA, | ||
68 | RV_CTX_F_SEEN_S1 = RV_REG_S1, | ||
69 | RV_CTX_F_SEEN_S2 = RV_REG_S2, | ||
70 | RV_CTX_F_SEEN_S3 = RV_REG_S3, | ||
71 | RV_CTX_F_SEEN_S4 = RV_REG_S4, | ||
72 | RV_CTX_F_SEEN_S5 = RV_REG_S5, | ||
73 | RV_CTX_F_SEEN_S6 = RV_REG_S6, | ||
74 | }; | ||
75 | |||
76 | struct rv_jit_context { | ||
77 | struct bpf_prog *prog; | ||
78 | u32 *insns; /* RV insns */ | ||
79 | int ninsns; | ||
80 | int epilogue_offset; | ||
81 | int *offset; /* BPF to RV */ | ||
82 | unsigned long flags; | ||
83 | int stack_size; | ||
84 | }; | ||
85 | |||
86 | struct rv_jit_data { | ||
87 | struct bpf_binary_header *header; | ||
88 | u8 *image; | ||
89 | struct rv_jit_context ctx; | ||
90 | }; | ||
91 | |||
92 | static u8 bpf_to_rv_reg(int bpf_reg, struct rv_jit_context *ctx) | ||
93 | { | ||
94 | u8 reg = regmap[bpf_reg]; | ||
95 | |||
96 | switch (reg) { | ||
97 | case RV_CTX_F_SEEN_S1: | ||
98 | case RV_CTX_F_SEEN_S2: | ||
99 | case RV_CTX_F_SEEN_S3: | ||
100 | case RV_CTX_F_SEEN_S4: | ||
101 | case RV_CTX_F_SEEN_S5: | ||
102 | case RV_CTX_F_SEEN_S6: | ||
103 | __set_bit(reg, &ctx->flags); | ||
104 | } | ||
105 | return reg; | ||
106 | }; | ||
107 | |||
108 | static bool seen_reg(int reg, struct rv_jit_context *ctx) | ||
109 | { | ||
110 | switch (reg) { | ||
111 | case RV_CTX_F_SEEN_CALL: | ||
112 | case RV_CTX_F_SEEN_S1: | ||
113 | case RV_CTX_F_SEEN_S2: | ||
114 | case RV_CTX_F_SEEN_S3: | ||
115 | case RV_CTX_F_SEEN_S4: | ||
116 | case RV_CTX_F_SEEN_S5: | ||
117 | case RV_CTX_F_SEEN_S6: | ||
118 | return test_bit(reg, &ctx->flags); | ||
119 | } | ||
120 | return false; | ||
121 | } | ||
122 | |||
123 | static void mark_call(struct rv_jit_context *ctx) | ||
124 | { | ||
125 | __set_bit(RV_CTX_F_SEEN_CALL, &ctx->flags); | ||
126 | } | ||
127 | |||
128 | static bool seen_call(struct rv_jit_context *ctx) | ||
129 | { | ||
130 | return test_bit(RV_CTX_F_SEEN_CALL, &ctx->flags); | ||
131 | } | ||
132 | |||
133 | static void mark_tail_call(struct rv_jit_context *ctx) | ||
134 | { | ||
135 | __set_bit(RV_CTX_F_SEEN_TAIL_CALL, &ctx->flags); | ||
136 | } | ||
137 | |||
138 | static bool seen_tail_call(struct rv_jit_context *ctx) | ||
139 | { | ||
140 | return test_bit(RV_CTX_F_SEEN_TAIL_CALL, &ctx->flags); | ||
141 | } | ||
142 | |||
143 | static u8 rv_tail_call_reg(struct rv_jit_context *ctx) | ||
144 | { | ||
145 | mark_tail_call(ctx); | ||
146 | |||
147 | if (seen_call(ctx)) { | ||
148 | __set_bit(RV_CTX_F_SEEN_S6, &ctx->flags); | ||
149 | return RV_REG_S6; | ||
150 | } | ||
151 | return RV_REG_A6; | ||
152 | } | ||
153 | |||
154 | static void emit(const u32 insn, struct rv_jit_context *ctx) | ||
155 | { | ||
156 | if (ctx->insns) | ||
157 | ctx->insns[ctx->ninsns] = insn; | ||
158 | |||
159 | ctx->ninsns++; | ||
160 | } | ||
161 | |||
162 | static u32 rv_r_insn(u8 funct7, u8 rs2, u8 rs1, u8 funct3, u8 rd, u8 opcode) | ||
163 | { | ||
164 | return (funct7 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) | | ||
165 | (rd << 7) | opcode; | ||
166 | } | ||
167 | |||
168 | static u32 rv_i_insn(u16 imm11_0, u8 rs1, u8 funct3, u8 rd, u8 opcode) | ||
169 | { | ||
170 | return (imm11_0 << 20) | (rs1 << 15) | (funct3 << 12) | (rd << 7) | | ||
171 | opcode; | ||
172 | } | ||
173 | |||
174 | static u32 rv_s_insn(u16 imm11_0, u8 rs2, u8 rs1, u8 funct3, u8 opcode) | ||
175 | { | ||
176 | u8 imm11_5 = imm11_0 >> 5, imm4_0 = imm11_0 & 0x1f; | ||
177 | |||
178 | return (imm11_5 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) | | ||
179 | (imm4_0 << 7) | opcode; | ||
180 | } | ||
181 | |||
182 | static u32 rv_sb_insn(u16 imm12_1, u8 rs2, u8 rs1, u8 funct3, u8 opcode) | ||
183 | { | ||
184 | u8 imm12 = ((imm12_1 & 0x800) >> 5) | ((imm12_1 & 0x3f0) >> 4); | ||
185 | u8 imm4_1 = ((imm12_1 & 0xf) << 1) | ((imm12_1 & 0x400) >> 10); | ||
186 | |||
187 | return (imm12 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) | | ||
188 | (imm4_1 << 7) | opcode; | ||
189 | } | ||
190 | |||
191 | static u32 rv_u_insn(u32 imm31_12, u8 rd, u8 opcode) | ||
192 | { | ||
193 | return (imm31_12 << 12) | (rd << 7) | opcode; | ||
194 | } | ||
195 | |||
196 | static u32 rv_uj_insn(u32 imm20_1, u8 rd, u8 opcode) | ||
197 | { | ||
198 | u32 imm; | ||
199 | |||
200 | imm = (imm20_1 & 0x80000) | ((imm20_1 & 0x3ff) << 9) | | ||
201 | ((imm20_1 & 0x400) >> 2) | ((imm20_1 & 0x7f800) >> 11); | ||
202 | |||
203 | return (imm << 12) | (rd << 7) | opcode; | ||
204 | } | ||
205 | |||
206 | static u32 rv_amo_insn(u8 funct5, u8 aq, u8 rl, u8 rs2, u8 rs1, | ||
207 | u8 funct3, u8 rd, u8 opcode) | ||
208 | { | ||
209 | u8 funct7 = (funct5 << 2) | (aq << 1) | rl; | ||
210 | |||
211 | return rv_r_insn(funct7, rs2, rs1, funct3, rd, opcode); | ||
212 | } | ||
213 | |||
214 | static u32 rv_addiw(u8 rd, u8 rs1, u16 imm11_0) | ||
215 | { | ||
216 | return rv_i_insn(imm11_0, rs1, 0, rd, 0x1b); | ||
217 | } | ||
218 | |||
219 | static u32 rv_addi(u8 rd, u8 rs1, u16 imm11_0) | ||
220 | { | ||
221 | return rv_i_insn(imm11_0, rs1, 0, rd, 0x13); | ||
222 | } | ||
223 | |||
224 | static u32 rv_addw(u8 rd, u8 rs1, u8 rs2) | ||
225 | { | ||
226 | return rv_r_insn(0, rs2, rs1, 0, rd, 0x3b); | ||
227 | } | ||
228 | |||
229 | static u32 rv_add(u8 rd, u8 rs1, u8 rs2) | ||
230 | { | ||
231 | return rv_r_insn(0, rs2, rs1, 0, rd, 0x33); | ||
232 | } | ||
233 | |||
234 | static u32 rv_subw(u8 rd, u8 rs1, u8 rs2) | ||
235 | { | ||
236 | return rv_r_insn(0x20, rs2, rs1, 0, rd, 0x3b); | ||
237 | } | ||
238 | |||
239 | static u32 rv_sub(u8 rd, u8 rs1, u8 rs2) | ||
240 | { | ||
241 | return rv_r_insn(0x20, rs2, rs1, 0, rd, 0x33); | ||
242 | } | ||
243 | |||
244 | static u32 rv_and(u8 rd, u8 rs1, u8 rs2) | ||
245 | { | ||
246 | return rv_r_insn(0, rs2, rs1, 7, rd, 0x33); | ||
247 | } | ||
248 | |||
249 | static u32 rv_or(u8 rd, u8 rs1, u8 rs2) | ||
250 | { | ||
251 | return rv_r_insn(0, rs2, rs1, 6, rd, 0x33); | ||
252 | } | ||
253 | |||
254 | static u32 rv_xor(u8 rd, u8 rs1, u8 rs2) | ||
255 | { | ||
256 | return rv_r_insn(0, rs2, rs1, 4, rd, 0x33); | ||
257 | } | ||
258 | |||
259 | static u32 rv_mulw(u8 rd, u8 rs1, u8 rs2) | ||
260 | { | ||
261 | return rv_r_insn(1, rs2, rs1, 0, rd, 0x3b); | ||
262 | } | ||
263 | |||
264 | static u32 rv_mul(u8 rd, u8 rs1, u8 rs2) | ||
265 | { | ||
266 | return rv_r_insn(1, rs2, rs1, 0, rd, 0x33); | ||
267 | } | ||
268 | |||
269 | static u32 rv_divuw(u8 rd, u8 rs1, u8 rs2) | ||
270 | { | ||
271 | return rv_r_insn(1, rs2, rs1, 5, rd, 0x3b); | ||
272 | } | ||
273 | |||
274 | static u32 rv_divu(u8 rd, u8 rs1, u8 rs2) | ||
275 | { | ||
276 | return rv_r_insn(1, rs2, rs1, 5, rd, 0x33); | ||
277 | } | ||
278 | |||
279 | static u32 rv_remuw(u8 rd, u8 rs1, u8 rs2) | ||
280 | { | ||
281 | return rv_r_insn(1, rs2, rs1, 7, rd, 0x3b); | ||
282 | } | ||
283 | |||
284 | static u32 rv_remu(u8 rd, u8 rs1, u8 rs2) | ||
285 | { | ||
286 | return rv_r_insn(1, rs2, rs1, 7, rd, 0x33); | ||
287 | } | ||
288 | |||
289 | static u32 rv_sllw(u8 rd, u8 rs1, u8 rs2) | ||
290 | { | ||
291 | return rv_r_insn(0, rs2, rs1, 1, rd, 0x3b); | ||
292 | } | ||
293 | |||
294 | static u32 rv_sll(u8 rd, u8 rs1, u8 rs2) | ||
295 | { | ||
296 | return rv_r_insn(0, rs2, rs1, 1, rd, 0x33); | ||
297 | } | ||
298 | |||
299 | static u32 rv_srlw(u8 rd, u8 rs1, u8 rs2) | ||
300 | { | ||
301 | return rv_r_insn(0, rs2, rs1, 5, rd, 0x3b); | ||
302 | } | ||
303 | |||
304 | static u32 rv_srl(u8 rd, u8 rs1, u8 rs2) | ||
305 | { | ||
306 | return rv_r_insn(0, rs2, rs1, 5, rd, 0x33); | ||
307 | } | ||
308 | |||
309 | static u32 rv_sraw(u8 rd, u8 rs1, u8 rs2) | ||
310 | { | ||
311 | return rv_r_insn(0x20, rs2, rs1, 5, rd, 0x3b); | ||
312 | } | ||
313 | |||
314 | static u32 rv_sra(u8 rd, u8 rs1, u8 rs2) | ||
315 | { | ||
316 | return rv_r_insn(0x20, rs2, rs1, 5, rd, 0x33); | ||
317 | } | ||
318 | |||
319 | static u32 rv_lui(u8 rd, u32 imm31_12) | ||
320 | { | ||
321 | return rv_u_insn(imm31_12, rd, 0x37); | ||
322 | } | ||
323 | |||
324 | static u32 rv_slli(u8 rd, u8 rs1, u16 imm11_0) | ||
325 | { | ||
326 | return rv_i_insn(imm11_0, rs1, 1, rd, 0x13); | ||
327 | } | ||
328 | |||
329 | static u32 rv_andi(u8 rd, u8 rs1, u16 imm11_0) | ||
330 | { | ||
331 | return rv_i_insn(imm11_0, rs1, 7, rd, 0x13); | ||
332 | } | ||
333 | |||
334 | static u32 rv_ori(u8 rd, u8 rs1, u16 imm11_0) | ||
335 | { | ||
336 | return rv_i_insn(imm11_0, rs1, 6, rd, 0x13); | ||
337 | } | ||
338 | |||
339 | static u32 rv_xori(u8 rd, u8 rs1, u16 imm11_0) | ||
340 | { | ||
341 | return rv_i_insn(imm11_0, rs1, 4, rd, 0x13); | ||
342 | } | ||
343 | |||
344 | static u32 rv_slliw(u8 rd, u8 rs1, u16 imm11_0) | ||
345 | { | ||
346 | return rv_i_insn(imm11_0, rs1, 1, rd, 0x1b); | ||
347 | } | ||
348 | |||
349 | static u32 rv_srliw(u8 rd, u8 rs1, u16 imm11_0) | ||
350 | { | ||
351 | return rv_i_insn(imm11_0, rs1, 5, rd, 0x1b); | ||
352 | } | ||
353 | |||
354 | static u32 rv_srli(u8 rd, u8 rs1, u16 imm11_0) | ||
355 | { | ||
356 | return rv_i_insn(imm11_0, rs1, 5, rd, 0x13); | ||
357 | } | ||
358 | |||
359 | static u32 rv_sraiw(u8 rd, u8 rs1, u16 imm11_0) | ||
360 | { | ||
361 | return rv_i_insn(0x400 | imm11_0, rs1, 5, rd, 0x1b); | ||
362 | } | ||
363 | |||
364 | static u32 rv_srai(u8 rd, u8 rs1, u16 imm11_0) | ||
365 | { | ||
366 | return rv_i_insn(0x400 | imm11_0, rs1, 5, rd, 0x13); | ||
367 | } | ||
368 | |||
369 | static u32 rv_jal(u8 rd, u32 imm20_1) | ||
370 | { | ||
371 | return rv_uj_insn(imm20_1, rd, 0x6f); | ||
372 | } | ||
373 | |||
374 | static u32 rv_jalr(u8 rd, u8 rs1, u16 imm11_0) | ||
375 | { | ||
376 | return rv_i_insn(imm11_0, rs1, 0, rd, 0x67); | ||
377 | } | ||
378 | |||
379 | static u32 rv_beq(u8 rs1, u8 rs2, u16 imm12_1) | ||
380 | { | ||
381 | return rv_sb_insn(imm12_1, rs2, rs1, 0, 0x63); | ||
382 | } | ||
383 | |||
384 | static u32 rv_bltu(u8 rs1, u8 rs2, u16 imm12_1) | ||
385 | { | ||
386 | return rv_sb_insn(imm12_1, rs2, rs1, 6, 0x63); | ||
387 | } | ||
388 | |||
389 | static u32 rv_bgeu(u8 rs1, u8 rs2, u16 imm12_1) | ||
390 | { | ||
391 | return rv_sb_insn(imm12_1, rs2, rs1, 7, 0x63); | ||
392 | } | ||
393 | |||
394 | static u32 rv_bne(u8 rs1, u8 rs2, u16 imm12_1) | ||
395 | { | ||
396 | return rv_sb_insn(imm12_1, rs2, rs1, 1, 0x63); | ||
397 | } | ||
398 | |||
399 | static u32 rv_blt(u8 rs1, u8 rs2, u16 imm12_1) | ||
400 | { | ||
401 | return rv_sb_insn(imm12_1, rs2, rs1, 4, 0x63); | ||
402 | } | ||
403 | |||
404 | static u32 rv_bge(u8 rs1, u8 rs2, u16 imm12_1) | ||
405 | { | ||
406 | return rv_sb_insn(imm12_1, rs2, rs1, 5, 0x63); | ||
407 | } | ||
408 | |||
409 | static u32 rv_sb(u8 rs1, u16 imm11_0, u8 rs2) | ||
410 | { | ||
411 | return rv_s_insn(imm11_0, rs2, rs1, 0, 0x23); | ||
412 | } | ||
413 | |||
414 | static u32 rv_sh(u8 rs1, u16 imm11_0, u8 rs2) | ||
415 | { | ||
416 | return rv_s_insn(imm11_0, rs2, rs1, 1, 0x23); | ||
417 | } | ||
418 | |||
419 | static u32 rv_sw(u8 rs1, u16 imm11_0, u8 rs2) | ||
420 | { | ||
421 | return rv_s_insn(imm11_0, rs2, rs1, 2, 0x23); | ||
422 | } | ||
423 | |||
424 | static u32 rv_sd(u8 rs1, u16 imm11_0, u8 rs2) | ||
425 | { | ||
426 | return rv_s_insn(imm11_0, rs2, rs1, 3, 0x23); | ||
427 | } | ||
428 | |||
429 | static u32 rv_lbu(u8 rd, u16 imm11_0, u8 rs1) | ||
430 | { | ||
431 | return rv_i_insn(imm11_0, rs1, 4, rd, 0x03); | ||
432 | } | ||
433 | |||
434 | static u32 rv_lhu(u8 rd, u16 imm11_0, u8 rs1) | ||
435 | { | ||
436 | return rv_i_insn(imm11_0, rs1, 5, rd, 0x03); | ||
437 | } | ||
438 | |||
439 | static u32 rv_lwu(u8 rd, u16 imm11_0, u8 rs1) | ||
440 | { | ||
441 | return rv_i_insn(imm11_0, rs1, 6, rd, 0x03); | ||
442 | } | ||
443 | |||
444 | static u32 rv_ld(u8 rd, u16 imm11_0, u8 rs1) | ||
445 | { | ||
446 | return rv_i_insn(imm11_0, rs1, 3, rd, 0x03); | ||
447 | } | ||
448 | |||
449 | static u32 rv_amoadd_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl) | ||
450 | { | ||
451 | return rv_amo_insn(0, aq, rl, rs2, rs1, 2, rd, 0x2f); | ||
452 | } | ||
453 | |||
454 | static u32 rv_amoadd_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl) | ||
455 | { | ||
456 | return rv_amo_insn(0, aq, rl, rs2, rs1, 3, rd, 0x2f); | ||
457 | } | ||
458 | |||
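Editor's note: the constructors above pack fields into the base RV64 instruction formats (R, I, S, SB, U, UJ and the AMO variant of R). As a quick sanity check outside the patch itself, the standalone sketch below re-derives the same R-type packing used by rv_r_insn()/rv_add() and compares it against the well-known encoding of "add a0, a1, a2"; the helper name r_insn is illustrative only, not part of the kernel code.

/* Editor's sketch, not part of the patch: R-type packing checked
 * against the canonical word for "add a0, a1, a2" (0x00c58533).
 */
#include <assert.h>
#include <stdint.h>

static uint32_t r_insn(uint8_t funct7, uint8_t rs2, uint8_t rs1,
		       uint8_t funct3, uint8_t rd, uint8_t opcode)
{
	return ((uint32_t)funct7 << 25) | ((uint32_t)rs2 << 20) |
	       ((uint32_t)rs1 << 15) | ((uint32_t)funct3 << 12) |
	       ((uint32_t)rd << 7) | opcode;
}

int main(void)
{
	/* a0 = x10, a1 = x11, a2 = x12; OP opcode 0x33, funct3 = funct7 = 0 */
	assert(r_insn(0, 12, 11, 0, 10, 0x33) == 0x00c58533);
	return 0;
}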
459 | static bool is_12b_int(s64 val) | ||
460 | { | ||
461 | return -(1 << 11) <= val && val < (1 << 11); | ||
462 | } | ||
463 | |||
464 | static bool is_13b_int(s64 val) | ||
465 | { | ||
466 | return -(1 << 12) <= val && val < (1 << 12); | ||
467 | } | ||
468 | |||
469 | static bool is_21b_int(s64 val) | ||
470 | { | ||
471 | return -(1L << 20) <= val && val < (1L << 20); | ||
472 | } | ||
473 | |||
474 | static bool is_32b_int(s64 val) | ||
475 | { | ||
476 | return -(1L << 31) <= val && val < (1L << 31); | ||
477 | } | ||
478 | |||
479 | static int is_12b_check(int off, int insn) | ||
480 | { | ||
481 | if (!is_12b_int(off)) { | ||
482 | pr_err("bpf-jit: insn=%d offset=%d not supported yet!\n", | ||
483 | insn, (int)off); | ||
484 | return -1; | ||
485 | } | ||
486 | return 0; | ||
487 | } | ||
488 | |||
489 | static int is_13b_check(int off, int insn) | ||
490 | { | ||
491 | if (!is_13b_int(off)) { | ||
492 | pr_err("bpf-jit: insn=%d offset=%d not supported yet!\n", | ||
493 | insn, (int)off); | ||
494 | return -1; | ||
495 | } | ||
496 | return 0; | ||
497 | } | ||
498 | |||
499 | static int is_21b_check(int off, int insn) | ||
500 | { | ||
501 | if (!is_21b_int(off)) { | ||
502 | pr_err("bpf-jit: insn=%d offset=%d not supported yet!\n", | ||
503 | insn, (int)off); | ||
504 | return -1; | ||
505 | } | ||
506 | return 0; | ||
507 | } | ||
508 | |||
509 | static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx) | ||
510 | { | ||
511 | /* Note that the immediate from the add is sign-extended, | ||
512 | * which means that we need to compensate for this by adding 2^12 | ||
513 | * when the 12th bit is set. A simpler way of doing this, and | ||
514 | * getting rid of the check, is to just add 2^11 before the | ||
515 | * shift. The "Loading a 32-Bit constant" example from the | ||
516 | * "Computer Organization and Design, RISC-V edition" book by | ||
517 | * Patterson/Hennessy highlights this fact. | ||
518 | * | ||
519 | * This also means that we need to process LSB to MSB. | ||
520 | */ | ||
521 | s64 upper = (val + (1 << 11)) >> 12, lower = val & 0xfff; | ||
522 | int shift; | ||
523 | |||
524 | if (is_32b_int(val)) { | ||
525 | if (upper) | ||
526 | emit(rv_lui(rd, upper), ctx); | ||
527 | |||
528 | if (!upper) { | ||
529 | emit(rv_addi(rd, RV_REG_ZERO, lower), ctx); | ||
530 | return; | ||
531 | } | ||
532 | |||
533 | emit(rv_addiw(rd, rd, lower), ctx); | ||
534 | return; | ||
535 | } | ||
536 | |||
537 | shift = __ffs(upper); | ||
538 | upper >>= shift; | ||
539 | shift += 12; | ||
540 | |||
541 | emit_imm(rd, upper, ctx); | ||
542 | |||
543 | emit(rv_slli(rd, rd, shift), ctx); | ||
544 | if (lower) | ||
545 | emit(rv_addi(rd, rd, lower), ctx); | ||
546 | } | ||
547 | |||
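Editor's note on emit_imm(): a value that fits in 32 bits is materialized as lui (upper 20 bits) followed by addiw (lower 12 bits), or a single addi when the upper part is zero; because the addiw immediate is sign-extended, the upper part is first rounded by +2^11. Larger values are built recursively and shifted into place. The following standalone sketch (not kernel code) only verifies the upper/lower split arithmetic for a few 32-bit values.

/* Editor's sketch: lui(upper) plus the sign-extended 12-bit lower part
 * reconstructs the original value when upper is rounded by +2^11,
 * mirroring the split computed at the top of emit_imm().
 */
#include <assert.h>
#include <stdint.h>

static void check_split(int64_t val)
{
	int64_t upper = (val + (1 << 11)) >> 12;
	int64_t lower = ((val & 0xfff) ^ 0x800) - 0x800; /* sign-extend 12 bits */

	assert((upper << 12) + lower == val);
}

int main(void)
{
	check_split(0x7ff);		/* lower part alone is enough */
	check_split(0x800);		/* 12th bit set: rounding kicks in */
	check_split(-2048);
	check_split(0x12345678);
	return 0;
}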
548 | static int rv_offset(int bpf_to, int bpf_from, struct rv_jit_context *ctx) | ||
549 | { | ||
550 | int from = ctx->offset[bpf_from] - 1, to = ctx->offset[bpf_to]; | ||
551 | |||
552 | return (to - from) << 2; | ||
553 | } | ||
554 | |||
555 | static int epilogue_offset(struct rv_jit_context *ctx) | ||
556 | { | ||
557 | int to = ctx->epilogue_offset, from = ctx->ninsns; | ||
558 | |||
559 | return (to - from) << 2; | ||
560 | } | ||
561 | |||
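Editor's note: rv_offset() and epilogue_offset() return byte offsets (the instruction-count delta shifted left by 2, since this JIT emits only 4-byte instructions), while the branch and jump constructors take the offset in 2-byte units (their parameters are named imm12_1/imm20_1), which is why the emitter later passes rvoff >> 1. A small standalone check of that convention, not part of the patch, using the same SB packing as rv_sb_insn() above:

/* Editor's sketch: a forward branch of +16 bytes is handed to the
 * SB-type encoder as 16 >> 1 = 8 and must match the canonical
 * encoding of "beq zero, zero, 16" (0x00000863).
 */
#include <assert.h>
#include <stdint.h>

static uint32_t sb_insn(uint16_t imm12_1, uint8_t rs2, uint8_t rs1,
			uint8_t funct3, uint8_t opcode)
{
	uint8_t imm12 = ((imm12_1 & 0x800) >> 5) | ((imm12_1 & 0x3f0) >> 4);
	uint8_t imm4_1 = ((imm12_1 & 0xf) << 1) | ((imm12_1 & 0x400) >> 10);

	return ((uint32_t)imm12 << 25) | ((uint32_t)rs2 << 20) |
	       ((uint32_t)rs1 << 15) | ((uint32_t)funct3 << 12) |
	       ((uint32_t)imm4_1 << 7) | opcode;
}

int main(void)
{
	int byte_off = 16;

	assert(sb_insn(byte_off >> 1, 0, 0, 0, 0x63) == 0x00000863);
	return 0;
}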
562 | static void __build_epilogue(u8 reg, struct rv_jit_context *ctx) | ||
563 | { | ||
564 | int stack_adjust = ctx->stack_size, store_offset = stack_adjust - 8; | ||
565 | |||
566 | if (seen_reg(RV_REG_RA, ctx)) { | ||
567 | emit(rv_ld(RV_REG_RA, store_offset, RV_REG_SP), ctx); | ||
568 | store_offset -= 8; | ||
569 | } | ||
570 | emit(rv_ld(RV_REG_FP, store_offset, RV_REG_SP), ctx); | ||
571 | store_offset -= 8; | ||
572 | if (seen_reg(RV_REG_S1, ctx)) { | ||
573 | emit(rv_ld(RV_REG_S1, store_offset, RV_REG_SP), ctx); | ||
574 | store_offset -= 8; | ||
575 | } | ||
576 | if (seen_reg(RV_REG_S2, ctx)) { | ||
577 | emit(rv_ld(RV_REG_S2, store_offset, RV_REG_SP), ctx); | ||
578 | store_offset -= 8; | ||
579 | } | ||
580 | if (seen_reg(RV_REG_S3, ctx)) { | ||
581 | emit(rv_ld(RV_REG_S3, store_offset, RV_REG_SP), ctx); | ||
582 | store_offset -= 8; | ||
583 | } | ||
584 | if (seen_reg(RV_REG_S4, ctx)) { | ||
585 | emit(rv_ld(RV_REG_S4, store_offset, RV_REG_SP), ctx); | ||
586 | store_offset -= 8; | ||
587 | } | ||
588 | if (seen_reg(RV_REG_S5, ctx)) { | ||
589 | emit(rv_ld(RV_REG_S5, store_offset, RV_REG_SP), ctx); | ||
590 | store_offset -= 8; | ||
591 | } | ||
592 | if (seen_reg(RV_REG_S6, ctx)) { | ||
593 | emit(rv_ld(RV_REG_S6, store_offset, RV_REG_SP), ctx); | ||
594 | store_offset -= 8; | ||
595 | } | ||
596 | |||
597 | emit(rv_addi(RV_REG_SP, RV_REG_SP, stack_adjust), ctx); | ||
598 | /* Set return value. */ | ||
599 | emit(rv_addi(RV_REG_A0, RV_REG_A5, 0), ctx); | ||
600 | emit(rv_jalr(RV_REG_ZERO, reg, 0), ctx); | ||
601 | } | ||
602 | |||
603 | static void emit_zext_32(u8 reg, struct rv_jit_context *ctx) | ||
604 | { | ||
605 | emit(rv_slli(reg, reg, 32), ctx); | ||
606 | emit(rv_srli(reg, reg, 32), ctx); | ||
607 | } | ||
608 | |||
609 | static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx) | ||
610 | { | ||
611 | int tc_ninsn, off, start_insn = ctx->ninsns; | ||
612 | u8 tcc = rv_tail_call_reg(ctx); | ||
613 | |||
614 | /* a0: &ctx | ||
615 | * a1: &array | ||
616 | * a2: index | ||
617 | * | ||
618 | * if (index >= array->map.max_entries) | ||
619 | * goto out; | ||
620 | */ | ||
621 | tc_ninsn = insn ? ctx->offset[insn] - ctx->offset[insn - 1] : | ||
622 | ctx->offset[0]; | ||
623 | emit_zext_32(RV_REG_A2, ctx); | ||
624 | |||
625 | off = offsetof(struct bpf_array, map.max_entries); | ||
626 | if (is_12b_check(off, insn)) | ||
627 | return -1; | ||
628 | emit(rv_lwu(RV_REG_T1, off, RV_REG_A1), ctx); | ||
629 | off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2; | ||
630 | if (is_13b_check(off, insn)) | ||
631 | return -1; | ||
632 | emit(rv_bgeu(RV_REG_A2, RV_REG_T1, off >> 1), ctx); | ||
633 | |||
634 | /* if (--TCC < 0) | ||
635 | * goto out; | ||
636 | */ | ||
637 | emit(rv_addi(RV_REG_T1, tcc, -1), ctx); | ||
638 | off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2; | ||
639 | if (is_13b_check(off, insn)) | ||
640 | return -1; | ||
641 | emit(rv_blt(RV_REG_T1, RV_REG_ZERO, off >> 1), ctx); | ||
642 | |||
643 | /* prog = array->ptrs[index]; | ||
644 | * if (!prog) | ||
645 | * goto out; | ||
646 | */ | ||
647 | emit(rv_slli(RV_REG_T2, RV_REG_A2, 3), ctx); | ||
648 | emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_A1), ctx); | ||
649 | off = offsetof(struct bpf_array, ptrs); | ||
650 | if (is_12b_check(off, insn)) | ||
651 | return -1; | ||
652 | emit(rv_ld(RV_REG_T2, off, RV_REG_T2), ctx); | ||
653 | off = (tc_ninsn - (ctx->ninsns - start_insn)) << 2; | ||
654 | if (is_13b_check(off, insn)) | ||
655 | return -1; | ||
656 | emit(rv_beq(RV_REG_T2, RV_REG_ZERO, off >> 1), ctx); | ||
657 | |||
658 | /* goto *(prog->bpf_func + 4); */ | ||
659 | off = offsetof(struct bpf_prog, bpf_func); | ||
660 | if (is_12b_check(off, insn)) | ||
661 | return -1; | ||
662 | emit(rv_ld(RV_REG_T3, off, RV_REG_T2), ctx); | ||
663 | emit(rv_addi(RV_REG_T3, RV_REG_T3, 4), ctx); | ||
664 | emit(rv_addi(RV_REG_TCC, RV_REG_T1, 0), ctx); | ||
665 | __build_epilogue(RV_REG_T3, ctx); | ||
666 | return 0; | ||
667 | } | ||
668 | |||
669 | static void init_regs(u8 *rd, u8 *rs, const struct bpf_insn *insn, | ||
670 | struct rv_jit_context *ctx) | ||
671 | { | ||
672 | u8 code = insn->code; | ||
673 | |||
674 | switch (code) { | ||
675 | case BPF_JMP | BPF_JA: | ||
676 | case BPF_JMP | BPF_CALL: | ||
677 | case BPF_JMP | BPF_EXIT: | ||
678 | case BPF_JMP | BPF_TAIL_CALL: | ||
679 | break; | ||
680 | default: | ||
681 | *rd = bpf_to_rv_reg(insn->dst_reg, ctx); | ||
682 | } | ||
683 | |||
684 | if (code & (BPF_ALU | BPF_X) || code & (BPF_ALU64 | BPF_X) || | ||
685 | code & (BPF_JMP | BPF_X) || code & (BPF_JMP32 | BPF_X) || | ||
686 | code & BPF_LDX || code & BPF_STX) | ||
687 | *rs = bpf_to_rv_reg(insn->src_reg, ctx); | ||
688 | } | ||
689 | |||
690 | static int rv_offset_check(int *rvoff, s16 off, int insn, | ||
691 | struct rv_jit_context *ctx) | ||
692 | { | ||
693 | *rvoff = rv_offset(insn + off, insn, ctx); | ||
694 | return is_13b_check(*rvoff, insn); | ||
695 | } | ||
696 | |||
697 | static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx) | ||
698 | { | ||
699 | emit(rv_addi(RV_REG_T2, *rd, 0), ctx); | ||
700 | emit_zext_32(RV_REG_T2, ctx); | ||
701 | emit(rv_addi(RV_REG_T1, *rs, 0), ctx); | ||
702 | emit_zext_32(RV_REG_T1, ctx); | ||
703 | *rd = RV_REG_T2; | ||
704 | *rs = RV_REG_T1; | ||
705 | } | ||
706 | |||
707 | static void emit_sext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx) | ||
708 | { | ||
709 | emit(rv_addiw(RV_REG_T2, *rd, 0), ctx); | ||
710 | emit(rv_addiw(RV_REG_T1, *rs, 0), ctx); | ||
711 | *rd = RV_REG_T2; | ||
712 | *rs = RV_REG_T1; | ||
713 | } | ||
714 | |||
715 | static void emit_zext_32_rd_t1(u8 *rd, struct rv_jit_context *ctx) | ||
716 | { | ||
717 | emit(rv_addi(RV_REG_T2, *rd, 0), ctx); | ||
718 | emit_zext_32(RV_REG_T2, ctx); | ||
719 | emit_zext_32(RV_REG_T1, ctx); | ||
720 | *rd = RV_REG_T2; | ||
721 | } | ||
722 | |||
723 | static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx) | ||
724 | { | ||
725 | emit(rv_addiw(RV_REG_T2, *rd, 0), ctx); | ||
726 | *rd = RV_REG_T2; | ||
727 | } | ||
728 | |||
729 | static int emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, | ||
730 | bool extra_pass) | ||
731 | { | ||
732 | bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 || | ||
733 | BPF_CLASS(insn->code) == BPF_JMP; | ||
734 | int rvoff, i = insn - ctx->prog->insnsi; | ||
735 | u8 rd = -1, rs = -1, code = insn->code; | ||
736 | s16 off = insn->off; | ||
737 | s32 imm = insn->imm; | ||
738 | |||
739 | init_regs(&rd, &rs, insn, ctx); | ||
740 | |||
741 | switch (code) { | ||
742 | /* dst = src */ | ||
743 | case BPF_ALU | BPF_MOV | BPF_X: | ||
744 | case BPF_ALU64 | BPF_MOV | BPF_X: | ||
745 | emit(is64 ? rv_addi(rd, rs, 0) : rv_addiw(rd, rs, 0), ctx); | ||
746 | if (!is64) | ||
747 | emit_zext_32(rd, ctx); | ||
748 | break; | ||
749 | |||
750 | /* dst = dst OP src */ | ||
751 | case BPF_ALU | BPF_ADD | BPF_X: | ||
752 | case BPF_ALU64 | BPF_ADD | BPF_X: | ||
753 | emit(is64 ? rv_add(rd, rd, rs) : rv_addw(rd, rd, rs), ctx); | ||
754 | break; | ||
755 | case BPF_ALU | BPF_SUB | BPF_X: | ||
756 | case BPF_ALU64 | BPF_SUB | BPF_X: | ||
757 | emit(is64 ? rv_sub(rd, rd, rs) : rv_subw(rd, rd, rs), ctx); | ||
758 | break; | ||
759 | case BPF_ALU | BPF_AND | BPF_X: | ||
760 | case BPF_ALU64 | BPF_AND | BPF_X: | ||
761 | emit(rv_and(rd, rd, rs), ctx); | ||
762 | break; | ||
763 | case BPF_ALU | BPF_OR | BPF_X: | ||
764 | case BPF_ALU64 | BPF_OR | BPF_X: | ||
765 | emit(rv_or(rd, rd, rs), ctx); | ||
766 | break; | ||
767 | case BPF_ALU | BPF_XOR | BPF_X: | ||
768 | case BPF_ALU64 | BPF_XOR | BPF_X: | ||
769 | emit(rv_xor(rd, rd, rs), ctx); | ||
770 | break; | ||
771 | case BPF_ALU | BPF_MUL | BPF_X: | ||
772 | case BPF_ALU64 | BPF_MUL | BPF_X: | ||
773 | emit(is64 ? rv_mul(rd, rd, rs) : rv_mulw(rd, rd, rs), ctx); | ||
774 | if (!is64) | ||
775 | emit_zext_32(rd, ctx); | ||
776 | break; | ||
777 | case BPF_ALU | BPF_DIV | BPF_X: | ||
778 | case BPF_ALU64 | BPF_DIV | BPF_X: | ||
779 | emit(is64 ? rv_divu(rd, rd, rs) : rv_divuw(rd, rd, rs), ctx); | ||
780 | if (!is64) | ||
781 | emit_zext_32(rd, ctx); | ||
782 | break; | ||
783 | case BPF_ALU | BPF_MOD | BPF_X: | ||
784 | case BPF_ALU64 | BPF_MOD | BPF_X: | ||
785 | emit(is64 ? rv_remu(rd, rd, rs) : rv_remuw(rd, rd, rs), ctx); | ||
786 | if (!is64) | ||
787 | emit_zext_32(rd, ctx); | ||
788 | break; | ||
789 | case BPF_ALU | BPF_LSH | BPF_X: | ||
790 | case BPF_ALU64 | BPF_LSH | BPF_X: | ||
791 | emit(is64 ? rv_sll(rd, rd, rs) : rv_sllw(rd, rd, rs), ctx); | ||
792 | break; | ||
793 | case BPF_ALU | BPF_RSH | BPF_X: | ||
794 | case BPF_ALU64 | BPF_RSH | BPF_X: | ||
795 | emit(is64 ? rv_srl(rd, rd, rs) : rv_srlw(rd, rd, rs), ctx); | ||
796 | break; | ||
797 | case BPF_ALU | BPF_ARSH | BPF_X: | ||
798 | case BPF_ALU64 | BPF_ARSH | BPF_X: | ||
799 | emit(is64 ? rv_sra(rd, rd, rs) : rv_sraw(rd, rd, rs), ctx); | ||
800 | break; | ||
801 | |||
802 | /* dst = -dst */ | ||
803 | case BPF_ALU | BPF_NEG: | ||
804 | case BPF_ALU64 | BPF_NEG: | ||
805 | emit(is64 ? rv_sub(rd, RV_REG_ZERO, rd) : | ||
806 | rv_subw(rd, RV_REG_ZERO, rd), ctx); | ||
807 | break; | ||
808 | |||
809 | /* dst = BSWAP##imm(dst) */ | ||
810 | case BPF_ALU | BPF_END | BPF_FROM_LE: | ||
811 | { | ||
812 | int shift = 64 - imm; | ||
813 | |||
814 | emit(rv_slli(rd, rd, shift), ctx); | ||
815 | emit(rv_srli(rd, rd, shift), ctx); | ||
816 | break; | ||
817 | } | ||
818 | case BPF_ALU | BPF_END | BPF_FROM_BE: | ||
819 | emit(rv_addi(RV_REG_T2, RV_REG_ZERO, 0), ctx); | ||
820 | |||
821 | emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); | ||
822 | emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); | ||
823 | emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); | ||
824 | emit(rv_srli(rd, rd, 8), ctx); | ||
825 | if (imm == 16) | ||
826 | goto out_be; | ||
827 | |||
828 | emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); | ||
829 | emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); | ||
830 | emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); | ||
831 | emit(rv_srli(rd, rd, 8), ctx); | ||
832 | |||
833 | emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); | ||
834 | emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); | ||
835 | emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); | ||
836 | emit(rv_srli(rd, rd, 8), ctx); | ||
837 | if (imm == 32) | ||
838 | goto out_be; | ||
839 | |||
840 | emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); | ||
841 | emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); | ||
842 | emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); | ||
843 | emit(rv_srli(rd, rd, 8), ctx); | ||
844 | |||
845 | emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); | ||
846 | emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); | ||
847 | emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); | ||
848 | emit(rv_srli(rd, rd, 8), ctx); | ||
849 | |||
850 | emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); | ||
851 | emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); | ||
852 | emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); | ||
853 | emit(rv_srli(rd, rd, 8), ctx); | ||
854 | |||
855 | emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); | ||
856 | emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); | ||
857 | emit(rv_slli(RV_REG_T2, RV_REG_T2, 8), ctx); | ||
858 | emit(rv_srli(rd, rd, 8), ctx); | ||
859 | out_be: | ||
860 | emit(rv_andi(RV_REG_T1, rd, 0xff), ctx); | ||
861 | emit(rv_add(RV_REG_T2, RV_REG_T2, RV_REG_T1), ctx); | ||
862 | |||
863 | emit(rv_addi(rd, RV_REG_T2, 0), ctx); | ||
864 | break; | ||
865 | |||
866 | /* dst = imm */ | ||
867 | case BPF_ALU | BPF_MOV | BPF_K: | ||
868 | case BPF_ALU64 | BPF_MOV | BPF_K: | ||
869 | emit_imm(rd, imm, ctx); | ||
870 | if (!is64) | ||
871 | emit_zext_32(rd, ctx); | ||
872 | break; | ||
873 | |||
874 | /* dst = dst OP imm */ | ||
875 | case BPF_ALU | BPF_ADD | BPF_K: | ||
876 | case BPF_ALU64 | BPF_ADD | BPF_K: | ||
877 | if (is_12b_int(imm)) { | ||
878 | emit(is64 ? rv_addi(rd, rd, imm) : | ||
879 | rv_addiw(rd, rd, imm), ctx); | ||
880 | } else { | ||
881 | emit_imm(RV_REG_T1, imm, ctx); | ||
882 | emit(is64 ? rv_add(rd, rd, RV_REG_T1) : | ||
883 | rv_addw(rd, rd, RV_REG_T1), ctx); | ||
884 | } | ||
885 | if (!is64) | ||
886 | emit_zext_32(rd, ctx); | ||
887 | break; | ||
888 | case BPF_ALU | BPF_SUB | BPF_K: | ||
889 | case BPF_ALU64 | BPF_SUB | BPF_K: | ||
890 | if (is_12b_int(-imm)) { | ||
891 | emit(is64 ? rv_addi(rd, rd, -imm) : | ||
892 | rv_addiw(rd, rd, -imm), ctx); | ||
893 | } else { | ||
894 | emit_imm(RV_REG_T1, imm, ctx); | ||
895 | emit(is64 ? rv_sub(rd, rd, RV_REG_T1) : | ||
896 | rv_subw(rd, rd, RV_REG_T1), ctx); | ||
897 | } | ||
898 | if (!is64) | ||
899 | emit_zext_32(rd, ctx); | ||
900 | break; | ||
901 | case BPF_ALU | BPF_AND | BPF_K: | ||
902 | case BPF_ALU64 | BPF_AND | BPF_K: | ||
903 | if (is_12b_int(imm)) { | ||
904 | emit(rv_andi(rd, rd, imm), ctx); | ||
905 | } else { | ||
906 | emit_imm(RV_REG_T1, imm, ctx); | ||
907 | emit(rv_and(rd, rd, RV_REG_T1), ctx); | ||
908 | } | ||
909 | if (!is64) | ||
910 | emit_zext_32(rd, ctx); | ||
911 | break; | ||
912 | case BPF_ALU | BPF_OR | BPF_K: | ||
913 | case BPF_ALU64 | BPF_OR | BPF_K: | ||
914 | if (is_12b_int(imm)) { | ||
915 | emit(rv_ori(rd, rd, imm), ctx); | ||
916 | } else { | ||
917 | emit_imm(RV_REG_T1, imm, ctx); | ||
918 | emit(rv_or(rd, rd, RV_REG_T1), ctx); | ||
919 | } | ||
920 | if (!is64) | ||
921 | emit_zext_32(rd, ctx); | ||
922 | break; | ||
923 | case BPF_ALU | BPF_XOR | BPF_K: | ||
924 | case BPF_ALU64 | BPF_XOR | BPF_K: | ||
925 | if (is_12b_int(imm)) { | ||
926 | emit(rv_xori(rd, rd, imm), ctx); | ||
927 | } else { | ||
928 | emit_imm(RV_REG_T1, imm, ctx); | ||
929 | emit(rv_xor(rd, rd, RV_REG_T1), ctx); | ||
930 | } | ||
931 | if (!is64) | ||
932 | emit_zext_32(rd, ctx); | ||
933 | break; | ||
934 | case BPF_ALU | BPF_MUL | BPF_K: | ||
935 | case BPF_ALU64 | BPF_MUL | BPF_K: | ||
936 | emit_imm(RV_REG_T1, imm, ctx); | ||
937 | emit(is64 ? rv_mul(rd, rd, RV_REG_T1) : | ||
938 | rv_mulw(rd, rd, RV_REG_T1), ctx); | ||
939 | if (!is64) | ||
940 | emit_zext_32(rd, ctx); | ||
941 | break; | ||
942 | case BPF_ALU | BPF_DIV | BPF_K: | ||
943 | case BPF_ALU64 | BPF_DIV | BPF_K: | ||
944 | emit_imm(RV_REG_T1, imm, ctx); | ||
945 | emit(is64 ? rv_divu(rd, rd, RV_REG_T1) : | ||
946 | rv_divuw(rd, rd, RV_REG_T1), ctx); | ||
947 | if (!is64) | ||
948 | emit_zext_32(rd, ctx); | ||
949 | break; | ||
950 | case BPF_ALU | BPF_MOD | BPF_K: | ||
951 | case BPF_ALU64 | BPF_MOD | BPF_K: | ||
952 | emit_imm(RV_REG_T1, imm, ctx); | ||
953 | emit(is64 ? rv_remu(rd, rd, RV_REG_T1) : | ||
954 | rv_remuw(rd, rd, RV_REG_T1), ctx); | ||
955 | if (!is64) | ||
956 | emit_zext_32(rd, ctx); | ||
957 | break; | ||
958 | case BPF_ALU | BPF_LSH | BPF_K: | ||
959 | case BPF_ALU64 | BPF_LSH | BPF_K: | ||
960 | emit(is64 ? rv_slli(rd, rd, imm) : rv_slliw(rd, rd, imm), ctx); | ||
961 | break; | ||
962 | case BPF_ALU | BPF_RSH | BPF_K: | ||
963 | case BPF_ALU64 | BPF_RSH | BPF_K: | ||
964 | emit(is64 ? rv_srli(rd, rd, imm) : rv_srliw(rd, rd, imm), ctx); | ||
965 | break; | ||
966 | case BPF_ALU | BPF_ARSH | BPF_K: | ||
967 | case BPF_ALU64 | BPF_ARSH | BPF_K: | ||
968 | emit(is64 ? rv_srai(rd, rd, imm) : rv_sraiw(rd, rd, imm), ctx); | ||
969 | break; | ||
970 | |||
971 | /* JUMP off */ | ||
972 | case BPF_JMP | BPF_JA: | ||
973 | rvoff = rv_offset(i + off, i, ctx); | ||
974 | if (!is_21b_int(rvoff)) { | ||
975 | pr_err("bpf-jit: insn=%d offset=%d not supported yet!\n", | ||
976 | i, rvoff); | ||
977 | return -1; | ||
978 | } | ||
979 | |||
980 | emit(rv_jal(RV_REG_ZERO, rvoff >> 1), ctx); | ||
981 | break; | ||
982 | |||
983 | /* IF (dst COND src) JUMP off */ | ||
984 | case BPF_JMP | BPF_JEQ | BPF_X: | ||
985 | case BPF_JMP32 | BPF_JEQ | BPF_X: | ||
986 | if (rv_offset_check(&rvoff, off, i, ctx)) | ||
987 | return -1; | ||
988 | if (!is64) | ||
989 | emit_zext_32_rd_rs(&rd, &rs, ctx); | ||
990 | emit(rv_beq(rd, rs, rvoff >> 1), ctx); | ||
991 | break; | ||
992 | case BPF_JMP | BPF_JGT | BPF_X: | ||
993 | case BPF_JMP32 | BPF_JGT | BPF_X: | ||
994 | if (rv_offset_check(&rvoff, off, i, ctx)) | ||
995 | return -1; | ||
996 | if (!is64) | ||
997 | emit_zext_32_rd_rs(&rd, &rs, ctx); | ||
998 | emit(rv_bltu(rs, rd, rvoff >> 1), ctx); | ||
999 | break; | ||
1000 | case BPF_JMP | BPF_JLT | BPF_X: | ||
1001 | case BPF_JMP32 | BPF_JLT | BPF_X: | ||
1002 | if (rv_offset_check(&rvoff, off, i, ctx)) | ||
1003 | return -1; | ||
1004 | if (!is64) | ||
1005 | emit_zext_32_rd_rs(&rd, &rs, ctx); | ||
1006 | emit(rv_bltu(rd, rs, rvoff >> 1), ctx); | ||
1007 | break; | ||
1008 | case BPF_JMP | BPF_JGE | BPF_X: | ||
1009 | case BPF_JMP32 | BPF_JGE | BPF_X: | ||
1010 | if (rv_offset_check(&rvoff, off, i, ctx)) | ||
1011 | return -1; | ||
1012 | if (!is64) | ||
1013 | emit_zext_32_rd_rs(&rd, &rs, ctx); | ||
1014 | emit(rv_bgeu(rd, rs, rvoff >> 1), ctx); | ||
1015 | break; | ||
1016 | case BPF_JMP | BPF_JLE | BPF_X: | ||
1017 | case BPF_JMP32 | BPF_JLE | BPF_X: | ||
1018 | if (rv_offset_check(&rvoff, off, i, ctx)) | ||
1019 | return -1; | ||
1020 | if (!is64) | ||
1021 | emit_zext_32_rd_rs(&rd, &rs, ctx); | ||
1022 | emit(rv_bgeu(rs, rd, rvoff >> 1), ctx); | ||
1023 | break; | ||
1024 | case BPF_JMP | BPF_JNE | BPF_X: | ||
1025 | case BPF_JMP32 | BPF_JNE | BPF_X: | ||
1026 | if (rv_offset_check(&rvoff, off, i, ctx)) | ||
1027 | return -1; | ||
1028 | if (!is64) | ||
1029 | emit_zext_32_rd_rs(&rd, &rs, ctx); | ||
1030 | emit(rv_bne(rd, rs, rvoff >> 1), ctx); | ||
1031 | break; | ||
1032 | case BPF_JMP | BPF_JSGT | BPF_X: | ||
1033 | case BPF_JMP32 | BPF_JSGT | BPF_X: | ||
1034 | if (rv_offset_check(&rvoff, off, i, ctx)) | ||
1035 | return -1; | ||
1036 | if (!is64) | ||
1037 | emit_sext_32_rd_rs(&rd, &rs, ctx); | ||
1038 | emit(rv_blt(rs, rd, rvoff >> 1), ctx); | ||
1039 | break; | ||
1040 | case BPF_JMP | BPF_JSLT | BPF_X: | ||
1041 | case BPF_JMP32 | BPF_JSLT | BPF_X: | ||
1042 | if (rv_offset_check(&rvoff, off, i, ctx)) | ||
1043 | return -1; | ||
1044 | if (!is64) | ||
1045 | emit_sext_32_rd_rs(&rd, &rs, ctx); | ||
1046 | emit(rv_blt(rd, rs, rvoff >> 1), ctx); | ||
1047 | break; | ||
1048 | case BPF_JMP | BPF_JSGE | BPF_X: | ||
1049 | case BPF_JMP32 | BPF_JSGE | BPF_X: | ||
1050 | if (rv_offset_check(&rvoff, off, i, ctx)) | ||
1051 | return -1; | ||
1052 | if (!is64) | ||
1053 | emit_sext_32_rd_rs(&rd, &rs, ctx); | ||
1054 | emit(rv_bge(rd, rs, rvoff >> 1), ctx); | ||
1055 | break; | ||
1056 | case BPF_JMP | BPF_JSLE | BPF_X: | ||
1057 | case BPF_JMP32 | BPF_JSLE | BPF_X: | ||
1058 | if (rv_offset_check(&rvoff, off, i, ctx)) | ||
1059 | return -1; | ||
1060 | if (!is64) | ||
1061 | emit_sext_32_rd_rs(&rd, &rs, ctx); | ||
1062 | emit(rv_bge(rs, rd, rvoff >> 1), ctx); | ||
1063 | break; | ||
1064 | case BPF_JMP | BPF_JSET | BPF_X: | ||
1065 | case BPF_JMP32 | BPF_JSET | BPF_X: | ||
1066 | if (rv_offset_check(&rvoff, off, i, ctx)) | ||
1067 | return -1; | ||
1068 | if (!is64) | ||
1069 | emit_zext_32_rd_rs(&rd, &rs, ctx); | ||
1070 | emit(rv_and(RV_REG_T1, rd, rs), ctx); | ||
1071 | emit(rv_bne(RV_REG_T1, RV_REG_ZERO, rvoff >> 1), ctx); | ||
1072 | break; | ||
1073 | |||
1074 | /* IF (dst COND imm) JUMP off */ | ||
1075 | case BPF_JMP | BPF_JEQ | BPF_K: | ||
1076 | case BPF_JMP32 | BPF_JEQ | BPF_K: | ||
1077 | if (rv_offset_check(&rvoff, off, i, ctx)) | ||
1078 | return -1; | ||
1079 | emit_imm(RV_REG_T1, imm, ctx); | ||
1080 | if (!is64) | ||
1081 | emit_zext_32_rd_t1(&rd, ctx); | ||
1082 | emit(rv_beq(rd, RV_REG_T1, rvoff >> 1), ctx); | ||
1083 | break; | ||
1084 | case BPF_JMP | BPF_JGT | BPF_K: | ||
1085 | case BPF_JMP32 | BPF_JGT | BPF_K: | ||
1086 | if (rv_offset_check(&rvoff, off, i, ctx)) | ||
1087 | return -1; | ||
1088 | emit_imm(RV_REG_T1, imm, ctx); | ||
1089 | if (!is64) | ||
1090 | emit_zext_32_rd_t1(&rd, ctx); | ||
1091 | emit(rv_bltu(RV_REG_T1, rd, rvoff >> 1), ctx); | ||
1092 | break; | ||
1093 | case BPF_JMP | BPF_JLT | BPF_K: | ||
1094 | case BPF_JMP32 | BPF_JLT | BPF_K: | ||
1095 | if (rv_offset_check(&rvoff, off, i, ctx)) | ||
1096 | return -1; | ||
1097 | emit_imm(RV_REG_T1, imm, ctx); | ||
1098 | if (!is64) | ||
1099 | emit_zext_32_rd_t1(&rd, ctx); | ||
1100 | emit(rv_bltu(rd, RV_REG_T1, rvoff >> 1), ctx); | ||
1101 | break; | ||
1102 | case BPF_JMP | BPF_JGE | BPF_K: | ||
1103 | case BPF_JMP32 | BPF_JGE | BPF_K: | ||
1104 | if (rv_offset_check(&rvoff, off, i, ctx)) | ||
1105 | return -1; | ||
1106 | emit_imm(RV_REG_T1, imm, ctx); | ||
1107 | if (!is64) | ||
1108 | emit_zext_32_rd_t1(&rd, ctx); | ||
1109 | emit(rv_bgeu(rd, RV_REG_T1, rvoff >> 1), ctx); | ||
1110 | break; | ||
1111 | case BPF_JMP | BPF_JLE | BPF_K: | ||
1112 | case BPF_JMP32 | BPF_JLE | BPF_K: | ||
1113 | if (rv_offset_check(&rvoff, off, i, ctx)) | ||
1114 | return -1; | ||
1115 | emit_imm(RV_REG_T1, imm, ctx); | ||
1116 | if (!is64) | ||
1117 | emit_zext_32_rd_t1(&rd, ctx); | ||
1118 | emit(rv_bgeu(RV_REG_T1, rd, rvoff >> 1), ctx); | ||
1119 | break; | ||
1120 | case BPF_JMP | BPF_JNE | BPF_K: | ||
1121 | case BPF_JMP32 | BPF_JNE | BPF_K: | ||
1122 | if (rv_offset_check(&rvoff, off, i, ctx)) | ||
1123 | return -1; | ||
1124 | emit_imm(RV_REG_T1, imm, ctx); | ||
1125 | if (!is64) | ||
1126 | emit_zext_32_rd_t1(&rd, ctx); | ||
1127 | emit(rv_bne(rd, RV_REG_T1, rvoff >> 1), ctx); | ||
1128 | break; | ||
1129 | case BPF_JMP | BPF_JSGT | BPF_K: | ||
1130 | case BPF_JMP32 | BPF_JSGT | BPF_K: | ||
1131 | if (rv_offset_check(&rvoff, off, i, ctx)) | ||
1132 | return -1; | ||
1133 | emit_imm(RV_REG_T1, imm, ctx); | ||
1134 | if (!is64) | ||
1135 | emit_sext_32_rd(&rd, ctx); | ||
1136 | emit(rv_blt(RV_REG_T1, rd, rvoff >> 1), ctx); | ||
1137 | break; | ||
1138 | case BPF_JMP | BPF_JSLT | BPF_K: | ||
1139 | case BPF_JMP32 | BPF_JSLT | BPF_K: | ||
1140 | if (rv_offset_check(&rvoff, off, i, ctx)) | ||
1141 | return -1; | ||
1142 | emit_imm(RV_REG_T1, imm, ctx); | ||
1143 | if (!is64) | ||
1144 | emit_sext_32_rd(&rd, ctx); | ||
1145 | emit(rv_blt(rd, RV_REG_T1, rvoff >> 1), ctx); | ||
1146 | break; | ||
1147 | case BPF_JMP | BPF_JSGE | BPF_K: | ||
1148 | case BPF_JMP32 | BPF_JSGE | BPF_K: | ||
1149 | if (rv_offset_check(&rvoff, off, i, ctx)) | ||
1150 | return -1; | ||
1151 | emit_imm(RV_REG_T1, imm, ctx); | ||
1152 | if (!is64) | ||
1153 | emit_sext_32_rd(&rd, ctx); | ||
1154 | emit(rv_bge(rd, RV_REG_T1, rvoff >> 1), ctx); | ||
1155 | break; | ||
1156 | case BPF_JMP | BPF_JSLE | BPF_K: | ||
1157 | case BPF_JMP32 | BPF_JSLE | BPF_K: | ||
1158 | if (rv_offset_check(&rvoff, off, i, ctx)) | ||
1159 | return -1; | ||
1160 | emit_imm(RV_REG_T1, imm, ctx); | ||
1161 | if (!is64) | ||
1162 | emit_sext_32_rd(&rd, ctx); | ||
1163 | emit(rv_bge(RV_REG_T1, rd, rvoff >> 1), ctx); | ||
1164 | break; | ||
1165 | case BPF_JMP | BPF_JSET | BPF_K: | ||
1166 | case BPF_JMP32 | BPF_JSET | BPF_K: | ||
1167 | if (rv_offset_check(&rvoff, off, i, ctx)) | ||
1168 | return -1; | ||
1169 | emit_imm(RV_REG_T1, imm, ctx); | ||
1170 | if (!is64) | ||
1171 | emit_zext_32_rd_t1(&rd, ctx); | ||
1172 | emit(rv_and(RV_REG_T1, rd, RV_REG_T1), ctx); | ||
1173 | emit(rv_bne(RV_REG_T1, RV_REG_ZERO, rvoff >> 1), ctx); | ||
1174 | break; | ||
1175 | |||
1176 | /* function call */ | ||
1177 | case BPF_JMP | BPF_CALL: | ||
1178 | { | ||
1179 | bool fixed; | ||
1180 | int i, ret; | ||
1181 | u64 addr; | ||
1182 | |||
1183 | mark_call(ctx); | ||
1184 | ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &addr, | ||
1185 | &fixed); | ||
1186 | if (ret < 0) | ||
1187 | return ret; | ||
1188 | if (fixed) { | ||
1189 | emit_imm(RV_REG_T1, addr, ctx); | ||
1190 | } else { | ||
1191 | i = ctx->ninsns; | ||
1192 | emit_imm(RV_REG_T1, addr, ctx); | ||
1193 | for (i = ctx->ninsns - i; i < 8; i++) { | ||
1194 | /* nop */ | ||
1195 | emit(rv_addi(RV_REG_ZERO, RV_REG_ZERO, 0), | ||
1196 | ctx); | ||
1197 | } | ||
1198 | } | ||
1199 | emit(rv_jalr(RV_REG_RA, RV_REG_T1, 0), ctx); | ||
1200 | rd = bpf_to_rv_reg(BPF_REG_0, ctx); | ||
1201 | emit(rv_addi(rd, RV_REG_A0, 0), ctx); | ||
1202 | break; | ||
1203 | } | ||
1204 | /* tail call */ | ||
1205 | case BPF_JMP | BPF_TAIL_CALL: | ||
1206 | if (emit_bpf_tail_call(i, ctx)) | ||
1207 | return -1; | ||
1208 | break; | ||
1209 | |||
1210 | /* function return */ | ||
1211 | case BPF_JMP | BPF_EXIT: | ||
1212 | if (i == ctx->prog->len - 1) | ||
1213 | break; | ||
1214 | |||
1215 | rvoff = epilogue_offset(ctx); | ||
1216 | if (is_21b_check(rvoff, i)) | ||
1217 | return -1; | ||
1218 | emit(rv_jal(RV_REG_ZERO, rvoff >> 1), ctx); | ||
1219 | break; | ||
1220 | |||
1221 | /* dst = imm64 */ | ||
1222 | case BPF_LD | BPF_IMM | BPF_DW: | ||
1223 | { | ||
1224 | struct bpf_insn insn1 = insn[1]; | ||
1225 | u64 imm64; | ||
1226 | |||
1227 | imm64 = (u64)insn1.imm << 32 | (u32)imm; | ||
1228 | emit_imm(rd, imm64, ctx); | ||
1229 | return 1; | ||
1230 | } | ||
1231 | |||
1232 | /* LDX: dst = *(size *)(src + off) */ | ||
1233 | case BPF_LDX | BPF_MEM | BPF_B: | ||
1234 | if (is_12b_int(off)) { | ||
1235 | emit(rv_lbu(rd, off, rs), ctx); | ||
1236 | break; | ||
1237 | } | ||
1238 | |||
1239 | emit_imm(RV_REG_T1, off, ctx); | ||
1240 | emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx); | ||
1241 | emit(rv_lbu(rd, 0, RV_REG_T1), ctx); | ||
1242 | break; | ||
1243 | case BPF_LDX | BPF_MEM | BPF_H: | ||
1244 | if (is_12b_int(off)) { | ||
1245 | emit(rv_lhu(rd, off, rs), ctx); | ||
1246 | break; | ||
1247 | } | ||
1248 | |||
1249 | emit_imm(RV_REG_T1, off, ctx); | ||
1250 | emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx); | ||
1251 | emit(rv_lhu(rd, 0, RV_REG_T1), ctx); | ||
1252 | break; | ||
1253 | case BPF_LDX | BPF_MEM | BPF_W: | ||
1254 | if (is_12b_int(off)) { | ||
1255 | emit(rv_lwu(rd, off, rs), ctx); | ||
1256 | break; | ||
1257 | } | ||
1258 | |||
1259 | emit_imm(RV_REG_T1, off, ctx); | ||
1260 | emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx); | ||
1261 | emit(rv_lwu(rd, 0, RV_REG_T1), ctx); | ||
1262 | break; | ||
1263 | case BPF_LDX | BPF_MEM | BPF_DW: | ||
1264 | if (is_12b_int(off)) { | ||
1265 | emit(rv_ld(rd, off, rs), ctx); | ||
1266 | break; | ||
1267 | } | ||
1268 | |||
1269 | emit_imm(RV_REG_T1, off, ctx); | ||
1270 | emit(rv_add(RV_REG_T1, RV_REG_T1, rs), ctx); | ||
1271 | emit(rv_ld(rd, 0, RV_REG_T1), ctx); | ||
1272 | break; | ||
1273 | |||
1274 | /* ST: *(size *)(dst + off) = imm */ | ||
1275 | case BPF_ST | BPF_MEM | BPF_B: | ||
1276 | emit_imm(RV_REG_T1, imm, ctx); | ||
1277 | if (is_12b_int(off)) { | ||
1278 | emit(rv_sb(rd, off, RV_REG_T1), ctx); | ||
1279 | break; | ||
1280 | } | ||
1281 | |||
1282 | emit_imm(RV_REG_T2, off, ctx); | ||
1283 | emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx); | ||
1284 | emit(rv_sb(RV_REG_T2, 0, RV_REG_T1), ctx); | ||
1285 | break; | ||
1286 | |||
1287 | case BPF_ST | BPF_MEM | BPF_H: | ||
1288 | emit_imm(RV_REG_T1, imm, ctx); | ||
1289 | if (is_12b_int(off)) { | ||
1290 | emit(rv_sh(rd, off, RV_REG_T1), ctx); | ||
1291 | break; | ||
1292 | } | ||
1293 | |||
1294 | emit_imm(RV_REG_T2, off, ctx); | ||
1295 | emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx); | ||
1296 | emit(rv_sh(RV_REG_T2, 0, RV_REG_T1), ctx); | ||
1297 | break; | ||
1298 | case BPF_ST | BPF_MEM | BPF_W: | ||
1299 | emit_imm(RV_REG_T1, imm, ctx); | ||
1300 | if (is_12b_int(off)) { | ||
1301 | emit(rv_sw(rd, off, RV_REG_T1), ctx); | ||
1302 | break; | ||
1303 | } | ||
1304 | |||
1305 | emit_imm(RV_REG_T2, off, ctx); | ||
1306 | emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx); | ||
1307 | emit(rv_sw(RV_REG_T2, 0, RV_REG_T1), ctx); | ||
1308 | break; | ||
1309 | case BPF_ST | BPF_MEM | BPF_DW: | ||
1310 | emit_imm(RV_REG_T1, imm, ctx); | ||
1311 | if (is_12b_int(off)) { | ||
1312 | emit(rv_sd(rd, off, RV_REG_T1), ctx); | ||
1313 | break; | ||
1314 | } | ||
1315 | |||
1316 | emit_imm(RV_REG_T2, off, ctx); | ||
1317 | emit(rv_add(RV_REG_T2, RV_REG_T2, rd), ctx); | ||
1318 | emit(rv_sd(RV_REG_T2, 0, RV_REG_T1), ctx); | ||
1319 | break; | ||
1320 | |||
1321 | /* STX: *(size *)(dst + off) = src */ | ||
1322 | case BPF_STX | BPF_MEM | BPF_B: | ||
1323 | if (is_12b_int(off)) { | ||
1324 | emit(rv_sb(rd, off, rs), ctx); | ||
1325 | break; | ||
1326 | } | ||
1327 | |||
1328 | emit_imm(RV_REG_T1, off, ctx); | ||
1329 | emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx); | ||
1330 | emit(rv_sb(RV_REG_T1, 0, rs), ctx); | ||
1331 | break; | ||
1332 | case BPF_STX | BPF_MEM | BPF_H: | ||
1333 | if (is_12b_int(off)) { | ||
1334 | emit(rv_sh(rd, off, rs), ctx); | ||
1335 | break; | ||
1336 | } | ||
1337 | |||
1338 | emit_imm(RV_REG_T1, off, ctx); | ||
1339 | emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx); | ||
1340 | emit(rv_sh(RV_REG_T1, 0, rs), ctx); | ||
1341 | break; | ||
1342 | case BPF_STX | BPF_MEM | BPF_W: | ||
1343 | if (is_12b_int(off)) { | ||
1344 | emit(rv_sw(rd, off, rs), ctx); | ||
1345 | break; | ||
1346 | } | ||
1347 | |||
1348 | emit_imm(RV_REG_T1, off, ctx); | ||
1349 | emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx); | ||
1350 | emit(rv_sw(RV_REG_T1, 0, rs), ctx); | ||
1351 | break; | ||
1352 | case BPF_STX | BPF_MEM | BPF_DW: | ||
1353 | if (is_12b_int(off)) { | ||
1354 | emit(rv_sd(rd, off, rs), ctx); | ||
1355 | break; | ||
1356 | } | ||
1357 | |||
1358 | emit_imm(RV_REG_T1, off, ctx); | ||
1359 | emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx); | ||
1360 | emit(rv_sd(RV_REG_T1, 0, rs), ctx); | ||
1361 | break; | ||
1362 | /* STX XADD: lock *(u32 *)(dst + off) += src */ | ||
1363 | case BPF_STX | BPF_XADD | BPF_W: | ||
1364 | /* STX XADD: lock *(u64 *)(dst + off) += src */ | ||
1365 | case BPF_STX | BPF_XADD | BPF_DW: | ||
1366 | if (off) { | ||
1367 | if (is_12b_int(off)) { | ||
1368 | emit(rv_addi(RV_REG_T1, rd, off), ctx); | ||
1369 | } else { | ||
1370 | emit_imm(RV_REG_T1, off, ctx); | ||
1371 | emit(rv_add(RV_REG_T1, RV_REG_T1, rd), ctx); | ||
1372 | } | ||
1373 | |||
1374 | rd = RV_REG_T1; | ||
1375 | } | ||
1376 | |||
1377 | emit(BPF_SIZE(code) == BPF_W ? | ||
1378 | rv_amoadd_w(RV_REG_ZERO, rs, rd, 0, 0) : | ||
1379 | rv_amoadd_d(RV_REG_ZERO, rs, rd, 0, 0), ctx); | ||
1380 | break; | ||
1381 | default: | ||
1382 | pr_err("bpf-jit: unknown opcode %02x\n", code); | ||
1383 | return -EINVAL; | ||
1384 | } | ||
1385 | |||
1386 | return 0; | ||
1387 | } | ||
1388 | |||
1389 | static void build_prologue(struct rv_jit_context *ctx) | ||
1390 | { | ||
1391 | int stack_adjust = 0, store_offset, bpf_stack_adjust; | ||
1392 | |||
1393 | if (seen_reg(RV_REG_RA, ctx)) | ||
1394 | stack_adjust += 8; | ||
1395 | stack_adjust += 8; /* RV_REG_FP */ | ||
1396 | if (seen_reg(RV_REG_S1, ctx)) | ||
1397 | stack_adjust += 8; | ||
1398 | if (seen_reg(RV_REG_S2, ctx)) | ||
1399 | stack_adjust += 8; | ||
1400 | if (seen_reg(RV_REG_S3, ctx)) | ||
1401 | stack_adjust += 8; | ||
1402 | if (seen_reg(RV_REG_S4, ctx)) | ||
1403 | stack_adjust += 8; | ||
1404 | if (seen_reg(RV_REG_S5, ctx)) | ||
1405 | stack_adjust += 8; | ||
1406 | if (seen_reg(RV_REG_S6, ctx)) | ||
1407 | stack_adjust += 8; | ||
1408 | |||
1409 | stack_adjust = round_up(stack_adjust, 16); | ||
1410 | bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16); | ||
1411 | stack_adjust += bpf_stack_adjust; | ||
1412 | |||
1413 | store_offset = stack_adjust - 8; | ||
1414 | |||
1415 | /* The first instruction always sets the tail-call counter | ||
1416 | * (TCC) register. It is skipped when entering via a tail call. | ||
1417 | */ | ||
1418 | emit(rv_addi(RV_REG_TCC, RV_REG_ZERO, MAX_TAIL_CALL_CNT), ctx); | ||
1419 | |||
1420 | emit(rv_addi(RV_REG_SP, RV_REG_SP, -stack_adjust), ctx); | ||
1421 | |||
1422 | if (seen_reg(RV_REG_RA, ctx)) { | ||
1423 | emit(rv_sd(RV_REG_SP, store_offset, RV_REG_RA), ctx); | ||
1424 | store_offset -= 8; | ||
1425 | } | ||
1426 | emit(rv_sd(RV_REG_SP, store_offset, RV_REG_FP), ctx); | ||
1427 | store_offset -= 8; | ||
1428 | if (seen_reg(RV_REG_S1, ctx)) { | ||
1429 | emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S1), ctx); | ||
1430 | store_offset -= 8; | ||
1431 | } | ||
1432 | if (seen_reg(RV_REG_S2, ctx)) { | ||
1433 | emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S2), ctx); | ||
1434 | store_offset -= 8; | ||
1435 | } | ||
1436 | if (seen_reg(RV_REG_S3, ctx)) { | ||
1437 | emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S3), ctx); | ||
1438 | store_offset -= 8; | ||
1439 | } | ||
1440 | if (seen_reg(RV_REG_S4, ctx)) { | ||
1441 | emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S4), ctx); | ||
1442 | store_offset -= 8; | ||
1443 | } | ||
1444 | if (seen_reg(RV_REG_S5, ctx)) { | ||
1445 | emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S5), ctx); | ||
1446 | store_offset -= 8; | ||
1447 | } | ||
1448 | if (seen_reg(RV_REG_S6, ctx)) { | ||
1449 | emit(rv_sd(RV_REG_SP, store_offset, RV_REG_S6), ctx); | ||
1450 | store_offset -= 8; | ||
1451 | } | ||
1452 | |||
1453 | emit(rv_addi(RV_REG_FP, RV_REG_SP, stack_adjust), ctx); | ||
1454 | |||
1455 | if (bpf_stack_adjust) | ||
1456 | emit(rv_addi(RV_REG_S5, RV_REG_SP, bpf_stack_adjust), ctx); | ||
1457 | |||
1458 | /* The program contains calls and tail calls, so RV_REG_TCC needs | ||
1459 | * to be saved across calls. | ||
1460 | */ | ||
1461 | if (seen_tail_call(ctx) && seen_call(ctx)) | ||
1462 | emit(rv_addi(RV_REG_TCC_SAVED, RV_REG_TCC, 0), ctx); | ||
1463 | |||
1464 | ctx->stack_size = stack_adjust; | ||
1465 | } | ||
1466 | |||
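Editor's note: the frame size computed in build_prologue() is the callee-saved spill area (8 bytes for each register actually used, plus fp which is always saved) rounded up to 16, plus the program's declared BPF stack rounded up to 16, matching the RISC-V ABI's 16-byte stack alignment. A hedged arithmetic sketch for a hypothetical program that clobbers ra, s1 and s2 and declares 40 bytes of BPF stack:

/* Editor's sketch, not kernel code: the frame size build_prologue()
 * would compute for the hypothetical usage described above.
 */
#include <assert.h>

#define ROUND_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	int spills = 8 /* ra */ + 8 /* fp, always */ + 8 /* s1 */ + 8 /* s2 */;
	int stack_adjust = ROUND_UP(spills, 16);	/* 32 */
	int bpf_stack_adjust = ROUND_UP(40, 16);	/* 48 */

	stack_adjust += bpf_stack_adjust;
	assert(stack_adjust == 80);
	return 0;
}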
1467 | static void build_epilogue(struct rv_jit_context *ctx) | ||
1468 | { | ||
1469 | __build_epilogue(RV_REG_RA, ctx); | ||
1470 | } | ||
1471 | |||
1472 | static int build_body(struct rv_jit_context *ctx, bool extra_pass) | ||
1473 | { | ||
1474 | const struct bpf_prog *prog = ctx->prog; | ||
1475 | int i; | ||
1476 | |||
1477 | for (i = 0; i < prog->len; i++) { | ||
1478 | const struct bpf_insn *insn = &prog->insnsi[i]; | ||
1479 | int ret; | ||
1480 | |||
1481 | ret = emit_insn(insn, ctx, extra_pass); | ||
1482 | if (ret > 0) { | ||
1483 | i++; | ||
1484 | if (ctx->insns == NULL) | ||
1485 | ctx->offset[i] = ctx->ninsns; | ||
1486 | continue; | ||
1487 | } | ||
1488 | if (ctx->insns == NULL) | ||
1489 | ctx->offset[i] = ctx->ninsns; | ||
1490 | if (ret) | ||
1491 | return ret; | ||
1492 | } | ||
1493 | return 0; | ||
1494 | } | ||
1495 | |||
1496 | static void bpf_fill_ill_insns(void *area, unsigned int size) | ||
1497 | { | ||
1498 | memset(area, 0, size); | ||
1499 | } | ||
1500 | |||
1501 | static void bpf_flush_icache(void *start, void *end) | ||
1502 | { | ||
1503 | flush_icache_range((unsigned long)start, (unsigned long)end); | ||
1504 | } | ||
1505 | |||
1506 | struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog) | ||
1507 | { | ||
1508 | bool tmp_blinded = false, extra_pass = false; | ||
1509 | struct bpf_prog *tmp, *orig_prog = prog; | ||
1510 | struct rv_jit_data *jit_data; | ||
1511 | struct rv_jit_context *ctx; | ||
1512 | unsigned int image_size; | ||
1513 | |||
1514 | if (!prog->jit_requested) | ||
1515 | return orig_prog; | ||
1516 | |||
1517 | tmp = bpf_jit_blind_constants(prog); | ||
1518 | if (IS_ERR(tmp)) | ||
1519 | return orig_prog; | ||
1520 | if (tmp != prog) { | ||
1521 | tmp_blinded = true; | ||
1522 | prog = tmp; | ||
1523 | } | ||
1524 | |||
1525 | jit_data = prog->aux->jit_data; | ||
1526 | if (!jit_data) { | ||
1527 | jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL); | ||
1528 | if (!jit_data) { | ||
1529 | prog = orig_prog; | ||
1530 | goto out; | ||
1531 | } | ||
1532 | prog->aux->jit_data = jit_data; | ||
1533 | } | ||
1534 | |||
1535 | ctx = &jit_data->ctx; | ||
1536 | |||
1537 | if (ctx->offset) { | ||
1538 | extra_pass = true; | ||
1539 | image_size = sizeof(u32) * ctx->ninsns; | ||
1540 | goto skip_init_ctx; | ||
1541 | } | ||
1542 | |||
1543 | ctx->prog = prog; | ||
1544 | ctx->offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL); | ||
1545 | if (!ctx->offset) { | ||
1546 | prog = orig_prog; | ||
1547 | goto out_offset; | ||
1548 | } | ||
1549 | |||
1550 | /* First pass generates the ctx->offset, but does not emit an image. */ | ||
1551 | if (build_body(ctx, extra_pass)) { | ||
1552 | prog = orig_prog; | ||
1553 | goto out_offset; | ||
1554 | } | ||
1555 | build_prologue(ctx); | ||
1556 | ctx->epilogue_offset = ctx->ninsns; | ||
1557 | build_epilogue(ctx); | ||
1558 | |||
1559 | /* Allocate image, now that we know the size. */ | ||
1560 | image_size = sizeof(u32) * ctx->ninsns; | ||
1561 | jit_data->header = bpf_jit_binary_alloc(image_size, &jit_data->image, | ||
1562 | sizeof(u32), | ||
1563 | bpf_fill_ill_insns); | ||
1564 | if (!jit_data->header) { | ||
1565 | prog = orig_prog; | ||
1566 | goto out_offset; | ||
1567 | } | ||
1568 | |||
1569 | /* Second, real pass that actually emits the image. */ | ||
1570 | ctx->insns = (u32 *)jit_data->image; | ||
1571 | skip_init_ctx: | ||
1572 | ctx->ninsns = 0; | ||
1573 | |||
1574 | build_prologue(ctx); | ||
1575 | if (build_body(ctx, extra_pass)) { | ||
1576 | bpf_jit_binary_free(jit_data->header); | ||
1577 | prog = orig_prog; | ||
1578 | goto out_offset; | ||
1579 | } | ||
1580 | build_epilogue(ctx); | ||
1581 | |||
1582 | if (bpf_jit_enable > 1) | ||
1583 | bpf_jit_dump(prog->len, image_size, 2, ctx->insns); | ||
1584 | |||
1585 | prog->bpf_func = (void *)ctx->insns; | ||
1586 | prog->jited = 1; | ||
1587 | prog->jited_len = image_size; | ||
1588 | |||
1589 | bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns); | ||
1590 | |||
1591 | if (!prog->is_func || extra_pass) { | ||
1592 | out_offset: | ||
1593 | kfree(ctx->offset); | ||
1594 | kfree(jit_data); | ||
1595 | prog->aux->jit_data = NULL; | ||
1596 | } | ||
1597 | out: | ||
1598 | if (tmp_blinded) | ||
1599 | bpf_jit_prog_release_other(prog, prog == orig_prog ? | ||
1600 | tmp : orig_prog); | ||
1601 | return prog; | ||
1602 | } | ||
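The control flow above, restated compactly (a summary of the code, not new behaviour):

    /*
     * pass 1: ctx->insns == NULL, so build_prologue/build_body/build_epilogue
     *         only count emitted words (ctx->ninsns) and fill ctx->offset[]
     *         (BPF insn index -> RISC-V insn index) used for branch targets.
     * alloc : image_size = sizeof(u32) * ctx->ninsns; bpf_jit_binary_alloc().
     * pass 2: ctx->insns points into the image, the same walk now writes the
     *         actual instructions, then the icache is flushed.
     * reuse : if the function is entered again with ctx->offset already set
     *         (extra_pass), only the emitting pass is redone into the saved
     *         image, and jit_data is freed once the program is finalized.
     */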
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index ce9defdff62a..51dd0267d014 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c | |||
@@ -1154,7 +1154,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i | |||
1154 | mask = 0x7000; /* jnz */ | 1154 | mask = 0x7000; /* jnz */ |
1155 | if (BPF_CLASS(insn->code) == BPF_JMP32) { | 1155 | if (BPF_CLASS(insn->code) == BPF_JMP32) { |
1156 | /* llilf %w1,imm (load zero extend imm) */ | 1156 | /* llilf %w1,imm (load zero extend imm) */ |
1157 | EMIT6_IMM(0xc0010000, REG_W1, imm); | 1157 | EMIT6_IMM(0xc00f0000, REG_W1, imm); |
1158 | /* nr %w1,%dst */ | 1158 | /* nr %w1,%dst */ |
1159 | EMIT2(0x1400, REG_W1, dst_reg); | 1159 | EMIT2(0x1400, REG_W1, dst_reg); |
1160 | } else { | 1160 | } else { |
@@ -1216,6 +1216,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i | |||
1216 | REG_W1, dst_reg, src_reg); | 1216 | REG_W1, dst_reg, src_reg); |
1217 | goto branch_oc; | 1217 | goto branch_oc; |
1218 | branch_ks: | 1218 | branch_ks: |
1219 | is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; | ||
1219 | /* lgfi %w1,imm (load sign extend imm) */ | 1220 | /* lgfi %w1,imm (load sign extend imm) */ |
1220 | EMIT6_IMM(0xc0010000, REG_W1, imm); | 1221 | EMIT6_IMM(0xc0010000, REG_W1, imm); |
1221 | /* crj or cgrj %dst,%w1,mask,off */ | 1222 | /* crj or cgrj %dst,%w1,mask,off */ |
@@ -1223,6 +1224,7 @@ branch_ks: | |||
1223 | dst_reg, REG_W1, i, off, mask); | 1224 | dst_reg, REG_W1, i, off, mask); |
1224 | break; | 1225 | break; |
1225 | branch_ku: | 1226 | branch_ku: |
1227 | is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; | ||
1226 | /* lgfi %w1,imm (load sign extend imm) */ | 1228 | /* lgfi %w1,imm (load sign extend imm) */ |
1227 | EMIT6_IMM(0xc0010000, REG_W1, imm); | 1229 | EMIT6_IMM(0xc0010000, REG_W1, imm); |
1228 | /* clrj or clgrj %dst,%w1,mask,off */ | 1230 | /* clrj or clgrj %dst,%w1,mask,off */ |
@@ -1230,11 +1232,13 @@ branch_ku: | |||
1230 | dst_reg, REG_W1, i, off, mask); | 1232 | dst_reg, REG_W1, i, off, mask); |
1231 | break; | 1233 | break; |
1232 | branch_xs: | 1234 | branch_xs: |
1235 | is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; | ||
1233 | /* crj or cgrj %dst,%src,mask,off */ | 1236 | /* crj or cgrj %dst,%src,mask,off */ |
1234 | EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064), | 1237 | EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0076 : 0x0064), |
1235 | dst_reg, src_reg, i, off, mask); | 1238 | dst_reg, src_reg, i, off, mask); |
1236 | break; | 1239 | break; |
1237 | branch_xu: | 1240 | branch_xu: |
1241 | is_jmp32 = BPF_CLASS(insn->code) == BPF_JMP32; | ||
1238 | /* clrj or clgrj %dst,%src,mask,off */ | 1242 | /* clrj or clgrj %dst,%src,mask,off */ |
1239 | EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065), | 1243 | EMIT6_PCREL(0xec000000, (is_jmp32 ? 0x0077 : 0x0065), |
1240 | dst_reg, src_reg, i, off, mask); | 1244 | dst_reg, src_reg, i, off, mask); |
diff --git a/net/core/dev.c b/net/core/dev.c index 8c6d5cf8a308..ecbe419e05ab 100644 --- a/net/core/dev.c +++ b/net/core/dev.c | |||
@@ -8033,11 +8033,13 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, | |||
8033 | enum bpf_netdev_command query; | 8033 | enum bpf_netdev_command query; |
8034 | struct bpf_prog *prog = NULL; | 8034 | struct bpf_prog *prog = NULL; |
8035 | bpf_op_t bpf_op, bpf_chk; | 8035 | bpf_op_t bpf_op, bpf_chk; |
8036 | bool offload; | ||
8036 | int err; | 8037 | int err; |
8037 | 8038 | ||
8038 | ASSERT_RTNL(); | 8039 | ASSERT_RTNL(); |
8039 | 8040 | ||
8040 | query = flags & XDP_FLAGS_HW_MODE ? XDP_QUERY_PROG_HW : XDP_QUERY_PROG; | 8041 | offload = flags & XDP_FLAGS_HW_MODE; |
8042 | query = offload ? XDP_QUERY_PROG_HW : XDP_QUERY_PROG; | ||
8041 | 8043 | ||
8042 | bpf_op = bpf_chk = ops->ndo_bpf; | 8044 | bpf_op = bpf_chk = ops->ndo_bpf; |
8043 | if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE))) { | 8045 | if (!bpf_op && (flags & (XDP_FLAGS_DRV_MODE | XDP_FLAGS_HW_MODE))) { |
@@ -8050,8 +8052,7 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, | |||
8050 | bpf_chk = generic_xdp_install; | 8052 | bpf_chk = generic_xdp_install; |
8051 | 8053 | ||
8052 | if (fd >= 0) { | 8054 | if (fd >= 0) { |
8053 | if (__dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG) || | 8055 | if (!offload && __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG)) { |
8054 | __dev_xdp_query(dev, bpf_chk, XDP_QUERY_PROG_HW)) { | ||
8055 | NL_SET_ERR_MSG(extack, "native and generic XDP can't be active at the same time"); | 8056 | NL_SET_ERR_MSG(extack, "native and generic XDP can't be active at the same time"); |
8056 | return -EEXIST; | 8057 | return -EEXIST; |
8057 | } | 8058 | } |
@@ -8066,8 +8067,7 @@ int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, | |||
8066 | if (IS_ERR(prog)) | 8067 | if (IS_ERR(prog)) |
8067 | return PTR_ERR(prog); | 8068 | return PTR_ERR(prog); |
8068 | 8069 | ||
8069 | if (!(flags & XDP_FLAGS_HW_MODE) && | 8070 | if (!offload && bpf_prog_is_dev_bound(prog->aux)) { |
8070 | bpf_prog_is_dev_bound(prog->aux)) { | ||
8071 | NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported"); | 8071 | NL_SET_ERR_MSG(extack, "using device-bound program without HW_MODE flag is not supported"); |
8072 | bpf_prog_put(prog); | 8072 | bpf_prog_put(prog); |
8073 | return -EINVAL; | 8073 | return -EINVAL; |
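With the new offload flag, the two rejections above are skipped for hardware-offloaded programs, so an offloaded program may coexist with a generic-XDP one. A hedged userspace sketch of requesting HW offload (ifindex and prog_fd are placeholders; bpf_set_link_xdp_fd() is assumed to be the libbpf attach helper of this period, and the program itself must have been loaded as device-bound):

    #include <linux/if_link.h>	/* XDP_FLAGS_HW_MODE */
    #include <bpf/libbpf.h>

    /* Sketch: attach an already-loaded, device-bound program in HW mode. */
    static int attach_offloaded(int ifindex, int prog_fd)
    {
    	return bpf_set_link_xdp_fd(ifindex, prog_fd, XDP_FLAGS_HW_MODE);
    }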
diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst index d43fce568ef7..9bb9ace54ba8 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst | |||
@@ -17,8 +17,8 @@ SYNOPSIS | |||
17 | *COMMANDS* := | 17 | *COMMANDS* := |
18 | { **show** | **list** | **tree** | **attach** | **detach** | **help** } | 18 | { **show** | **list** | **tree** | **attach** | **detach** | **help** } |
19 | 19 | ||
20 | MAP COMMANDS | 20 | CGROUP COMMANDS |
21 | ============= | 21 | =============== |
22 | 22 | ||
23 | | **bpftool** **cgroup { show | list }** *CGROUP* | 23 | | **bpftool** **cgroup { show | list }** *CGROUP* |
24 | | **bpftool** **cgroup tree** [*CGROUP_ROOT*] | 24 | | **bpftool** **cgroup tree** [*CGROUP_ROOT*] |
diff --git a/tools/bpf/bpftool/Documentation/bpftool-feature.rst b/tools/bpf/bpftool/Documentation/bpftool-feature.rst index 8d489a26e3c9..82de03dd8f52 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-feature.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-feature.rst | |||
@@ -16,8 +16,8 @@ SYNOPSIS | |||
16 | 16 | ||
17 | *COMMANDS* := { **probe** | **help** } | 17 | *COMMANDS* := { **probe** | **help** } |
18 | 18 | ||
19 | MAP COMMANDS | 19 | FEATURE COMMANDS |
20 | ============= | 20 | ================ |
21 | 21 | ||
22 | | **bpftool** **feature probe** [*COMPONENT*] [**macros** [**prefix** *PREFIX*]] | 22 | | **bpftool** **feature probe** [*COMPONENT*] [**macros** [**prefix** *PREFIX*]] |
23 | | **bpftool** **feature help** | 23 | | **bpftool** **feature help** |
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst index 13b56102f528..7e59495cb028 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst | |||
@@ -18,7 +18,7 @@ SYNOPSIS | |||
18 | { **show** | **list** | **dump xlated** | **dump jited** | **pin** | **load** | 18 | { **show** | **list** | **dump xlated** | **dump jited** | **pin** | **load** |
19 | | **loadall** | **help** } | 19 | | **loadall** | **help** } |
20 | 20 | ||
21 | MAP COMMANDS | 21 | PROG COMMANDS |
22 | ============= | 22 | ============= |
23 | 23 | ||
24 | | **bpftool** **prog { show | list }** [*PROG*] | 24 | | **bpftool** **prog { show | list }** [*PROG*] |
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index d682d3b8f7b9..ab6528c935a1 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c | |||
@@ -1,6 +1,7 @@ | |||
1 | // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) | 1 | // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) |
2 | /* Copyright (c) 2018 Facebook */ | 2 | /* Copyright (c) 2018 Facebook */ |
3 | 3 | ||
4 | #include <stdio.h> | ||
4 | #include <stdlib.h> | 5 | #include <stdlib.h> |
5 | #include <string.h> | 6 | #include <string.h> |
6 | #include <unistd.h> | 7 | #include <unistd.h> |
@@ -9,8 +10,9 @@ | |||
9 | #include <linux/btf.h> | 10 | #include <linux/btf.h> |
10 | #include "btf.h" | 11 | #include "btf.h" |
11 | #include "bpf.h" | 12 | #include "bpf.h" |
13 | #include "libbpf.h" | ||
14 | #include "libbpf_util.h" | ||
12 | 15 | ||
13 | #define elog(fmt, ...) { if (err_log) err_log(fmt, ##__VA_ARGS__); } | ||
14 | #define max(a, b) ((a) > (b) ? (a) : (b)) | 16 | #define max(a, b) ((a) > (b) ? (a) : (b)) |
15 | #define min(a, b) ((a) < (b) ? (a) : (b)) | 17 | #define min(a, b) ((a) < (b) ? (a) : (b)) |
16 | 18 | ||
@@ -107,54 +109,54 @@ static int btf_add_type(struct btf *btf, struct btf_type *t) | |||
107 | return 0; | 109 | return 0; |
108 | } | 110 | } |
109 | 111 | ||
110 | static int btf_parse_hdr(struct btf *btf, btf_print_fn_t err_log) | 112 | static int btf_parse_hdr(struct btf *btf) |
111 | { | 113 | { |
112 | const struct btf_header *hdr = btf->hdr; | 114 | const struct btf_header *hdr = btf->hdr; |
113 | __u32 meta_left; | 115 | __u32 meta_left; |
114 | 116 | ||
115 | if (btf->data_size < sizeof(struct btf_header)) { | 117 | if (btf->data_size < sizeof(struct btf_header)) { |
116 | elog("BTF header not found\n"); | 118 | pr_debug("BTF header not found\n"); |
117 | return -EINVAL; | 119 | return -EINVAL; |
118 | } | 120 | } |
119 | 121 | ||
120 | if (hdr->magic != BTF_MAGIC) { | 122 | if (hdr->magic != BTF_MAGIC) { |
121 | elog("Invalid BTF magic:%x\n", hdr->magic); | 123 | pr_debug("Invalid BTF magic:%x\n", hdr->magic); |
122 | return -EINVAL; | 124 | return -EINVAL; |
123 | } | 125 | } |
124 | 126 | ||
125 | if (hdr->version != BTF_VERSION) { | 127 | if (hdr->version != BTF_VERSION) { |
126 | elog("Unsupported BTF version:%u\n", hdr->version); | 128 | pr_debug("Unsupported BTF version:%u\n", hdr->version); |
127 | return -ENOTSUP; | 129 | return -ENOTSUP; |
128 | } | 130 | } |
129 | 131 | ||
130 | if (hdr->flags) { | 132 | if (hdr->flags) { |
131 | elog("Unsupported BTF flags:%x\n", hdr->flags); | 133 | pr_debug("Unsupported BTF flags:%x\n", hdr->flags); |
132 | return -ENOTSUP; | 134 | return -ENOTSUP; |
133 | } | 135 | } |
134 | 136 | ||
135 | meta_left = btf->data_size - sizeof(*hdr); | 137 | meta_left = btf->data_size - sizeof(*hdr); |
136 | if (!meta_left) { | 138 | if (!meta_left) { |
137 | elog("BTF has no data\n"); | 139 | pr_debug("BTF has no data\n"); |
138 | return -EINVAL; | 140 | return -EINVAL; |
139 | } | 141 | } |
140 | 142 | ||
141 | if (meta_left < hdr->type_off) { | 143 | if (meta_left < hdr->type_off) { |
142 | elog("Invalid BTF type section offset:%u\n", hdr->type_off); | 144 | pr_debug("Invalid BTF type section offset:%u\n", hdr->type_off); |
143 | return -EINVAL; | 145 | return -EINVAL; |
144 | } | 146 | } |
145 | 147 | ||
146 | if (meta_left < hdr->str_off) { | 148 | if (meta_left < hdr->str_off) { |
147 | elog("Invalid BTF string section offset:%u\n", hdr->str_off); | 149 | pr_debug("Invalid BTF string section offset:%u\n", hdr->str_off); |
148 | return -EINVAL; | 150 | return -EINVAL; |
149 | } | 151 | } |
150 | 152 | ||
151 | if (hdr->type_off >= hdr->str_off) { | 153 | if (hdr->type_off >= hdr->str_off) { |
152 | elog("BTF type section offset >= string section offset. No type?\n"); | 154 | pr_debug("BTF type section offset >= string section offset. No type?\n"); |
153 | return -EINVAL; | 155 | return -EINVAL; |
154 | } | 156 | } |
155 | 157 | ||
156 | if (hdr->type_off & 0x02) { | 158 | if (hdr->type_off & 0x02) { |
157 | elog("BTF type section is not aligned to 4 bytes\n"); | 159 | pr_debug("BTF type section is not aligned to 4 bytes\n"); |
158 | return -EINVAL; | 160 | return -EINVAL; |
159 | } | 161 | } |
160 | 162 | ||
@@ -163,7 +165,7 @@ static int btf_parse_hdr(struct btf *btf, btf_print_fn_t err_log) | |||
163 | return 0; | 165 | return 0; |
164 | } | 166 | } |
165 | 167 | ||
166 | static int btf_parse_str_sec(struct btf *btf, btf_print_fn_t err_log) | 168 | static int btf_parse_str_sec(struct btf *btf) |
167 | { | 169 | { |
168 | const struct btf_header *hdr = btf->hdr; | 170 | const struct btf_header *hdr = btf->hdr; |
169 | const char *start = btf->nohdr_data + hdr->str_off; | 171 | const char *start = btf->nohdr_data + hdr->str_off; |
@@ -171,7 +173,7 @@ static int btf_parse_str_sec(struct btf *btf, btf_print_fn_t err_log) | |||
171 | 173 | ||
172 | if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET || | 174 | if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET || |
173 | start[0] || end[-1]) { | 175 | start[0] || end[-1]) { |
174 | elog("Invalid BTF string section\n"); | 176 | pr_debug("Invalid BTF string section\n"); |
175 | return -EINVAL; | 177 | return -EINVAL; |
176 | } | 178 | } |
177 | 179 | ||
@@ -180,7 +182,38 @@ static int btf_parse_str_sec(struct btf *btf, btf_print_fn_t err_log) | |||
180 | return 0; | 182 | return 0; |
181 | } | 183 | } |
182 | 184 | ||
183 | static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log) | 185 | static int btf_type_size(struct btf_type *t) |
186 | { | ||
187 | int base_size = sizeof(struct btf_type); | ||
188 | __u16 vlen = BTF_INFO_VLEN(t->info); | ||
189 | |||
190 | switch (BTF_INFO_KIND(t->info)) { | ||
191 | case BTF_KIND_FWD: | ||
192 | case BTF_KIND_CONST: | ||
193 | case BTF_KIND_VOLATILE: | ||
194 | case BTF_KIND_RESTRICT: | ||
195 | case BTF_KIND_PTR: | ||
196 | case BTF_KIND_TYPEDEF: | ||
197 | case BTF_KIND_FUNC: | ||
198 | return base_size; | ||
199 | case BTF_KIND_INT: | ||
200 | return base_size + sizeof(__u32); | ||
201 | case BTF_KIND_ENUM: | ||
202 | return base_size + vlen * sizeof(struct btf_enum); | ||
203 | case BTF_KIND_ARRAY: | ||
204 | return base_size + sizeof(struct btf_array); | ||
205 | case BTF_KIND_STRUCT: | ||
206 | case BTF_KIND_UNION: | ||
207 | return base_size + vlen * sizeof(struct btf_member); | ||
208 | case BTF_KIND_FUNC_PROTO: | ||
209 | return base_size + vlen * sizeof(struct btf_param); | ||
210 | default: | ||
211 | pr_debug("Unsupported BTF_KIND:%u\n", BTF_INFO_KIND(t->info)); | ||
212 | return -EINVAL; | ||
213 | } | ||
214 | } | ||
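A worked example of what btf_type_size() returns (struct sizes are those of the UAPI definitions; the concrete type is hypothetical):

    /* Hypothetical BTF_KIND_STRUCT with vlen == 3:
     *
     *   btf_type_size(t) == sizeof(struct btf_type)        (12 bytes)
     *                     + 3 * sizeof(struct btf_member)  (3 * 12 bytes)
     *                     == 48 bytes
     *
     * i.e. the full record length, which is exactly how far
     * btf_parse_type_sec() below advances next_type per record.
     */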
215 | |||
216 | static int btf_parse_type_sec(struct btf *btf) | ||
184 | { | 217 | { |
185 | struct btf_header *hdr = btf->hdr; | 218 | struct btf_header *hdr = btf->hdr; |
186 | void *nohdr_data = btf->nohdr_data; | 219 | void *nohdr_data = btf->nohdr_data; |
@@ -189,41 +222,13 @@ static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log) | |||
189 | 222 | ||
190 | while (next_type < end_type) { | 223 | while (next_type < end_type) { |
191 | struct btf_type *t = next_type; | 224 | struct btf_type *t = next_type; |
192 | __u16 vlen = BTF_INFO_VLEN(t->info); | 225 | int type_size; |
193 | int err; | 226 | int err; |
194 | 227 | ||
195 | next_type += sizeof(*t); | 228 | type_size = btf_type_size(t); |
196 | switch (BTF_INFO_KIND(t->info)) { | 229 | if (type_size < 0) |
197 | case BTF_KIND_INT: | 230 | return type_size; |
198 | next_type += sizeof(int); | 231 | next_type += type_size; |
199 | break; | ||
200 | case BTF_KIND_ARRAY: | ||
201 | next_type += sizeof(struct btf_array); | ||
202 | break; | ||
203 | case BTF_KIND_STRUCT: | ||
204 | case BTF_KIND_UNION: | ||
205 | next_type += vlen * sizeof(struct btf_member); | ||
206 | break; | ||
207 | case BTF_KIND_ENUM: | ||
208 | next_type += vlen * sizeof(struct btf_enum); | ||
209 | break; | ||
210 | case BTF_KIND_FUNC_PROTO: | ||
211 | next_type += vlen * sizeof(struct btf_param); | ||
212 | break; | ||
213 | case BTF_KIND_FUNC: | ||
214 | case BTF_KIND_TYPEDEF: | ||
215 | case BTF_KIND_PTR: | ||
216 | case BTF_KIND_FWD: | ||
217 | case BTF_KIND_VOLATILE: | ||
218 | case BTF_KIND_CONST: | ||
219 | case BTF_KIND_RESTRICT: | ||
220 | break; | ||
221 | default: | ||
222 | elog("Unsupported BTF_KIND:%u\n", | ||
223 | BTF_INFO_KIND(t->info)); | ||
224 | return -EINVAL; | ||
225 | } | ||
226 | |||
227 | err = btf_add_type(btf, t); | 232 | err = btf_add_type(btf, t); |
228 | if (err) | 233 | if (err) |
229 | return err; | 234 | return err; |
@@ -232,6 +237,11 @@ static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log) | |||
232 | return 0; | 237 | return 0; |
233 | } | 238 | } |
234 | 239 | ||
240 | __u32 btf__get_nr_types(const struct btf *btf) | ||
241 | { | ||
242 | return btf->nr_types; | ||
243 | } | ||
244 | |||
235 | const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id) | 245 | const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id) |
236 | { | 246 | { |
237 | if (type_id > btf->nr_types) | 247 | if (type_id > btf->nr_types) |
@@ -250,21 +260,6 @@ static bool btf_type_is_void_or_null(const struct btf_type *t) | |||
250 | return !t || btf_type_is_void(t); | 260 | return !t || btf_type_is_void(t); |
251 | } | 261 | } |
252 | 262 | ||
253 | static __s64 btf_type_size(const struct btf_type *t) | ||
254 | { | ||
255 | switch (BTF_INFO_KIND(t->info)) { | ||
256 | case BTF_KIND_INT: | ||
257 | case BTF_KIND_STRUCT: | ||
258 | case BTF_KIND_UNION: | ||
259 | case BTF_KIND_ENUM: | ||
260 | return t->size; | ||
261 | case BTF_KIND_PTR: | ||
262 | return sizeof(void *); | ||
263 | default: | ||
264 | return -EINVAL; | ||
265 | } | ||
266 | } | ||
267 | |||
268 | #define MAX_RESOLVE_DEPTH 32 | 263 | #define MAX_RESOLVE_DEPTH 32 |
269 | 264 | ||
270 | __s64 btf__resolve_size(const struct btf *btf, __u32 type_id) | 265 | __s64 btf__resolve_size(const struct btf *btf, __u32 type_id) |
@@ -278,11 +273,16 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id) | |||
278 | t = btf__type_by_id(btf, type_id); | 273 | t = btf__type_by_id(btf, type_id); |
279 | for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); | 274 | for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); |
280 | i++) { | 275 | i++) { |
281 | size = btf_type_size(t); | ||
282 | if (size >= 0) | ||
283 | break; | ||
284 | |||
285 | switch (BTF_INFO_KIND(t->info)) { | 276 | switch (BTF_INFO_KIND(t->info)) { |
277 | case BTF_KIND_INT: | ||
278 | case BTF_KIND_STRUCT: | ||
279 | case BTF_KIND_UNION: | ||
280 | case BTF_KIND_ENUM: | ||
281 | size = t->size; | ||
282 | goto done; | ||
283 | case BTF_KIND_PTR: | ||
284 | size = sizeof(void *); | ||
285 | goto done; | ||
286 | case BTF_KIND_TYPEDEF: | 286 | case BTF_KIND_TYPEDEF: |
287 | case BTF_KIND_VOLATILE: | 287 | case BTF_KIND_VOLATILE: |
288 | case BTF_KIND_CONST: | 288 | case BTF_KIND_CONST: |
@@ -306,6 +306,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id) | |||
306 | if (size < 0) | 306 | if (size < 0) |
307 | return -EINVAL; | 307 | return -EINVAL; |
308 | 308 | ||
309 | done: | ||
309 | if (nelems && size > UINT32_MAX / nelems) | 310 | if (nelems && size > UINT32_MAX / nelems) |
310 | return -E2BIG; | 311 | return -E2BIG; |
311 | 312 | ||
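The hunks above fold the old static btf_type_size() helper's size lookup directly into the resolution loop (its name is reused by the record-length helper added earlier in this file). A short caller sketch for the public entry point (btf and type_id are assumed valid):

    __s64 sz = btf__resolve_size(btf, type_id);
    if (sz < 0)
    	fprintf(stderr, "cannot resolve size of type %u: %lld\n",
    		type_id, (long long)sz);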
@@ -363,7 +364,7 @@ void btf__free(struct btf *btf) | |||
363 | free(btf); | 364 | free(btf); |
364 | } | 365 | } |
365 | 366 | ||
366 | struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log) | 367 | struct btf *btf__new(__u8 *data, __u32 size) |
367 | { | 368 | { |
368 | __u32 log_buf_size = 0; | 369 | __u32 log_buf_size = 0; |
369 | char *log_buf = NULL; | 370 | char *log_buf = NULL; |
@@ -376,16 +377,15 @@ struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log) | |||
376 | 377 | ||
377 | btf->fd = -1; | 378 | btf->fd = -1; |
378 | 379 | ||
379 | if (err_log) { | 380 | log_buf = malloc(BPF_LOG_BUF_SIZE); |
380 | log_buf = malloc(BPF_LOG_BUF_SIZE); | 381 | if (!log_buf) { |
381 | if (!log_buf) { | 382 | err = -ENOMEM; |
382 | err = -ENOMEM; | 383 | goto done; |
383 | goto done; | ||
384 | } | ||
385 | *log_buf = 0; | ||
386 | log_buf_size = BPF_LOG_BUF_SIZE; | ||
387 | } | 384 | } |
388 | 385 | ||
386 | *log_buf = 0; | ||
387 | log_buf_size = BPF_LOG_BUF_SIZE; | ||
388 | |||
389 | btf->data = malloc(size); | 389 | btf->data = malloc(size); |
390 | if (!btf->data) { | 390 | if (!btf->data) { |
391 | err = -ENOMEM; | 391 | err = -ENOMEM; |
@@ -400,21 +400,21 @@ struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log) | |||
400 | 400 | ||
401 | if (btf->fd == -1) { | 401 | if (btf->fd == -1) { |
402 | err = -errno; | 402 | err = -errno; |
403 | elog("Error loading BTF: %s(%d)\n", strerror(errno), errno); | 403 | pr_warning("Error loading BTF: %s(%d)\n", strerror(errno), errno); |
404 | if (log_buf && *log_buf) | 404 | if (log_buf && *log_buf) |
405 | elog("%s\n", log_buf); | 405 | pr_warning("%s\n", log_buf); |
406 | goto done; | 406 | goto done; |
407 | } | 407 | } |
408 | 408 | ||
409 | err = btf_parse_hdr(btf, err_log); | 409 | err = btf_parse_hdr(btf); |
410 | if (err) | 410 | if (err) |
411 | goto done; | 411 | goto done; |
412 | 412 | ||
413 | err = btf_parse_str_sec(btf, err_log); | 413 | err = btf_parse_str_sec(btf); |
414 | if (err) | 414 | if (err) |
415 | goto done; | 415 | goto done; |
416 | 416 | ||
417 | err = btf_parse_type_sec(btf, err_log); | 417 | err = btf_parse_type_sec(btf); |
418 | 418 | ||
419 | done: | 419 | done: |
420 | free(log_buf); | 420 | free(log_buf); |
@@ -432,6 +432,13 @@ int btf__fd(const struct btf *btf) | |||
432 | return btf->fd; | 432 | return btf->fd; |
433 | } | 433 | } |
434 | 434 | ||
435 | void btf__get_strings(const struct btf *btf, const char **strings, | ||
436 | __u32 *str_len) | ||
437 | { | ||
438 | *strings = btf->strings; | ||
439 | *str_len = btf->hdr->str_len; | ||
440 | } | ||
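A minimal caller sketch for the new accessor above (btf is assumed valid):

    const char *strs;
    __u32 strs_len;

    btf__get_strings(btf, &strs, &strs_len);
    /* strs[0] is always '\0' (enforced by btf_parse_str_sec() above);
     * individual strings are NUL-terminated and addressed by byte offset,
     * as btf__name_by_offset() below does.
     */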
441 | |||
435 | const char *btf__name_by_offset(const struct btf *btf, __u32 offset) | 442 | const char *btf__name_by_offset(const struct btf *btf, __u32 offset) |
436 | { | 443 | { |
437 | if (offset < btf->hdr->str_len) | 444 | if (offset < btf->hdr->str_len) |
@@ -491,7 +498,7 @@ int btf__get_from_id(__u32 id, struct btf **btf) | |||
491 | goto exit_free; | 498 | goto exit_free; |
492 | } | 499 | } |
493 | 500 | ||
494 | *btf = btf__new((__u8 *)(long)btf_info.btf, btf_info.btf_size, NULL); | 501 | *btf = btf__new((__u8 *)(long)btf_info.btf, btf_info.btf_size); |
495 | if (IS_ERR(*btf)) { | 502 | if (IS_ERR(*btf)) { |
496 | err = PTR_ERR(*btf); | 503 | err = PTR_ERR(*btf); |
497 | *btf = NULL; | 504 | *btf = NULL; |
@@ -504,6 +511,78 @@ exit_free: | |||
504 | return err; | 511 | return err; |
505 | } | 512 | } |
506 | 513 | ||
514 | int btf__get_map_kv_tids(const struct btf *btf, const char *map_name, | ||
515 | __u32 expected_key_size, __u32 expected_value_size, | ||
516 | __u32 *key_type_id, __u32 *value_type_id) | ||
517 | { | ||
518 | const struct btf_type *container_type; | ||
519 | const struct btf_member *key, *value; | ||
520 | const size_t max_name = 256; | ||
521 | char container_name[max_name]; | ||
522 | __s64 key_size, value_size; | ||
523 | __s32 container_id; | ||
524 | |||
525 | if (snprintf(container_name, max_name, "____btf_map_%s", map_name) == | ||
526 | max_name) { | ||
527 | pr_warning("map:%s length of '____btf_map_%s' is too long\n", | ||
528 | map_name, map_name); | ||
529 | return -EINVAL; | ||
530 | } | ||
531 | |||
532 | container_id = btf__find_by_name(btf, container_name); | ||
533 | if (container_id < 0) { | ||
534 | pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n", | ||
535 | map_name, container_name); | ||
536 | return container_id; | ||
537 | } | ||
538 | |||
539 | container_type = btf__type_by_id(btf, container_id); | ||
540 | if (!container_type) { | ||
541 | pr_warning("map:%s cannot find BTF type for container_id:%u\n", | ||
542 | map_name, container_id); | ||
543 | return -EINVAL; | ||
544 | } | ||
545 | |||
546 | if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT || | ||
547 | BTF_INFO_VLEN(container_type->info) < 2) { | ||
548 | pr_warning("map:%s container_name:%s is an invalid container struct\n", | ||
549 | map_name, container_name); | ||
550 | return -EINVAL; | ||
551 | } | ||
552 | |||
553 | key = (struct btf_member *)(container_type + 1); | ||
554 | value = key + 1; | ||
555 | |||
556 | key_size = btf__resolve_size(btf, key->type); | ||
557 | if (key_size < 0) { | ||
558 | pr_warning("map:%s invalid BTF key_type_size\n", map_name); | ||
559 | return key_size; | ||
560 | } | ||
561 | |||
562 | if (expected_key_size != key_size) { | ||
563 | pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n", | ||
564 | map_name, (__u32)key_size, expected_key_size); | ||
565 | return -EINVAL; | ||
566 | } | ||
567 | |||
568 | value_size = btf__resolve_size(btf, value->type); | ||
569 | if (value_size < 0) { | ||
570 | pr_warning("map:%s invalid BTF value_type_size\n", map_name); | ||
571 | return value_size; | ||
572 | } | ||
573 | |||
574 | if (expected_value_size != value_size) { | ||
575 | pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n", | ||
576 | map_name, (__u32)value_size, expected_value_size); | ||
577 | return -EINVAL; | ||
578 | } | ||
579 | |||
580 | *key_type_id = key->type; | ||
581 | *value_type_id = value->type; | ||
582 | |||
583 | return 0; | ||
584 | } | ||
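btf__get_map_kv_tids() relies on a naming convention rather than a dedicated BTF record: for a map named "my_map", BTF must contain a struct called ____btf_map_my_map whose first two members carry the key and value types. A hedged sketch of both sides (struct val and my_map are placeholders; the selftests' BPF_ANNOTATE_KV_PAIR macro emits an equivalent container struct):

    /* BPF program side (sketch): container struct describing my_map's types. */
    struct val { long counter; };

    struct ____btf_map_my_map {
    	__u32      key;
    	struct val value;
    };

    /* Loader side (sketch): recover and size-check the key/value type ids. */
    __u32 key_type_id, value_type_id;
    int err = btf__get_map_kv_tids(btf, "my_map",
    			       sizeof(__u32), sizeof(struct val),
    			       &key_type_id, &value_type_id);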
585 | |||
507 | struct btf_ext_sec_copy_param { | 586 | struct btf_ext_sec_copy_param { |
508 | __u32 off; | 587 | __u32 off; |
509 | __u32 len; | 588 | __u32 len; |
@@ -514,8 +593,7 @@ struct btf_ext_sec_copy_param { | |||
514 | 593 | ||
515 | static int btf_ext_copy_info(struct btf_ext *btf_ext, | 594 | static int btf_ext_copy_info(struct btf_ext *btf_ext, |
516 | __u8 *data, __u32 data_size, | 595 | __u8 *data, __u32 data_size, |
517 | struct btf_ext_sec_copy_param *ext_sec, | 596 | struct btf_ext_sec_copy_param *ext_sec) |
518 | btf_print_fn_t err_log) | ||
519 | { | 597 | { |
520 | const struct btf_ext_header *hdr = (struct btf_ext_header *)data; | 598 | const struct btf_ext_header *hdr = (struct btf_ext_header *)data; |
521 | const struct btf_ext_info_sec *sinfo; | 599 | const struct btf_ext_info_sec *sinfo; |
@@ -529,14 +607,14 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext, | |||
529 | data_size -= hdr->hdr_len; | 607 | data_size -= hdr->hdr_len; |
530 | 608 | ||
531 | if (ext_sec->off & 0x03) { | 609 | if (ext_sec->off & 0x03) { |
532 | elog(".BTF.ext %s section is not aligned to 4 bytes\n", | 610 | pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n", |
533 | ext_sec->desc); | 611 | ext_sec->desc); |
534 | return -EINVAL; | 612 | return -EINVAL; |
535 | } | 613 | } |
536 | 614 | ||
537 | if (data_size < ext_sec->off || | 615 | if (data_size < ext_sec->off || |
538 | ext_sec->len > data_size - ext_sec->off) { | 616 | ext_sec->len > data_size - ext_sec->off) { |
539 | elog("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n", | 617 | pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n", |
540 | ext_sec->desc, ext_sec->off, ext_sec->len); | 618 | ext_sec->desc, ext_sec->off, ext_sec->len); |
541 | return -EINVAL; | 619 | return -EINVAL; |
542 | } | 620 | } |
@@ -546,7 +624,7 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext, | |||
546 | 624 | ||
547 | /* At least a record size */ | 625 | /* At least a record size */ |
548 | if (info_left < sizeof(__u32)) { | 626 | if (info_left < sizeof(__u32)) { |
549 | elog(".BTF.ext %s record size not found\n", ext_sec->desc); | 627 | pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc); |
550 | return -EINVAL; | 628 | return -EINVAL; |
551 | } | 629 | } |
552 | 630 | ||
@@ -554,7 +632,7 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext, | |||
554 | record_size = *(__u32 *)info; | 632 | record_size = *(__u32 *)info; |
555 | if (record_size < ext_sec->min_rec_size || | 633 | if (record_size < ext_sec->min_rec_size || |
556 | record_size & 0x03) { | 634 | record_size & 0x03) { |
557 | elog("%s section in .BTF.ext has invalid record size %u\n", | 635 | pr_debug("%s section in .BTF.ext has invalid record size %u\n", |
558 | ext_sec->desc, record_size); | 636 | ext_sec->desc, record_size); |
559 | return -EINVAL; | 637 | return -EINVAL; |
560 | } | 638 | } |
@@ -564,7 +642,7 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext, | |||
564 | 642 | ||
565 | /* If no records, return failure now so .BTF.ext won't be used. */ | 643 | /* If no records, return failure now so .BTF.ext won't be used. */ |
566 | if (!info_left) { | 644 | if (!info_left) { |
567 | elog("%s section in .BTF.ext has no records", ext_sec->desc); | 645 | pr_debug("%s section in .BTF.ext has no records", ext_sec->desc); |
568 | return -EINVAL; | 646 | return -EINVAL; |
569 | } | 647 | } |
570 | 648 | ||
@@ -574,14 +652,14 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext, | |||
574 | __u32 num_records; | 652 | __u32 num_records; |
575 | 653 | ||
576 | if (info_left < sec_hdrlen) { | 654 | if (info_left < sec_hdrlen) { |
577 | elog("%s section header is not found in .BTF.ext\n", | 655 | pr_debug("%s section header is not found in .BTF.ext\n", |
578 | ext_sec->desc); | 656 | ext_sec->desc); |
579 | return -EINVAL; | 657 | return -EINVAL; |
580 | } | 658 | } |
581 | 659 | ||
582 | num_records = sinfo->num_info; | 660 | num_records = sinfo->num_info; |
583 | if (num_records == 0) { | 661 | if (num_records == 0) { |
584 | elog("%s section has incorrect num_records in .BTF.ext\n", | 662 | pr_debug("%s section has incorrect num_records in .BTF.ext\n", |
585 | ext_sec->desc); | 663 | ext_sec->desc); |
586 | return -EINVAL; | 664 | return -EINVAL; |
587 | } | 665 | } |
@@ -589,7 +667,7 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext, | |||
589 | total_record_size = sec_hdrlen + | 667 | total_record_size = sec_hdrlen + |
590 | (__u64)num_records * record_size; | 668 | (__u64)num_records * record_size; |
591 | if (info_left < total_record_size) { | 669 | if (info_left < total_record_size) { |
592 | elog("%s section has incorrect num_records in .BTF.ext\n", | 670 | pr_debug("%s section has incorrect num_records in .BTF.ext\n", |
593 | ext_sec->desc); | 671 | ext_sec->desc); |
594 | return -EINVAL; | 672 | return -EINVAL; |
595 | } | 673 | } |
@@ -610,8 +688,7 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext, | |||
610 | } | 688 | } |
611 | 689 | ||
612 | static int btf_ext_copy_func_info(struct btf_ext *btf_ext, | 690 | static int btf_ext_copy_func_info(struct btf_ext *btf_ext, |
613 | __u8 *data, __u32 data_size, | 691 | __u8 *data, __u32 data_size) |
614 | btf_print_fn_t err_log) | ||
615 | { | 692 | { |
616 | const struct btf_ext_header *hdr = (struct btf_ext_header *)data; | 693 | const struct btf_ext_header *hdr = (struct btf_ext_header *)data; |
617 | struct btf_ext_sec_copy_param param = { | 694 | struct btf_ext_sec_copy_param param = { |
@@ -622,12 +699,11 @@ static int btf_ext_copy_func_info(struct btf_ext *btf_ext, | |||
622 | .desc = "func_info" | 699 | .desc = "func_info" |
623 | }; | 700 | }; |
624 | 701 | ||
625 | return btf_ext_copy_info(btf_ext, data, data_size, ¶m, err_log); | 702 | return btf_ext_copy_info(btf_ext, data, data_size, ¶m); |
626 | } | 703 | } |
627 | 704 | ||
628 | static int btf_ext_copy_line_info(struct btf_ext *btf_ext, | 705 | static int btf_ext_copy_line_info(struct btf_ext *btf_ext, |
629 | __u8 *data, __u32 data_size, | 706 | __u8 *data, __u32 data_size) |
630 | btf_print_fn_t err_log) | ||
631 | { | 707 | { |
632 | const struct btf_ext_header *hdr = (struct btf_ext_header *)data; | 708 | const struct btf_ext_header *hdr = (struct btf_ext_header *)data; |
633 | struct btf_ext_sec_copy_param param = { | 709 | struct btf_ext_sec_copy_param param = { |
@@ -638,37 +714,36 @@ static int btf_ext_copy_line_info(struct btf_ext *btf_ext, | |||
638 | .desc = "line_info", | 714 | .desc = "line_info", |
639 | }; | 715 | }; |
640 | 716 | ||
641 | return btf_ext_copy_info(btf_ext, data, data_size, ¶m, err_log); | 717 | return btf_ext_copy_info(btf_ext, data, data_size, ¶m); |
642 | } | 718 | } |
643 | 719 | ||
644 | static int btf_ext_parse_hdr(__u8 *data, __u32 data_size, | 720 | static int btf_ext_parse_hdr(__u8 *data, __u32 data_size) |
645 | btf_print_fn_t err_log) | ||
646 | { | 721 | { |
647 | const struct btf_ext_header *hdr = (struct btf_ext_header *)data; | 722 | const struct btf_ext_header *hdr = (struct btf_ext_header *)data; |
648 | 723 | ||
649 | if (data_size < offsetof(struct btf_ext_header, func_info_off) || | 724 | if (data_size < offsetof(struct btf_ext_header, func_info_off) || |
650 | data_size < hdr->hdr_len) { | 725 | data_size < hdr->hdr_len) { |
651 | elog("BTF.ext header not found"); | 726 | pr_debug("BTF.ext header not found"); |
652 | return -EINVAL; | 727 | return -EINVAL; |
653 | } | 728 | } |
654 | 729 | ||
655 | if (hdr->magic != BTF_MAGIC) { | 730 | if (hdr->magic != BTF_MAGIC) { |
656 | elog("Invalid BTF.ext magic:%x\n", hdr->magic); | 731 | pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic); |
657 | return -EINVAL; | 732 | return -EINVAL; |
658 | } | 733 | } |
659 | 734 | ||
660 | if (hdr->version != BTF_VERSION) { | 735 | if (hdr->version != BTF_VERSION) { |
661 | elog("Unsupported BTF.ext version:%u\n", hdr->version); | 736 | pr_debug("Unsupported BTF.ext version:%u\n", hdr->version); |
662 | return -ENOTSUP; | 737 | return -ENOTSUP; |
663 | } | 738 | } |
664 | 739 | ||
665 | if (hdr->flags) { | 740 | if (hdr->flags) { |
666 | elog("Unsupported BTF.ext flags:%x\n", hdr->flags); | 741 | pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags); |
667 | return -ENOTSUP; | 742 | return -ENOTSUP; |
668 | } | 743 | } |
669 | 744 | ||
670 | if (data_size == hdr->hdr_len) { | 745 | if (data_size == hdr->hdr_len) { |
671 | elog("BTF.ext has no data\n"); | 746 | pr_debug("BTF.ext has no data\n"); |
672 | return -EINVAL; | 747 | return -EINVAL; |
673 | } | 748 | } |
674 | 749 | ||
@@ -685,12 +760,12 @@ void btf_ext__free(struct btf_ext *btf_ext) | |||
685 | free(btf_ext); | 760 | free(btf_ext); |
686 | } | 761 | } |
687 | 762 | ||
688 | struct btf_ext *btf_ext__new(__u8 *data, __u32 size, btf_print_fn_t err_log) | 763 | struct btf_ext *btf_ext__new(__u8 *data, __u32 size) |
689 | { | 764 | { |
690 | struct btf_ext *btf_ext; | 765 | struct btf_ext *btf_ext; |
691 | int err; | 766 | int err; |
692 | 767 | ||
693 | err = btf_ext_parse_hdr(data, size, err_log); | 768 | err = btf_ext_parse_hdr(data, size); |
694 | if (err) | 769 | if (err) |
695 | return ERR_PTR(err); | 770 | return ERR_PTR(err); |
696 | 771 | ||
@@ -698,13 +773,13 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size, btf_print_fn_t err_log) | |||
698 | if (!btf_ext) | 773 | if (!btf_ext) |
699 | return ERR_PTR(-ENOMEM); | 774 | return ERR_PTR(-ENOMEM); |
700 | 775 | ||
701 | err = btf_ext_copy_func_info(btf_ext, data, size, err_log); | 776 | err = btf_ext_copy_func_info(btf_ext, data, size); |
702 | if (err) { | 777 | if (err) { |
703 | btf_ext__free(btf_ext); | 778 | btf_ext__free(btf_ext); |
704 | return ERR_PTR(err); | 779 | return ERR_PTR(err); |
705 | } | 780 | } |
706 | 781 | ||
707 | err = btf_ext_copy_line_info(btf_ext, data, size, err_log); | 782 | err = btf_ext_copy_line_info(btf_ext, data, size); |
708 | if (err) { | 783 | if (err) { |
709 | btf_ext__free(btf_ext); | 784 | btf_ext__free(btf_ext); |
710 | return ERR_PTR(err); | 785 | return ERR_PTR(err); |
@@ -786,3 +861,1744 @@ __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext) | |||
786 | { | 861 | { |
787 | return btf_ext->line_info.rec_size; | 862 | return btf_ext->line_info.rec_size; |
788 | } | 863 | } |
864 | |||
865 | struct btf_dedup; | ||
866 | |||
867 | static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext, | ||
868 | const struct btf_dedup_opts *opts); | ||
869 | static void btf_dedup_free(struct btf_dedup *d); | ||
870 | static int btf_dedup_strings(struct btf_dedup *d); | ||
871 | static int btf_dedup_prim_types(struct btf_dedup *d); | ||
872 | static int btf_dedup_struct_types(struct btf_dedup *d); | ||
873 | static int btf_dedup_ref_types(struct btf_dedup *d); | ||
874 | static int btf_dedup_compact_types(struct btf_dedup *d); | ||
875 | static int btf_dedup_remap_types(struct btf_dedup *d); | ||
876 | |||
877 | /* | ||
878 | * Deduplicate BTF types and strings. | ||
879 | * | ||
880 | * The BTF dedup algorithm takes as input a `struct btf` representing the `.BTF` ELF | ||
881 | * section with all BTF type descriptors and string data. It overwrites that | ||
882 | * memory in-place with deduplicated types and strings without any loss of | ||
883 | * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section | ||
884 | * is provided, all the strings referenced from .BTF.ext section are honored | ||
885 | * and updated to point to the right offsets after deduplication. | ||
886 | * | ||
887 | * If the function returns with an error, type/string data might be garbled and should | ||
888 | * be discarded. | ||
889 | * | ||
890 | * A more verbose and detailed description of both the problem btf_dedup is solving | ||
891 | * and the solution can be found at: | ||
892 | * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html | ||
893 | * | ||
894 | * Problem description and justification | ||
895 | * ===================================== | ||
896 | * | ||
897 | * BTF type information is typically emitted either as a result of conversion | ||
898 | * from DWARF to BTF or directly by the compiler. In both cases, each compilation | ||
899 | * unit contains information about a subset of all the types that are used | ||
900 | * in an application. These subsets are frequently overlapping and contain a lot | ||
901 | * of duplicated information when later concatenated together into a single | ||
902 | * binary. This algorithm ensures that each unique type is represented by a single | ||
903 | * BTF type descriptor, greatly reducing the resulting size of BTF data. | ||
904 | * | ||
905 | * Compilation unit isolation and subsequent duplication of data is not the only | ||
906 | * problem. The same type hierarchy (e.g., a struct and all the types that the struct | ||
907 | * references) in different compilation units can be represented in BTF to | ||
908 | * various degrees of completeness (or, rather, incompleteness) due to | ||
909 | * struct/union forward declarations. | ||
910 | * | ||
911 | * Let's take a look at an example that we'll use to better understand the | ||
912 | * problem (and solution). Suppose we have two compilation units, each using | ||
913 | * the same `struct S`, but each of them having incomplete type information about | ||
914 | * the struct's fields: | ||
915 | * | ||
916 | * // CU #1: | ||
917 | * struct S; | ||
918 | * struct A { | ||
919 | * int a; | ||
920 | * struct A* self; | ||
921 | * struct S* parent; | ||
922 | * }; | ||
923 | * struct B; | ||
924 | * struct S { | ||
925 | * struct A* a_ptr; | ||
926 | * struct B* b_ptr; | ||
927 | * }; | ||
928 | * | ||
929 | * // CU #2: | ||
930 | * struct S; | ||
931 | * struct A; | ||
932 | * struct B { | ||
933 | * int b; | ||
934 | * struct B* self; | ||
935 | * struct S* parent; | ||
936 | * }; | ||
937 | * struct S { | ||
938 | * struct A* a_ptr; | ||
939 | * struct B* b_ptr; | ||
940 | * }; | ||
941 | * | ||
942 | * In case of CU #1, BTF data will know only that `struct B` exists (but no | ||
943 | * more), but will know the complete type information about `struct A`. While | ||
944 | * for CU #2, it will know full type information about `struct B`, but will | ||
945 | * only know about the forward declaration of `struct A` (in BTF terms, it will | ||
946 | * have a `BTF_KIND_FWD` type descriptor with name `A`). | ||
947 | * | ||
948 | * This compilation unit isolation means that it's possible that there is no | ||
949 | * single CU with complete type information describing structs `S`, `A`, and | ||
950 | * `B`. Also, we might get tons of duplicated and redundant type information. | ||
951 | * | ||
952 | * An additional complication we need to keep in mind comes from the fact that | ||
953 | * types, in general, can form graphs containing cycles, not just DAGs. | ||
954 | * | ||
955 | * While the algorithm performs deduplication, it also merges and resolves type | ||
956 | * information (unless disabled through `struct btf_dedup_opts`), whenever possible. | ||
957 | * E.g., in the example above with two compilation units having partial type | ||
958 | * information for structs `A` and `B`, the output of algorithm will emit | ||
959 | * a single copy of each BTF type that describes structs `A`, `B`, and `S` | ||
960 | * (as well as type information for `int` and pointers), as if they were defined | ||
961 | * in a single compilation unit as: | ||
962 | * | ||
963 | * struct A { | ||
964 | * int a; | ||
965 | * struct A* self; | ||
966 | * struct S* parent; | ||
967 | * }; | ||
968 | * struct B { | ||
969 | * int b; | ||
970 | * struct B* self; | ||
971 | * struct S* parent; | ||
972 | * }; | ||
973 | * struct S { | ||
974 | * struct A* a_ptr; | ||
975 | * struct B* b_ptr; | ||
976 | * }; | ||
977 | * | ||
978 | * Algorithm summary | ||
979 | * ================= | ||
980 | * | ||
981 | * The algorithm completes its work in 6 separate passes: | ||
982 | * | ||
983 | * 1. Strings deduplication. | ||
984 | * 2. Primitive types deduplication (int, enum, fwd). | ||
985 | * 3. Struct/union types deduplication. | ||
986 | * 4. Reference types deduplication (pointers, typedefs, arrays, funcs, func | ||
987 | * protos, and const/volatile/restrict modifiers). | ||
988 | * 5. Types compaction. | ||
989 | * 6. Types remapping. | ||
990 | * | ||
991 | * The algorithm determines a canonical type descriptor, which is a single | ||
992 | * representative type for each truly unique type. This canonical type is the | ||
993 | * one that will go into final deduplicated BTF type information. For | ||
994 | * struct/unions, it is also the type that algorithm will merge additional type | ||
995 | * information into (while resolving FWDs), as it discovers it from data in | ||
996 | * other CUs. Each input BTF type eventually gets either mapped to itself, if | ||
997 | * that type is canonical, or to some other type, if that type is equivalent | ||
998 | * and was chosen as canonical representative. This mapping is stored in | ||
999 | * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that | ||
1000 | * FWD type got resolved to. | ||
1001 | * | ||
1002 | * To facilitate fast discovery of canonical types, we also maintain canonical | ||
1003 | * index (`btf_dedup->dedup_table`), which maps type descriptor's signature hash | ||
1004 | * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types | ||
1005 | * that match that signature. With a sufficiently good choice of type signature | ||
1006 | * hashing function, we can limit the number of canonical types for each unique type | ||
1007 | * signature to a very small number, allowing us to find the canonical type for any | ||
1008 | * duplicated type very quickly. | ||
1009 | * | ||
1010 | * Struct/union deduplication is the most critical part, and the algorithm for | ||
1011 | * deduplicating structs/unions is described in greater detail in the comments for | ||
1012 | * the `btf_dedup_is_equiv` function. | ||
1013 | */ | ||
1014 | int btf__dedup(struct btf *btf, struct btf_ext *btf_ext, | ||
1015 | const struct btf_dedup_opts *opts) | ||
1016 | { | ||
1017 | struct btf_dedup *d = btf_dedup_new(btf, btf_ext, opts); | ||
1018 | int err; | ||
1019 | |||
1020 | if (IS_ERR(d)) { | ||
1021 | pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d)); | ||
1022 | return -EINVAL; | ||
1023 | } | ||
1024 | |||
1025 | err = btf_dedup_strings(d); | ||
1026 | if (err < 0) { | ||
1027 | pr_debug("btf_dedup_strings failed:%d\n", err); | ||
1028 | goto done; | ||
1029 | } | ||
1030 | err = btf_dedup_prim_types(d); | ||
1031 | if (err < 0) { | ||
1032 | pr_debug("btf_dedup_prim_types failed:%d\n", err); | ||
1033 | goto done; | ||
1034 | } | ||
1035 | err = btf_dedup_struct_types(d); | ||
1036 | if (err < 0) { | ||
1037 | pr_debug("btf_dedup_struct_types failed:%d\n", err); | ||
1038 | goto done; | ||
1039 | } | ||
1040 | err = btf_dedup_ref_types(d); | ||
1041 | if (err < 0) { | ||
1042 | pr_debug("btf_dedup_ref_types failed:%d\n", err); | ||
1043 | goto done; | ||
1044 | } | ||
1045 | err = btf_dedup_compact_types(d); | ||
1046 | if (err < 0) { | ||
1047 | pr_debug("btf_dedup_compact_types failed:%d\n", err); | ||
1048 | goto done; | ||
1049 | } | ||
1050 | err = btf_dedup_remap_types(d); | ||
1051 | if (err < 0) { | ||
1052 | pr_debug("btf_dedup_remap_types failed:%d\n", err); | ||
1053 | goto done; | ||
1054 | } | ||
1055 | |||
1056 | done: | ||
1057 | btf_dedup_free(d); | ||
1058 | return err; | ||
1059 | } | ||
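A minimal caller sketch for the new API (raw_btf_data/raw_btf_size stand in for the bytes of an ELF .BTF section; both the .BTF.ext argument and the options may be NULL, which the code above tolerates):

    struct btf *btf = btf__new(raw_btf_data, raw_btf_size);

    if (IS_ERR(btf))
    	return PTR_ERR(btf);

    /* Dedup types and strings in place, no .BTF.ext, default options. */
    if (btf__dedup(btf, NULL, NULL))
    	fprintf(stderr, "btf__dedup failed\n");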
1060 | |||
1061 | #define BTF_DEDUP_TABLE_SIZE_LOG 14 | ||
1062 | #define BTF_DEDUP_TABLE_MOD ((1 << BTF_DEDUP_TABLE_SIZE_LOG) - 1) | ||
1063 | #define BTF_UNPROCESSED_ID ((__u32)-1) | ||
1064 | #define BTF_IN_PROGRESS_ID ((__u32)-2) | ||
1065 | |||
1066 | struct btf_dedup_node { | ||
1067 | struct btf_dedup_node *next; | ||
1068 | __u32 type_id; | ||
1069 | }; | ||
1070 | |||
1071 | struct btf_dedup { | ||
1072 | /* .BTF section to be deduped in-place */ | ||
1073 | struct btf *btf; | ||
1074 | /* | ||
1075 | * Optional .BTF.ext section. When provided, any strings referenced | ||
1076 | * from it will be taken into account when deduping strings | ||
1077 | */ | ||
1078 | struct btf_ext *btf_ext; | ||
1079 | /* | ||
1080 | * This is a map from any type's signature hash to a list of possible | ||
1081 | * canonical representative type candidates. Hash collisions are | ||
1082 | * ignored, so even types of various kinds can share the same list of | ||
1083 | * candidates, which is fine because we rely on subsequent | ||
1084 | * btf_xxx_equal() checks to authoritatively verify type equality. | ||
1085 | */ | ||
1086 | struct btf_dedup_node **dedup_table; | ||
1087 | /* Canonical types map */ | ||
1088 | __u32 *map; | ||
1089 | /* Hypothetical mapping, used during type graph equivalence checks */ | ||
1090 | __u32 *hypot_map; | ||
1091 | __u32 *hypot_list; | ||
1092 | size_t hypot_cnt; | ||
1093 | size_t hypot_cap; | ||
1094 | /* Various options modifying the behavior of the algorithm */ | ||
1095 | struct btf_dedup_opts opts; | ||
1096 | }; | ||
1097 | |||
1098 | struct btf_str_ptr { | ||
1099 | const char *str; | ||
1100 | __u32 new_off; | ||
1101 | bool used; | ||
1102 | }; | ||
1103 | |||
1104 | struct btf_str_ptrs { | ||
1105 | struct btf_str_ptr *ptrs; | ||
1106 | const char *data; | ||
1107 | __u32 cnt; | ||
1108 | __u32 cap; | ||
1109 | }; | ||
1110 | |||
1111 | static inline __u32 hash_combine(__u32 h, __u32 value) | ||
1112 | { | ||
1113 | /* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */ | ||
1114 | #define GOLDEN_RATIO_PRIME 0x9e370001UL | ||
1115 | return h * 37 + value * GOLDEN_RATIO_PRIME; | ||
1116 | #undef GOLDEN_RATIO_PRIME | ||
1117 | } | ||
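hash_combine() is chained over a type's fields to build the signature hashes stored in the table below; an illustrative sketch (the exact field choice used later in the patch may differ):

    /* Sketch: hash the common header of a struct btf_type. */
    static __u32 btf_hash_common_sketch(const struct btf_type *t)
    {
    	__u32 h = hash_combine(0, t->name_off);

    	h = hash_combine(h, t->info);
    	h = hash_combine(h, t->size);
    	return h;
    }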
1118 | |||
1119 | #define for_each_hash_node(table, hash, node) \ | ||
1120 | for (node = table[hash & BTF_DEDUP_TABLE_MOD]; node; node = node->next) | ||
1121 | |||
1122 | static int btf_dedup_table_add(struct btf_dedup *d, __u32 hash, __u32 type_id) | ||
1123 | { | ||
1124 | struct btf_dedup_node *node = malloc(sizeof(struct btf_dedup_node)); | ||
1125 | |||
1126 | if (!node) | ||
1127 | return -ENOMEM; | ||
1128 | node->type_id = type_id; | ||
1129 | node->next = d->dedup_table[hash & BTF_DEDUP_TABLE_MOD]; | ||
1130 | d->dedup_table[hash & BTF_DEDUP_TABLE_MOD] = node; | ||
1131 | return 0; | ||
1132 | } | ||
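Buckets are singly linked chains, so the matching lookup side simply walks for_each_hash_node() until an equality callback accepts a candidate. A hedged sketch (the helper name and callback are illustrative; the real candidate checks appear later in the patch):

    /* Sketch: find a previously added canonical candidate that compares equal. */
    static __u32 btf_dedup_table_find_sketch(struct btf_dedup *d, __u32 hash,
    					 __u32 type_id,
    					 int (*eq)(struct btf_dedup *d,
    						   __u32 cand_id, __u32 id))
    {
    	struct btf_dedup_node *node;

    	for_each_hash_node(d->dedup_table, hash, node) {
    		if (eq(d, node->type_id, type_id))
    			return node->type_id;
    	}
    	return BTF_UNPROCESSED_ID;	/* nothing matching in this bucket */
    }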
1133 | |||
1134 | static int btf_dedup_hypot_map_add(struct btf_dedup *d, | ||
1135 | __u32 from_id, __u32 to_id) | ||
1136 | { | ||
1137 | if (d->hypot_cnt == d->hypot_cap) { | ||
1138 | __u32 *new_list; | ||
1139 | |||
1140 | d->hypot_cap += max(16, d->hypot_cap / 2); | ||
1141 | new_list = realloc(d->hypot_list, sizeof(__u32) * d->hypot_cap); | ||
1142 | if (!new_list) | ||
1143 | return -ENOMEM; | ||
1144 | d->hypot_list = new_list; | ||
1145 | } | ||
1146 | d->hypot_list[d->hypot_cnt++] = from_id; | ||
1147 | d->hypot_map[from_id] = to_id; | ||
1148 | return 0; | ||
1149 | } | ||
1150 | |||
1151 | static void btf_dedup_clear_hypot_map(struct btf_dedup *d) | ||
1152 | { | ||
1153 | int i; | ||
1154 | |||
1155 | for (i = 0; i < d->hypot_cnt; i++) | ||
1156 | d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID; | ||
1157 | d->hypot_cnt = 0; | ||
1158 | } | ||
1159 | |||
1160 | static void btf_dedup_table_free(struct btf_dedup *d) | ||
1161 | { | ||
1162 | struct btf_dedup_node *head, *tmp; | ||
1163 | int i; | ||
1164 | |||
1165 | if (!d->dedup_table) | ||
1166 | return; | ||
1167 | |||
1168 | for (i = 0; i < (1 << BTF_DEDUP_TABLE_SIZE_LOG); i++) { | ||
1169 | while (d->dedup_table[i]) { | ||
1170 | tmp = d->dedup_table[i]; | ||
1171 | d->dedup_table[i] = tmp->next; | ||
1172 | free(tmp); | ||
1173 | } | ||
1174 | |||
1175 | head = d->dedup_table[i]; | ||
1176 | while (head) { | ||
1177 | tmp = head; | ||
1178 | head = head->next; | ||
1179 | free(tmp); | ||
1180 | } | ||
1181 | } | ||
1182 | |||
1183 | free(d->dedup_table); | ||
1184 | d->dedup_table = NULL; | ||
1185 | } | ||
1186 | |||
1187 | static void btf_dedup_free(struct btf_dedup *d) | ||
1188 | { | ||
1189 | btf_dedup_table_free(d); | ||
1190 | |||
1191 | free(d->map); | ||
1192 | d->map = NULL; | ||
1193 | |||
1194 | free(d->hypot_map); | ||
1195 | d->hypot_map = NULL; | ||
1196 | |||
1197 | free(d->hypot_list); | ||
1198 | d->hypot_list = NULL; | ||
1199 | |||
1200 | free(d); | ||
1201 | } | ||
1202 | |||
1203 | static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext, | ||
1204 | const struct btf_dedup_opts *opts) | ||
1205 | { | ||
1206 | struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup)); | ||
1207 | int i, err = 0; | ||
1208 | |||
1209 | if (!d) | ||
1210 | return ERR_PTR(-ENOMEM); | ||
1211 | |||
1212 | d->btf = btf; | ||
1213 | d->btf_ext = btf_ext; | ||
1214 | |||
1215 | d->dedup_table = calloc(1 << BTF_DEDUP_TABLE_SIZE_LOG, | ||
1216 | sizeof(struct btf_dedup_node *)); | ||
1217 | if (!d->dedup_table) { | ||
1218 | err = -ENOMEM; | ||
1219 | goto done; | ||
1220 | } | ||
1221 | |||
1222 | d->map = malloc(sizeof(__u32) * (1 + btf->nr_types)); | ||
1223 | if (!d->map) { | ||
1224 | err = -ENOMEM; | ||
1225 | goto done; | ||
1226 | } | ||
1227 | /* special BTF "void" type is made canonical immediately */ | ||
1228 | d->map[0] = 0; | ||
1229 | for (i = 1; i <= btf->nr_types; i++) | ||
1230 | d->map[i] = BTF_UNPROCESSED_ID; | ||
1231 | |||
1232 | d->hypot_map = malloc(sizeof(__u32) * (1 + btf->nr_types)); | ||
1233 | if (!d->hypot_map) { | ||
1234 | err = -ENOMEM; | ||
1235 | goto done; | ||
1236 | } | ||
1237 | for (i = 0; i <= btf->nr_types; i++) | ||
1238 | d->hypot_map[i] = BTF_UNPROCESSED_ID; | ||
1239 | |||
1240 | d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds; | ||
1241 | |||
1242 | done: | ||
1243 | if (err) { | ||
1244 | btf_dedup_free(d); | ||
1245 | return ERR_PTR(err); | ||
1246 | } | ||
1247 | |||
1248 | return d; | ||
1249 | } | ||
1250 | |||
1251 | typedef int (*str_off_fn_t)(__u32 *str_off_ptr, void *ctx); | ||
1252 | |||
1253 | /* | ||
1254 | * Iterate over all possible places in .BTF and .BTF.ext that can reference | ||
1255 | * a string and pass a pointer to it to the provided callback `fn`. | ||
1256 | */ | ||
1257 | static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx) | ||
1258 | { | ||
1259 | void *line_data_cur, *line_data_end; | ||
1260 | int i, j, r, rec_size; | ||
1261 | struct btf_type *t; | ||
1262 | |||
1263 | for (i = 1; i <= d->btf->nr_types; i++) { | ||
1264 | t = d->btf->types[i]; | ||
1265 | r = fn(&t->name_off, ctx); | ||
1266 | if (r) | ||
1267 | return r; | ||
1268 | |||
1269 | switch (BTF_INFO_KIND(t->info)) { | ||
1270 | case BTF_KIND_STRUCT: | ||
1271 | case BTF_KIND_UNION: { | ||
1272 | struct btf_member *m = (struct btf_member *)(t + 1); | ||
1273 | __u16 vlen = BTF_INFO_VLEN(t->info); | ||
1274 | |||
1275 | for (j = 0; j < vlen; j++) { | ||
1276 | r = fn(&m->name_off, ctx); | ||
1277 | if (r) | ||
1278 | return r; | ||
1279 | m++; | ||
1280 | } | ||
1281 | break; | ||
1282 | } | ||
1283 | case BTF_KIND_ENUM: { | ||
1284 | struct btf_enum *m = (struct btf_enum *)(t + 1); | ||
1285 | __u16 vlen = BTF_INFO_VLEN(t->info); | ||
1286 | |||
1287 | for (j = 0; j < vlen; j++) { | ||
1288 | r = fn(&m->name_off, ctx); | ||
1289 | if (r) | ||
1290 | return r; | ||
1291 | m++; | ||
1292 | } | ||
1293 | break; | ||
1294 | } | ||
1295 | case BTF_KIND_FUNC_PROTO: { | ||
1296 | struct btf_param *m = (struct btf_param *)(t + 1); | ||
1297 | __u16 vlen = BTF_INFO_VLEN(t->info); | ||
1298 | |||
1299 | for (j = 0; j < vlen; j++) { | ||
1300 | r = fn(&m->name_off, ctx); | ||
1301 | if (r) | ||
1302 | return r; | ||
1303 | m++; | ||
1304 | } | ||
1305 | break; | ||
1306 | } | ||
1307 | default: | ||
1308 | break; | ||
1309 | } | ||
1310 | } | ||
1311 | |||
1312 | if (!d->btf_ext) | ||
1313 | return 0; | ||
1314 | |||
1315 | line_data_cur = d->btf_ext->line_info.info; | ||
1316 | line_data_end = d->btf_ext->line_info.info + d->btf_ext->line_info.len; | ||
1317 | rec_size = d->btf_ext->line_info.rec_size; | ||
1318 | |||
1319 | while (line_data_cur < line_data_end) { | ||
1320 | struct btf_ext_info_sec *sec = line_data_cur; | ||
1321 | struct bpf_line_info_min *line_info; | ||
1322 | __u32 num_info = sec->num_info; | ||
1323 | |||
1324 | r = fn(&sec->sec_name_off, ctx); | ||
1325 | if (r) | ||
1326 | return r; | ||
1327 | |||
1328 | line_data_cur += sizeof(struct btf_ext_info_sec); | ||
1329 | for (i = 0; i < num_info; i++) { | ||
1330 | line_info = line_data_cur; | ||
1331 | r = fn(&line_info->file_name_off, ctx); | ||
1332 | if (r) | ||
1333 | return r; | ||
1334 | r = fn(&line_info->line_off, ctx); | ||
1335 | if (r) | ||
1336 | return r; | ||
1337 | line_data_cur += rec_size; | ||
1338 | } | ||
1339 | } | ||
1340 | |||
1341 | return 0; | ||
1342 | } | ||
1343 | |||
1344 | static int str_sort_by_content(const void *a1, const void *a2) | ||
1345 | { | ||
1346 | const struct btf_str_ptr *p1 = a1; | ||
1347 | const struct btf_str_ptr *p2 = a2; | ||
1348 | |||
1349 | return strcmp(p1->str, p2->str); | ||
1350 | } | ||
1351 | |||
1352 | static int str_sort_by_offset(const void *a1, const void *a2) | ||
1353 | { | ||
1354 | const struct btf_str_ptr *p1 = a1; | ||
1355 | const struct btf_str_ptr *p2 = a2; | ||
1356 | |||
1357 | if (p1->str != p2->str) | ||
1358 | return p1->str < p2->str ? -1 : 1; | ||
1359 | return 0; | ||
1360 | } | ||
1361 | |||
1362 | static int btf_dedup_str_ptr_cmp(const void *str_ptr, const void *pelem) | ||
1363 | { | ||
1364 | const struct btf_str_ptr *p = pelem; | ||
1365 | |||
1366 | if (str_ptr != p->str) | ||
1367 | return (const char *)str_ptr < p->str ? -1 : 1; | ||
1368 | return 0; | ||
1369 | } | ||
1370 | |||
1371 | static int btf_str_mark_as_used(__u32 *str_off_ptr, void *ctx) | ||
1372 | { | ||
1373 | struct btf_str_ptrs *strs; | ||
1374 | struct btf_str_ptr *s; | ||
1375 | |||
1376 | if (*str_off_ptr == 0) | ||
1377 | return 0; | ||
1378 | |||
1379 | strs = ctx; | ||
1380 | s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt, | ||
1381 | sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp); | ||
1382 | if (!s) | ||
1383 | return -EINVAL; | ||
1384 | s->used = true; | ||
1385 | return 0; | ||
1386 | } | ||
1387 | |||
1388 | static int btf_str_remap_offset(__u32 *str_off_ptr, void *ctx) | ||
1389 | { | ||
1390 | struct btf_str_ptrs *strs; | ||
1391 | struct btf_str_ptr *s; | ||
1392 | |||
1393 | if (*str_off_ptr == 0) | ||
1394 | return 0; | ||
1395 | |||
1396 | strs = ctx; | ||
1397 | s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt, | ||
1398 | sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp); | ||
1399 | if (!s) | ||
1400 | return -EINVAL; | ||
1401 | *str_off_ptr = s->new_off; | ||
1402 | return 0; | ||
1403 | } | ||
1404 | |||
1405 | /* | ||
1406 | * Dedup strings and filter out those that are not referenced from either .BTF | ||
1407 | * or .BTF.ext (if provided) sections. | ||
1408 | * | ||
1409 | * This is done by building index of all strings in BTF's string section, | ||
1410 | * then iterating over all entities that can reference strings (e.g., type | ||
1411 | * names, struct field names, .BTF.ext line info, etc) and marking corresponding | ||
1412 | * strings as used. After that all used strings are deduped and compacted into | ||
1413 | * sequential blob of memory and new offsets are calculated. Then all the string | ||
1414 | * references are iterated again and rewritten using new offsets. | ||
1415 | */ | ||
1416 | static int btf_dedup_strings(struct btf_dedup *d) | ||
1417 | { | ||
1418 | const struct btf_header *hdr = d->btf->hdr; | ||
1419 | char *start = (char *)d->btf->nohdr_data + hdr->str_off; | ||
1420 | char *end = start + d->btf->hdr->str_len; | ||
1421 | char *p = start, *tmp_strs = NULL; | ||
1422 | struct btf_str_ptrs strs = { | ||
1423 | .cnt = 0, | ||
1424 | .cap = 0, | ||
1425 | .ptrs = NULL, | ||
1426 | .data = start, | ||
1427 | }; | ||
1428 | int i, j, err = 0, grp_idx; | ||
1429 | bool grp_used; | ||
1430 | |||
1431 | /* build index of all strings */ | ||
1432 | while (p < end) { | ||
1433 | if (strs.cnt + 1 > strs.cap) { | ||
1434 | struct btf_str_ptr *new_ptrs; | ||
1435 | |||
1436 | strs.cap += max(strs.cnt / 2, 16); | ||
1437 | new_ptrs = realloc(strs.ptrs, | ||
1438 | sizeof(strs.ptrs[0]) * strs.cap); | ||
1439 | if (!new_ptrs) { | ||
1440 | err = -ENOMEM; | ||
1441 | goto done; | ||
1442 | } | ||
1443 | strs.ptrs = new_ptrs; | ||
1444 | } | ||
1445 | |||
1446 | strs.ptrs[strs.cnt].str = p; | ||
1447 | strs.ptrs[strs.cnt].used = false; | ||
1448 | |||
1449 | p += strlen(p) + 1; | ||
1450 | strs.cnt++; | ||
1451 | } | ||
1452 | |||
1453 | /* temporary storage for deduplicated strings */ | ||
1454 | tmp_strs = malloc(d->btf->hdr->str_len); | ||
1455 | if (!tmp_strs) { | ||
1456 | err = -ENOMEM; | ||
1457 | goto done; | ||
1458 | } | ||
1459 | |||
1460 | /* mark all used strings */ | ||
1461 | strs.ptrs[0].used = true; | ||
1462 | err = btf_for_each_str_off(d, btf_str_mark_as_used, &strs); | ||
1463 | if (err) | ||
1464 | goto done; | ||
1465 | |||
1466 | /* sort strings by content, so that we can identify duplicates */ | ||
1467 | qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_content); | ||
1468 | |||
1469 | /* | ||
1470 | * iterate groups of equal strings and if any instance in a group was | ||
1471 | * referenced, emit a single instance and remember its new offset | ||
1472 | */ | ||
1473 | p = tmp_strs; | ||
1474 | grp_idx = 0; | ||
1475 | grp_used = strs.ptrs[0].used; | ||
1476 | /* iterate past end to avoid code duplication after loop */ | ||
1477 | for (i = 1; i <= strs.cnt; i++) { | ||
1478 | /* | ||
1479 | * when i == strs.cnt, we want to skip string comparison and go | ||
1480 | * straight to handling last group of strings (otherwise we'd | ||
1481 | * need to handle last group after the loop w/ duplicated code) | ||
1482 | */ | ||
1483 | if (i < strs.cnt && | ||
1484 | !strcmp(strs.ptrs[i].str, strs.ptrs[grp_idx].str)) { | ||
1485 | grp_used = grp_used || strs.ptrs[i].used; | ||
1486 | continue; | ||
1487 | } | ||
1488 | |||
1489 | /* | ||
1490 | * this check would have been required after the loop to handle | ||
1491 | * last group of strings, but due to <= condition in a loop | ||
1492 | * we avoid that duplication | ||
1493 | */ | ||
1494 | if (grp_used) { | ||
1495 | int new_off = p - tmp_strs; | ||
1496 | __u32 len = strlen(strs.ptrs[grp_idx].str); | ||
1497 | |||
1498 | memmove(p, strs.ptrs[grp_idx].str, len + 1); | ||
1499 | for (j = grp_idx; j < i; j++) | ||
1500 | strs.ptrs[j].new_off = new_off; | ||
1501 | p += len + 1; | ||
1502 | } | ||
1503 | |||
1504 | if (i < strs.cnt) { | ||
1505 | grp_idx = i; | ||
1506 | grp_used = strs.ptrs[i].used; | ||
1507 | } | ||
1508 | } | ||
1509 | |||
1510 | /* replace original strings with deduped ones */ | ||
1511 | d->btf->hdr->str_len = p - tmp_strs; | ||
1512 | memmove(start, tmp_strs, d->btf->hdr->str_len); | ||
1513 | end = start + d->btf->hdr->str_len; | ||
1514 | |||
1515 | /* restore original order for further binary search lookups */ | ||
1516 | qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_offset); | ||
1517 | |||
1518 | /* remap string offsets */ | ||
1519 | err = btf_for_each_str_off(d, btf_str_remap_offset, &strs); | ||
1520 | if (err) | ||
1521 | goto done; | ||
1522 | |||
1523 | d->btf->hdr->str_len = end - start; | ||
1524 | |||
1525 | done: | ||
1526 | free(tmp_strs); | ||
1527 | free(strs.ptrs); | ||
1528 | return err; | ||
1529 | } | ||
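
The sort/group/remap scheme used by btf_dedup_strings() above is easiest to see on a toy string blob. The sketch below is illustrative only (data and names are made up, nothing here is part of the patch): it builds an index, sorts by content so duplicates become adjacent, emits one copy per group, and records the new offsets.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct str_ref {
    const char *str;    /* pointer into the original blob */
    int old_off;        /* offset in the original blob */
    int new_off;        /* offset after dedup */
};

static int cmp_by_content(const void *a, const void *b)
{
    const struct str_ref *p1 = a, *p2 = b;

    return strcmp(p1->str, p2->str);
}

int main(void)
{
    /* '\0' + "int" + "foo" + "int" + "foo", duplicates included */
    const char blob[] = "\0int\0foo\0int\0foo";
    struct str_ref refs[16];
    char out[sizeof(blob)];
    int cnt = 0, i, p = 0;

    /* build an index of all strings in the blob */
    for (i = 1; i < (int)sizeof(blob); i += strlen(&blob[i]) + 1) {
        refs[cnt].str = &blob[i];
        refs[cnt].old_off = i;
        cnt++;
    }

    /* sort by content so that duplicates become adjacent */
    qsort(refs, cnt, sizeof(refs[0]), cmp_by_content);

    /* emit one copy per group of equal strings, record new offsets */
    out[p++] = '\0';
    for (i = 0; i < cnt; i++) {
        if (i && !strcmp(refs[i].str, refs[i - 1].str)) {
            refs[i].new_off = refs[i - 1].new_off;
            continue;
        }
        refs[i].new_off = p;
        strcpy(&out[p], refs[i].str);
        p += strlen(refs[i].str) + 1;
    }

    for (i = 0; i < cnt; i++)
        printf("'%s': old_off=%d new_off=%d\n",
               refs[i].str, refs[i].old_off, refs[i].new_off);
    return 0;
}

The real implementation additionally keeps only strings that were marked as used and re-sorts the index by offset afterwards, so that btf_str_remap_offset() can binary-search each reference back to its group.
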
1530 | |||
1531 | static __u32 btf_hash_common(struct btf_type *t) | ||
1532 | { | ||
1533 | __u32 h; | ||
1534 | |||
1535 | h = hash_combine(0, t->name_off); | ||
1536 | h = hash_combine(h, t->info); | ||
1537 | h = hash_combine(h, t->size); | ||
1538 | return h; | ||
1539 | } | ||
1540 | |||
1541 | static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2) | ||
1542 | { | ||
1543 | return t1->name_off == t2->name_off && | ||
1544 | t1->info == t2->info && | ||
1545 | t1->size == t2->size; | ||
1546 | } | ||
1547 | |||
1548 | /* Calculate type signature hash of INT. */ | ||
1549 | static __u32 btf_hash_int(struct btf_type *t) | ||
1550 | { | ||
1551 | __u32 info = *(__u32 *)(t + 1); | ||
1552 | __u32 h; | ||
1553 | |||
1554 | h = btf_hash_common(t); | ||
1555 | h = hash_combine(h, info); | ||
1556 | return h; | ||
1557 | } | ||
1558 | |||
1559 | /* Check structural equality of two INTs. */ | ||
1560 | static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2) | ||
1561 | { | ||
1562 | __u32 info1, info2; | ||
1563 | |||
1564 | if (!btf_equal_common(t1, t2)) | ||
1565 | return false; | ||
1566 | info1 = *(__u32 *)(t1 + 1); | ||
1567 | info2 = *(__u32 *)(t2 + 1); | ||
1568 | return info1 == info2; | ||
1569 | } | ||
1570 | |||
1571 | /* Calculate type signature hash of ENUM. */ | ||
1572 | static __u32 btf_hash_enum(struct btf_type *t) | ||
1573 | { | ||
1574 | struct btf_enum *member = (struct btf_enum *)(t + 1); | ||
1575 | __u32 vlen = BTF_INFO_VLEN(t->info); | ||
1576 | __u32 h = btf_hash_common(t); | ||
1577 | int i; | ||
1578 | |||
1579 | for (i = 0; i < vlen; i++) { | ||
1580 | h = hash_combine(h, member->name_off); | ||
1581 | h = hash_combine(h, member->val); | ||
1582 | member++; | ||
1583 | } | ||
1584 | return h; | ||
1585 | } | ||
1586 | |||
1587 | /* Check structural equality of two ENUMs. */ | ||
1588 | static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2) | ||
1589 | { | ||
1590 | struct btf_enum *m1, *m2; | ||
1591 | __u16 vlen; | ||
1592 | int i; | ||
1593 | |||
1594 | if (!btf_equal_common(t1, t2)) | ||
1595 | return false; | ||
1596 | |||
1597 | vlen = BTF_INFO_VLEN(t1->info); | ||
1598 | m1 = (struct btf_enum *)(t1 + 1); | ||
1599 | m2 = (struct btf_enum *)(t2 + 1); | ||
1600 | for (i = 0; i < vlen; i++) { | ||
1601 | if (m1->name_off != m2->name_off || m1->val != m2->val) | ||
1602 | return false; | ||
1603 | m1++; | ||
1604 | m2++; | ||
1605 | } | ||
1606 | return true; | ||
1607 | } | ||
1608 | |||
1609 | /* | ||
1610 | * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs, | ||
1611 | * as referenced type IDs equivalence is established separately during type | ||
1612 | * graph equivalence check algorithm. | ||
1613 | */ | ||
1614 | static __u32 btf_hash_struct(struct btf_type *t) | ||
1615 | { | ||
1616 | struct btf_member *member = (struct btf_member *)(t + 1); | ||
1617 | __u32 vlen = BTF_INFO_VLEN(t->info); | ||
1618 | __u32 h = btf_hash_common(t); | ||
1619 | int i; | ||
1620 | |||
1621 | for (i = 0; i < vlen; i++) { | ||
1622 | h = hash_combine(h, member->name_off); | ||
1623 | h = hash_combine(h, member->offset); | ||
1624 | /* no hashing of referenced type ID, it can be unresolved yet */ | ||
1625 | member++; | ||
1626 | } | ||
1627 | return h; | ||
1628 | } | ||
1629 | |||
1630 | /* | ||
1631 | * Check structural compatibility of two STRUCTs/UNIONs, ignoring referenced type | ||
1632 | * IDs. This check is performed during type graph equivalence check and | ||
1633 | * referenced types equivalence is checked separately. | ||
1634 | */ | ||
1635 | static bool btf_equal_struct(struct btf_type *t1, struct btf_type *t2) | ||
1636 | { | ||
1637 | struct btf_member *m1, *m2; | ||
1638 | __u16 vlen; | ||
1639 | int i; | ||
1640 | |||
1641 | if (!btf_equal_common(t1, t2)) | ||
1642 | return false; | ||
1643 | |||
1644 | vlen = BTF_INFO_VLEN(t1->info); | ||
1645 | m1 = (struct btf_member *)(t1 + 1); | ||
1646 | m2 = (struct btf_member *)(t2 + 1); | ||
1647 | for (i = 0; i < vlen; i++) { | ||
1648 | if (m1->name_off != m2->name_off || m1->offset != m2->offset) | ||
1649 | return false; | ||
1650 | m1++; | ||
1651 | m2++; | ||
1652 | } | ||
1653 | return true; | ||
1654 | } | ||
1655 | |||
1656 | /* | ||
1657 | * Calculate type signature hash of ARRAY, including referenced type IDs, | ||
1658 | * under assumption that they were already resolved to canonical type IDs and | ||
1659 | * are not going to change. | ||
1660 | */ | ||
1661 | static __u32 btf_hash_array(struct btf_type *t) | ||
1662 | { | ||
1663 | struct btf_array *info = (struct btf_array *)(t + 1); | ||
1664 | __u32 h = btf_hash_common(t); | ||
1665 | |||
1666 | h = hash_combine(h, info->type); | ||
1667 | h = hash_combine(h, info->index_type); | ||
1668 | h = hash_combine(h, info->nelems); | ||
1669 | return h; | ||
1670 | } | ||
1671 | |||
1672 | /* | ||
1673 | * Check exact equality of two ARRAYs, taking into account referenced | ||
1674 | * type IDs, under assumption that they were already resolved to canonical | ||
1675 | * type IDs and are not going to change. | ||
1676 | * This function is called during reference types deduplication to compare | ||
1677 | * ARRAY to potential canonical representative. | ||
1678 | */ | ||
1679 | static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2) | ||
1680 | { | ||
1681 | struct btf_array *info1, *info2; | ||
1682 | |||
1683 | if (!btf_equal_common(t1, t2)) | ||
1684 | return false; | ||
1685 | |||
1686 | info1 = (struct btf_array *)(t1 + 1); | ||
1687 | info2 = (struct btf_array *)(t2 + 1); | ||
1688 | return info1->type == info2->type && | ||
1689 | info1->index_type == info2->index_type && | ||
1690 | info1->nelems == info2->nelems; | ||
1691 | } | ||
1692 | |||
1693 | /* | ||
1694 | * Check structural compatibility of two ARRAYs, ignoring referenced type | ||
1695 | * IDs. This check is performed during type graph equivalence check and | ||
1696 | * referenced types equivalence is checked separately. | ||
1697 | */ | ||
1698 | static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2) | ||
1699 | { | ||
1700 | struct btf_array *info1, *info2; | ||
1701 | |||
1702 | if (!btf_equal_common(t1, t2)) | ||
1703 | return false; | ||
1704 | |||
1705 | info1 = (struct btf_array *)(t1 + 1); | ||
1706 | info2 = (struct btf_array *)(t2 + 1); | ||
1707 | return info1->nelems == info2->nelems; | ||
1708 | } | ||
1709 | |||
1710 | /* | ||
1711 | * Calculate type signature hash of FUNC_PROTO, including referenced type IDs, | ||
1712 | * under assumption that they were already resolved to canonical type IDs and | ||
1713 | * are not going to change. | ||
1714 | */ | ||
1715 | static inline __u32 btf_hash_fnproto(struct btf_type *t) | ||
1716 | { | ||
1717 | struct btf_param *member = (struct btf_param *)(t + 1); | ||
1718 | __u16 vlen = BTF_INFO_VLEN(t->info); | ||
1719 | __u32 h = btf_hash_common(t); | ||
1720 | int i; | ||
1721 | |||
1722 | for (i = 0; i < vlen; i++) { | ||
1723 | h = hash_combine(h, member->name_off); | ||
1724 | h = hash_combine(h, member->type); | ||
1725 | member++; | ||
1726 | } | ||
1727 | return h; | ||
1728 | } | ||
1729 | |||
1730 | /* | ||
1731 | * Check exact equality of two FUNC_PROTOs, taking into account referenced | ||
1732 | * type IDs, under assumption that they were already resolved to canonical | ||
1733 | * type IDs and are not going to change. | ||
1734 | * This function is called during reference types deduplication to compare | ||
1735 | * FUNC_PROTO to potential canonical representative. | ||
1736 | */ | ||
1737 | static inline bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2) | ||
1738 | { | ||
1739 | struct btf_param *m1, *m2; | ||
1740 | __u16 vlen; | ||
1741 | int i; | ||
1742 | |||
1743 | if (!btf_equal_common(t1, t2)) | ||
1744 | return false; | ||
1745 | |||
1746 | vlen = BTF_INFO_VLEN(t1->info); | ||
1747 | m1 = (struct btf_param *)(t1 + 1); | ||
1748 | m2 = (struct btf_param *)(t2 + 1); | ||
1749 | for (i = 0; i < vlen; i++) { | ||
1750 | if (m1->name_off != m2->name_off || m1->type != m2->type) | ||
1751 | return false; | ||
1752 | m1++; | ||
1753 | m2++; | ||
1754 | } | ||
1755 | return true; | ||
1756 | } | ||
1757 | |||
1758 | /* | ||
1759 | * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type | ||
1760 | * IDs. This check is performed during type graph equivalence check and | ||
1761 | * referenced types equivalence is checked separately. | ||
1762 | */ | ||
1763 | static inline bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2) | ||
1764 | { | ||
1765 | struct btf_param *m1, *m2; | ||
1766 | __u16 vlen; | ||
1767 | int i; | ||
1768 | |||
1769 | /* skip return type ID */ | ||
1770 | if (t1->name_off != t2->name_off || t1->info != t2->info) | ||
1771 | return false; | ||
1772 | |||
1773 | vlen = BTF_INFO_VLEN(t1->info); | ||
1774 | m1 = (struct btf_param *)(t1 + 1); | ||
1775 | m2 = (struct btf_param *)(t2 + 1); | ||
1776 | for (i = 0; i < vlen; i++) { | ||
1777 | if (m1->name_off != m2->name_off) | ||
1778 | return false; | ||
1779 | m1++; | ||
1780 | m2++; | ||
1781 | } | ||
1782 | return true; | ||
1783 | } | ||
1784 | |||
1785 | /* | ||
1786 | * Deduplicate primitive types, which can't reference other types, by calculating | ||
1787 | * their type signature hash and comparing them with any possible canonical | ||
1788 | * candidate. If no canonical candidate matches, type itself is marked as | ||
1789 | * canonical and is added into `btf_dedup->dedup_table` as another candidate. | ||
1790 | */ | ||
1791 | static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id) | ||
1792 | { | ||
1793 | struct btf_type *t = d->btf->types[type_id]; | ||
1794 | struct btf_type *cand; | ||
1795 | struct btf_dedup_node *cand_node; | ||
1796 | /* if we don't find equivalent type, then we are canonical */ | ||
1797 | __u32 new_id = type_id; | ||
1798 | __u32 h; | ||
1799 | |||
1800 | switch (BTF_INFO_KIND(t->info)) { | ||
1801 | case BTF_KIND_CONST: | ||
1802 | case BTF_KIND_VOLATILE: | ||
1803 | case BTF_KIND_RESTRICT: | ||
1804 | case BTF_KIND_PTR: | ||
1805 | case BTF_KIND_TYPEDEF: | ||
1806 | case BTF_KIND_ARRAY: | ||
1807 | case BTF_KIND_STRUCT: | ||
1808 | case BTF_KIND_UNION: | ||
1809 | case BTF_KIND_FUNC: | ||
1810 | case BTF_KIND_FUNC_PROTO: | ||
1811 | return 0; | ||
1812 | |||
1813 | case BTF_KIND_INT: | ||
1814 | h = btf_hash_int(t); | ||
1815 | for_each_hash_node(d->dedup_table, h, cand_node) { | ||
1816 | cand = d->btf->types[cand_node->type_id]; | ||
1817 | if (btf_equal_int(t, cand)) { | ||
1818 | new_id = cand_node->type_id; | ||
1819 | break; | ||
1820 | } | ||
1821 | } | ||
1822 | break; | ||
1823 | |||
1824 | case BTF_KIND_ENUM: | ||
1825 | h = btf_hash_enum(t); | ||
1826 | for_each_hash_node(d->dedup_table, h, cand_node) { | ||
1827 | cand = d->btf->types[cand_node->type_id]; | ||
1828 | if (btf_equal_enum(t, cand)) { | ||
1829 | new_id = cand_node->type_id; | ||
1830 | break; | ||
1831 | } | ||
1832 | } | ||
1833 | break; | ||
1834 | |||
1835 | case BTF_KIND_FWD: | ||
1836 | h = btf_hash_common(t); | ||
1837 | for_each_hash_node(d->dedup_table, h, cand_node) { | ||
1838 | cand = d->btf->types[cand_node->type_id]; | ||
1839 | if (btf_equal_common(t, cand)) { | ||
1840 | new_id = cand_node->type_id; | ||
1841 | break; | ||
1842 | } | ||
1843 | } | ||
1844 | break; | ||
1845 | |||
1846 | default: | ||
1847 | return -EINVAL; | ||
1848 | } | ||
1849 | |||
1850 | d->map[type_id] = new_id; | ||
1851 | if (type_id == new_id && btf_dedup_table_add(d, h, type_id)) | ||
1852 | return -ENOMEM; | ||
1853 | |||
1854 | return 0; | ||
1855 | } | ||
1856 | |||
1857 | static int btf_dedup_prim_types(struct btf_dedup *d) | ||
1858 | { | ||
1859 | int i, err; | ||
1860 | |||
1861 | for (i = 1; i <= d->btf->nr_types; i++) { | ||
1862 | err = btf_dedup_prim_type(d, i); | ||
1863 | if (err) | ||
1864 | return err; | ||
1865 | } | ||
1866 | return 0; | ||
1867 | } | ||
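
The "hash, then compare against every candidate in the bucket" pattern used by btf_dedup_prim_type() can be sketched with plain integers. Toy code only, no BTF involved; the modulo hash and the fixed-size table are assumptions for illustration:

#include <stdio.h>

#define TBL_SZ 8

struct node {
    int val;
    int id;             /* index of the canonical instance */
    struct node *next;
};

int main(void)
{
    int vals[] = { 7, 42, 7, 13, 42 };
    struct node nodes[5], *tbl[TBL_SZ] = { NULL };
    int map[5];         /* instance -> canonical instance */
    int i;

    for (i = 0; i < 5; i++) {
        unsigned int h = (unsigned int)vals[i] % TBL_SZ;
        struct node *cand = tbl[h];
        int new_id = i;  /* canonical unless a match is found */

        for (; cand; cand = cand->next) {
            if (cand->val == vals[i]) {  /* the "btf_equal_*" step */
                new_id = cand->id;
                break;
            }
        }
        map[i] = new_id;
        if (new_id == i) {  /* register as a new canonical candidate */
            nodes[i].val = vals[i];
            nodes[i].id = i;
            nodes[i].next = tbl[h];
            tbl[h] = &nodes[i];
        }
    }

    for (i = 0; i < 5; i++)
        printf("instance %d -> canonical %d\n", i, map[i]);
    return 0;
}

Here map[] plays the role of btf_dedup->map and the per-bucket list plays the role of btf_dedup->dedup_table: an instance either maps to an earlier equal instance or registers itself as a new canonical candidate.
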
1868 | |||
1869 | /* | ||
1870 | * Check whether type is already mapped into canonical one (could be to itself). | ||
1871 | */ | ||
1872 | static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id) | ||
1873 | { | ||
1874 | return d->map[type_id] <= BTF_MAX_TYPE; | ||
1875 | } | ||
1876 | |||
1877 | /* | ||
1878 | * Resolve type ID into its canonical type ID, if any; otherwise return original | ||
1879 | * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow | ||
1880 | * STRUCT/UNION link and resolve it into canonical type ID as well. | ||
1881 | */ | ||
1882 | static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id) | ||
1883 | { | ||
1884 | while (is_type_mapped(d, type_id) && d->map[type_id] != type_id) | ||
1885 | type_id = d->map[type_id]; | ||
1886 | return type_id; | ||
1887 | } | ||
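
resolve_type_id() is essentially a "follow the map until it stops moving" loop (the real helper additionally stops at IDs that are not mapped yet). A toy counterpart with hypothetical IDs:

#include <stdio.h>

/* follow id -> id until a fixed point is reached */
static unsigned int resolve(const unsigned int *map, unsigned int id)
{
    while (map[id] != id)
        id = map[id];
    return id;
}

int main(void)
{
    /* 3 was deduped into 2, which was deduped into 1 */
    unsigned int map[] = { 0, 1, 1, 2 };

    printf("%u\n", resolve(map, 3));  /* prints 1 */
    return 0;
}
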
1888 | |||
1889 | /* | ||
1890 | * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original | ||
1891 | * type ID. | ||
1892 | */ | ||
1893 | static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id) | ||
1894 | { | ||
1895 | __u32 orig_type_id = type_id; | ||
1896 | |||
1897 | if (BTF_INFO_KIND(d->btf->types[type_id]->info) != BTF_KIND_FWD) | ||
1898 | return type_id; | ||
1899 | |||
1900 | while (is_type_mapped(d, type_id) && d->map[type_id] != type_id) | ||
1901 | type_id = d->map[type_id]; | ||
1902 | |||
1903 | if (BTF_INFO_KIND(d->btf->types[type_id]->info) != BTF_KIND_FWD) | ||
1904 | return type_id; | ||
1905 | |||
1906 | return orig_type_id; | ||
1907 | } | ||
1908 | |||
1909 | |||
1910 | static inline __u16 btf_fwd_kind(struct btf_type *t) | ||
1911 | { | ||
1912 | return BTF_INFO_KFLAG(t->info) ? BTF_KIND_UNION : BTF_KIND_STRUCT; | ||
1913 | } | ||
1914 | |||
1915 | /* | ||
1916 | * Check equivalence of BTF type graph formed by candidate struct/union (we'll | ||
1917 | * call it "candidate graph" in this description for brevity) to a type graph | ||
1918 | * formed by (potential) canonical struct/union ("canonical graph" for brevity | ||
1919 | * here, though keep in mind that not all types in canonical graph are | ||
1920 | * necessarily canonical representatives themselves, some of them might be | ||
1921 | * duplicates or its uniqueness might not have been established yet). | ||
1922 | * Returns: | ||
1923 | * - >0, if type graphs are equivalent; | ||
1924 | * - 0, if not equivalent; | ||
1925 | * - <0, on error. | ||
1926 | * | ||
1927 | * Algorithm performs side-by-side DFS traversal of both type graphs and checks | ||
1928 | * equivalence of BTF types at each step. If at any point BTF types in candidate | ||
1929 | * and canonical graphs are not compatible structurally, whole graphs are | ||
1930 | * incompatible. If types are structurally equivalent (i.e., all information | ||
1931 | * except referenced type IDs is exactly the same), a mapping from `canon_id` to | ||
1932 | * a `cand_id` is recorded in hypothetical mapping (`btf_dedup->hypot_map`). | ||
1933 | * If a type references other types, then those referenced types are checked | ||
1934 | * for equivalence recursively. | ||
1935 | * | ||
1936 | * During DFS traversal, if we find that for current `canon_id` type we | ||
1937 | * already have some mapping in hypothetical map, we check for two possible | ||
1938 | * situations: | ||
1939 | * - `canon_id` is mapped to exactly the same type as `cand_id`. This will | ||
1940 | * happen when type graphs have cycles. In this case we assume those two | ||
1941 | * types are equivalent. | ||
1942 | * - `canon_id` is mapped to different type. This is contradiction in our | ||
1943 | * hypothetical mapping, because same graph in canonical graph corresponds | ||
1944 | * to two different types in candidate graph, which for equivalent type | ||
1945 | * graphs shouldn't happen. This condition terminates equivalence check | ||
1946 | * with negative result. | ||
1947 | * | ||
1948 | * If the type graph traversal exhausts types to check and finds no | ||
1949 | * contradiction, then the type graphs are equivalent. | ||
1950 | * | ||
1951 | * When checking types for equivalence, there is one special case: FWD types. | ||
1952 | * If FWD type resolution is allowed and one of the types (either from canonical | ||
1953 | * or candidate graph) is FWD and other is STRUCT/UNION (depending on FWD's kind | ||
1954 | * flag) and their names match, hypothetical mapping is updated to point from | ||
1955 | * FWD to STRUCT/UNION. If the graphs are successfully determined to be equivalent, | ||
1956 | * this mapping will be used to record FWD -> STRUCT/UNION mapping permanently. | ||
1957 | * | ||
1958 | * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution | ||
1959 | * if there are two identically named (or anonymous) structs/unions that are | ||
1960 | * structurally compatible, one of which has a FWD field while the other has a | ||
1961 | * concrete STRUCT/UNION field, but according to the C sources they are | ||
1962 | * different structs/unions referencing different types with the same name. | ||
1963 | * This is extremely unlikely to happen, but the btf_dedup API allows disabling | ||
1964 | * FWD resolution if this logic is causing problems. | ||
1965 | * | ||
1966 | * Doing FWD resolution means that both candidate and/or canonical graphs can | ||
1967 | * consist of portions of the graph that come from multiple compilation units. | ||
1968 | * This is due to the fact that types within a single compilation unit are always | ||
1969 | * deduplicated and FWDs are already resolved, if the referenced struct/union | ||
1970 | * definition is available. So, if we had an unresolved FWD and found the corresponding | ||
1971 | * STRUCT/UNION, they will be from different compilation units. This | ||
1972 | * consequently means that when we "link" FWD to corresponding STRUCT/UNION, | ||
1973 | * type graph will likely have at least two different BTF types that describe | ||
1974 | * the same type (e.g., most probably there will be two different BTF types for | ||
1975 | * the same 'int' primitive type) and could even have "overlapping" parts of the | ||
1976 | * type graph that describe the same subset of types. | ||
1977 | * | ||
1978 | * This in turn means that our assumption that each type in canonical graph | ||
1979 | * must correspond to exactly one type in candidate graph might not hold | ||
1980 | * anymore and will make it harder to detect contradictions using hypothetical | ||
1981 | * map. To handle this problem, we allow following FWD -> STRUCT/UNION | ||
1982 | * resolution only in the canonical graph. FWDs in candidate graphs are never | ||
1983 | * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs | ||
1984 | * that can occur: | ||
1985 | * - Both types in canonical and candidate graphs are FWDs. If they are | ||
1986 | * structurally equivalent, then they can either be both resolved to the | ||
1987 | * same STRUCT/UNION or not resolved at all. In both cases they are | ||
1988 | * equivalent and there is no need to resolve FWD on candidate side. | ||
1989 | * - Both types in canonical and candidate graphs are concrete STRUCT/UNION, | ||
1990 | * so nothing to resolve as well, algorithm will check equivalence anyway. | ||
1991 | * - Type in canonical graph is FWD, while type in candidate is concrete | ||
1992 | * STRUCT/UNION. In this case candidate graph comes from single compilation | ||
1993 | * unit, so there is exactly one BTF type for each unique C type. After | ||
1994 | * resolving FWD into STRUCT/UNION, there might be more than one BTF type | ||
1995 | * in canonical graph mapping to single BTF type in candidate graph, but | ||
1996 | * because hypothetical mapping maps from canonical to candidate types, it's | ||
1997 | * alright, and we still maintain the property of having single `canon_id` | ||
1998 | * mapping to single `cand_id` (there could be two different `canon_id` | ||
1999 | * mapped to the same `cand_id`, but it's not contradictory). | ||
2000 | * - Type in canonical graph is concrete STRUCT/UNION, while type in candidate | ||
2001 | * graph is FWD. In this case we are just going to check compatibility of | ||
2002 | * STRUCT/UNION and corresponding FWD, and if they are compatible, we'll | ||
2003 | * assume that whatever STRUCT/UNION FWD resolves to must be equivalent to | ||
2004 | * a concrete STRUCT/UNION from canonical graph. If the rest of type graphs | ||
2005 | * turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from | ||
2006 | * canonical graph. | ||
2007 | */ | ||
2008 | static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id, | ||
2009 | __u32 canon_id) | ||
2010 | { | ||
2011 | struct btf_type *cand_type; | ||
2012 | struct btf_type *canon_type; | ||
2013 | __u32 hypot_type_id; | ||
2014 | __u16 cand_kind; | ||
2015 | __u16 canon_kind; | ||
2016 | int i, eq; | ||
2017 | |||
2018 | /* if both resolve to the same canonical, they must be equivalent */ | ||
2019 | if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id)) | ||
2020 | return 1; | ||
2021 | |||
2022 | canon_id = resolve_fwd_id(d, canon_id); | ||
2023 | |||
2024 | hypot_type_id = d->hypot_map[canon_id]; | ||
2025 | if (hypot_type_id <= BTF_MAX_TYPE) | ||
2026 | return hypot_type_id == cand_id; | ||
2027 | |||
2028 | if (btf_dedup_hypot_map_add(d, canon_id, cand_id)) | ||
2029 | return -ENOMEM; | ||
2030 | |||
2031 | cand_type = d->btf->types[cand_id]; | ||
2032 | canon_type = d->btf->types[canon_id]; | ||
2033 | cand_kind = BTF_INFO_KIND(cand_type->info); | ||
2034 | canon_kind = BTF_INFO_KIND(canon_type->info); | ||
2035 | |||
2036 | if (cand_type->name_off != canon_type->name_off) | ||
2037 | return 0; | ||
2038 | |||
2039 | /* FWD <--> STRUCT/UNION equivalence check, if enabled */ | ||
2040 | if (!d->opts.dont_resolve_fwds | ||
2041 | && (cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD) | ||
2042 | && cand_kind != canon_kind) { | ||
2043 | __u16 real_kind; | ||
2044 | __u16 fwd_kind; | ||
2045 | |||
2046 | if (cand_kind == BTF_KIND_FWD) { | ||
2047 | real_kind = canon_kind; | ||
2048 | fwd_kind = btf_fwd_kind(cand_type); | ||
2049 | } else { | ||
2050 | real_kind = cand_kind; | ||
2051 | fwd_kind = btf_fwd_kind(canon_type); | ||
2052 | } | ||
2053 | return fwd_kind == real_kind; | ||
2054 | } | ||
2055 | |||
2056 | if (cand_type->info != canon_type->info) | ||
2057 | return 0; | ||
2058 | |||
2059 | switch (cand_kind) { | ||
2060 | case BTF_KIND_INT: | ||
2061 | return btf_equal_int(cand_type, canon_type); | ||
2062 | |||
2063 | case BTF_KIND_ENUM: | ||
2064 | return btf_equal_enum(cand_type, canon_type); | ||
2065 | |||
2066 | case BTF_KIND_FWD: | ||
2067 | return btf_equal_common(cand_type, canon_type); | ||
2068 | |||
2069 | case BTF_KIND_CONST: | ||
2070 | case BTF_KIND_VOLATILE: | ||
2071 | case BTF_KIND_RESTRICT: | ||
2072 | case BTF_KIND_PTR: | ||
2073 | case BTF_KIND_TYPEDEF: | ||
2074 | case BTF_KIND_FUNC: | ||
2075 | return btf_dedup_is_equiv(d, cand_type->type, canon_type->type); | ||
2076 | |||
2077 | case BTF_KIND_ARRAY: { | ||
2078 | struct btf_array *cand_arr, *canon_arr; | ||
2079 | |||
2080 | if (!btf_compat_array(cand_type, canon_type)) | ||
2081 | return 0; | ||
2082 | cand_arr = (struct btf_array *)(cand_type + 1); | ||
2083 | canon_arr = (struct btf_array *)(canon_type + 1); | ||
2084 | eq = btf_dedup_is_equiv(d, | ||
2085 | cand_arr->index_type, canon_arr->index_type); | ||
2086 | if (eq <= 0) | ||
2087 | return eq; | ||
2088 | return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type); | ||
2089 | } | ||
2090 | |||
2091 | case BTF_KIND_STRUCT: | ||
2092 | case BTF_KIND_UNION: { | ||
2093 | struct btf_member *cand_m, *canon_m; | ||
2094 | __u16 vlen; | ||
2095 | |||
2096 | if (!btf_equal_struct(cand_type, canon_type)) | ||
2097 | return 0; | ||
2098 | vlen = BTF_INFO_VLEN(cand_type->info); | ||
2099 | cand_m = (struct btf_member *)(cand_type + 1); | ||
2100 | canon_m = (struct btf_member *)(canon_type + 1); | ||
2101 | for (i = 0; i < vlen; i++) { | ||
2102 | eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type); | ||
2103 | if (eq <= 0) | ||
2104 | return eq; | ||
2105 | cand_m++; | ||
2106 | canon_m++; | ||
2107 | } | ||
2108 | |||
2109 | return 1; | ||
2110 | } | ||
2111 | |||
2112 | case BTF_KIND_FUNC_PROTO: { | ||
2113 | struct btf_param *cand_p, *canon_p; | ||
2114 | __u16 vlen; | ||
2115 | |||
2116 | if (!btf_compat_fnproto(cand_type, canon_type)) | ||
2117 | return 0; | ||
2118 | eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type); | ||
2119 | if (eq <= 0) | ||
2120 | return eq; | ||
2121 | vlen = BTF_INFO_VLEN(cand_type->info); | ||
2122 | cand_p = (struct btf_param *)(cand_type + 1); | ||
2123 | canon_p = (struct btf_param *)(canon_type + 1); | ||
2124 | for (i = 0; i < vlen; i++) { | ||
2125 | eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type); | ||
2126 | if (eq <= 0) | ||
2127 | return eq; | ||
2128 | cand_p++; | ||
2129 | canon_p++; | ||
2130 | } | ||
2131 | return 1; | ||
2132 | } | ||
2133 | |||
2134 | default: | ||
2135 | return -EINVAL; | ||
2136 | } | ||
2137 | return 0; | ||
2138 | } | ||
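
A hypothetical pair of compilation units that exercises the FWD case described in the comment above (file and type names are made up, not taken from the patch):

/* fwd_user.c: only a forward declaration is visible, so this unit's
 * BTF contains FWD 'pkt_ctx' reached through a pointer.
 */
struct pkt_ctx;
struct handler { int (*fn)(struct pkt_ctx *ctx); };

/* fwd_def.c: the full definition is visible, so this unit's BTF
 * contains the concrete STRUCT 'pkt_ctx' and its own copy of 'handler'.
 */
struct pkt_ctx { int len; int mark; };
struct handler { int (*fn)(struct pkt_ctx *ctx); };

Both copies of 'handler' are structurally identical, and the FWD's name and kind match the concrete struct, so the equivalence check succeeds; the hypothetical map then records the FWD -> STRUCT link and the duplicate 'handler' collapses into a single canonical type.
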
2139 | |||
2140 | /* | ||
2141 | * Use hypothetical mapping, produced by successful type graph equivalence | ||
2142 | * check, to augment existing struct/union canonical mapping, where possible. | ||
2143 | * | ||
2144 | * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record | ||
2145 | * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional: | ||
2146 | * it doesn't matter if FWD type was part of canonical graph or candidate one, | ||
2147 | * we are recording the mapping anyway. As opposed to carefulness required | ||
2148 | * for struct/union correspondence mapping (described below), for FWD resolution | ||
2149 | * it's not important, because by the time that FWD type (a reference type) is | ||
2150 | * deduplicated, all structs/unions will already have been deduped anyway. | ||
2151 | * | ||
2152 | * Recording STRUCT/UNION mapping is purely a performance optimization and is | ||
2153 | * not required for correctness. It needs to be done carefully to ensure that | ||
2154 | * struct/union from candidate's type graph is not mapped into corresponding | ||
2155 | * struct/union from canonical type graph that itself hasn't been resolved into | ||
2156 | * canonical representative. The only guarantee we have is that canonical | ||
2157 | * struct/union was determined as canonical and that won't change. But any | ||
2158 | * types referenced through that struct/union fields could have been not yet | ||
2159 | * resolved, so in case like that it's too early to establish any kind of | ||
2160 | * correspondence between structs/unions. | ||
2161 | * | ||
2162 | * No canonical correspondence is derived for primitive types (they are fully | ||
2163 | * deduplicated at this point anyway) or reference types (they rely on | ||
2164 | * stability of struct/union canonical relationship for equivalence checks). | ||
2165 | */ | ||
2166 | static void btf_dedup_merge_hypot_map(struct btf_dedup *d) | ||
2167 | { | ||
2168 | __u32 cand_type_id, targ_type_id; | ||
2169 | __u16 t_kind, c_kind; | ||
2170 | __u32 t_id, c_id; | ||
2171 | int i; | ||
2172 | |||
2173 | for (i = 0; i < d->hypot_cnt; i++) { | ||
2174 | cand_type_id = d->hypot_list[i]; | ||
2175 | targ_type_id = d->hypot_map[cand_type_id]; | ||
2176 | t_id = resolve_type_id(d, targ_type_id); | ||
2177 | c_id = resolve_type_id(d, cand_type_id); | ||
2178 | t_kind = BTF_INFO_KIND(d->btf->types[t_id]->info); | ||
2179 | c_kind = BTF_INFO_KIND(d->btf->types[c_id]->info); | ||
2180 | /* | ||
2181 | * Resolve FWD into STRUCT/UNION. | ||
2182 | * It's ok to resolve FWD into STRUCT/UNION that's not yet | ||
2183 | * mapped to canonical representative (as opposed to | ||
2184 | * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because | ||
2185 | * eventually that struct is going to be mapped and all resolved | ||
2186 | * FWDs will automatically resolve to correct canonical | ||
2187 | * representative. This will happen before ref type deduping, | ||
2188 | * which critically depends on stability of these mappings. This | ||
2189 | * stability is not a requirement for STRUCT/UNION equivalence | ||
2190 | * checks, though. | ||
2191 | */ | ||
2192 | if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD) | ||
2193 | d->map[c_id] = t_id; | ||
2194 | else if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD) | ||
2195 | d->map[t_id] = c_id; | ||
2196 | |||
2197 | if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) && | ||
2198 | c_kind != BTF_KIND_FWD && | ||
2199 | is_type_mapped(d, c_id) && | ||
2200 | !is_type_mapped(d, t_id)) { | ||
2201 | /* | ||
2202 | * as a perf optimization, we can map struct/union | ||
2203 | * that's part of type graph we just verified for | ||
2204 | * equivalence. We can do that for struct/union that has | ||
2205 | * canonical representative only, though. | ||
2206 | */ | ||
2207 | d->map[t_id] = c_id; | ||
2208 | } | ||
2209 | } | ||
2210 | } | ||
2211 | |||
2212 | /* | ||
2213 | * Deduplicate struct/union types. | ||
2214 | * | ||
2215 | * For each struct/union type its type signature hash is calculated, taking | ||
2216 | * into account type's name, size, number, order and names of fields, but | ||
2217 | * ignoring type IDs referenced from fields, because they might not be deduped | ||
2218 | * completely until after reference types deduplication phase. This type hash | ||
2219 | * is used to iterate over all potential canonical types, sharing same hash. | ||
2220 | * For each canonical candidate we check whether type graphs that they form | ||
2221 | * (through referenced types in fields and so on) are equivalent using algorithm | ||
2222 | * implemented in `btf_dedup_is_equiv`. If such equivalence is found and | ||
2223 | * BTF_KIND_FWD resolution is allowed, then hypothetical mapping | ||
2224 | * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence | ||
2225 | * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to | ||
2226 | * potentially map other structs/unions to their canonical representatives, | ||
2227 | * if such relationship hasn't yet been established. This speeds up algorithm | ||
2228 | * by eliminating some of the duplicate work. | ||
2229 | * | ||
2230 | * If no matching canonical representative was found, struct/union is marked | ||
2231 | * as canonical for itself and is added into btf_dedup->dedup_table hash map | ||
2232 | * for further look ups. | ||
2233 | */ | ||
2234 | static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id) | ||
2235 | { | ||
2236 | struct btf_dedup_node *cand_node; | ||
2237 | struct btf_type *t; | ||
2238 | /* if we don't find equivalent type, then we are canonical */ | ||
2239 | __u32 new_id = type_id; | ||
2240 | __u16 kind; | ||
2241 | __u32 h; | ||
2242 | |||
2243 | /* already deduped or is in process of deduping (loop detected) */ | ||
2244 | if (d->map[type_id] <= BTF_MAX_TYPE) | ||
2245 | return 0; | ||
2246 | |||
2247 | t = d->btf->types[type_id]; | ||
2248 | kind = BTF_INFO_KIND(t->info); | ||
2249 | |||
2250 | if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION) | ||
2251 | return 0; | ||
2252 | |||
2253 | h = btf_hash_struct(t); | ||
2254 | for_each_hash_node(d->dedup_table, h, cand_node) { | ||
2255 | int eq; | ||
2256 | |||
2257 | btf_dedup_clear_hypot_map(d); | ||
2258 | eq = btf_dedup_is_equiv(d, type_id, cand_node->type_id); | ||
2259 | if (eq < 0) | ||
2260 | return eq; | ||
2261 | if (!eq) | ||
2262 | continue; | ||
2263 | new_id = cand_node->type_id; | ||
2264 | btf_dedup_merge_hypot_map(d); | ||
2265 | break; | ||
2266 | } | ||
2267 | |||
2268 | d->map[type_id] = new_id; | ||
2269 | if (type_id == new_id && btf_dedup_table_add(d, h, type_id)) | ||
2270 | return -ENOMEM; | ||
2271 | |||
2272 | return 0; | ||
2273 | } | ||
2274 | |||
2275 | static int btf_dedup_struct_types(struct btf_dedup *d) | ||
2276 | { | ||
2277 | int i, err; | ||
2278 | |||
2279 | for (i = 1; i <= d->btf->nr_types; i++) { | ||
2280 | err = btf_dedup_struct_type(d, i); | ||
2281 | if (err) | ||
2282 | return err; | ||
2283 | } | ||
2284 | return 0; | ||
2285 | } | ||
2286 | |||
2287 | /* | ||
2288 | * Deduplicate reference type. | ||
2289 | * | ||
2290 | * Once all primitive and struct/union types got deduplicated, we can easily | ||
2291 | * deduplicate all other (reference) BTF types. This is done in two steps: | ||
2292 | * | ||
2293 | * 1. Resolve all referenced type IDs into their canonical type IDs. This | ||
2294 | * resolution can be done either immediately for primitive or struct/union types | ||
2295 | * (because they were deduped in previous two phases) or recursively for | ||
2296 | * reference types. Recursion will always terminate at either primitive or | ||
2297 | * struct/union type, at which point we can "unwind" chain of reference types | ||
2298 | * one by one. There is no danger of encountering cycles because in C type | ||
2299 | * system the only way to form type cycle is through struct/union, so any chain | ||
2300 | * of reference types, even those taking part in a type cycle, will inevitably | ||
2301 | * reach struct/union at some point. | ||
2302 | * | ||
2303 | * 2. Once all referenced type IDs are resolved into canonical ones, BTF type | ||
2304 | * becomes "stable", in the sense that no further deduplication will cause | ||
2305 | * any changes to it. With that, it's now possible to calculate type's signature | ||
2306 | * hash (this time taking into account referenced type IDs) and loop over all | ||
2307 | * potential canonical representatives. If no match was found, current type | ||
2308 | * will become canonical representative of itself and will be added into | ||
2309 | * btf_dedup->dedup_table as another possible canonical representative. | ||
2310 | */ | ||
2311 | static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id) | ||
2312 | { | ||
2313 | struct btf_dedup_node *cand_node; | ||
2314 | struct btf_type *t, *cand; | ||
2315 | /* if we don't find equivalent type, then we are representative type */ | ||
2316 | __u32 new_id = type_id; | ||
2317 | __u32 h, ref_type_id; | ||
2318 | |||
2319 | if (d->map[type_id] == BTF_IN_PROGRESS_ID) | ||
2320 | return -ELOOP; | ||
2321 | if (d->map[type_id] <= BTF_MAX_TYPE) | ||
2322 | return resolve_type_id(d, type_id); | ||
2323 | |||
2324 | t = d->btf->types[type_id]; | ||
2325 | d->map[type_id] = BTF_IN_PROGRESS_ID; | ||
2326 | |||
2327 | switch (BTF_INFO_KIND(t->info)) { | ||
2328 | case BTF_KIND_CONST: | ||
2329 | case BTF_KIND_VOLATILE: | ||
2330 | case BTF_KIND_RESTRICT: | ||
2331 | case BTF_KIND_PTR: | ||
2332 | case BTF_KIND_TYPEDEF: | ||
2333 | case BTF_KIND_FUNC: | ||
2334 | ref_type_id = btf_dedup_ref_type(d, t->type); | ||
2335 | if (ref_type_id < 0) | ||
2336 | return ref_type_id; | ||
2337 | t->type = ref_type_id; | ||
2338 | |||
2339 | h = btf_hash_common(t); | ||
2340 | for_each_hash_node(d->dedup_table, h, cand_node) { | ||
2341 | cand = d->btf->types[cand_node->type_id]; | ||
2342 | if (btf_equal_common(t, cand)) { | ||
2343 | new_id = cand_node->type_id; | ||
2344 | break; | ||
2345 | } | ||
2346 | } | ||
2347 | break; | ||
2348 | |||
2349 | case BTF_KIND_ARRAY: { | ||
2350 | struct btf_array *info = (struct btf_array *)(t + 1); | ||
2351 | |||
2352 | ref_type_id = btf_dedup_ref_type(d, info->type); | ||
2353 | if (ref_type_id < 0) | ||
2354 | return ref_type_id; | ||
2355 | info->type = ref_type_id; | ||
2356 | |||
2357 | ref_type_id = btf_dedup_ref_type(d, info->index_type); | ||
2358 | if (ref_type_id < 0) | ||
2359 | return ref_type_id; | ||
2360 | info->index_type = ref_type_id; | ||
2361 | |||
2362 | h = btf_hash_array(t); | ||
2363 | for_each_hash_node(d->dedup_table, h, cand_node) { | ||
2364 | cand = d->btf->types[cand_node->type_id]; | ||
2365 | if (btf_equal_array(t, cand)) { | ||
2366 | new_id = cand_node->type_id; | ||
2367 | break; | ||
2368 | } | ||
2369 | } | ||
2370 | break; | ||
2371 | } | ||
2372 | |||
2373 | case BTF_KIND_FUNC_PROTO: { | ||
2374 | struct btf_param *param; | ||
2375 | __u16 vlen; | ||
2376 | int i; | ||
2377 | |||
2378 | ref_type_id = btf_dedup_ref_type(d, t->type); | ||
2379 | if (ref_type_id < 0) | ||
2380 | return ref_type_id; | ||
2381 | t->type = ref_type_id; | ||
2382 | |||
2383 | vlen = BTF_INFO_VLEN(t->info); | ||
2384 | param = (struct btf_param *)(t + 1); | ||
2385 | for (i = 0; i < vlen; i++) { | ||
2386 | ref_type_id = btf_dedup_ref_type(d, param->type); | ||
2387 | if (ref_type_id < 0) | ||
2388 | return ref_type_id; | ||
2389 | param->type = ref_type_id; | ||
2390 | param++; | ||
2391 | } | ||
2392 | |||
2393 | h = btf_hash_fnproto(t); | ||
2394 | for_each_hash_node(d->dedup_table, h, cand_node) { | ||
2395 | cand = d->btf->types[cand_node->type_id]; | ||
2396 | if (btf_equal_fnproto(t, cand)) { | ||
2397 | new_id = cand_node->type_id; | ||
2398 | break; | ||
2399 | } | ||
2400 | } | ||
2401 | break; | ||
2402 | } | ||
2403 | |||
2404 | default: | ||
2405 | return -EINVAL; | ||
2406 | } | ||
2407 | |||
2408 | d->map[type_id] = new_id; | ||
2409 | if (type_id == new_id && btf_dedup_table_add(d, h, type_id)) | ||
2410 | return -ENOMEM; | ||
2411 | |||
2412 | return new_id; | ||
2413 | } | ||
2414 | |||
2415 | static int btf_dedup_ref_types(struct btf_dedup *d) | ||
2416 | { | ||
2417 | int i, err; | ||
2418 | |||
2419 | for (i = 1; i <= d->btf->nr_types; i++) { | ||
2420 | err = btf_dedup_ref_type(d, i); | ||
2421 | if (err < 0) | ||
2422 | return err; | ||
2423 | } | ||
2424 | btf_dedup_table_free(d); | ||
2425 | return 0; | ||
2426 | } | ||
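
As the comment above relies on, reference-type chains always bottom out and a cycle can only pass through a struct/union. A small C example of such a chain (type names are illustrative):

struct node;
typedef struct node *node_ptr;      /* TYPEDEF -> PTR -> STRUCT 'node' */

struct node {
    node_ptr const next;            /* CONST -> TYPEDEF -> PTR -> 'node' */
    int val;
};

Deduplicating the CONST/TYPEDEF/PTR chain recurses until it hits STRUCT 'node', which already received its canonical ID during the struct/union phase, so the recursion terminates even though the overall type graph is cyclic.
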
2427 | |||
2428 | /* | ||
2429 | * Compact types. | ||
2430 | * | ||
2431 | * After we established for each type its corresponding canonical representative | ||
2432 | * type, we can now eliminate types that are not canonical and leave only | ||
2433 | * canonical ones laid out sequentially in memory by copying them over | ||
2434 | * duplicates. During compaction btf_dedup->hypot_map array is reused to store | ||
2435 | * a map from original type ID to a new compacted type ID, which will be used | ||
2436 | * during next phase to "fix up" type IDs, referenced from struct/union and | ||
2437 | * reference types. | ||
2438 | */ | ||
2439 | static int btf_dedup_compact_types(struct btf_dedup *d) | ||
2440 | { | ||
2441 | struct btf_type **new_types; | ||
2442 | __u32 next_type_id = 1; | ||
2443 | char *types_start, *p; | ||
2444 | int i, len; | ||
2445 | |||
2446 | /* we are going to reuse hypot_map to store compaction remapping */ | ||
2447 | d->hypot_map[0] = 0; | ||
2448 | for (i = 1; i <= d->btf->nr_types; i++) | ||
2449 | d->hypot_map[i] = BTF_UNPROCESSED_ID; | ||
2450 | |||
2451 | types_start = d->btf->nohdr_data + d->btf->hdr->type_off; | ||
2452 | p = types_start; | ||
2453 | |||
2454 | for (i = 1; i <= d->btf->nr_types; i++) { | ||
2455 | if (d->map[i] != i) | ||
2456 | continue; | ||
2457 | |||
2458 | len = btf_type_size(d->btf->types[i]); | ||
2459 | if (len < 0) | ||
2460 | return len; | ||
2461 | |||
2462 | memmove(p, d->btf->types[i], len); | ||
2463 | d->hypot_map[i] = next_type_id; | ||
2464 | d->btf->types[next_type_id] = (struct btf_type *)p; | ||
2465 | p += len; | ||
2466 | next_type_id++; | ||
2467 | } | ||
2468 | |||
2469 | /* shrink struct btf's internal types index and update btf_header */ | ||
2470 | d->btf->nr_types = next_type_id - 1; | ||
2471 | d->btf->types_size = d->btf->nr_types; | ||
2472 | d->btf->hdr->type_len = p - types_start; | ||
2473 | new_types = realloc(d->btf->types, | ||
2474 | (1 + d->btf->nr_types) * sizeof(struct btf_type *)); | ||
2475 | if (!new_types) | ||
2476 | return -ENOMEM; | ||
2477 | d->btf->types = new_types; | ||
2478 | |||
2479 | /* make sure string section follows type information without gaps */ | ||
2480 | d->btf->hdr->str_off = p - (char *)d->btf->nohdr_data; | ||
2481 | memmove(p, d->btf->strings, d->btf->hdr->str_len); | ||
2482 | d->btf->strings = p; | ||
2483 | p += d->btf->hdr->str_len; | ||
2484 | |||
2485 | d->btf->data_size = p - (char *)d->btf->data; | ||
2486 | return 0; | ||
2487 | } | ||
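
Compaction is the classic "keep the canonicals, move them to the front, record old -> new indices" move. A toy version on a plain array (illustrative only; index 0 mirrors BTF's reserved type ID 0):

#include <stdio.h>

int main(void)
{
    /* map[i] == i means element i is its own canonical representative */
    int map[]  = { 0, 1, 1, 3, 1, 3 };
    int vals[] = { 0, 10, 10, 30, 10, 30 };
    int new_idx[6];
    int i, next = 1;

    new_idx[0] = 0;
    for (i = 1; i < 6; i++) {
        if (map[i] != i)
            continue;
        vals[next] = vals[i];      /* "memmove" the kept element forward */
        new_idx[i] = next++;
    }

    /* remap every reference through its canonical representative */
    for (i = 1; i < 6; i++)
        printf("old id %d -> new id %d (val %d)\n",
               i, new_idx[map[i]], vals[new_idx[map[i]]]);
    return 0;
}

new_idx[] corresponds to the reused hypot_map: the remap phase rewrites every reference i as new_idx[map[i]], i.e. resolve to the canonical ID first and then map it to the compacted ID, which is exactly what btf_dedup_remap_type_id() does below.
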
2488 | |||
2489 | /* | ||
2490 | * Figure out final (deduplicated and compacted) type ID for provided original | ||
2491 | * `type_id` by first resolving it into corresponding canonical type ID and | ||
2492 | * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map, | ||
2493 | * which is populated during compaction phase. | ||
2494 | */ | ||
2495 | static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id) | ||
2496 | { | ||
2497 | __u32 resolved_type_id, new_type_id; | ||
2498 | |||
2499 | resolved_type_id = resolve_type_id(d, type_id); | ||
2500 | new_type_id = d->hypot_map[resolved_type_id]; | ||
2501 | if (new_type_id > BTF_MAX_TYPE) | ||
2502 | return -EINVAL; | ||
2503 | return new_type_id; | ||
2504 | } | ||
2505 | |||
2506 | /* | ||
2507 | * Remap referenced type IDs into deduped type IDs. | ||
2508 | * | ||
2509 | * After BTF types are deduplicated and compacted, their final type IDs may | ||
2510 | * differ from original ones. The map from original to a corresponding | ||
2511 | * deduped type ID is stored in btf_dedup->hypot_map and is populated during | ||
2512 | * compaction phase. During remapping phase we are rewriting all type IDs | ||
2513 | * referenced from any BTF type (e.g., struct fields, func proto args, etc) to | ||
2514 | * their final deduped type IDs. | ||
2515 | */ | ||
2516 | static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id) | ||
2517 | { | ||
2518 | struct btf_type *t = d->btf->types[type_id]; | ||
2519 | int i, r; | ||
2520 | |||
2521 | switch (BTF_INFO_KIND(t->info)) { | ||
2522 | case BTF_KIND_INT: | ||
2523 | case BTF_KIND_ENUM: | ||
2524 | break; | ||
2525 | |||
2526 | case BTF_KIND_FWD: | ||
2527 | case BTF_KIND_CONST: | ||
2528 | case BTF_KIND_VOLATILE: | ||
2529 | case BTF_KIND_RESTRICT: | ||
2530 | case BTF_KIND_PTR: | ||
2531 | case BTF_KIND_TYPEDEF: | ||
2532 | case BTF_KIND_FUNC: | ||
2533 | r = btf_dedup_remap_type_id(d, t->type); | ||
2534 | if (r < 0) | ||
2535 | return r; | ||
2536 | t->type = r; | ||
2537 | break; | ||
2538 | |||
2539 | case BTF_KIND_ARRAY: { | ||
2540 | struct btf_array *arr_info = (struct btf_array *)(t + 1); | ||
2541 | |||
2542 | r = btf_dedup_remap_type_id(d, arr_info->type); | ||
2543 | if (r < 0) | ||
2544 | return r; | ||
2545 | arr_info->type = r; | ||
2546 | r = btf_dedup_remap_type_id(d, arr_info->index_type); | ||
2547 | if (r < 0) | ||
2548 | return r; | ||
2549 | arr_info->index_type = r; | ||
2550 | break; | ||
2551 | } | ||
2552 | |||
2553 | case BTF_KIND_STRUCT: | ||
2554 | case BTF_KIND_UNION: { | ||
2555 | struct btf_member *member = (struct btf_member *)(t + 1); | ||
2556 | __u16 vlen = BTF_INFO_VLEN(t->info); | ||
2557 | |||
2558 | for (i = 0; i < vlen; i++) { | ||
2559 | r = btf_dedup_remap_type_id(d, member->type); | ||
2560 | if (r < 0) | ||
2561 | return r; | ||
2562 | member->type = r; | ||
2563 | member++; | ||
2564 | } | ||
2565 | break; | ||
2566 | } | ||
2567 | |||
2568 | case BTF_KIND_FUNC_PROTO: { | ||
2569 | struct btf_param *param = (struct btf_param *)(t + 1); | ||
2570 | __u16 vlen = BTF_INFO_VLEN(t->info); | ||
2571 | |||
2572 | r = btf_dedup_remap_type_id(d, t->type); | ||
2573 | if (r < 0) | ||
2574 | return r; | ||
2575 | t->type = r; | ||
2576 | |||
2577 | for (i = 0; i < vlen; i++) { | ||
2578 | r = btf_dedup_remap_type_id(d, param->type); | ||
2579 | if (r < 0) | ||
2580 | return r; | ||
2581 | param->type = r; | ||
2582 | param++; | ||
2583 | } | ||
2584 | break; | ||
2585 | } | ||
2586 | |||
2587 | default: | ||
2588 | return -EINVAL; | ||
2589 | } | ||
2590 | |||
2591 | return 0; | ||
2592 | } | ||
2593 | |||
2594 | static int btf_dedup_remap_types(struct btf_dedup *d) | ||
2595 | { | ||
2596 | int i, r; | ||
2597 | |||
2598 | for (i = 1; i <= d->btf->nr_types; i++) { | ||
2599 | r = btf_dedup_remap_type(d, i); | ||
2600 | if (r < 0) | ||
2601 | return r; | ||
2602 | } | ||
2603 | return 0; | ||
2604 | } | ||
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index b0610dcdae6b..b393da90cc85 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h | |||
@@ -55,33 +55,44 @@ struct btf_ext_header { | |||
55 | __u32 line_info_len; | 55 | __u32 line_info_len; |
56 | }; | 56 | }; |
57 | 57 | ||
58 | typedef int (*btf_print_fn_t)(const char *, ...) | ||
59 | __attribute__((format(printf, 1, 2))); | ||
60 | |||
61 | LIBBPF_API void btf__free(struct btf *btf); | 58 | LIBBPF_API void btf__free(struct btf *btf); |
62 | LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log); | 59 | LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size); |
63 | LIBBPF_API __s32 btf__find_by_name(const struct btf *btf, | 60 | LIBBPF_API __s32 btf__find_by_name(const struct btf *btf, |
64 | const char *type_name); | 61 | const char *type_name); |
62 | LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf); | ||
65 | LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf, | 63 | LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf, |
66 | __u32 id); | 64 | __u32 id); |
67 | LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id); | 65 | LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id); |
68 | LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id); | 66 | LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id); |
69 | LIBBPF_API int btf__fd(const struct btf *btf); | 67 | LIBBPF_API int btf__fd(const struct btf *btf); |
68 | LIBBPF_API void btf__get_strings(const struct btf *btf, const char **strings, | ||
69 | __u32 *str_len); | ||
70 | LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset); | 70 | LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset); |
71 | LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf); | 71 | LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf); |
72 | LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name, | ||
73 | __u32 expected_key_size, | ||
74 | __u32 expected_value_size, | ||
75 | __u32 *key_type_id, __u32 *value_type_id); | ||
76 | |||
77 | LIBBPF_API struct btf_ext *btf_ext__new(__u8 *data, __u32 size); | ||
78 | LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext); | ||
79 | LIBBPF_API int btf_ext__reloc_func_info(const struct btf *btf, | ||
80 | const struct btf_ext *btf_ext, | ||
81 | const char *sec_name, __u32 insns_cnt, | ||
82 | void **func_info, __u32 *cnt); | ||
83 | LIBBPF_API int btf_ext__reloc_line_info(const struct btf *btf, | ||
84 | const struct btf_ext *btf_ext, | ||
85 | const char *sec_name, __u32 insns_cnt, | ||
86 | void **line_info, __u32 *cnt); | ||
87 | LIBBPF_API __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext); | ||
88 | LIBBPF_API __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext); | ||
89 | |||
90 | struct btf_dedup_opts { | ||
91 | bool dont_resolve_fwds; | ||
92 | }; | ||
72 | 93 | ||
73 | struct btf_ext *btf_ext__new(__u8 *data, __u32 size, btf_print_fn_t err_log); | 94 | LIBBPF_API int btf__dedup(struct btf *btf, struct btf_ext *btf_ext, |
74 | void btf_ext__free(struct btf_ext *btf_ext); | 95 | const struct btf_dedup_opts *opts); |
75 | int btf_ext__reloc_func_info(const struct btf *btf, | ||
76 | const struct btf_ext *btf_ext, | ||
77 | const char *sec_name, __u32 insns_cnt, | ||
78 | void **func_info, __u32 *func_info_len); | ||
79 | int btf_ext__reloc_line_info(const struct btf *btf, | ||
80 | const struct btf_ext *btf_ext, | ||
81 | const char *sec_name, __u32 insns_cnt, | ||
82 | void **line_info, __u32 *cnt); | ||
83 | __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext); | ||
84 | __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext); | ||
85 | 96 | ||
86 | #ifdef __cplusplus | 97 | #ifdef __cplusplus |
87 | } /* extern "C" */ | 98 | } /* extern "C" */ |
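
A sketch of how a caller might drive the dedup API declared above. Error handling is condensed; libbpf_get_error() from libbpf.h is assumed to be available for extracting errors from the returned pointers, and the raw .BTF/.BTF.ext section buffers are assumed to have been loaded by the caller:

#include "btf.h"
#include "libbpf.h"     /* libbpf_get_error(), assumed available */

static int dedup_btf(__u8 *raw_btf, __u32 btf_size,
                     __u8 *raw_btf_ext, __u32 btf_ext_size)
{
    struct btf_dedup_opts opts = { .dont_resolve_fwds = false };
    struct btf_ext *btf_ext = NULL;
    struct btf *btf;
    int err;

    btf = btf__new(raw_btf, btf_size);
    err = libbpf_get_error(btf);
    if (err)
        return err;

    if (raw_btf_ext) {
        btf_ext = btf_ext__new(raw_btf_ext, btf_ext_size);
        err = libbpf_get_error(btf_ext);
        if (err)
            goto out_btf;
    }

    /* dedup types in place; .BTF.ext string offsets are kept in sync */
    err = btf__dedup(btf, btf_ext, &opts);

    btf_ext__free(btf_ext);
out_btf:
    btf__free(btf);
    return err;
}
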
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 03bc01ca2577..47969aa0faf8 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include "bpf.h" | 42 | #include "bpf.h" |
43 | #include "btf.h" | 43 | #include "btf.h" |
44 | #include "str_error.h" | 44 | #include "str_error.h" |
45 | #include "libbpf_util.h" | ||
45 | 46 | ||
46 | #ifndef EM_BPF | 47 | #ifndef EM_BPF |
47 | #define EM_BPF 247 | 48 | #define EM_BPF 247 |
@@ -53,39 +54,33 @@ | |||
53 | 54 | ||
54 | #define __printf(a, b) __attribute__((format(printf, a, b))) | 55 | #define __printf(a, b) __attribute__((format(printf, a, b))) |
55 | 56 | ||
56 | __printf(1, 2) | 57 | static int __base_pr(enum libbpf_print_level level, const char *format, |
57 | static int __base_pr(const char *format, ...) | 58 | va_list args) |
58 | { | 59 | { |
59 | va_list args; | 60 | if (level == LIBBPF_DEBUG) |
60 | int err; | 61 | return 0; |
61 | 62 | ||
62 | va_start(args, format); | 63 | return vfprintf(stderr, format, args); |
63 | err = vfprintf(stderr, format, args); | ||
64 | va_end(args); | ||
65 | return err; | ||
66 | } | 64 | } |
67 | 65 | ||
68 | static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr; | 66 | static libbpf_print_fn_t __libbpf_pr = __base_pr; |
69 | static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr; | ||
70 | static __printf(1, 2) libbpf_print_fn_t __pr_debug; | ||
71 | |||
72 | #define __pr(func, fmt, ...) \ | ||
73 | do { \ | ||
74 | if ((func)) \ | ||
75 | (func)("libbpf: " fmt, ##__VA_ARGS__); \ | ||
76 | } while (0) | ||
77 | 67 | ||
78 | #define pr_warning(fmt, ...) __pr(__pr_warning, fmt, ##__VA_ARGS__) | 68 | void libbpf_set_print(libbpf_print_fn_t fn) |
79 | #define pr_info(fmt, ...) __pr(__pr_info, fmt, ##__VA_ARGS__) | 69 | { |
80 | #define pr_debug(fmt, ...) __pr(__pr_debug, fmt, ##__VA_ARGS__) | 70 | __libbpf_pr = fn; |
71 | } | ||
81 | 72 | ||
82 | void libbpf_set_print(libbpf_print_fn_t warn, | 73 | __printf(2, 3) |
83 | libbpf_print_fn_t info, | 74 | void libbpf_print(enum libbpf_print_level level, const char *format, ...) |
84 | libbpf_print_fn_t debug) | ||
85 | { | 75 | { |
86 | __pr_warning = warn; | 76 | va_list args; |
87 | __pr_info = info; | 77 | |
88 | __pr_debug = debug; | 78 | if (!__libbpf_pr) |
79 | return; | ||
80 | |||
81 | va_start(args, format); | ||
82 | __libbpf_pr(level, format, args); | ||
83 | va_end(args); | ||
89 | } | 84 | } |
90 | 85 | ||
91 | #define STRERR_BUFSIZE 128 | 86 | #define STRERR_BUFSIZE 128 |
@@ -839,8 +834,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags) | |||
839 | else if (strcmp(name, "maps") == 0) | 834 | else if (strcmp(name, "maps") == 0) |
840 | obj->efile.maps_shndx = idx; | 835 | obj->efile.maps_shndx = idx; |
841 | else if (strcmp(name, BTF_ELF_SEC) == 0) { | 836 | else if (strcmp(name, BTF_ELF_SEC) == 0) { |
842 | obj->btf = btf__new(data->d_buf, data->d_size, | 837 | obj->btf = btf__new(data->d_buf, data->d_size); |
843 | __pr_debug); | ||
844 | if (IS_ERR(obj->btf)) { | 838 | if (IS_ERR(obj->btf)) { |
845 | pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n", | 839 | pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n", |
846 | BTF_ELF_SEC, PTR_ERR(obj->btf)); | 840 | BTF_ELF_SEC, PTR_ERR(obj->btf)); |
@@ -915,8 +909,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags) | |||
915 | BTF_EXT_ELF_SEC, BTF_ELF_SEC); | 909 | BTF_EXT_ELF_SEC, BTF_ELF_SEC); |
916 | } else { | 910 | } else { |
917 | obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, | 911 | obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, |
918 | btf_ext_data->d_size, | 912 | btf_ext_data->d_size); |
919 | __pr_debug); | ||
920 | if (IS_ERR(obj->btf_ext)) { | 913 | if (IS_ERR(obj->btf_ext)) { |
921 | pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n", | 914 | pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n", |
922 | BTF_EXT_ELF_SEC, | 915 | BTF_EXT_ELF_SEC, |
@@ -1057,72 +1050,18 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr, | |||
1057 | 1050 | ||
1058 | static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf) | 1051 | static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf) |
1059 | { | 1052 | { |
1060 | const struct btf_type *container_type; | ||
1061 | const struct btf_member *key, *value; | ||
1062 | struct bpf_map_def *def = &map->def; | 1053 | struct bpf_map_def *def = &map->def; |
1063 | const size_t max_name = 256; | 1054 | __u32 key_type_id, value_type_id; |
1064 | char container_name[max_name]; | 1055 | int ret; |
1065 | __s64 key_size, value_size; | ||
1066 | __s32 container_id; | ||
1067 | |||
1068 | if (snprintf(container_name, max_name, "____btf_map_%s", map->name) == | ||
1069 | max_name) { | ||
1070 | pr_warning("map:%s length of '____btf_map_%s' is too long\n", | ||
1071 | map->name, map->name); | ||
1072 | return -EINVAL; | ||
1073 | } | ||
1074 | |||
1075 | container_id = btf__find_by_name(btf, container_name); | ||
1076 | if (container_id < 0) { | ||
1077 | pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n", | ||
1078 | map->name, container_name); | ||
1079 | return container_id; | ||
1080 | } | ||
1081 | |||
1082 | container_type = btf__type_by_id(btf, container_id); | ||
1083 | if (!container_type) { | ||
1084 | pr_warning("map:%s cannot find BTF type for container_id:%u\n", | ||
1085 | map->name, container_id); | ||
1086 | return -EINVAL; | ||
1087 | } | ||
1088 | |||
1089 | if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT || | ||
1090 | BTF_INFO_VLEN(container_type->info) < 2) { | ||
1091 | pr_warning("map:%s container_name:%s is an invalid container struct\n", | ||
1092 | map->name, container_name); | ||
1093 | return -EINVAL; | ||
1094 | } | ||
1095 | |||
1096 | key = (struct btf_member *)(container_type + 1); | ||
1097 | value = key + 1; | ||
1098 | |||
1099 | key_size = btf__resolve_size(btf, key->type); | ||
1100 | if (key_size < 0) { | ||
1101 | pr_warning("map:%s invalid BTF key_type_size\n", | ||
1102 | map->name); | ||
1103 | return key_size; | ||
1104 | } | ||
1105 | |||
1106 | if (def->key_size != key_size) { | ||
1107 | pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n", | ||
1108 | map->name, (__u32)key_size, def->key_size); | ||
1109 | return -EINVAL; | ||
1110 | } | ||
1111 | |||
1112 | value_size = btf__resolve_size(btf, value->type); | ||
1113 | if (value_size < 0) { | ||
1114 | pr_warning("map:%s invalid BTF value_type_size\n", map->name); | ||
1115 | return value_size; | ||
1116 | } | ||
1117 | 1056 | ||
1118 | if (def->value_size != value_size) { | 1057 | ret = btf__get_map_kv_tids(btf, map->name, def->key_size, |
1119 | pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n", | 1058 | def->value_size, &key_type_id, |
1120 | map->name, (__u32)value_size, def->value_size); | 1059 | &value_type_id); |
1121 | return -EINVAL; | 1060 | if (ret) |
1122 | } | 1061 | return ret; |
1123 | 1062 | ||
1124 | map->btf_key_type_id = key->type; | 1063 | map->btf_key_type_id = key_type_id; |
1125 | map->btf_value_type_id = value->type; | 1064 | map->btf_value_type_id = value_type_id; |
1126 | 1065 | ||
1127 | return 0; | 1066 | return 0; |
1128 | } | 1067 | } |
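The hunk above replaces libbpf's open-coded lookup with btf__get_map_kv_tids(), but the convention the lookup relies on is unchanged: the removed code searched BTF for a struct named "____btf_map_<map name>" whose first two members carry the key and value types (the BPF_ANNOTATE_KV_PAIR macro in the selftests' bpf_helpers.h emits such a definition). A hedged sketch, with made-up names and a map assumed to use 4-byte keys and 8-byte values:

	/* Container struct the BTF-based key/value lookup expects for a map
	 * called "my_map"; only its presence in the object's BTF matters,
	 * and the member sizes must match the map definition. */
	struct ____btf_map_my_map {
		unsigned int key;		/* matches key_size == 4 */
		unsigned long long value;	/* matches value_size == 8 */
	};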
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index 43c77e98df6f..69a7c25eaccc 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h | |||
@@ -47,17 +47,16 @@ enum libbpf_errno { | |||
47 | 47 | ||
48 | LIBBPF_API int libbpf_strerror(int err, char *buf, size_t size); | 48 | LIBBPF_API int libbpf_strerror(int err, char *buf, size_t size); |
49 | 49 | ||
50 | /* | 50 | enum libbpf_print_level { |
51 | * __printf is defined in include/linux/compiler-gcc.h. However, | 51 | LIBBPF_WARN, |
52 | * it would be better if libbpf.h didn't depend on Linux header files. | 52 | LIBBPF_INFO, |
53 | * So instead of __printf, here we use gcc attribute directly. | 53 | LIBBPF_DEBUG, |
54 | */ | 54 | }; |
55 | typedef int (*libbpf_print_fn_t)(const char *, ...) | 55 | |
56 | __attribute__((format(printf, 1, 2))); | 56 | typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level, |
57 | const char *, va_list ap); | ||
57 | 58 | ||
58 | LIBBPF_API void libbpf_set_print(libbpf_print_fn_t warn, | 59 | LIBBPF_API void libbpf_set_print(libbpf_print_fn_t fn); |
59 | libbpf_print_fn_t info, | ||
60 | libbpf_print_fn_t debug); | ||
61 | 60 | ||
62 | /* Hide internal to user */ | 61 | /* Hide internal to user */ |
63 | struct bpf_object; | 62 | struct bpf_object; |
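A minimal sketch of a caller adapting to the single-callback interface declared above; the filtering policy and function names are illustrative, while the libbpf_print_fn_t typedef and the libbpf_set_print() prototype are taken from this hunk.

	#include <stdarg.h>
	#include <stdio.h>
	#include "libbpf.h"

	/* Illustrative callback: suppress debug output, print the rest. */
	static int my_libbpf_print(enum libbpf_print_level level,
				   const char *format, va_list args)
	{
		if (level == LIBBPF_DEBUG)
			return 0;
		return vfprintf(stderr, format, args);
	}

	static void setup_libbpf_logging(void)
	{
		/* One callback now covers warn, info and debug levels;
		 * passing NULL silences libbpf entirely. */
		libbpf_set_print(my_libbpf_print);
	}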
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index 62c680fb13d1..89c1149e32ee 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map | |||
@@ -133,4 +133,14 @@ LIBBPF_0.0.2 { | |||
133 | bpf_map_lookup_elem_flags; | 133 | bpf_map_lookup_elem_flags; |
134 | bpf_object__find_map_fd_by_name; | 134 | bpf_object__find_map_fd_by_name; |
135 | bpf_get_link_xdp_id; | 135 | bpf_get_link_xdp_id; |
136 | btf__dedup; | ||
137 | btf__get_map_kv_tids; | ||
138 | btf__get_nr_types; | ||
139 | btf__get_strings; | ||
140 | btf_ext__free; | ||
141 | btf_ext__func_info_rec_size; | ||
142 | btf_ext__line_info_rec_size; | ||
143 | btf_ext__new; | ||
144 | btf_ext__reloc_func_info; | ||
145 | btf_ext__reloc_line_info; | ||
136 | } LIBBPF_0.0.1; | 146 | } LIBBPF_0.0.1; |
diff --git a/tools/lib/bpf/libbpf_util.h b/tools/lib/bpf/libbpf_util.h new file mode 100644 index 000000000000..81ecda0cb9c9 --- /dev/null +++ b/tools/lib/bpf/libbpf_util.h | |||
@@ -0,0 +1,30 @@ | |||
1 | /* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */ | ||
2 | /* Copyright (c) 2019 Facebook */ | ||
3 | |||
4 | #ifndef __LIBBPF_LIBBPF_UTIL_H | ||
5 | #define __LIBBPF_LIBBPF_UTIL_H | ||
6 | |||
7 | #include <stdbool.h> | ||
8 | |||
9 | #ifdef __cplusplus | ||
10 | extern "C" { | ||
11 | #endif | ||
12 | |||
13 | extern void libbpf_print(enum libbpf_print_level level, | ||
14 | const char *format, ...) | ||
15 | __attribute__((format(printf, 2, 3))); | ||
16 | |||
17 | #define __pr(level, fmt, ...) \ | ||
18 | do { \ | ||
19 | libbpf_print(level, "libbpf: " fmt, ##__VA_ARGS__); \ | ||
20 | } while (0) | ||
21 | |||
22 | #define pr_warning(fmt, ...) __pr(LIBBPF_WARN, fmt, ##__VA_ARGS__) | ||
23 | #define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__) | ||
24 | #define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__) | ||
25 | |||
26 | #ifdef __cplusplus | ||
27 | } /* extern "C" */ | ||
28 | #endif | ||
29 | |||
30 | #endif | ||
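For clarity, a hedged illustration of what the helper macros in this new header expand to; the message text and the locals used in it are invented.

	/* Inside libbpf, given hypothetical locals 'idx', 'name' and 'size',
	 * a call such as: */
		pr_debug("section(%d) %s, size %ld\n", idx, name, size);
	/* expands, via the __pr() macro above, to: */
		libbpf_print(LIBBPF_DEBUG, "libbpf: " "section(%d) %s, size %ld\n",
			     idx, name, size);
	/* which forwards to the callback installed with libbpf_set_print(),
	 * or is dropped silently if no callback is set. */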
diff --git a/tools/lib/bpf/test_libbpf.cpp b/tools/lib/bpf/test_libbpf.cpp index abf3fc25c9fa..fc134873bb6d 100644 --- a/tools/lib/bpf/test_libbpf.cpp +++ b/tools/lib/bpf/test_libbpf.cpp | |||
@@ -8,11 +8,11 @@ | |||
8 | int main(int argc, char *argv[]) | 8 | int main(int argc, char *argv[]) |
9 | { | 9 | { |
10 | /* libbpf.h */ | 10 | /* libbpf.h */ |
11 | libbpf_set_print(NULL, NULL, NULL); | 11 | libbpf_set_print(NULL); |
12 | 12 | ||
13 | /* bpf.h */ | 13 | /* bpf.h */ |
14 | bpf_prog_get_fd_by_id(0); | 14 | bpf_prog_get_fd_by_id(0); |
15 | 15 | ||
16 | /* btf.h */ | 16 | /* btf.h */ |
17 | btf__new(NULL, 0, NULL); | 17 | btf__new(NULL, 0); |
18 | } | 18 | } |
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c index 2f3eb6d293ee..037d8ff6a634 100644 --- a/tools/perf/util/bpf-loader.c +++ b/tools/perf/util/bpf-loader.c | |||
@@ -24,22 +24,12 @@ | |||
24 | #include "llvm-utils.h" | 24 | #include "llvm-utils.h" |
25 | #include "c++/clang-c.h" | 25 | #include "c++/clang-c.h" |
26 | 26 | ||
27 | #define DEFINE_PRINT_FN(name, level) \ | 27 | static int libbpf_perf_print(enum libbpf_print_level level __attribute__((unused)), |
28 | static int libbpf_##name(const char *fmt, ...) \ | 28 | const char *fmt, va_list args) |
29 | { \ | 29 | { |
30 | va_list args; \ | 30 | return veprintf(1, verbose, pr_fmt(fmt), args); |
31 | int ret; \ | ||
32 | \ | ||
33 | va_start(args, fmt); \ | ||
34 | ret = veprintf(level, verbose, pr_fmt(fmt), args);\ | ||
35 | va_end(args); \ | ||
36 | return ret; \ | ||
37 | } | 31 | } |
38 | 32 | ||
39 | DEFINE_PRINT_FN(warning, 1) | ||
40 | DEFINE_PRINT_FN(info, 1) | ||
41 | DEFINE_PRINT_FN(debug, 1) | ||
42 | |||
43 | struct bpf_prog_priv { | 33 | struct bpf_prog_priv { |
44 | bool is_tp; | 34 | bool is_tp; |
45 | char *sys_name; | 35 | char *sys_name; |
@@ -59,9 +49,7 @@ bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name) | |||
59 | struct bpf_object *obj; | 49 | struct bpf_object *obj; |
60 | 50 | ||
61 | if (!libbpf_initialized) { | 51 | if (!libbpf_initialized) { |
62 | libbpf_set_print(libbpf_warning, | 52 | libbpf_set_print(libbpf_perf_print); |
63 | libbpf_info, | ||
64 | libbpf_debug); | ||
65 | libbpf_initialized = true; | 53 | libbpf_initialized = true; |
66 | } | 54 | } |
67 | 55 | ||
@@ -79,9 +67,7 @@ struct bpf_object *bpf__prepare_load(const char *filename, bool source) | |||
79 | struct bpf_object *obj; | 67 | struct bpf_object *obj; |
80 | 68 | ||
81 | if (!libbpf_initialized) { | 69 | if (!libbpf_initialized) { |
82 | libbpf_set_print(libbpf_warning, | 70 | libbpf_set_print(libbpf_perf_print); |
83 | libbpf_info, | ||
84 | libbpf_debug); | ||
85 | libbpf_initialized = true; | 71 | libbpf_initialized = true; |
86 | } | 72 | } |
87 | 73 | ||
diff --git a/tools/testing/selftests/bpf/tcp_client.py b/tools/testing/selftests/bpf/tcp_client.py index 7f8200a8702b..a53ed58528d6 100755 --- a/tools/testing/selftests/bpf/tcp_client.py +++ b/tools/testing/selftests/bpf/tcp_client.py | |||
@@ -30,12 +30,11 @@ def send(sock, s): | |||
30 | 30 | ||
31 | 31 | ||
32 | serverPort = int(sys.argv[1]) | 32 | serverPort = int(sys.argv[1]) |
33 | HostName = socket.gethostname() | ||
34 | 33 | ||
35 | # create active socket | 34 | # create active socket |
36 | sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) | 35 | sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) |
37 | try: | 36 | try: |
38 | sock.connect((HostName, serverPort)) | 37 | sock.connect(('localhost', serverPort)) |
39 | except socket.error as e: | 38 | except socket.error as e: |
40 | sys.exit(1) | 39 | sys.exit(1) |
41 | 40 | ||
diff --git a/tools/testing/selftests/bpf/tcp_server.py b/tools/testing/selftests/bpf/tcp_server.py index b39903fca4c8..0ca60d193bed 100755 --- a/tools/testing/selftests/bpf/tcp_server.py +++ b/tools/testing/selftests/bpf/tcp_server.py | |||
@@ -35,13 +35,10 @@ MAX_PORTS = 2 | |||
35 | serverPort = SERVER_PORT | 35 | serverPort = SERVER_PORT |
36 | serverSocket = None | 36 | serverSocket = None |
37 | 37 | ||
38 | HostName = socket.gethostname() | ||
39 | |||
40 | # create passive socket | 38 | # create passive socket |
41 | serverSocket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) | 39 | serverSocket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) |
42 | host = socket.gethostname() | ||
43 | 40 | ||
44 | try: serverSocket.bind((host, 0)) | 41 | try: serverSocket.bind(('localhost', 0)) |
45 | except socket.error as msg: | 42 | except socket.error as msg: |
46 | print('bind fails: ' + str(msg)) | 43 | print('bind fails: ' + str(msg)) |
47 | 44 | ||
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c index 179f1d8ec5bf..447acc34db94 100644 --- a/tools/testing/selftests/bpf/test_btf.c +++ b/tools/testing/selftests/bpf/test_btf.c | |||
@@ -52,18 +52,10 @@ static int count_result(int err) | |||
52 | return err; | 52 | return err; |
53 | } | 53 | } |
54 | 54 | ||
55 | #define __printf(a, b) __attribute__((format(printf, a, b))) | 55 | static int __base_pr(enum libbpf_print_level level __attribute__((unused)), |
56 | 56 | const char *format, va_list args) | |
57 | __printf(1, 2) | ||
58 | static int __base_pr(const char *format, ...) | ||
59 | { | 57 | { |
60 | va_list args; | 58 | return vfprintf(stderr, format, args); |
61 | int err; | ||
62 | |||
63 | va_start(args, format); | ||
64 | err = vfprintf(stderr, format, args); | ||
65 | va_end(args); | ||
66 | return err; | ||
67 | } | 59 | } |
68 | 60 | ||
69 | #define BTF_INFO_ENC(kind, kind_flag, vlen) \ | 61 | #define BTF_INFO_ENC(kind, kind_flag, vlen) \ |
@@ -78,12 +70,21 @@ static int __base_pr(const char *format, ...) | |||
78 | BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \ | 70 | BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \ |
79 | BTF_INT_ENC(encoding, bits_offset, bits) | 71 | BTF_INT_ENC(encoding, bits_offset, bits) |
80 | 72 | ||
73 | #define BTF_FWD_ENC(name, kind_flag) \ | ||
74 | BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FWD, kind_flag, 0), 0) | ||
75 | |||
81 | #define BTF_ARRAY_ENC(type, index_type, nr_elems) \ | 76 | #define BTF_ARRAY_ENC(type, index_type, nr_elems) \ |
82 | (type), (index_type), (nr_elems) | 77 | (type), (index_type), (nr_elems) |
83 | #define BTF_TYPE_ARRAY_ENC(type, index_type, nr_elems) \ | 78 | #define BTF_TYPE_ARRAY_ENC(type, index_type, nr_elems) \ |
84 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ARRAY, 0, 0), 0), \ | 79 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ARRAY, 0, 0), 0), \ |
85 | BTF_ARRAY_ENC(type, index_type, nr_elems) | 80 | BTF_ARRAY_ENC(type, index_type, nr_elems) |
86 | 81 | ||
82 | #define BTF_STRUCT_ENC(name, nr_elems, sz) \ | ||
83 | BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, nr_elems), sz) | ||
84 | |||
85 | #define BTF_UNION_ENC(name, nr_elems, sz) \ | ||
86 | BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_UNION, 0, nr_elems), sz) | ||
87 | |||
87 | #define BTF_MEMBER_ENC(name, type, bits_offset) \ | 88 | #define BTF_MEMBER_ENC(name, type, bits_offset) \ |
88 | (name), (type), (bits_offset) | 89 | (name), (type), (bits_offset) |
89 | #define BTF_ENUM_ENC(name, val) (name), (val) | 90 | #define BTF_ENUM_ENC(name, val) (name), (val) |
@@ -99,6 +100,12 @@ static int __base_pr(const char *format, ...) | |||
99 | #define BTF_CONST_ENC(type) \ | 100 | #define BTF_CONST_ENC(type) \ |
100 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), type) | 101 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), type) |
101 | 102 | ||
103 | #define BTF_VOLATILE_ENC(type) \ | ||
104 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), type) | ||
105 | |||
106 | #define BTF_RESTRICT_ENC(type) \ | ||
107 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_RESTRICT, 0, 0), type) | ||
108 | |||
102 | #define BTF_FUNC_PROTO_ENC(ret_type, nargs) \ | 109 | #define BTF_FUNC_PROTO_ENC(ret_type, nargs) \ |
103 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, nargs), ret_type) | 110 | BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, nargs), ret_type) |
104 | 111 | ||
@@ -111,6 +118,10 @@ static int __base_pr(const char *format, ...) | |||
111 | #define BTF_END_RAW 0xdeadbeef | 118 | #define BTF_END_RAW 0xdeadbeef |
112 | #define NAME_TBD 0xdeadb33f | 119 | #define NAME_TBD 0xdeadb33f |
113 | 120 | ||
121 | #define NAME_NTH(N) (0xffff0000 | N) | ||
122 | #define IS_NAME_NTH(X) ((X & 0xffff0000) == 0xffff0000) | ||
123 | #define GET_NAME_NTH_IDX(X) (X & 0x0000ffff) | ||
124 | |||
114 | #define MAX_NR_RAW_U32 1024 | 125 | #define MAX_NR_RAW_U32 1024 |
115 | #define BTF_LOG_BUF_SIZE 65535 | 126 | #define BTF_LOG_BUF_SIZE 65535 |
116 | 127 | ||
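A short, hypothetical illustration of the NAME_NTH() encoding introduced above: NAME_TBD still means "use the next unused string in order", whereas NAME_NTH(N) selects the N-th string of the section explicitly, so the same string can be referenced from several types.

	/* With a (made-up) string section such as: */
	BTF_STR_SEC("\0unused\0int\0long"),

	/* NAME_NTH(2) resolves to the offset of "int" and NAME_NTH(3) to the
	 * offset of "long"; IS_NAME_NTH() detects the 0xffff0000 tag and
	 * GET_NAME_NTH_IDX() recovers the 1-based index that btf_raw_create()
	 * looks up in the string index it now builds. */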
@@ -119,12 +130,14 @@ static struct args { | |||
119 | unsigned int file_test_num; | 130 | unsigned int file_test_num; |
120 | unsigned int get_info_test_num; | 131 | unsigned int get_info_test_num; |
121 | unsigned int info_raw_test_num; | 132 | unsigned int info_raw_test_num; |
133 | unsigned int dedup_test_num; | ||
122 | bool raw_test; | 134 | bool raw_test; |
123 | bool file_test; | 135 | bool file_test; |
124 | bool get_info_test; | 136 | bool get_info_test; |
125 | bool pprint_test; | 137 | bool pprint_test; |
126 | bool always_log; | 138 | bool always_log; |
127 | bool info_raw_test; | 139 | bool info_raw_test; |
140 | bool dedup_test; | ||
128 | } args; | 141 | } args; |
129 | 142 | ||
130 | static char btf_log_buf[BTF_LOG_BUF_SIZE]; | 143 | static char btf_log_buf[BTF_LOG_BUF_SIZE]; |
@@ -1965,7 +1978,7 @@ static struct btf_raw_test raw_tests[] = { | |||
1965 | /* void (*)(int a, unsigned int <bad_name_off>) */ | 1978 | /* void (*)(int a, unsigned int <bad_name_off>) */ |
1966 | BTF_FUNC_PROTO_ENC(0, 2), /* [3] */ | 1979 | BTF_FUNC_PROTO_ENC(0, 2), /* [3] */ |
1967 | BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), | 1980 | BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), |
1968 | BTF_FUNC_PROTO_ARG_ENC(0xffffffff, 2), | 1981 | BTF_FUNC_PROTO_ARG_ENC(0x0fffffff, 2), |
1969 | BTF_END_RAW, | 1982 | BTF_END_RAW, |
1970 | }, | 1983 | }, |
1971 | .str_sec = "\0a", | 1984 | .str_sec = "\0a", |
@@ -2835,11 +2848,13 @@ static void *btf_raw_create(const struct btf_header *hdr, | |||
2835 | const char **ret_next_str) | 2848 | const char **ret_next_str) |
2836 | { | 2849 | { |
2837 | const char *next_str = str, *end_str = str + str_sec_size; | 2850 | const char *next_str = str, *end_str = str + str_sec_size; |
2851 | const char **strs_idx = NULL, **tmp_strs_idx; | ||
2852 | int strs_cap = 0, strs_cnt = 0, next_str_idx = 0; | ||
2838 | unsigned int size_needed, offset; | 2853 | unsigned int size_needed, offset; |
2839 | struct btf_header *ret_hdr; | 2854 | struct btf_header *ret_hdr; |
2840 | int i, type_sec_size; | 2855 | int i, type_sec_size, err = 0; |
2841 | uint32_t *ret_types; | 2856 | uint32_t *ret_types; |
2842 | void *raw_btf; | 2857 | void *raw_btf = NULL; |
2843 | 2858 | ||
2844 | type_sec_size = get_raw_sec_size(raw_types); | 2859 | type_sec_size = get_raw_sec_size(raw_types); |
2845 | if (CHECK(type_sec_size < 0, "Cannot get nr_raw_types")) | 2860 | if (CHECK(type_sec_size < 0, "Cannot get nr_raw_types")) |
@@ -2854,17 +2869,44 @@ static void *btf_raw_create(const struct btf_header *hdr, | |||
2854 | memcpy(raw_btf, hdr, sizeof(*hdr)); | 2869 | memcpy(raw_btf, hdr, sizeof(*hdr)); |
2855 | offset = sizeof(*hdr); | 2870 | offset = sizeof(*hdr); |
2856 | 2871 | ||
2872 | /* Index strings */ | ||
2873 | while ((next_str = get_next_str(next_str, end_str))) { | ||
2874 | if (strs_cnt == strs_cap) { | ||
2875 | strs_cap += max(16, strs_cap / 2); | ||
2876 | tmp_strs_idx = realloc(strs_idx, | ||
2877 | sizeof(*strs_idx) * strs_cap); | ||
2878 | if (CHECK(!tmp_strs_idx, | ||
2879 | "Cannot allocate memory for strs_idx")) { | ||
2880 | err = -1; | ||
2881 | goto done; | ||
2882 | } | ||
2883 | strs_idx = tmp_strs_idx; | ||
2884 | } | ||
2885 | strs_idx[strs_cnt++] = next_str; | ||
2886 | next_str += strlen(next_str); | ||
2887 | } | ||
2888 | |||
2857 | /* Copy type section */ | 2889 | /* Copy type section */ |
2858 | ret_types = raw_btf + offset; | 2890 | ret_types = raw_btf + offset; |
2859 | for (i = 0; i < type_sec_size / sizeof(raw_types[0]); i++) { | 2891 | for (i = 0; i < type_sec_size / sizeof(raw_types[0]); i++) { |
2860 | if (raw_types[i] == NAME_TBD) { | 2892 | if (raw_types[i] == NAME_TBD) { |
2861 | next_str = get_next_str(next_str, end_str); | 2893 | if (CHECK(next_str_idx == strs_cnt, |
2862 | if (CHECK(!next_str, "Error in getting next_str")) { | 2894 | "Error in getting next_str #%d", |
2863 | free(raw_btf); | 2895 | next_str_idx)) { |
2864 | return NULL; | 2896 | err = -1; |
2897 | goto done; | ||
2865 | } | 2898 | } |
2866 | ret_types[i] = next_str - str; | 2899 | ret_types[i] = strs_idx[next_str_idx++] - str; |
2867 | next_str += strlen(next_str); | 2900 | } else if (IS_NAME_NTH(raw_types[i])) { |
2901 | int idx = GET_NAME_NTH_IDX(raw_types[i]); | ||
2902 | |||
2903 | if (CHECK(idx <= 0 || idx > strs_cnt, | ||
2904 | "Error getting string #%d, strs_cnt:%d", | ||
2905 | idx, strs_cnt)) { | ||
2906 | err = -1; | ||
2907 | goto done; | ||
2908 | } | ||
2909 | ret_types[i] = strs_idx[idx-1] - str; | ||
2868 | } else { | 2910 | } else { |
2869 | ret_types[i] = raw_types[i]; | 2911 | ret_types[i] = raw_types[i]; |
2870 | } | 2912 | } |
@@ -2881,8 +2923,17 @@ static void *btf_raw_create(const struct btf_header *hdr, | |||
2881 | 2923 | ||
2882 | *btf_size = size_needed; | 2924 | *btf_size = size_needed; |
2883 | if (ret_next_str) | 2925 | if (ret_next_str) |
2884 | *ret_next_str = next_str; | 2926 | *ret_next_str = |
2927 | next_str_idx < strs_cnt ? strs_idx[next_str_idx] : NULL; | ||
2885 | 2928 | ||
2929 | done: | ||
2930 | if (err) { | ||
2931 | if (raw_btf) | ||
2932 | free(raw_btf); | ||
2933 | if (strs_idx) | ||
2934 | free(strs_idx); | ||
2935 | return NULL; | ||
2936 | } | ||
2886 | return raw_btf; | 2937 | return raw_btf; |
2887 | } | 2938 | } |
2888 | 2939 | ||
@@ -5551,20 +5602,450 @@ static int test_info_raw(void) | |||
5551 | return err; | 5602 | return err; |
5552 | } | 5603 | } |
5553 | 5604 | ||
5605 | struct btf_raw_data { | ||
5606 | __u32 raw_types[MAX_NR_RAW_U32]; | ||
5607 | const char *str_sec; | ||
5608 | __u32 str_sec_size; | ||
5609 | }; | ||
5610 | |||
5611 | struct btf_dedup_test { | ||
5612 | const char *descr; | ||
5613 | struct btf_raw_data input; | ||
5614 | struct btf_raw_data expect; | ||
5615 | struct btf_dedup_opts opts; | ||
5616 | }; | ||
5617 | |||
5618 | const struct btf_dedup_test dedup_tests[] = { | ||
5619 | |||
5620 | { | ||
5621 | .descr = "dedup: unused strings filtering", | ||
5622 | .input = { | ||
5623 | .raw_types = { | ||
5624 | BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 32, 4), | ||
5625 | BTF_TYPE_INT_ENC(NAME_NTH(5), BTF_INT_SIGNED, 0, 64, 8), | ||
5626 | BTF_END_RAW, | ||
5627 | }, | ||
5628 | BTF_STR_SEC("\0unused\0int\0foo\0bar\0long"), | ||
5629 | }, | ||
5630 | .expect = { | ||
5631 | .raw_types = { | ||
5632 | BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), | ||
5633 | BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 64, 8), | ||
5634 | BTF_END_RAW, | ||
5635 | }, | ||
5636 | BTF_STR_SEC("\0int\0long"), | ||
5637 | }, | ||
5638 | .opts = { | ||
5639 | .dont_resolve_fwds = false, | ||
5640 | }, | ||
5641 | }, | ||
5642 | { | ||
5643 | .descr = "dedup: strings deduplication", | ||
5644 | .input = { | ||
5645 | .raw_types = { | ||
5646 | BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), | ||
5647 | BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 64, 8), | ||
5648 | BTF_TYPE_INT_ENC(NAME_NTH(3), BTF_INT_SIGNED, 0, 32, 4), | ||
5649 | BTF_TYPE_INT_ENC(NAME_NTH(4), BTF_INT_SIGNED, 0, 64, 8), | ||
5650 | BTF_TYPE_INT_ENC(NAME_NTH(5), BTF_INT_SIGNED, 0, 32, 4), | ||
5651 | BTF_END_RAW, | ||
5652 | }, | ||
5653 | BTF_STR_SEC("\0int\0long int\0int\0long int\0int"), | ||
5654 | }, | ||
5655 | .expect = { | ||
5656 | .raw_types = { | ||
5657 | BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), | ||
5658 | BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 64, 8), | ||
5659 | BTF_END_RAW, | ||
5660 | }, | ||
5661 | BTF_STR_SEC("\0int\0long int"), | ||
5662 | }, | ||
5663 | .opts = { | ||
5664 | .dont_resolve_fwds = false, | ||
5665 | }, | ||
5666 | }, | ||
5667 | { | ||
5668 | .descr = "dedup: struct example #1", | ||
5669 | /* | ||
5670 | * struct s { | ||
5671 | * struct s *next; | ||
5672 | * const int *a; | ||
5673 | * int b[16]; | ||
5674 | * int c; | ||
5675 | * } | ||
5676 | */ | ||
5677 | .input = { | ||
5678 | .raw_types = { | ||
5679 | /* int */ | ||
5680 | BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
5681 | /* int[16] */ | ||
5682 | BTF_TYPE_ARRAY_ENC(1, 1, 16), /* [2] */ | ||
5683 | /* struct s { */ | ||
5684 | BTF_STRUCT_ENC(NAME_NTH(2), 4, 84), /* [3] */ | ||
5685 | BTF_MEMBER_ENC(NAME_NTH(3), 4, 0), /* struct s *next; */ | ||
5686 | BTF_MEMBER_ENC(NAME_NTH(4), 5, 64), /* const int *a; */ | ||
5687 | BTF_MEMBER_ENC(NAME_NTH(5), 2, 128), /* int b[16]; */ | ||
5688 | BTF_MEMBER_ENC(NAME_NTH(6), 1, 640), /* int c; */ | ||
5689 | /* ptr -> [3] struct s */ | ||
5690 | BTF_PTR_ENC(3), /* [4] */ | ||
5691 | /* ptr -> [6] const int */ | ||
5692 | BTF_PTR_ENC(6), /* [5] */ | ||
5693 | /* const -> [1] int */ | ||
5694 | BTF_CONST_ENC(1), /* [6] */ | ||
5695 | |||
5696 | /* full copy of the above */ | ||
5697 | BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), /* [7] */ | ||
5698 | BTF_TYPE_ARRAY_ENC(7, 7, 16), /* [8] */ | ||
5699 | BTF_STRUCT_ENC(NAME_NTH(2), 4, 84), /* [9] */ | ||
5700 | BTF_MEMBER_ENC(NAME_NTH(3), 10, 0), | ||
5701 | BTF_MEMBER_ENC(NAME_NTH(4), 11, 64), | ||
5702 | BTF_MEMBER_ENC(NAME_NTH(5), 8, 128), | ||
5703 | BTF_MEMBER_ENC(NAME_NTH(6), 7, 640), | ||
5704 | BTF_PTR_ENC(9), /* [10] */ | ||
5705 | BTF_PTR_ENC(12), /* [11] */ | ||
5706 | BTF_CONST_ENC(7), /* [12] */ | ||
5707 | BTF_END_RAW, | ||
5708 | }, | ||
5709 | BTF_STR_SEC("\0int\0s\0next\0a\0b\0c\0"), | ||
5710 | }, | ||
5711 | .expect = { | ||
5712 | .raw_types = { | ||
5713 | /* int */ | ||
5714 | BTF_TYPE_INT_ENC(NAME_NTH(4), BTF_INT_SIGNED, 0, 32, 4), /* [1] */ | ||
5715 | /* int[16] */ | ||
5716 | BTF_TYPE_ARRAY_ENC(1, 1, 16), /* [2] */ | ||
5717 | /* struct s { */ | ||
5718 | BTF_STRUCT_ENC(NAME_NTH(6), 4, 84), /* [3] */ | ||
5719 | BTF_MEMBER_ENC(NAME_NTH(5), 4, 0), /* struct s *next; */ | ||
5720 | BTF_MEMBER_ENC(NAME_NTH(1), 5, 64), /* const int *a; */ | ||
5721 | BTF_MEMBER_ENC(NAME_NTH(2), 2, 128), /* int b[16]; */ | ||
5722 | BTF_MEMBER_ENC(NAME_NTH(3), 1, 640), /* int c; */ | ||
5723 | /* ptr -> [3] struct s */ | ||
5724 | BTF_PTR_ENC(3), /* [4] */ | ||
5725 | /* ptr -> [6] const int */ | ||
5726 | BTF_PTR_ENC(6), /* [5] */ | ||
5727 | /* const -> [1] int */ | ||
5728 | BTF_CONST_ENC(1), /* [6] */ | ||
5729 | BTF_END_RAW, | ||
5730 | }, | ||
5731 | BTF_STR_SEC("\0a\0b\0c\0int\0next\0s"), | ||
5732 | }, | ||
5733 | .opts = { | ||
5734 | .dont_resolve_fwds = false, | ||
5735 | }, | ||
5736 | }, | ||
5737 | { | ||
5738 | .descr = "dedup: all possible kinds (no duplicates)", | ||
5739 | .input = { | ||
5740 | .raw_types = { | ||
5741 | BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 8), /* [1] int */ | ||
5742 | BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 2), 4), /* [2] enum */ | ||
5743 | BTF_ENUM_ENC(NAME_TBD, 0), | ||
5744 | BTF_ENUM_ENC(NAME_TBD, 1), | ||
5745 | BTF_FWD_ENC(NAME_TBD, 1 /* union kind_flag */), /* [3] fwd */ | ||
5746 | BTF_TYPE_ARRAY_ENC(2, 1, 7), /* [4] array */ | ||
5747 | BTF_STRUCT_ENC(NAME_TBD, 1, 4), /* [5] struct */ | ||
5748 | BTF_MEMBER_ENC(NAME_TBD, 1, 0), | ||
5749 | BTF_UNION_ENC(NAME_TBD, 1, 4), /* [6] union */ | ||
5750 | BTF_MEMBER_ENC(NAME_TBD, 1, 0), | ||
5751 | BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [7] typedef */ | ||
5752 | BTF_PTR_ENC(0), /* [8] ptr */ | ||
5753 | BTF_CONST_ENC(8), /* [9] const */ | ||
5754 | BTF_VOLATILE_ENC(8), /* [10] volatile */ | ||
5755 | BTF_RESTRICT_ENC(8), /* [11] restrict */ | ||
5756 | BTF_FUNC_PROTO_ENC(1, 2), /* [12] func_proto */ | ||
5757 | BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), | ||
5758 | BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8), | ||
5759 | BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */ | ||
5760 | BTF_END_RAW, | ||
5761 | }, | ||
5762 | BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M"), | ||
5763 | }, | ||
5764 | .expect = { | ||
5765 | .raw_types = { | ||
5766 | BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 8), /* [1] int */ | ||
5767 | BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 2), 4), /* [2] enum */ | ||
5768 | BTF_ENUM_ENC(NAME_TBD, 0), | ||
5769 | BTF_ENUM_ENC(NAME_TBD, 1), | ||
5770 | BTF_FWD_ENC(NAME_TBD, 1 /* union kind_flag */), /* [3] fwd */ | ||
5771 | BTF_TYPE_ARRAY_ENC(2, 1, 7), /* [4] array */ | ||
5772 | BTF_STRUCT_ENC(NAME_TBD, 1, 4), /* [5] struct */ | ||
5773 | BTF_MEMBER_ENC(NAME_TBD, 1, 0), | ||
5774 | BTF_UNION_ENC(NAME_TBD, 1, 4), /* [6] union */ | ||
5775 | BTF_MEMBER_ENC(NAME_TBD, 1, 0), | ||
5776 | BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [7] typedef */ | ||
5777 | BTF_PTR_ENC(0), /* [8] ptr */ | ||
5778 | BTF_CONST_ENC(8), /* [9] const */ | ||
5779 | BTF_VOLATILE_ENC(8), /* [10] volatile */ | ||
5780 | BTF_RESTRICT_ENC(8), /* [11] restrict */ | ||
5781 | BTF_FUNC_PROTO_ENC(1, 2), /* [12] func_proto */ | ||
5782 | BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), | ||
5783 | BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8), | ||
5784 | BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */ | ||
5785 | BTF_END_RAW, | ||
5786 | }, | ||
5787 | BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M"), | ||
5788 | }, | ||
5789 | .opts = { | ||
5790 | .dont_resolve_fwds = false, | ||
5791 | }, | ||
5792 | }, | ||
5793 | { | ||
5794 | .descr = "dedup: no int duplicates", | ||
5795 | .input = { | ||
5796 | .raw_types = { | ||
5797 | BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 8), | ||
5798 | /* different name */ | ||
5799 | BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 32, 8), | ||
5800 | /* different encoding */ | ||
5801 | BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_CHAR, 0, 32, 8), | ||
5802 | BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_BOOL, 0, 32, 8), | ||
5803 | /* different bit offset */ | ||
5804 | BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 8, 32, 8), | ||
5805 | /* different bit size */ | ||
5806 | BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 27, 8), | ||
5807 | /* different byte size */ | ||
5808 | BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), | ||
5809 | BTF_END_RAW, | ||
5810 | }, | ||
5811 | BTF_STR_SEC("\0int\0some other int"), | ||
5812 | }, | ||
5813 | .expect = { | ||
5814 | .raw_types = { | ||
5815 | BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 8), | ||
5816 | /* different name */ | ||
5817 | BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 32, 8), | ||
5818 | /* different encoding */ | ||
5819 | BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_CHAR, 0, 32, 8), | ||
5820 | BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_BOOL, 0, 32, 8), | ||
5821 | /* different bit offset */ | ||
5822 | BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 8, 32, 8), | ||
5823 | /* different bit size */ | ||
5824 | BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 27, 8), | ||
5825 | /* different byte size */ | ||
5826 | BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), | ||
5827 | BTF_END_RAW, | ||
5828 | }, | ||
5829 | BTF_STR_SEC("\0int\0some other int"), | ||
5830 | }, | ||
5831 | .opts = { | ||
5832 | .dont_resolve_fwds = false, | ||
5833 | }, | ||
5834 | }, | ||
5835 | |||
5836 | }; | ||
5837 | |||
5838 | static int btf_type_size(const struct btf_type *t) | ||
5839 | { | ||
5840 | int base_size = sizeof(struct btf_type); | ||
5841 | __u16 vlen = BTF_INFO_VLEN(t->info); | ||
5842 | __u16 kind = BTF_INFO_KIND(t->info); | ||
5843 | |||
5844 | switch (kind) { | ||
5845 | case BTF_KIND_FWD: | ||
5846 | case BTF_KIND_CONST: | ||
5847 | case BTF_KIND_VOLATILE: | ||
5848 | case BTF_KIND_RESTRICT: | ||
5849 | case BTF_KIND_PTR: | ||
5850 | case BTF_KIND_TYPEDEF: | ||
5851 | case BTF_KIND_FUNC: | ||
5852 | return base_size; | ||
5853 | case BTF_KIND_INT: | ||
5854 | return base_size + sizeof(__u32); | ||
5855 | case BTF_KIND_ENUM: | ||
5856 | return base_size + vlen * sizeof(struct btf_enum); | ||
5857 | case BTF_KIND_ARRAY: | ||
5858 | return base_size + sizeof(struct btf_array); | ||
5859 | case BTF_KIND_STRUCT: | ||
5860 | case BTF_KIND_UNION: | ||
5861 | return base_size + vlen * sizeof(struct btf_member); | ||
5862 | case BTF_KIND_FUNC_PROTO: | ||
5863 | return base_size + vlen * sizeof(struct btf_param); | ||
5864 | default: | ||
5865 | fprintf(stderr, "Unsupported BTF_KIND:%u\n", kind); | ||
5866 | return -EINVAL; | ||
5867 | } | ||
5868 | } | ||
5869 | |||
5870 | static void dump_btf_strings(const char *strs, __u32 len) | ||
5871 | { | ||
5872 | const char *cur = strs; | ||
5873 | int i = 0; | ||
5874 | |||
5875 | while (cur < strs + len) { | ||
5876 | fprintf(stderr, "string #%d: '%s'\n", i, cur); | ||
5877 | cur += strlen(cur) + 1; | ||
5878 | i++; | ||
5879 | } | ||
5880 | } | ||
5881 | |||
5882 | static int do_test_dedup(unsigned int test_num) | ||
5883 | { | ||
5884 | const struct btf_dedup_test *test = &dedup_tests[test_num - 1]; | ||
5885 | int err = 0, i; | ||
5886 | __u32 test_nr_types, expect_nr_types, test_str_len, expect_str_len; | ||
5887 | void *raw_btf; | ||
5888 | unsigned int raw_btf_size; | ||
5889 | struct btf *test_btf = NULL, *expect_btf = NULL; | ||
5890 | const char *ret_test_next_str, *ret_expect_next_str; | ||
5891 | const char *test_strs, *expect_strs; | ||
5892 | const char *test_str_cur, *test_str_end; | ||
5893 | const char *expect_str_cur, *expect_str_end; | ||
5894 | |||
5895 | fprintf(stderr, "BTF dedup test[%u] (%s):", test_num, test->descr); | ||
5896 | |||
5897 | raw_btf = btf_raw_create(&hdr_tmpl, test->input.raw_types, | ||
5898 | test->input.str_sec, test->input.str_sec_size, | ||
5899 | &raw_btf_size, &ret_test_next_str); | ||
5900 | if (!raw_btf) | ||
5901 | return -1; | ||
5902 | test_btf = btf__new((__u8 *)raw_btf, raw_btf_size); | ||
5903 | free(raw_btf); | ||
5904 | if (CHECK(IS_ERR(test_btf), "invalid test_btf errno:%ld", | ||
5905 | PTR_ERR(test_btf))) { | ||
5906 | err = -1; | ||
5907 | goto done; | ||
5908 | } | ||
5909 | |||
5910 | raw_btf = btf_raw_create(&hdr_tmpl, test->expect.raw_types, | ||
5911 | test->expect.str_sec, | ||
5912 | test->expect.str_sec_size, | ||
5913 | &raw_btf_size, &ret_expect_next_str); | ||
5914 | if (!raw_btf) | ||
5915 | return -1; | ||
5916 | expect_btf = btf__new((__u8 *)raw_btf, raw_btf_size); | ||
5917 | free(raw_btf); | ||
5918 | if (CHECK(IS_ERR(expect_btf), "invalid expect_btf errno:%ld", | ||
5919 | PTR_ERR(expect_btf))) { | ||
5920 | err = -1; | ||
5921 | goto done; | ||
5922 | } | ||
5923 | |||
5924 | err = btf__dedup(test_btf, NULL, &test->opts); | ||
5925 | if (CHECK(err, "btf_dedup failed errno:%d", err)) { | ||
5926 | err = -1; | ||
5927 | goto done; | ||
5928 | } | ||
5929 | |||
5930 | btf__get_strings(test_btf, &test_strs, &test_str_len); | ||
5931 | btf__get_strings(expect_btf, &expect_strs, &expect_str_len); | ||
5932 | if (CHECK(test_str_len != expect_str_len, | ||
5933 | "test_str_len:%u != expect_str_len:%u", | ||
5934 | test_str_len, expect_str_len)) { | ||
5935 | fprintf(stderr, "\ntest strings:\n"); | ||
5936 | dump_btf_strings(test_strs, test_str_len); | ||
5937 | fprintf(stderr, "\nexpected strings:\n"); | ||
5938 | dump_btf_strings(expect_strs, expect_str_len); | ||
5939 | err = -1; | ||
5940 | goto done; | ||
5941 | } | ||
5942 | |||
5943 | test_str_cur = test_strs; | ||
5944 | test_str_end = test_strs + test_str_len; | ||
5945 | expect_str_cur = expect_strs; | ||
5946 | expect_str_end = expect_strs + expect_str_len; | ||
5947 | while (test_str_cur < test_str_end && expect_str_cur < expect_str_end) { | ||
5948 | size_t test_len, expect_len; | ||
5949 | |||
5950 | test_len = strlen(test_str_cur); | ||
5951 | expect_len = strlen(expect_str_cur); | ||
5952 | if (CHECK(test_len != expect_len, | ||
5953 | "test_len:%zu != expect_len:%zu " | ||
5954 | "(test_str:%s, expect_str:%s)", | ||
5955 | test_len, expect_len, test_str_cur, expect_str_cur)) { | ||
5956 | err = -1; | ||
5957 | goto done; | ||
5958 | } | ||
5959 | if (CHECK(strcmp(test_str_cur, expect_str_cur), | ||
5960 | "test_str:%s != expect_str:%s", | ||
5961 | test_str_cur, expect_str_cur)) { | ||
5962 | err = -1; | ||
5963 | goto done; | ||
5964 | } | ||
5965 | test_str_cur += test_len + 1; | ||
5966 | expect_str_cur += expect_len + 1; | ||
5967 | } | ||
5968 | if (CHECK(test_str_cur != test_str_end, | ||
5969 | "test_str_cur:%p != test_str_end:%p", | ||
5970 | test_str_cur, test_str_end)) { | ||
5971 | err = -1; | ||
5972 | goto done; | ||
5973 | } | ||
5974 | |||
5975 | test_nr_types = btf__get_nr_types(test_btf); | ||
5976 | expect_nr_types = btf__get_nr_types(expect_btf); | ||
5977 | if (CHECK(test_nr_types != expect_nr_types, | ||
5978 | "test_nr_types:%u != expect_nr_types:%u", | ||
5979 | test_nr_types, expect_nr_types)) { | ||
5980 | err = -1; | ||
5981 | goto done; | ||
5982 | } | ||
5983 | |||
5984 | for (i = 1; i <= test_nr_types; i++) { | ||
5985 | const struct btf_type *test_type, *expect_type; | ||
5986 | int test_size, expect_size; | ||
5987 | |||
5988 | test_type = btf__type_by_id(test_btf, i); | ||
5989 | expect_type = btf__type_by_id(expect_btf, i); | ||
5990 | test_size = btf_type_size(test_type); | ||
5991 | expect_size = btf_type_size(expect_type); | ||
5992 | |||
5993 | if (CHECK(test_size != expect_size, | ||
5994 | "type #%d: test_size:%d != expect_size:%u", | ||
5995 | i, test_size, expect_size)) { | ||
5996 | err = -1; | ||
5997 | goto done; | ||
5998 | } | ||
5999 | if (CHECK(memcmp((void *)test_type, | ||
6000 | (void *)expect_type, | ||
6001 | test_size), | ||
6002 | "type #%d: contents differ", i)) { | ||
6003 | err = -1; | ||
6004 | goto done; | ||
6005 | } | ||
6006 | } | ||
6007 | |||
6008 | done: | ||
6009 | if (!err) | ||
6010 | fprintf(stderr, "OK"); | ||
6011 | if (!IS_ERR(test_btf)) | ||
6012 | btf__free(test_btf); | ||
6013 | if (!IS_ERR(expect_btf)) | ||
6014 | btf__free(expect_btf); | ||
6015 | |||
6016 | return err; | ||
6017 | } | ||
6018 | |||
6019 | static int test_dedup(void) | ||
6020 | { | ||
6021 | unsigned int i; | ||
6022 | int err = 0; | ||
6023 | |||
6024 | if (args.dedup_test_num) | ||
6025 | return count_result(do_test_dedup(args.dedup_test_num)); | ||
6026 | |||
6027 | for (i = 1; i <= ARRAY_SIZE(dedup_tests); i++) | ||
6028 | err |= count_result(do_test_dedup(i)); | ||
6029 | |||
6030 | return err; | ||
6031 | } | ||
6032 | |||
5554 | static void usage(const char *cmd) | 6033 | static void usage(const char *cmd) |
5555 | { | 6034 | { |
5556 | fprintf(stderr, "Usage: %s [-l] [[-r btf_raw_test_num (1 - %zu)] |\n" | 6035 | fprintf(stderr, "Usage: %s [-l] [[-r btf_raw_test_num (1 - %zu)] |\n" |
5557 | "\t[-g btf_get_info_test_num (1 - %zu)] |\n" | 6036 | "\t[-g btf_get_info_test_num (1 - %zu)] |\n" |
5558 | "\t[-f btf_file_test_num (1 - %zu)] |\n" | 6037 | "\t[-f btf_file_test_num (1 - %zu)] |\n" |
5559 | "\t[-k btf_prog_info_raw_test_num (1 - %zu)] |\n" | 6038 | "\t[-k btf_prog_info_raw_test_num (1 - %zu)] |\n" |
5560 | "\t[-p (pretty print test)]]\n", | 6039 | "\t[-p (pretty print test)] |\n" |
6040 | "\t[-d btf_dedup_test_num (1 - %zu)]]\n", | ||
5561 | cmd, ARRAY_SIZE(raw_tests), ARRAY_SIZE(get_info_tests), | 6041 | cmd, ARRAY_SIZE(raw_tests), ARRAY_SIZE(get_info_tests), |
5562 | ARRAY_SIZE(file_tests), ARRAY_SIZE(info_raw_tests)); | 6042 | ARRAY_SIZE(file_tests), ARRAY_SIZE(info_raw_tests), |
6043 | ARRAY_SIZE(dedup_tests)); | ||
5563 | } | 6044 | } |
5564 | 6045 | ||
5565 | static int parse_args(int argc, char **argv) | 6046 | static int parse_args(int argc, char **argv) |
5566 | { | 6047 | { |
5567 | const char *optstr = "lpk:f:r:g:"; | 6048 | const char *optstr = "hlpk:f:r:g:d:"; |
5568 | int opt; | 6049 | int opt; |
5569 | 6050 | ||
5570 | while ((opt = getopt(argc, argv, optstr)) != -1) { | 6051 | while ((opt = getopt(argc, argv, optstr)) != -1) { |
@@ -5591,12 +6072,16 @@ static int parse_args(int argc, char **argv) | |||
5591 | args.info_raw_test_num = atoi(optarg); | 6072 | args.info_raw_test_num = atoi(optarg); |
5592 | args.info_raw_test = true; | 6073 | args.info_raw_test = true; |
5593 | break; | 6074 | break; |
6075 | case 'd': | ||
6076 | args.dedup_test_num = atoi(optarg); | ||
6077 | args.dedup_test = true; | ||
6078 | break; | ||
5594 | case 'h': | 6079 | case 'h': |
5595 | usage(argv[0]); | 6080 | usage(argv[0]); |
5596 | exit(0); | 6081 | exit(0); |
5597 | default: | 6082 | default: |
5598 | usage(argv[0]); | 6083 | usage(argv[0]); |
5599 | return -1; | 6084 | return -1; |
5600 | } | 6085 | } |
5601 | } | 6086 | } |
5602 | 6087 | ||
@@ -5632,6 +6117,14 @@ static int parse_args(int argc, char **argv) | |||
5632 | return -1; | 6117 | return -1; |
5633 | } | 6118 | } |
5634 | 6119 | ||
6120 | if (args.dedup_test_num && | ||
6121 | (args.dedup_test_num < 1 || | ||
6122 | args.dedup_test_num > ARRAY_SIZE(dedup_tests))) { | ||
6123 | fprintf(stderr, "BTF dedup test number must be [1 - %zu]\n", | ||
6124 | ARRAY_SIZE(dedup_tests)); | ||
6125 | return -1; | ||
6126 | } | ||
6127 | |||
5635 | return 0; | 6128 | return 0; |
5636 | } | 6129 | } |
5637 | 6130 | ||
@@ -5650,7 +6143,7 @@ int main(int argc, char **argv) | |||
5650 | return err; | 6143 | return err; |
5651 | 6144 | ||
5652 | if (args.always_log) | 6145 | if (args.always_log) |
5653 | libbpf_set_print(__base_pr, __base_pr, __base_pr); | 6146 | libbpf_set_print(__base_pr); |
5654 | 6147 | ||
5655 | if (args.raw_test) | 6148 | if (args.raw_test) |
5656 | err |= test_raw(); | 6149 | err |= test_raw(); |
@@ -5667,14 +6160,18 @@ int main(int argc, char **argv) | |||
5667 | if (args.info_raw_test) | 6160 | if (args.info_raw_test) |
5668 | err |= test_info_raw(); | 6161 | err |= test_info_raw(); |
5669 | 6162 | ||
6163 | if (args.dedup_test) | ||
6164 | err |= test_dedup(); | ||
6165 | |||
5670 | if (args.raw_test || args.get_info_test || args.file_test || | 6166 | if (args.raw_test || args.get_info_test || args.file_test || |
5671 | args.pprint_test || args.info_raw_test) | 6167 | args.pprint_test || args.info_raw_test || args.dedup_test) |
5672 | goto done; | 6168 | goto done; |
5673 | 6169 | ||
5674 | err |= test_raw(); | 6170 | err |= test_raw(); |
5675 | err |= test_get_info(); | 6171 | err |= test_get_info(); |
5676 | err |= test_file(); | 6172 | err |= test_file(); |
5677 | err |= test_info_raw(); | 6173 | err |= test_info_raw(); |
6174 | err |= test_dedup(); | ||
5678 | 6175 | ||
5679 | done: | 6176 | done: |
5680 | print_summary(); | 6177 | print_summary(); |
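With the option handling added above, the dedup suite can also be run on its own; for example (invocation shown for illustration, from the selftests build directory):

	./test_btf -d 1		# run only the first BTF dedup test
	./test_btf -l -d 3	# run dedup test #3 with libbpf logging enabled

Invoked without arguments, test_btf now runs the dedup tests alongside the existing raw, get_info, file and info_raw suites.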
diff --git a/tools/testing/selftests/bpf/test_libbpf_open.c b/tools/testing/selftests/bpf/test_libbpf_open.c index 8fcd1c076add..1909ecf4d999 100644 --- a/tools/testing/selftests/bpf/test_libbpf_open.c +++ b/tools/testing/selftests/bpf/test_libbpf_open.c | |||
@@ -34,23 +34,16 @@ static void usage(char *argv[]) | |||
34 | printf("\n"); | 34 | printf("\n"); |
35 | } | 35 | } |
36 | 36 | ||
37 | #define DEFINE_PRINT_FN(name, enabled) \ | 37 | static bool debug = 0; |
38 | static int libbpf_##name(const char *fmt, ...) \ | 38 | static int libbpf_debug_print(enum libbpf_print_level level, |
39 | { \ | 39 | const char *fmt, va_list args) |
40 | va_list args; \ | 40 | { |
41 | int ret; \ | 41 | if (level == LIBBPF_DEBUG && !debug) |
42 | \ | 42 | return 0; |
43 | va_start(args, fmt); \ | 43 | |
44 | if (enabled) { \ | 44 | fprintf(stderr, "[%d] ", level); |
45 | fprintf(stderr, "[" #name "] "); \ | 45 | return vfprintf(stderr, fmt, args); |
46 | ret = vfprintf(stderr, fmt, args); \ | ||
47 | } \ | ||
48 | va_end(args); \ | ||
49 | return ret; \ | ||
50 | } | 46 | } |
51 | DEFINE_PRINT_FN(warning, 1) | ||
52 | DEFINE_PRINT_FN(info, 1) | ||
53 | DEFINE_PRINT_FN(debug, 1) | ||
54 | 47 | ||
55 | #define EXIT_FAIL_LIBBPF EXIT_FAILURE | 48 | #define EXIT_FAIL_LIBBPF EXIT_FAILURE |
56 | #define EXIT_FAIL_OPTION 2 | 49 | #define EXIT_FAIL_OPTION 2 |
@@ -120,15 +113,14 @@ int main(int argc, char **argv) | |||
120 | int longindex = 0; | 113 | int longindex = 0; |
121 | int opt; | 114 | int opt; |
122 | 115 | ||
123 | libbpf_set_print(libbpf_warning, libbpf_info, NULL); | 116 | libbpf_set_print(libbpf_debug_print); |
124 | 117 | ||
125 | /* Parse commands line args */ | 118 | /* Parse commands line args */ |
126 | while ((opt = getopt_long(argc, argv, "hDq", | 119 | while ((opt = getopt_long(argc, argv, "hDq", |
127 | long_options, &longindex)) != -1) { | 120 | long_options, &longindex)) != -1) { |
128 | switch (opt) { | 121 | switch (opt) { |
129 | case 'D': | 122 | case 'D': |
130 | libbpf_set_print(libbpf_warning, libbpf_info, | 123 | debug = 1; |
131 | libbpf_debug); | ||
132 | break; | 124 | break; |
133 | case 'q': /* Use in scripting mode */ | 125 | case 'q': /* Use in scripting mode */ |
134 | verbose = 0; | 126 | verbose = 0; |
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c index e7798dd97f4b..3c627771f965 100644 --- a/tools/testing/selftests/bpf/test_maps.c +++ b/tools/testing/selftests/bpf/test_maps.c | |||
@@ -45,7 +45,7 @@ static int map_flags; | |||
45 | } \ | 45 | } \ |
46 | }) | 46 | }) |
47 | 47 | ||
48 | static void test_hashmap(int task, void *data) | 48 | static void test_hashmap(unsigned int task, void *data) |
49 | { | 49 | { |
50 | long long key, next_key, first_key, value; | 50 | long long key, next_key, first_key, value; |
51 | int fd; | 51 | int fd; |
@@ -135,7 +135,7 @@ static void test_hashmap(int task, void *data) | |||
135 | close(fd); | 135 | close(fd); |
136 | } | 136 | } |
137 | 137 | ||
138 | static void test_hashmap_sizes(int task, void *data) | 138 | static void test_hashmap_sizes(unsigned int task, void *data) |
139 | { | 139 | { |
140 | int fd, i, j; | 140 | int fd, i, j; |
141 | 141 | ||
@@ -155,7 +155,7 @@ static void test_hashmap_sizes(int task, void *data) | |||
155 | } | 155 | } |
156 | } | 156 | } |
157 | 157 | ||
158 | static void test_hashmap_percpu(int task, void *data) | 158 | static void test_hashmap_percpu(unsigned int task, void *data) |
159 | { | 159 | { |
160 | unsigned int nr_cpus = bpf_num_possible_cpus(); | 160 | unsigned int nr_cpus = bpf_num_possible_cpus(); |
161 | BPF_DECLARE_PERCPU(long, value); | 161 | BPF_DECLARE_PERCPU(long, value); |
@@ -282,7 +282,7 @@ static int helper_fill_hashmap(int max_entries) | |||
282 | return fd; | 282 | return fd; |
283 | } | 283 | } |
284 | 284 | ||
285 | static void test_hashmap_walk(int task, void *data) | 285 | static void test_hashmap_walk(unsigned int task, void *data) |
286 | { | 286 | { |
287 | int fd, i, max_entries = 1000; | 287 | int fd, i, max_entries = 1000; |
288 | long long key, value, next_key; | 288 | long long key, value, next_key; |
@@ -353,7 +353,7 @@ static void test_hashmap_zero_seed(void) | |||
353 | close(second); | 353 | close(second); |
354 | } | 354 | } |
355 | 355 | ||
356 | static void test_arraymap(int task, void *data) | 356 | static void test_arraymap(unsigned int task, void *data) |
357 | { | 357 | { |
358 | int key, next_key, fd; | 358 | int key, next_key, fd; |
359 | long long value; | 359 | long long value; |
@@ -408,7 +408,7 @@ static void test_arraymap(int task, void *data) | |||
408 | close(fd); | 408 | close(fd); |
409 | } | 409 | } |
410 | 410 | ||
411 | static void test_arraymap_percpu(int task, void *data) | 411 | static void test_arraymap_percpu(unsigned int task, void *data) |
412 | { | 412 | { |
413 | unsigned int nr_cpus = bpf_num_possible_cpus(); | 413 | unsigned int nr_cpus = bpf_num_possible_cpus(); |
414 | BPF_DECLARE_PERCPU(long, values); | 414 | BPF_DECLARE_PERCPU(long, values); |
@@ -504,7 +504,7 @@ static void test_arraymap_percpu_many_keys(void) | |||
504 | close(fd); | 504 | close(fd); |
505 | } | 505 | } |
506 | 506 | ||
507 | static void test_devmap(int task, void *data) | 507 | static void test_devmap(unsigned int task, void *data) |
508 | { | 508 | { |
509 | int fd; | 509 | int fd; |
510 | __u32 key, value; | 510 | __u32 key, value; |
@@ -519,7 +519,7 @@ static void test_devmap(int task, void *data) | |||
519 | close(fd); | 519 | close(fd); |
520 | } | 520 | } |
521 | 521 | ||
522 | static void test_queuemap(int task, void *data) | 522 | static void test_queuemap(unsigned int task, void *data) |
523 | { | 523 | { |
524 | const int MAP_SIZE = 32; | 524 | const int MAP_SIZE = 32; |
525 | __u32 vals[MAP_SIZE + MAP_SIZE/2], val; | 525 | __u32 vals[MAP_SIZE + MAP_SIZE/2], val; |
@@ -577,7 +577,7 @@ static void test_queuemap(int task, void *data) | |||
577 | close(fd); | 577 | close(fd); |
578 | } | 578 | } |
579 | 579 | ||
580 | static void test_stackmap(int task, void *data) | 580 | static void test_stackmap(unsigned int task, void *data) |
581 | { | 581 | { |
582 | const int MAP_SIZE = 32; | 582 | const int MAP_SIZE = 32; |
583 | __u32 vals[MAP_SIZE + MAP_SIZE/2], val; | 583 | __u32 vals[MAP_SIZE + MAP_SIZE/2], val; |
@@ -642,7 +642,7 @@ static void test_stackmap(int task, void *data) | |||
642 | #define SOCKMAP_PARSE_PROG "./sockmap_parse_prog.o" | 642 | #define SOCKMAP_PARSE_PROG "./sockmap_parse_prog.o" |
643 | #define SOCKMAP_VERDICT_PROG "./sockmap_verdict_prog.o" | 643 | #define SOCKMAP_VERDICT_PROG "./sockmap_verdict_prog.o" |
644 | #define SOCKMAP_TCP_MSG_PROG "./sockmap_tcp_msg_prog.o" | 644 | #define SOCKMAP_TCP_MSG_PROG "./sockmap_tcp_msg_prog.o" |
645 | static void test_sockmap(int tasks, void *data) | 645 | static void test_sockmap(unsigned int tasks, void *data) |
646 | { | 646 | { |
647 | struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_msg, *bpf_map_break; | 647 | struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_msg, *bpf_map_break; |
648 | int map_fd_msg = 0, map_fd_rx = 0, map_fd_tx = 0, map_fd_break; | 648 | int map_fd_msg = 0, map_fd_rx = 0, map_fd_tx = 0, map_fd_break; |
@@ -1268,10 +1268,11 @@ static void test_map_large(void) | |||
1268 | } | 1268 | } |
1269 | 1269 | ||
1270 | #define run_parallel(N, FN, DATA) \ | 1270 | #define run_parallel(N, FN, DATA) \ |
1271 | printf("Fork %d tasks to '" #FN "'\n", N); \ | 1271 | printf("Fork %u tasks to '" #FN "'\n", N); \ |
1272 | __run_parallel(N, FN, DATA) | 1272 | __run_parallel(N, FN, DATA) |
1273 | 1273 | ||
1274 | static void __run_parallel(int tasks, void (*fn)(int task, void *data), | 1274 | static void __run_parallel(unsigned int tasks, |
1275 | void (*fn)(unsigned int task, void *data), | ||
1275 | void *data) | 1276 | void *data) |
1276 | { | 1277 | { |
1277 | pid_t pid[tasks]; | 1278 | pid_t pid[tasks]; |
@@ -1312,7 +1313,7 @@ static void test_map_stress(void) | |||
1312 | #define DO_UPDATE 1 | 1313 | #define DO_UPDATE 1 |
1313 | #define DO_DELETE 0 | 1314 | #define DO_DELETE 0 |
1314 | 1315 | ||
1315 | static void test_update_delete(int fn, void *data) | 1316 | static void test_update_delete(unsigned int fn, void *data) |
1316 | { | 1317 | { |
1317 | int do_update = ((int *)data)[1]; | 1318 | int do_update = ((int *)data)[1]; |
1318 | int fd = ((int *)data)[0]; | 1319 | int fd = ((int *)data)[0]; |
diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py index d59642e70f56..84bea3985d64 100755 --- a/tools/testing/selftests/bpf/test_offload.py +++ b/tools/testing/selftests/bpf/test_offload.py | |||
@@ -23,6 +23,7 @@ import string | |||
23 | import struct | 23 | import struct |
24 | import subprocess | 24 | import subprocess |
25 | import time | 25 | import time |
26 | import traceback | ||
26 | 27 | ||
27 | logfile = None | 28 | logfile = None |
28 | log_level = 1 | 29 | log_level = 1 |
@@ -78,7 +79,9 @@ def fail(cond, msg): | |||
78 | if not cond: | 79 | if not cond: |
79 | return | 80 | return |
80 | print("FAIL: " + msg) | 81 | print("FAIL: " + msg) |
81 | log("FAIL: " + msg, "", level=1) | 82 | tb = "".join(traceback.extract_stack().format()) |
83 | print(tb) | ||
84 | log("FAIL: " + msg, tb, level=1) | ||
82 | os.sys.exit(1) | 85 | os.sys.exit(1) |
83 | 86 | ||
84 | def start_test(msg): | 87 | def start_test(msg): |
@@ -589,6 +592,15 @@ def check_verifier_log(output, reference): | |||
589 | return | 592 | return |
590 | fail(True, "Missing or incorrect message from netdevsim in verifier log") | 593 | fail(True, "Missing or incorrect message from netdevsim in verifier log") |
591 | 594 | ||
595 | def check_multi_basic(two_xdps): | ||
596 | fail(two_xdps["mode"] != 4, "Bad mode reported with multiple programs") | ||
597 | fail("prog" in two_xdps, "Base program reported in multi program mode") | ||
598 | fail(len(two_xdps["attached"]) != 2, | ||
599 | "Wrong attached program count with two programs") | ||
600 | fail(two_xdps["attached"][0]["prog"]["id"] == | ||
601 | two_xdps["attached"][1]["prog"]["id"], | ||
602 | "Offloaded and other programs have the same id") | ||
603 | |||
592 | def test_spurios_extack(sim, obj, skip_hw, needle): | 604 | def test_spurios_extack(sim, obj, skip_hw, needle): |
593 | res = sim.cls_bpf_add_filter(obj, prio=1, handle=1, skip_hw=skip_hw, | 605 | res = sim.cls_bpf_add_filter(obj, prio=1, handle=1, skip_hw=skip_hw, |
594 | include_stderr=True) | 606 | include_stderr=True) |
@@ -600,6 +612,67 @@ def test_spurios_extack(sim, obj, skip_hw, needle): | |||
600 | include_stderr=True) | 612 | include_stderr=True) |
601 | check_no_extack(res, needle) | 613 | check_no_extack(res, needle) |
602 | 614 | ||
615 | def test_multi_prog(sim, obj, modename, modeid): | ||
616 | start_test("Test multi-attachment XDP - %s + offload..." % | ||
617 | (modename or "default", )) | ||
618 | sim.set_xdp(obj, "offload") | ||
619 | xdp = sim.ip_link_show(xdp=True)["xdp"] | ||
620 | offloaded = sim.dfs_read("bpf_offloaded_id") | ||
621 | fail("prog" not in xdp, "Base program not reported in single program mode") | ||
622 | fail(len(xdp["attached"]) != 1, | ||
623 | "Wrong attached program count with one program") | ||
624 | |||
625 | sim.set_xdp(obj, modename) | ||
626 | two_xdps = sim.ip_link_show(xdp=True)["xdp"] | ||
627 | |||
628 | fail(xdp["attached"][0] not in two_xdps["attached"], | ||
629 | "Offload program not reported after other activated") | ||
630 | check_multi_basic(two_xdps) | ||
631 | |||
632 | offloaded2 = sim.dfs_read("bpf_offloaded_id") | ||
633 | fail(offloaded != offloaded2, | ||
634 | "Offload ID changed after loading other program") | ||
635 | |||
636 | start_test("Test multi-attachment XDP - replace...") | ||
637 | ret, _, err = sim.set_xdp(obj, "offload", fail=False, include_stderr=True) | ||
638 | fail(ret == 0, "Replaced one of programs without -force") | ||
639 | check_extack(err, "XDP program already attached.", args) | ||
640 | |||
641 | if modename == "" or modename == "drv": | ||
642 | othermode = "" if modename == "drv" else "drv" | ||
643 | start_test("Test multi-attachment XDP - detach...") | ||
644 | ret, _, err = sim.unset_xdp(othermode, force=True, | ||
645 | fail=False, include_stderr=True) | ||
646 | fail(ret == 0, "Removed program with a bad mode") | ||
647 | check_extack(err, "program loaded with different flags.", args) | ||
648 | |||
649 | sim.unset_xdp("offload") | ||
650 | xdp = sim.ip_link_show(xdp=True)["xdp"] | ||
651 | offloaded = sim.dfs_read("bpf_offloaded_id") | ||
652 | |||
653 | fail(xdp["mode"] != modeid, "Bad mode reported after multiple programs") | ||
654 | fail("prog" not in xdp, | ||
655 | "Base program not reported after multi program mode") | ||
656 | fail(xdp["attached"][0] not in two_xdps["attached"], | ||
657 | "Offload program not reported after other activated") | ||
658 | fail(len(xdp["attached"]) != 1, | ||
659 | "Wrong attached program count with remaining programs") | ||
660 | fail(offloaded != "0", "Offload ID reported with only other program left") | ||
661 | |||
662 | start_test("Test multi-attachment XDP - reattach...") | ||
663 | sim.set_xdp(obj, "offload") | ||
664 | two_xdps = sim.ip_link_show(xdp=True)["xdp"] | ||
665 | |||
666 | fail(xdp["attached"][0] not in two_xdps["attached"], | ||
667 | "Other program not reported after offload activated") | ||
668 | check_multi_basic(two_xdps) | ||
669 | |||
670 | start_test("Test multi-attachment XDP - device remove...") | ||
671 | sim.remove() | ||
672 | |||
673 | sim = NetdevSim() | ||
674 | sim.set_ethtool_tc_offloads(True) | ||
675 | return sim | ||
603 | 676 | ||
604 | # Parse command line | 677 | # Parse command line |
605 | parser = argparse.ArgumentParser() | 678 | parser = argparse.ArgumentParser() |
@@ -842,7 +915,9 @@ try: | |||
842 | ret, _, err = sim.set_xdp(obj, "generic", force=True, | 915 | ret, _, err = sim.set_xdp(obj, "generic", force=True, |
843 | fail=False, include_stderr=True) | 916 | fail=False, include_stderr=True) |
844 | fail(ret == 0, "Replaced XDP program with a program in different mode") | 917 | fail(ret == 0, "Replaced XDP program with a program in different mode") |
845 | fail(err.count("File exists") != 1, "Replaced driver XDP with generic") | 918 | check_extack(err, |
919 | "native and generic XDP can't be active at the same time.", | ||
920 | args) | ||
846 | ret, _, err = sim.set_xdp(obj, "", force=True, | 921 | ret, _, err = sim.set_xdp(obj, "", force=True, |
847 | fail=False, include_stderr=True) | 922 | fail=False, include_stderr=True) |
848 | fail(ret == 0, "Replaced XDP program with a program in different mode") | 923 | fail(ret == 0, "Replaced XDP program with a program in different mode") |
@@ -931,59 +1006,9 @@ try: | |||
931 | rm(pin_file) | 1006 | rm(pin_file) |
932 | bpftool_prog_list_wait(expected=0) | 1007 | bpftool_prog_list_wait(expected=0) |
933 | 1008 | ||
934 | start_test("Test multi-attachment XDP - attach...") | 1009 | sim = test_multi_prog(sim, obj, "", 1) |
935 | sim.set_xdp(obj, "offload") | 1010 | sim = test_multi_prog(sim, obj, "drv", 1) |
936 | xdp = sim.ip_link_show(xdp=True)["xdp"] | 1011 | sim = test_multi_prog(sim, obj, "generic", 2) |
937 | offloaded = sim.dfs_read("bpf_offloaded_id") | ||
938 | fail("prog" not in xdp, "Base program not reported in single program mode") | ||
939 | fail(len(ipl["xdp"]["attached"]) != 1, | ||
940 | "Wrong attached program count with one program") | ||
941 | |||
942 | sim.set_xdp(obj, "") | ||
943 | two_xdps = sim.ip_link_show(xdp=True)["xdp"] | ||
944 | offloaded2 = sim.dfs_read("bpf_offloaded_id") | ||
945 | |||
946 | fail(two_xdps["mode"] != 4, "Bad mode reported with multiple programs") | ||
947 | fail("prog" in two_xdps, "Base program reported in multi program mode") | ||
948 | fail(xdp["attached"][0] not in two_xdps["attached"], | ||
949 | "Offload program not reported after driver activated") | ||
950 | fail(len(two_xdps["attached"]) != 2, | ||
951 | "Wrong attached program count with two programs") | ||
952 | fail(two_xdps["attached"][0]["prog"]["id"] == | ||
953 | two_xdps["attached"][1]["prog"]["id"], | ||
954 | "offloaded and drv programs have the same id") | ||
955 | fail(offloaded != offloaded2, | ||
956 | "offload ID changed after loading driver program") | ||
957 | |||
958 | start_test("Test multi-attachment XDP - replace...") | ||
959 | ret, _, err = sim.set_xdp(obj, "offload", fail=False, include_stderr=True) | ||
960 | fail(err.count("busy") != 1, "Replaced one of programs without -force") | ||
961 | |||
962 | start_test("Test multi-attachment XDP - detach...") | ||
963 | ret, _, err = sim.unset_xdp("drv", force=True, | ||
964 | fail=False, include_stderr=True) | ||
965 | fail(ret == 0, "Removed program with a bad mode") | ||
966 | check_extack(err, "program loaded with different flags.", args) | ||
967 | |||
968 | sim.unset_xdp("offload") | ||
969 | xdp = sim.ip_link_show(xdp=True)["xdp"] | ||
970 | offloaded = sim.dfs_read("bpf_offloaded_id") | ||
971 | |||
972 | fail(xdp["mode"] != 1, "Bad mode reported after multiple programs") | ||
973 | fail("prog" not in xdp, | ||
974 | "Base program not reported after multi program mode") | ||
975 | fail(xdp["attached"][0] not in two_xdps["attached"], | ||
976 | "Offload program not reported after driver activated") | ||
977 | fail(len(ipl["xdp"]["attached"]) != 1, | ||
978 | "Wrong attached program count with remaining programs") | ||
979 | fail(offloaded != "0", "offload ID reported with only driver program left") | ||
980 | |||
981 | start_test("Test multi-attachment XDP - device remove...") | ||
982 | sim.set_xdp(obj, "offload") | ||
983 | sim.remove() | ||
984 | |||
985 | sim = NetdevSim() | ||
986 | sim.set_ethtool_tc_offloads(True) | ||
987 | 1012 | ||
988 | start_test("Test mixing of TC and XDP...") | 1013 | start_test("Test mixing of TC and XDP...") |
989 | sim.tc_add_ingress() | 1014 | sim.tc_add_ingress() |
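
Note on the replace-mode check above: it now matches the kernel's extack string instead of counting "File exists" in stderr. For orientation only, below is a minimal, hypothetical sketch of the NL_SET_ERR_MSG() pattern that produces such extack strings on the kernel side; the function name, arguments and surrounding logic are assumptions for illustration and are not taken from this patch — only the NL_SET_ERR_MSG() mechanism itself is real.

	/*
	 * Hypothetical sketch: how an XDP attach path might report the
	 * "native and generic XDP can't be active at the same time"
	 * extack string that test_offload.py now greps for. Only the
	 * NL_SET_ERR_MSG() usage mirrors the real mechanism; the helper
	 * and its parameters are illustrative.
	 */
	#include <linux/errno.h>
	#include <linux/netlink.h>

	static int xdp_mode_conflict_check(struct netlink_ext_ack *extack,
					   bool generic_active,
					   bool native_requested)
	{
		if (generic_active && native_requested) {
			NL_SET_ERR_MSG(extack,
				       "native and generic XDP can't be active at the same time");
			return -EEXIST;
		}
		return 0;
	}
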
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c index a08d026ac396..c52bd90fbb34 100644 --- a/tools/testing/selftests/bpf/test_progs.c +++ b/tools/testing/selftests/bpf/test_progs.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <string.h> | 10 | #include <string.h> |
11 | #include <assert.h> | 11 | #include <assert.h> |
12 | #include <stdlib.h> | 12 | #include <stdlib.h> |
13 | #include <stdarg.h> | ||
13 | #include <time.h> | 14 | #include <time.h> |
14 | 15 | ||
15 | #include <linux/types.h> | 16 | #include <linux/types.h> |
@@ -1783,6 +1784,15 @@ static void test_task_fd_query_tp(void) | |||
1783 | "sys_enter_read"); | 1784 | "sys_enter_read"); |
1784 | } | 1785 | } |
1785 | 1786 | ||
1787 | static int libbpf_debug_print(enum libbpf_print_level level, | ||
1788 | const char *format, va_list args) | ||
1789 | { | ||
1790 | if (level == LIBBPF_DEBUG) | ||
1791 | return 0; | ||
1792 | |||
1793 | return vfprintf(stderr, format, args); | ||
1794 | } | ||
1795 | |||
1786 | static void test_reference_tracking() | 1796 | static void test_reference_tracking() |
1787 | { | 1797 | { |
1788 | const char *file = "./test_sk_lookup_kern.o"; | 1798 | const char *file = "./test_sk_lookup_kern.o"; |
@@ -1809,9 +1819,9 @@ static void test_reference_tracking() | |||
1809 | 1819 | ||
1810 | /* Expect verifier failure if test name has 'fail' */ | 1820 | /* Expect verifier failure if test name has 'fail' */ |
1811 | if (strstr(title, "fail") != NULL) { | 1821 | if (strstr(title, "fail") != NULL) { |
1812 | libbpf_set_print(NULL, NULL, NULL); | 1822 | libbpf_set_print(NULL); |
1813 | err = !bpf_program__load(prog, "GPL", 0); | 1823 | err = !bpf_program__load(prog, "GPL", 0); |
1814 | libbpf_set_print(printf, printf, NULL); | 1824 | libbpf_set_print(libbpf_debug_print); |
1815 | } else { | 1825 | } else { |
1816 | err = bpf_program__load(prog, "GPL", 0); | 1826 | err = bpf_program__load(prog, "GPL", 0); |
1817 | } | 1827 | } |
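
The test_progs.c hunk above tracks the libbpf change that reduces libbpf_set_print() to a single callback taking an enum libbpf_print_level. A minimal standalone usage sketch of that callback style, modelled on the libbpf_debug_print() helper added in the diff, is shown below; it is an illustration, not part of the patch.

	/* Minimal sketch of the single-callback libbpf_set_print() usage:
	 * drop LIBBPF_DEBUG messages, forward everything else to stderr. */
	#include <stdarg.h>
	#include <stdio.h>
	#include <bpf/libbpf.h>

	static int print_non_debug(enum libbpf_print_level level,
				   const char *format, va_list args)
	{
		if (level == LIBBPF_DEBUG)
			return 0;
		return vfprintf(stderr, format, args);
	}

	int main(void)
	{
		libbpf_set_print(print_non_debug);	/* install filtered printer */
		libbpf_set_print(NULL);			/* or silence libbpf output */
		return 0;
	}

Passing NULL, as the reference-tracking test does around the expected-failure load, suppresses libbpf output entirely; reinstalling the filtered callback afterwards brings warning and info messages back.
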
diff --git a/tools/testing/selftests/bpf/verifier/ctx_sk_msg.c b/tools/testing/selftests/bpf/verifier/ctx_sk_msg.c index b0195770da6a..c6c69220a569 100644 --- a/tools/testing/selftests/bpf/verifier/ctx_sk_msg.c +++ b/tools/testing/selftests/bpf/verifier/ctx_sk_msg.c | |||
@@ -100,6 +100,7 @@ | |||
100 | .errstr = "invalid bpf_context access", | 100 | .errstr = "invalid bpf_context access", |
101 | .result = REJECT, | 101 | .result = REJECT, |
102 | .prog_type = BPF_PROG_TYPE_SK_MSG, | 102 | .prog_type = BPF_PROG_TYPE_SK_MSG, |
103 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
103 | }, | 104 | }, |
104 | { | 105 | { |
105 | "invalid read past end of SK_MSG", | 106 | "invalid read past end of SK_MSG", |
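
This hunk and the verifier hunks that follow all add the same F_NEEDS_EFFICIENT_UNALIGNED_ACCESS flag, marking cases whose expected verdict assumes an architecture with efficient unaligned access; as the flag is generally used, the test_verifier harness loads such programs with relaxed alignment checking so the expectation also holds on strict-alignment targets. A sketch of what a flagged entry looks like follows; the program body and strings are illustrative, only the .flags line mirrors the actual additions.

	/* Illustrative test_verifier entry carrying the flag added here and in
	 * the hunks below; the instructions and errstr are made up. */
	{
		"example: unaligned ctx read needs efficient unaligned access",
		.insns = {
		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
			    offsetof(struct __sk_buff, mark) + 1),
		BPF_EXIT_INSN(),
		},
		.errstr = "invalid bpf_context access",
		.result = REJECT,
		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
	},
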
diff --git a/tools/testing/selftests/bpf/verifier/ctx_skb.c b/tools/testing/selftests/bpf/verifier/ctx_skb.c index 881f1c7f57a1..c660deb582f1 100644 --- a/tools/testing/selftests/bpf/verifier/ctx_skb.c +++ b/tools/testing/selftests/bpf/verifier/ctx_skb.c | |||
@@ -687,6 +687,7 @@ | |||
687 | }, | 687 | }, |
688 | .errstr = "invalid bpf_context access", | 688 | .errstr = "invalid bpf_context access", |
689 | .result = REJECT, | 689 | .result = REJECT, |
690 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
690 | }, | 691 | }, |
691 | { | 692 | { |
692 | "check skb->hash half load not permitted, unaligned 3", | 693 | "check skb->hash half load not permitted, unaligned 3", |
diff --git a/tools/testing/selftests/bpf/verifier/jmp32.c b/tools/testing/selftests/bpf/verifier/jmp32.c index ceb39ffa0e88..f0961c58581e 100644 --- a/tools/testing/selftests/bpf/verifier/jmp32.c +++ b/tools/testing/selftests/bpf/verifier/jmp32.c | |||
@@ -27,6 +27,7 @@ | |||
27 | .data64 = { 1ULL << 63 | 1, } | 27 | .data64 = { 1ULL << 63 | 1, } |
28 | }, | 28 | }, |
29 | }, | 29 | }, |
30 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
30 | }, | 31 | }, |
31 | { | 32 | { |
32 | "jset32: BPF_X", | 33 | "jset32: BPF_X", |
@@ -58,6 +59,7 @@ | |||
58 | .data64 = { 1ULL << 63 | 1, } | 59 | .data64 = { 1ULL << 63 | 1, } |
59 | }, | 60 | }, |
60 | }, | 61 | }, |
62 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
61 | }, | 63 | }, |
62 | { | 64 | { |
63 | "jset32: min/max deduction", | 65 | "jset32: min/max deduction", |
@@ -93,6 +95,7 @@ | |||
93 | .data64 = { -1, } | 95 | .data64 = { -1, } |
94 | }, | 96 | }, |
95 | }, | 97 | }, |
98 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
96 | }, | 99 | }, |
97 | { | 100 | { |
98 | "jeq32: BPF_X", | 101 | "jeq32: BPF_X", |
@@ -119,6 +122,7 @@ | |||
119 | .data64 = { 1ULL << 63 | 1, } | 122 | .data64 = { 1ULL << 63 | 1, } |
120 | }, | 123 | }, |
121 | }, | 124 | }, |
125 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
122 | }, | 126 | }, |
123 | { | 127 | { |
124 | "jeq32: min/max deduction", | 128 | "jeq32: min/max deduction", |
@@ -154,6 +158,7 @@ | |||
154 | .data64 = { -1, } | 158 | .data64 = { -1, } |
155 | }, | 159 | }, |
156 | }, | 160 | }, |
161 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
157 | }, | 162 | }, |
158 | { | 163 | { |
159 | "jne32: BPF_X", | 164 | "jne32: BPF_X", |
@@ -180,6 +185,7 @@ | |||
180 | .data64 = { 1ULL << 63 | 2, } | 185 | .data64 = { 1ULL << 63 | 2, } |
181 | }, | 186 | }, |
182 | }, | 187 | }, |
188 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
183 | }, | 189 | }, |
184 | { | 190 | { |
185 | "jne32: min/max deduction", | 191 | "jne32: min/max deduction", |
@@ -218,6 +224,7 @@ | |||
218 | .data64 = { 0, } | 224 | .data64 = { 0, } |
219 | }, | 225 | }, |
220 | }, | 226 | }, |
227 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
221 | }, | 228 | }, |
222 | { | 229 | { |
223 | "jge32: BPF_X", | 230 | "jge32: BPF_X", |
@@ -244,6 +251,7 @@ | |||
244 | .data64 = { (UINT_MAX - 1) | 2ULL << 32, } | 251 | .data64 = { (UINT_MAX - 1) | 2ULL << 32, } |
245 | }, | 252 | }, |
246 | }, | 253 | }, |
254 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
247 | }, | 255 | }, |
248 | { | 256 | { |
249 | "jge32: min/max deduction", | 257 | "jge32: min/max deduction", |
@@ -284,6 +292,7 @@ | |||
284 | .data64 = { 0, } | 292 | .data64 = { 0, } |
285 | }, | 293 | }, |
286 | }, | 294 | }, |
295 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
287 | }, | 296 | }, |
288 | { | 297 | { |
289 | "jgt32: BPF_X", | 298 | "jgt32: BPF_X", |
@@ -310,6 +319,7 @@ | |||
310 | .data64 = { (UINT_MAX - 1) | 2ULL << 32, } | 319 | .data64 = { (UINT_MAX - 1) | 2ULL << 32, } |
311 | }, | 320 | }, |
312 | }, | 321 | }, |
322 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
313 | }, | 323 | }, |
314 | { | 324 | { |
315 | "jgt32: min/max deduction", | 325 | "jgt32: min/max deduction", |
@@ -350,6 +360,7 @@ | |||
350 | .data64 = { INT_MAX, } | 360 | .data64 = { INT_MAX, } |
351 | }, | 361 | }, |
352 | }, | 362 | }, |
363 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
353 | }, | 364 | }, |
354 | { | 365 | { |
355 | "jle32: BPF_X", | 366 | "jle32: BPF_X", |
@@ -376,6 +387,7 @@ | |||
376 | .data64 = { UINT_MAX, } | 387 | .data64 = { UINT_MAX, } |
377 | }, | 388 | }, |
378 | }, | 389 | }, |
390 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
379 | }, | 391 | }, |
380 | { | 392 | { |
381 | "jle32: min/max deduction", | 393 | "jle32: min/max deduction", |
@@ -416,6 +428,7 @@ | |||
416 | .data64 = { INT_MAX - 1, } | 428 | .data64 = { INT_MAX - 1, } |
417 | }, | 429 | }, |
418 | }, | 430 | }, |
431 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
419 | }, | 432 | }, |
420 | { | 433 | { |
421 | "jlt32: BPF_X", | 434 | "jlt32: BPF_X", |
@@ -442,6 +455,7 @@ | |||
442 | .data64 = { (INT_MAX - 1) | 3ULL << 32, } | 455 | .data64 = { (INT_MAX - 1) | 3ULL << 32, } |
443 | }, | 456 | }, |
444 | }, | 457 | }, |
458 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
445 | }, | 459 | }, |
446 | { | 460 | { |
447 | "jlt32: min/max deduction", | 461 | "jlt32: min/max deduction", |
@@ -482,6 +496,7 @@ | |||
482 | .data64 = { -2, } | 496 | .data64 = { -2, } |
483 | }, | 497 | }, |
484 | }, | 498 | }, |
499 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
485 | }, | 500 | }, |
486 | { | 501 | { |
487 | "jsge32: BPF_X", | 502 | "jsge32: BPF_X", |
@@ -508,6 +523,7 @@ | |||
508 | .data64 = { -2, } | 523 | .data64 = { -2, } |
509 | }, | 524 | }, |
510 | }, | 525 | }, |
526 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
511 | }, | 527 | }, |
512 | { | 528 | { |
513 | "jsge32: min/max deduction", | 529 | "jsge32: min/max deduction", |
@@ -548,6 +564,7 @@ | |||
548 | .data64 = { 1, } | 564 | .data64 = { 1, } |
549 | }, | 565 | }, |
550 | }, | 566 | }, |
567 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
551 | }, | 568 | }, |
552 | { | 569 | { |
553 | "jsgt32: BPF_X", | 570 | "jsgt32: BPF_X", |
@@ -574,6 +591,7 @@ | |||
574 | .data64 = { 0x7fffffff, } | 591 | .data64 = { 0x7fffffff, } |
575 | }, | 592 | }, |
576 | }, | 593 | }, |
594 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
577 | }, | 595 | }, |
578 | { | 596 | { |
579 | "jsgt32: min/max deduction", | 597 | "jsgt32: min/max deduction", |
@@ -614,6 +632,7 @@ | |||
614 | .data64 = { 1, } | 632 | .data64 = { 1, } |
615 | }, | 633 | }, |
616 | }, | 634 | }, |
635 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
617 | }, | 636 | }, |
618 | { | 637 | { |
619 | "jsle32: BPF_X", | 638 | "jsle32: BPF_X", |
@@ -640,6 +659,7 @@ | |||
640 | .data64 = { 0x7fffffff | 2ULL << 32, } | 659 | .data64 = { 0x7fffffff | 2ULL << 32, } |
641 | }, | 660 | }, |
642 | }, | 661 | }, |
662 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
643 | }, | 663 | }, |
644 | { | 664 | { |
645 | "jsle32: min/max deduction", | 665 | "jsle32: min/max deduction", |
@@ -680,6 +700,7 @@ | |||
680 | .data64 = { 1, } | 700 | .data64 = { 1, } |
681 | }, | 701 | }, |
682 | }, | 702 | }, |
703 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
683 | }, | 704 | }, |
684 | { | 705 | { |
685 | "jslt32: BPF_X", | 706 | "jslt32: BPF_X", |
@@ -706,6 +727,7 @@ | |||
706 | .data64 = { 0x7fffffff | 2ULL << 32, } | 727 | .data64 = { 0x7fffffff | 2ULL << 32, } |
707 | }, | 728 | }, |
708 | }, | 729 | }, |
730 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
709 | }, | 731 | }, |
710 | { | 732 | { |
711 | "jslt32: min/max deduction", | 733 | "jslt32: min/max deduction", |
diff --git a/tools/testing/selftests/bpf/verifier/jset.c b/tools/testing/selftests/bpf/verifier/jset.c index 7e14037acfaf..8dcd4e0383d5 100644 --- a/tools/testing/selftests/bpf/verifier/jset.c +++ b/tools/testing/selftests/bpf/verifier/jset.c | |||
@@ -53,6 +53,7 @@ | |||
53 | .data64 = { ~0ULL, } | 53 | .data64 = { ~0ULL, } |
54 | }, | 54 | }, |
55 | }, | 55 | }, |
56 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
56 | }, | 57 | }, |
57 | { | 58 | { |
58 | "jset: sign-extend", | 59 | "jset: sign-extend", |
@@ -70,6 +71,7 @@ | |||
70 | .result = ACCEPT, | 71 | .result = ACCEPT, |
71 | .retval = 2, | 72 | .retval = 2, |
72 | .data = { 1, 0, 0, 0, 0, 0, 0, 1, }, | 73 | .data = { 1, 0, 0, 0, 0, 0, 0, 1, }, |
74 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
73 | }, | 75 | }, |
74 | { | 76 | { |
75 | "jset: known const compare", | 77 | "jset: known const compare", |
diff --git a/tools/testing/selftests/bpf/verifier/spill_fill.c b/tools/testing/selftests/bpf/verifier/spill_fill.c index d58db72fdfe8..45d43bf82f26 100644 --- a/tools/testing/selftests/bpf/verifier/spill_fill.c +++ b/tools/testing/selftests/bpf/verifier/spill_fill.c | |||
@@ -46,6 +46,7 @@ | |||
46 | .errstr_unpriv = "attempt to corrupt spilled", | 46 | .errstr_unpriv = "attempt to corrupt spilled", |
47 | .errstr = "R0 invalid mem access 'inv", | 47 | .errstr = "R0 invalid mem access 'inv", |
48 | .result = REJECT, | 48 | .result = REJECT, |
49 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
49 | }, | 50 | }, |
50 | { | 51 | { |
51 | "check corrupted spill/fill, LSB", | 52 | "check corrupted spill/fill, LSB", |
diff --git a/tools/testing/selftests/bpf/verifier/spin_lock.c b/tools/testing/selftests/bpf/verifier/spin_lock.c index d829eef372a4..781621facae4 100644 --- a/tools/testing/selftests/bpf/verifier/spin_lock.c +++ b/tools/testing/selftests/bpf/verifier/spin_lock.c | |||
@@ -83,6 +83,7 @@ | |||
83 | .result_unpriv = REJECT, | 83 | .result_unpriv = REJECT, |
84 | .errstr_unpriv = "", | 84 | .errstr_unpriv = "", |
85 | .prog_type = BPF_PROG_TYPE_CGROUP_SKB, | 85 | .prog_type = BPF_PROG_TYPE_CGROUP_SKB, |
86 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
86 | }, | 87 | }, |
87 | { | 88 | { |
88 | "spin_lock: test4 direct ld/st", | 89 | "spin_lock: test4 direct ld/st", |
@@ -112,6 +113,7 @@ | |||
112 | .result_unpriv = REJECT, | 113 | .result_unpriv = REJECT, |
113 | .errstr_unpriv = "", | 114 | .errstr_unpriv = "", |
114 | .prog_type = BPF_PROG_TYPE_CGROUP_SKB, | 115 | .prog_type = BPF_PROG_TYPE_CGROUP_SKB, |
116 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
115 | }, | 117 | }, |
116 | { | 118 | { |
117 | "spin_lock: test5 call within a locked region", | 119 | "spin_lock: test5 call within a locked region", |
diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c index 9ab5ace83e02..4b721a77bebb 100644 --- a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c +++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c | |||
@@ -512,6 +512,7 @@ | |||
512 | .fixup_map_array_48b = { 3 }, | 512 | .fixup_map_array_48b = { 3 }, |
513 | .result = ACCEPT, | 513 | .result = ACCEPT, |
514 | .retval = 0xabcdef12, | 514 | .retval = 0xabcdef12, |
515 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
515 | }, | 516 | }, |
516 | { | 517 | { |
517 | "map access: unknown scalar += value_ptr, 3", | 518 | "map access: unknown scalar += value_ptr, 3", |
@@ -537,6 +538,7 @@ | |||
537 | .result_unpriv = REJECT, | 538 | .result_unpriv = REJECT, |
538 | .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", | 539 | .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", |
539 | .retval = 0xabcdef12, | 540 | .retval = 0xabcdef12, |
541 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
540 | }, | 542 | }, |
541 | { | 543 | { |
542 | "map access: unknown scalar += value_ptr, 4", | 544 | "map access: unknown scalar += value_ptr, 4", |
@@ -559,6 +561,7 @@ | |||
559 | .result = REJECT, | 561 | .result = REJECT, |
560 | .errstr = "R1 max value is outside of the array range", | 562 | .errstr = "R1 max value is outside of the array range", |
561 | .errstr_unpriv = "R1 pointer arithmetic of map value goes out of range", | 563 | .errstr_unpriv = "R1 pointer arithmetic of map value goes out of range", |
564 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
562 | }, | 565 | }, |
563 | { | 566 | { |
564 | "map access: value_ptr += unknown scalar, 1", | 567 | "map access: value_ptr += unknown scalar, 1", |
@@ -598,6 +601,7 @@ | |||
598 | .fixup_map_array_48b = { 3 }, | 601 | .fixup_map_array_48b = { 3 }, |
599 | .result = ACCEPT, | 602 | .result = ACCEPT, |
600 | .retval = 0xabcdef12, | 603 | .retval = 0xabcdef12, |
604 | .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, | ||
601 | }, | 605 | }, |
602 | { | 606 | { |
603 | "map access: value_ptr += unknown scalar, 3", | 607 | "map access: value_ptr += unknown scalar, 3", |