authorDavid S. Miller <davem@davemloft.net>2019-02-06 19:56:20 -0500
committerDavid S. Miller <davem@davemloft.net>2019-02-06 19:56:20 -0500
commite90b1fd83c94d536375d8b9f4916afd15f4db0ed (patch)
treeba50688cc9a6712575aa861ff37b1db53dc472b8 /tools
parent907bea9cb8e9b7c4cb6a8042c164f3c24f141006 (diff)
parentdd9cef43c222df7c0d76d34451808e789952379d (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
pull-request: bpf-next 2019-02-07

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) Add a riscv64 JIT for BPF, from Björn.

2) Implement BTF deduplication algorithm for libbpf which takes BTF type
   information containing duplicate per-compilation unit information and
   reduces it to an equivalent set of BTF types with no duplication and
   without loss of information, from Andrii.

3) Offloaded and native BPF XDP programs can coexist today, enable also
   offloaded and generic ones as well, from Jakub.

4) Expose various BTF related helper functions in libbpf as API which are
   in particular helpful for JITed programs, from Yonghong.

5) Fix the recently added JMP32 code emission in s390x JIT, from Heiko.

6) Fix BPF kselftests' tcp_{server,client}.py to be able to run inside a
   network namespace, also add a fix for libbpf to get libbpf_print()
   working, from Stanislav.

7) Fixes for bpftool documentation, from Prashant.

8) Type cleanup in BPF kselftests' test_maps.c to silence a gcc8 warning,
   from Breno.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
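As a reading aid for this pull, below is a minimal sketch of how the reworked libbpf BTF API surface might be driven from user code. Only btf__new() (now without the btf_print_fn_t argument), btf__get_nr_types(), btf__dedup() and btf__free() come from this series; the input file name, the include paths and the use of libbpf_get_error() for error checking are illustrative assumptions, not part of the patch.

/* Sketch only: feed a raw .BTF blob through the new libbpf API.
 * Assumes installed libbpf headers and a BTF-capable kernel.
 */
#include <stdio.h>
#include <stdlib.h>
#include <bpf/btf.h>
#include <bpf/libbpf.h>

int main(void)
{
	struct btf *btf;
	__u8 *data;
	long sz;
	FILE *f;

	/* Hypothetical input: a raw .BTF section dumped to a file. */
	f = fopen("prog.btf", "rb");
	if (!f)
		return 1;
	fseek(f, 0, SEEK_END);
	sz = ftell(f);
	fseek(f, 0, SEEK_SET);
	data = malloc(sz);
	if (!data || fread(data, 1, sz, f) != (size_t)sz)
		return 1;
	fclose(f);

	/* New signature: no btf_print_fn_t argument any more; messages now
	 * go through libbpf's pr_debug()/pr_warning(). */
	btf = btf__new(data, sz);
	if (libbpf_get_error(btf))
		return 1;

	printf("types before dedup: %u\n", btf__get_nr_types(btf));

	/* NULL .BTF.ext and NULL opts are accepted; defaults are used. */
	if (btf__dedup(btf, NULL, NULL))
		fprintf(stderr, "dedup failed\n");
	else
		printf("types after dedup:  %u\n", btf__get_nr_types(btf));

	btf__free(btf);
	free(data);
	return 0;
}

Passing NULL for both the .BTF.ext handle and the options struct is accepted by btf__dedup(), as can be seen in btf_dedup_new() in the diff below.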
Diffstat (limited to 'tools')
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-cgroup.rst | 4
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-feature.rst | 4
-rw-r--r--  tools/bpf/bpftool/Documentation/bpftool-prog.rst | 2
-rw-r--r--  tools/lib/bpf/btf.c | 2032
-rw-r--r--  tools/lib/bpf/btf.h | 43
-rw-r--r--  tools/lib/bpf/libbpf.c | 125
-rw-r--r--  tools/lib/bpf/libbpf.h | 19
-rw-r--r--  tools/lib/bpf/libbpf.map | 10
-rw-r--r--  tools/lib/bpf/libbpf_util.h | 30
-rw-r--r--  tools/lib/bpf/test_libbpf.cpp | 4
-rw-r--r--  tools/perf/util/bpf-loader.c | 26
-rwxr-xr-x  tools/testing/selftests/bpf/tcp_client.py | 3
-rwxr-xr-x  tools/testing/selftests/bpf/tcp_server.py | 5
-rw-r--r--  tools/testing/selftests/bpf/test_btf.c | 553
-rw-r--r--  tools/testing/selftests/bpf/test_libbpf_open.c | 30
-rw-r--r--  tools/testing/selftests/bpf/test_maps.c | 27
-rwxr-xr-x  tools/testing/selftests/bpf/test_offload.py | 135
-rw-r--r--  tools/testing/selftests/bpf/test_progs.c | 14
-rw-r--r--  tools/testing/selftests/bpf/verifier/ctx_sk_msg.c | 1
-rw-r--r--  tools/testing/selftests/bpf/verifier/ctx_skb.c | 1
-rw-r--r--  tools/testing/selftests/bpf/verifier/jmp32.c | 22
-rw-r--r--  tools/testing/selftests/bpf/verifier/jset.c | 2
-rw-r--r--  tools/testing/selftests/bpf/verifier/spill_fill.c | 1
-rw-r--r--  tools/testing/selftests/bpf/verifier/spin_lock.c | 2
-rw-r--r--  tools/testing/selftests/bpf/verifier/value_ptr_arith.c | 4
25 files changed, 2722 insertions, 377 deletions
diff --git a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
index d43fce568ef7..9bb9ace54ba8 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-cgroup.rst
@@ -17,8 +17,8 @@ SYNOPSIS
 *COMMANDS* :=
 { **show** | **list** | **tree** | **attach** | **detach** | **help** }
 
-MAP COMMANDS
-=============
+CGROUP COMMANDS
+===============
 
 | **bpftool** **cgroup { show | list }** *CGROUP*
 | **bpftool** **cgroup tree** [*CGROUP_ROOT*]
diff --git a/tools/bpf/bpftool/Documentation/bpftool-feature.rst b/tools/bpf/bpftool/Documentation/bpftool-feature.rst
index 8d489a26e3c9..82de03dd8f52 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-feature.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-feature.rst
@@ -16,8 +16,8 @@ SYNOPSIS
 
 *COMMANDS* := { **probe** | **help** }
 
-MAP COMMANDS
-=============
+FEATURE COMMANDS
+================
 
 | **bpftool** **feature probe** [*COMPONENT*] [**macros** [**prefix** *PREFIX*]]
 | **bpftool** **feature help**
diff --git a/tools/bpf/bpftool/Documentation/bpftool-prog.rst b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
index 13b56102f528..7e59495cb028 100644
--- a/tools/bpf/bpftool/Documentation/bpftool-prog.rst
+++ b/tools/bpf/bpftool/Documentation/bpftool-prog.rst
@@ -18,7 +18,7 @@ SYNOPSIS
 { **show** | **list** | **dump xlated** | **dump jited** | **pin** | **load**
 | **loadall** | **help** }
 
-MAP COMMANDS
+PROG COMMANDS
 =============
 
 | **bpftool** **prog { show | list }** [*PROG*]
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index d682d3b8f7b9..ab6528c935a1 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1,6 +1,7 @@
 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
 /* Copyright (c) 2018 Facebook */
 
+#include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #include <unistd.h>
@@ -9,8 +10,9 @@
 #include <linux/btf.h>
 #include "btf.h"
 #include "bpf.h"
+#include "libbpf.h"
+#include "libbpf_util.h"
 
-#define elog(fmt, ...) { if (err_log) err_log(fmt, ##__VA_ARGS__); }
 #define max(a, b) ((a) > (b) ? (a) : (b))
 #define min(a, b) ((a) < (b) ? (a) : (b))
 
@@ -107,54 +109,54 @@ static int btf_add_type(struct btf *btf, struct btf_type *t)
 	return 0;
 }
 
-static int btf_parse_hdr(struct btf *btf, btf_print_fn_t err_log)
+static int btf_parse_hdr(struct btf *btf)
 {
 	const struct btf_header *hdr = btf->hdr;
 	__u32 meta_left;
 
 	if (btf->data_size < sizeof(struct btf_header)) {
-		elog("BTF header not found\n");
+		pr_debug("BTF header not found\n");
 		return -EINVAL;
 	}
 
 	if (hdr->magic != BTF_MAGIC) {
-		elog("Invalid BTF magic:%x\n", hdr->magic);
+		pr_debug("Invalid BTF magic:%x\n", hdr->magic);
 		return -EINVAL;
 	}
 
 	if (hdr->version != BTF_VERSION) {
-		elog("Unsupported BTF version:%u\n", hdr->version);
+		pr_debug("Unsupported BTF version:%u\n", hdr->version);
 		return -ENOTSUP;
 	}
 
 	if (hdr->flags) {
-		elog("Unsupported BTF flags:%x\n", hdr->flags);
+		pr_debug("Unsupported BTF flags:%x\n", hdr->flags);
 		return -ENOTSUP;
 	}
 
 	meta_left = btf->data_size - sizeof(*hdr);
 	if (!meta_left) {
-		elog("BTF has no data\n");
+		pr_debug("BTF has no data\n");
 		return -EINVAL;
 	}
 
 	if (meta_left < hdr->type_off) {
-		elog("Invalid BTF type section offset:%u\n", hdr->type_off);
+		pr_debug("Invalid BTF type section offset:%u\n", hdr->type_off);
 		return -EINVAL;
 	}
 
 	if (meta_left < hdr->str_off) {
-		elog("Invalid BTF string section offset:%u\n", hdr->str_off);
+		pr_debug("Invalid BTF string section offset:%u\n", hdr->str_off);
 		return -EINVAL;
 	}
 
 	if (hdr->type_off >= hdr->str_off) {
-		elog("BTF type section offset >= string section offset. No type?\n");
+		pr_debug("BTF type section offset >= string section offset. No type?\n");
 		return -EINVAL;
 	}
 
 	if (hdr->type_off & 0x02) {
-		elog("BTF type section is not aligned to 4 bytes\n");
+		pr_debug("BTF type section is not aligned to 4 bytes\n");
 		return -EINVAL;
 	}
 
@@ -163,7 +165,7 @@ static int btf_parse_hdr(struct btf *btf, btf_print_fn_t err_log)
 	return 0;
 }
 
-static int btf_parse_str_sec(struct btf *btf, btf_print_fn_t err_log)
+static int btf_parse_str_sec(struct btf *btf)
 {
 	const struct btf_header *hdr = btf->hdr;
 	const char *start = btf->nohdr_data + hdr->str_off;
@@ -171,7 +173,7 @@ static int btf_parse_str_sec(struct btf *btf, btf_print_fn_t err_log)
 
 	if (!hdr->str_len || hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
 	    start[0] || end[-1]) {
-		elog("Invalid BTF string section\n");
+		pr_debug("Invalid BTF string section\n");
 		return -EINVAL;
 	}
 
@@ -180,7 +182,38 @@ static int btf_parse_str_sec(struct btf *btf, btf_print_fn_t err_log)
180 return 0; 182 return 0;
181} 183}
182 184
183static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log) 185static int btf_type_size(struct btf_type *t)
186{
187 int base_size = sizeof(struct btf_type);
188 __u16 vlen = BTF_INFO_VLEN(t->info);
189
190 switch (BTF_INFO_KIND(t->info)) {
191 case BTF_KIND_FWD:
192 case BTF_KIND_CONST:
193 case BTF_KIND_VOLATILE:
194 case BTF_KIND_RESTRICT:
195 case BTF_KIND_PTR:
196 case BTF_KIND_TYPEDEF:
197 case BTF_KIND_FUNC:
198 return base_size;
199 case BTF_KIND_INT:
200 return base_size + sizeof(__u32);
201 case BTF_KIND_ENUM:
202 return base_size + vlen * sizeof(struct btf_enum);
203 case BTF_KIND_ARRAY:
204 return base_size + sizeof(struct btf_array);
205 case BTF_KIND_STRUCT:
206 case BTF_KIND_UNION:
207 return base_size + vlen * sizeof(struct btf_member);
208 case BTF_KIND_FUNC_PROTO:
209 return base_size + vlen * sizeof(struct btf_param);
210 default:
211 pr_debug("Unsupported BTF_KIND:%u\n", BTF_INFO_KIND(t->info));
212 return -EINVAL;
213 }
214}
215
216static int btf_parse_type_sec(struct btf *btf)
184{ 217{
185 struct btf_header *hdr = btf->hdr; 218 struct btf_header *hdr = btf->hdr;
186 void *nohdr_data = btf->nohdr_data; 219 void *nohdr_data = btf->nohdr_data;
@@ -189,41 +222,13 @@ static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log)
189 222
190 while (next_type < end_type) { 223 while (next_type < end_type) {
191 struct btf_type *t = next_type; 224 struct btf_type *t = next_type;
192 __u16 vlen = BTF_INFO_VLEN(t->info); 225 int type_size;
193 int err; 226 int err;
194 227
195 next_type += sizeof(*t); 228 type_size = btf_type_size(t);
196 switch (BTF_INFO_KIND(t->info)) { 229 if (type_size < 0)
197 case BTF_KIND_INT: 230 return type_size;
198 next_type += sizeof(int); 231 next_type += type_size;
199 break;
200 case BTF_KIND_ARRAY:
201 next_type += sizeof(struct btf_array);
202 break;
203 case BTF_KIND_STRUCT:
204 case BTF_KIND_UNION:
205 next_type += vlen * sizeof(struct btf_member);
206 break;
207 case BTF_KIND_ENUM:
208 next_type += vlen * sizeof(struct btf_enum);
209 break;
210 case BTF_KIND_FUNC_PROTO:
211 next_type += vlen * sizeof(struct btf_param);
212 break;
213 case BTF_KIND_FUNC:
214 case BTF_KIND_TYPEDEF:
215 case BTF_KIND_PTR:
216 case BTF_KIND_FWD:
217 case BTF_KIND_VOLATILE:
218 case BTF_KIND_CONST:
219 case BTF_KIND_RESTRICT:
220 break;
221 default:
222 elog("Unsupported BTF_KIND:%u\n",
223 BTF_INFO_KIND(t->info));
224 return -EINVAL;
225 }
226
227 err = btf_add_type(btf, t); 232 err = btf_add_type(btf, t);
228 if (err) 233 if (err)
229 return err; 234 return err;
@@ -232,6 +237,11 @@ static int btf_parse_type_sec(struct btf *btf, btf_print_fn_t err_log)
232 return 0; 237 return 0;
233} 238}
234 239
240__u32 btf__get_nr_types(const struct btf *btf)
241{
242 return btf->nr_types;
243}
244
235const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id) 245const struct btf_type *btf__type_by_id(const struct btf *btf, __u32 type_id)
236{ 246{
237 if (type_id > btf->nr_types) 247 if (type_id > btf->nr_types)
@@ -250,21 +260,6 @@ static bool btf_type_is_void_or_null(const struct btf_type *t)
250 return !t || btf_type_is_void(t); 260 return !t || btf_type_is_void(t);
251} 261}
252 262
253static __s64 btf_type_size(const struct btf_type *t)
254{
255 switch (BTF_INFO_KIND(t->info)) {
256 case BTF_KIND_INT:
257 case BTF_KIND_STRUCT:
258 case BTF_KIND_UNION:
259 case BTF_KIND_ENUM:
260 return t->size;
261 case BTF_KIND_PTR:
262 return sizeof(void *);
263 default:
264 return -EINVAL;
265 }
266}
267
268#define MAX_RESOLVE_DEPTH 32 263#define MAX_RESOLVE_DEPTH 32
269 264
270__s64 btf__resolve_size(const struct btf *btf, __u32 type_id) 265__s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
@@ -278,11 +273,16 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
278 t = btf__type_by_id(btf, type_id); 273 t = btf__type_by_id(btf, type_id);
279 for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t); 274 for (i = 0; i < MAX_RESOLVE_DEPTH && !btf_type_is_void_or_null(t);
280 i++) { 275 i++) {
281 size = btf_type_size(t);
282 if (size >= 0)
283 break;
284
285 switch (BTF_INFO_KIND(t->info)) { 276 switch (BTF_INFO_KIND(t->info)) {
277 case BTF_KIND_INT:
278 case BTF_KIND_STRUCT:
279 case BTF_KIND_UNION:
280 case BTF_KIND_ENUM:
281 size = t->size;
282 goto done;
283 case BTF_KIND_PTR:
284 size = sizeof(void *);
285 goto done;
286 case BTF_KIND_TYPEDEF: 286 case BTF_KIND_TYPEDEF:
287 case BTF_KIND_VOLATILE: 287 case BTF_KIND_VOLATILE:
288 case BTF_KIND_CONST: 288 case BTF_KIND_CONST:
@@ -306,6 +306,7 @@ __s64 btf__resolve_size(const struct btf *btf, __u32 type_id)
306 if (size < 0) 306 if (size < 0)
307 return -EINVAL; 307 return -EINVAL;
308 308
309done:
309 if (nelems && size > UINT32_MAX / nelems) 310 if (nelems && size > UINT32_MAX / nelems)
310 return -E2BIG; 311 return -E2BIG;
311 312
@@ -363,7 +364,7 @@ void btf__free(struct btf *btf)
363 free(btf); 364 free(btf);
364} 365}
365 366
366struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log) 367struct btf *btf__new(__u8 *data, __u32 size)
367{ 368{
368 __u32 log_buf_size = 0; 369 __u32 log_buf_size = 0;
369 char *log_buf = NULL; 370 char *log_buf = NULL;
@@ -376,16 +377,15 @@ struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
376 377
377 btf->fd = -1; 378 btf->fd = -1;
378 379
379 if (err_log) { 380 log_buf = malloc(BPF_LOG_BUF_SIZE);
380 log_buf = malloc(BPF_LOG_BUF_SIZE); 381 if (!log_buf) {
381 if (!log_buf) { 382 err = -ENOMEM;
382 err = -ENOMEM; 383 goto done;
383 goto done;
384 }
385 *log_buf = 0;
386 log_buf_size = BPF_LOG_BUF_SIZE;
387 } 384 }
388 385
386 *log_buf = 0;
387 log_buf_size = BPF_LOG_BUF_SIZE;
388
389 btf->data = malloc(size); 389 btf->data = malloc(size);
390 if (!btf->data) { 390 if (!btf->data) {
391 err = -ENOMEM; 391 err = -ENOMEM;
@@ -400,21 +400,21 @@ struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
400 400
401 if (btf->fd == -1) { 401 if (btf->fd == -1) {
402 err = -errno; 402 err = -errno;
403 elog("Error loading BTF: %s(%d)\n", strerror(errno), errno); 403 pr_warning("Error loading BTF: %s(%d)\n", strerror(errno), errno);
404 if (log_buf && *log_buf) 404 if (log_buf && *log_buf)
405 elog("%s\n", log_buf); 405 pr_warning("%s\n", log_buf);
406 goto done; 406 goto done;
407 } 407 }
408 408
409 err = btf_parse_hdr(btf, err_log); 409 err = btf_parse_hdr(btf);
410 if (err) 410 if (err)
411 goto done; 411 goto done;
412 412
413 err = btf_parse_str_sec(btf, err_log); 413 err = btf_parse_str_sec(btf);
414 if (err) 414 if (err)
415 goto done; 415 goto done;
416 416
417 err = btf_parse_type_sec(btf, err_log); 417 err = btf_parse_type_sec(btf);
418 418
419done: 419done:
420 free(log_buf); 420 free(log_buf);
@@ -432,6 +432,13 @@ int btf__fd(const struct btf *btf)
432 return btf->fd; 432 return btf->fd;
433} 433}
434 434
435void btf__get_strings(const struct btf *btf, const char **strings,
436 __u32 *str_len)
437{
438 *strings = btf->strings;
439 *str_len = btf->hdr->str_len;
440}
441
435const char *btf__name_by_offset(const struct btf *btf, __u32 offset) 442const char *btf__name_by_offset(const struct btf *btf, __u32 offset)
436{ 443{
437 if (offset < btf->hdr->str_len) 444 if (offset < btf->hdr->str_len)
@@ -491,7 +498,7 @@ int btf__get_from_id(__u32 id, struct btf **btf)
491 goto exit_free; 498 goto exit_free;
492 } 499 }
493 500
494 *btf = btf__new((__u8 *)(long)btf_info.btf, btf_info.btf_size, NULL); 501 *btf = btf__new((__u8 *)(long)btf_info.btf, btf_info.btf_size);
495 if (IS_ERR(*btf)) { 502 if (IS_ERR(*btf)) {
496 err = PTR_ERR(*btf); 503 err = PTR_ERR(*btf);
497 *btf = NULL; 504 *btf = NULL;
@@ -504,6 +511,78 @@ exit_free:
504 return err; 511 return err;
505} 512}
506 513
514int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
515 __u32 expected_key_size, __u32 expected_value_size,
516 __u32 *key_type_id, __u32 *value_type_id)
517{
518 const struct btf_type *container_type;
519 const struct btf_member *key, *value;
520 const size_t max_name = 256;
521 char container_name[max_name];
522 __s64 key_size, value_size;
523 __s32 container_id;
524
525 if (snprintf(container_name, max_name, "____btf_map_%s", map_name) ==
526 max_name) {
527 pr_warning("map:%s length of '____btf_map_%s' is too long\n",
528 map_name, map_name);
529 return -EINVAL;
530 }
531
532 container_id = btf__find_by_name(btf, container_name);
533 if (container_id < 0) {
534 pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
535 map_name, container_name);
536 return container_id;
537 }
538
539 container_type = btf__type_by_id(btf, container_id);
540 if (!container_type) {
541 pr_warning("map:%s cannot find BTF type for container_id:%u\n",
542 map_name, container_id);
543 return -EINVAL;
544 }
545
546 if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
547 BTF_INFO_VLEN(container_type->info) < 2) {
548 pr_warning("map:%s container_name:%s is an invalid container struct\n",
549 map_name, container_name);
550 return -EINVAL;
551 }
552
553 key = (struct btf_member *)(container_type + 1);
554 value = key + 1;
555
556 key_size = btf__resolve_size(btf, key->type);
557 if (key_size < 0) {
558 pr_warning("map:%s invalid BTF key_type_size\n", map_name);
559 return key_size;
560 }
561
562 if (expected_key_size != key_size) {
563 pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
564 map_name, (__u32)key_size, expected_key_size);
565 return -EINVAL;
566 }
567
568 value_size = btf__resolve_size(btf, value->type);
569 if (value_size < 0) {
570 pr_warning("map:%s invalid BTF value_type_size\n", map_name);
571 return value_size;
572 }
573
574 if (expected_value_size != value_size) {
575 pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
576 map_name, (__u32)value_size, expected_value_size);
577 return -EINVAL;
578 }
579
580 *key_type_id = key->type;
581 *value_type_id = value->type;
582
583 return 0;
584}
585
507struct btf_ext_sec_copy_param { 586struct btf_ext_sec_copy_param {
508 __u32 off; 587 __u32 off;
509 __u32 len; 588 __u32 len;
@@ -514,8 +593,7 @@ struct btf_ext_sec_copy_param {
514 593
515static int btf_ext_copy_info(struct btf_ext *btf_ext, 594static int btf_ext_copy_info(struct btf_ext *btf_ext,
516 __u8 *data, __u32 data_size, 595 __u8 *data, __u32 data_size,
517 struct btf_ext_sec_copy_param *ext_sec, 596 struct btf_ext_sec_copy_param *ext_sec)
518 btf_print_fn_t err_log)
519{ 597{
520 const struct btf_ext_header *hdr = (struct btf_ext_header *)data; 598 const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
521 const struct btf_ext_info_sec *sinfo; 599 const struct btf_ext_info_sec *sinfo;
@@ -529,14 +607,14 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext,
529 data_size -= hdr->hdr_len; 607 data_size -= hdr->hdr_len;
530 608
531 if (ext_sec->off & 0x03) { 609 if (ext_sec->off & 0x03) {
532 elog(".BTF.ext %s section is not aligned to 4 bytes\n", 610 pr_debug(".BTF.ext %s section is not aligned to 4 bytes\n",
533 ext_sec->desc); 611 ext_sec->desc);
534 return -EINVAL; 612 return -EINVAL;
535 } 613 }
536 614
537 if (data_size < ext_sec->off || 615 if (data_size < ext_sec->off ||
538 ext_sec->len > data_size - ext_sec->off) { 616 ext_sec->len > data_size - ext_sec->off) {
539 elog("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n", 617 pr_debug("%s section (off:%u len:%u) is beyond the end of the ELF section .BTF.ext\n",
540 ext_sec->desc, ext_sec->off, ext_sec->len); 618 ext_sec->desc, ext_sec->off, ext_sec->len);
541 return -EINVAL; 619 return -EINVAL;
542 } 620 }
@@ -546,7 +624,7 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext,
546 624
547 /* At least a record size */ 625 /* At least a record size */
548 if (info_left < sizeof(__u32)) { 626 if (info_left < sizeof(__u32)) {
549 elog(".BTF.ext %s record size not found\n", ext_sec->desc); 627 pr_debug(".BTF.ext %s record size not found\n", ext_sec->desc);
550 return -EINVAL; 628 return -EINVAL;
551 } 629 }
552 630
@@ -554,7 +632,7 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext,
554 record_size = *(__u32 *)info; 632 record_size = *(__u32 *)info;
555 if (record_size < ext_sec->min_rec_size || 633 if (record_size < ext_sec->min_rec_size ||
556 record_size & 0x03) { 634 record_size & 0x03) {
557 elog("%s section in .BTF.ext has invalid record size %u\n", 635 pr_debug("%s section in .BTF.ext has invalid record size %u\n",
558 ext_sec->desc, record_size); 636 ext_sec->desc, record_size);
559 return -EINVAL; 637 return -EINVAL;
560 } 638 }
@@ -564,7 +642,7 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext,
564 642
565 /* If no records, return failure now so .BTF.ext won't be used. */ 643 /* If no records, return failure now so .BTF.ext won't be used. */
566 if (!info_left) { 644 if (!info_left) {
567 elog("%s section in .BTF.ext has no records", ext_sec->desc); 645 pr_debug("%s section in .BTF.ext has no records", ext_sec->desc);
568 return -EINVAL; 646 return -EINVAL;
569 } 647 }
570 648
@@ -574,14 +652,14 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext,
574 __u32 num_records; 652 __u32 num_records;
575 653
576 if (info_left < sec_hdrlen) { 654 if (info_left < sec_hdrlen) {
577 elog("%s section header is not found in .BTF.ext\n", 655 pr_debug("%s section header is not found in .BTF.ext\n",
578 ext_sec->desc); 656 ext_sec->desc);
579 return -EINVAL; 657 return -EINVAL;
580 } 658 }
581 659
582 num_records = sinfo->num_info; 660 num_records = sinfo->num_info;
583 if (num_records == 0) { 661 if (num_records == 0) {
584 elog("%s section has incorrect num_records in .BTF.ext\n", 662 pr_debug("%s section has incorrect num_records in .BTF.ext\n",
585 ext_sec->desc); 663 ext_sec->desc);
586 return -EINVAL; 664 return -EINVAL;
587 } 665 }
@@ -589,7 +667,7 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext,
589 total_record_size = sec_hdrlen + 667 total_record_size = sec_hdrlen +
590 (__u64)num_records * record_size; 668 (__u64)num_records * record_size;
591 if (info_left < total_record_size) { 669 if (info_left < total_record_size) {
592 elog("%s section has incorrect num_records in .BTF.ext\n", 670 pr_debug("%s section has incorrect num_records in .BTF.ext\n",
593 ext_sec->desc); 671 ext_sec->desc);
594 return -EINVAL; 672 return -EINVAL;
595 } 673 }
@@ -610,8 +688,7 @@ static int btf_ext_copy_info(struct btf_ext *btf_ext,
610} 688}
611 689
612static int btf_ext_copy_func_info(struct btf_ext *btf_ext, 690static int btf_ext_copy_func_info(struct btf_ext *btf_ext,
613 __u8 *data, __u32 data_size, 691 __u8 *data, __u32 data_size)
614 btf_print_fn_t err_log)
615{ 692{
616 const struct btf_ext_header *hdr = (struct btf_ext_header *)data; 693 const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
617 struct btf_ext_sec_copy_param param = { 694 struct btf_ext_sec_copy_param param = {
@@ -622,12 +699,11 @@ static int btf_ext_copy_func_info(struct btf_ext *btf_ext,
622 .desc = "func_info" 699 .desc = "func_info"
623 }; 700 };
624 701
625 return btf_ext_copy_info(btf_ext, data, data_size, &param, err_log); 702 return btf_ext_copy_info(btf_ext, data, data_size, &param);
626} 703}
627 704
628static int btf_ext_copy_line_info(struct btf_ext *btf_ext, 705static int btf_ext_copy_line_info(struct btf_ext *btf_ext,
629 __u8 *data, __u32 data_size, 706 __u8 *data, __u32 data_size)
630 btf_print_fn_t err_log)
631{ 707{
632 const struct btf_ext_header *hdr = (struct btf_ext_header *)data; 708 const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
633 struct btf_ext_sec_copy_param param = { 709 struct btf_ext_sec_copy_param param = {
@@ -638,37 +714,36 @@ static int btf_ext_copy_line_info(struct btf_ext *btf_ext,
638 .desc = "line_info", 714 .desc = "line_info",
639 }; 715 };
640 716
641 return btf_ext_copy_info(btf_ext, data, data_size, &param, err_log); 717 return btf_ext_copy_info(btf_ext, data, data_size, &param);
642} 718}
643 719
644static int btf_ext_parse_hdr(__u8 *data, __u32 data_size, 720static int btf_ext_parse_hdr(__u8 *data, __u32 data_size)
645 btf_print_fn_t err_log)
646{ 721{
647 const struct btf_ext_header *hdr = (struct btf_ext_header *)data; 722 const struct btf_ext_header *hdr = (struct btf_ext_header *)data;
648 723
649 if (data_size < offsetof(struct btf_ext_header, func_info_off) || 724 if (data_size < offsetof(struct btf_ext_header, func_info_off) ||
650 data_size < hdr->hdr_len) { 725 data_size < hdr->hdr_len) {
651 elog("BTF.ext header not found"); 726 pr_debug("BTF.ext header not found");
652 return -EINVAL; 727 return -EINVAL;
653 } 728 }
654 729
655 if (hdr->magic != BTF_MAGIC) { 730 if (hdr->magic != BTF_MAGIC) {
656 elog("Invalid BTF.ext magic:%x\n", hdr->magic); 731 pr_debug("Invalid BTF.ext magic:%x\n", hdr->magic);
657 return -EINVAL; 732 return -EINVAL;
658 } 733 }
659 734
660 if (hdr->version != BTF_VERSION) { 735 if (hdr->version != BTF_VERSION) {
661 elog("Unsupported BTF.ext version:%u\n", hdr->version); 736 pr_debug("Unsupported BTF.ext version:%u\n", hdr->version);
662 return -ENOTSUP; 737 return -ENOTSUP;
663 } 738 }
664 739
665 if (hdr->flags) { 740 if (hdr->flags) {
666 elog("Unsupported BTF.ext flags:%x\n", hdr->flags); 741 pr_debug("Unsupported BTF.ext flags:%x\n", hdr->flags);
667 return -ENOTSUP; 742 return -ENOTSUP;
668 } 743 }
669 744
670 if (data_size == hdr->hdr_len) { 745 if (data_size == hdr->hdr_len) {
671 elog("BTF.ext has no data\n"); 746 pr_debug("BTF.ext has no data\n");
672 return -EINVAL; 747 return -EINVAL;
673 } 748 }
674 749
@@ -685,12 +760,12 @@ void btf_ext__free(struct btf_ext *btf_ext)
685 free(btf_ext); 760 free(btf_ext);
686} 761}
687 762
688struct btf_ext *btf_ext__new(__u8 *data, __u32 size, btf_print_fn_t err_log) 763struct btf_ext *btf_ext__new(__u8 *data, __u32 size)
689{ 764{
690 struct btf_ext *btf_ext; 765 struct btf_ext *btf_ext;
691 int err; 766 int err;
692 767
693 err = btf_ext_parse_hdr(data, size, err_log); 768 err = btf_ext_parse_hdr(data, size);
694 if (err) 769 if (err)
695 return ERR_PTR(err); 770 return ERR_PTR(err);
696 771
@@ -698,13 +773,13 @@ struct btf_ext *btf_ext__new(__u8 *data, __u32 size, btf_print_fn_t err_log)
698 if (!btf_ext) 773 if (!btf_ext)
699 return ERR_PTR(-ENOMEM); 774 return ERR_PTR(-ENOMEM);
700 775
701 err = btf_ext_copy_func_info(btf_ext, data, size, err_log); 776 err = btf_ext_copy_func_info(btf_ext, data, size);
702 if (err) { 777 if (err) {
703 btf_ext__free(btf_ext); 778 btf_ext__free(btf_ext);
704 return ERR_PTR(err); 779 return ERR_PTR(err);
705 } 780 }
706 781
707 err = btf_ext_copy_line_info(btf_ext, data, size, err_log); 782 err = btf_ext_copy_line_info(btf_ext, data, size);
708 if (err) { 783 if (err) {
709 btf_ext__free(btf_ext); 784 btf_ext__free(btf_ext);
710 return ERR_PTR(err); 785 return ERR_PTR(err);
@@ -786,3 +861,1744 @@ __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext)
786{ 861{
787 return btf_ext->line_info.rec_size; 862 return btf_ext->line_info.rec_size;
788} 863}
864
865struct btf_dedup;
866
867static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
868 const struct btf_dedup_opts *opts);
869static void btf_dedup_free(struct btf_dedup *d);
870static int btf_dedup_strings(struct btf_dedup *d);
871static int btf_dedup_prim_types(struct btf_dedup *d);
872static int btf_dedup_struct_types(struct btf_dedup *d);
873static int btf_dedup_ref_types(struct btf_dedup *d);
874static int btf_dedup_compact_types(struct btf_dedup *d);
875static int btf_dedup_remap_types(struct btf_dedup *d);
876
877/*
878 * Deduplicate BTF types and strings.
879 *
880 * BTF dedup algorithm takes as an input `struct btf` representing `.BTF` ELF
881 * section with all BTF type descriptors and string data. It overwrites that
882 * memory in-place with deduplicated types and strings without any loss of
883 * information. If optional `struct btf_ext` representing '.BTF.ext' ELF section
884 * is provided, all the strings referenced from .BTF.ext section are honored
885 * and updated to point to the right offsets after deduplication.
886 *
887 * If function returns with error, type/string data might be garbled and should
888 * be discarded.
889 *
890 * More verbose and detailed description of both problem btf_dedup is solving,
891 * as well as solution could be found at:
892 * https://facebookmicrosites.github.io/bpf/blog/2018/11/14/btf-enhancement.html
893 *
894 * Problem description and justification
895 * =====================================
896 *
897 * BTF type information is typically emitted either as a result of conversion
898 * from DWARF to BTF or directly by compiler. In both cases, each compilation
899 * unit contains information about a subset of all the types that are used
900 * in an application. These subsets are frequently overlapping and contain a lot
901 * of duplicated information when later concatenated together into a single
902 * binary. This algorithm ensures that each unique type is represented by single
903 * BTF type descriptor, greatly reducing resulting size of BTF data.
904 *
905 * Compilation unit isolation and subsequent duplication of data is not the only
906 * problem. The same type hierarchy (e.g., struct and all the type that struct
907 * references) in different compilation units can be represented in BTF to
908 * various degrees of completeness (or, rather, incompleteness) due to
909 * struct/union forward declarations.
910 *
911 * Let's take a look at an example, that we'll use to better understand the
912 * problem (and solution). Suppose we have two compilation units, each using
913 * same `struct S`, but each of them having incomplete type information about
914 * struct's fields:
915 *
916 * // CU #1:
917 * struct S;
918 * struct A {
919 * int a;
920 * struct A* self;
921 * struct S* parent;
922 * };
923 * struct B;
924 * struct S {
925 * struct A* a_ptr;
926 * struct B* b_ptr;
927 * };
928 *
929 * // CU #2:
930 * struct S;
931 * struct A;
932 * struct B {
933 * int b;
934 * struct B* self;
935 * struct S* parent;
936 * };
937 * struct S {
938 * struct A* a_ptr;
939 * struct B* b_ptr;
940 * };
941 *
942 * In case of CU #1, BTF data will know only that `struct B` exist (but no
943 * more), but will know the complete type information about `struct A`. While
944 * for CU #2, it will know full type information about `struct B`, but will
945 * only know about forward declaration of `struct A` (in BTF terms, it will
946 * have `BTF_KIND_FWD` type descriptor with name `B`).
947 *
948 * This compilation unit isolation means that it's possible that there is no
949 * single CU with complete type information describing structs `S`, `A`, and
950 * `B`. Also, we might get tons of duplicated and redundant type information.
951 *
952 * Additional complication we need to keep in mind comes from the fact that
953 * types, in general, can form graphs containing cycles, not just DAGs.
954 *
955 * While algorithm does deduplication, it also merges and resolves type
956 * information (unless disabled throught `struct btf_opts`), whenever possible.
957 * E.g., in the example above with two compilation units having partial type
958 * information for structs `A` and `B`, the output of algorithm will emit
959 * a single copy of each BTF type that describes structs `A`, `B`, and `S`
960 * (as well as type information for `int` and pointers), as if they were defined
961 * in a single compilation unit as:
962 *
963 * struct A {
964 * int a;
965 * struct A* self;
966 * struct S* parent;
967 * };
968 * struct B {
969 * int b;
970 * struct B* self;
971 * struct S* parent;
972 * };
973 * struct S {
974 * struct A* a_ptr;
975 * struct B* b_ptr;
976 * };
977 *
978 * Algorithm summary
979 * =================
980 *
981 * Algorithm completes its work in 6 separate passes:
982 *
983 * 1. Strings deduplication.
984 * 2. Primitive types deduplication (int, enum, fwd).
985 * 3. Struct/union types deduplication.
986 * 4. Reference types deduplication (pointers, typedefs, arrays, funcs, func
987 * protos, and const/volatile/restrict modifiers).
988 * 5. Types compaction.
989 * 6. Types remapping.
990 *
991 * Algorithm determines canonical type descriptor, which is a single
992 * representative type for each truly unique type. This canonical type is the
993 * one that will go into final deduplicated BTF type information. For
994 * struct/unions, it is also the type that algorithm will merge additional type
995 * information into (while resolving FWDs), as it discovers it from data in
996 * other CUs. Each input BTF type eventually gets either mapped to itself, if
997 * that type is canonical, or to some other type, if that type is equivalent
998 * and was chosen as canonical representative. This mapping is stored in
999 * `btf_dedup->map` array. This map is also used to record STRUCT/UNION that
1000 * FWD type got resolved to.
1001 *
1002 * To facilitate fast discovery of canonical types, we also maintain canonical
1003 * index (`btf_dedup->dedup_table`), which maps type descriptor's signature hash
1004 * (i.e., hashed kind, name, size, fields, etc) into a list of canonical types
1005 * that match that signature. With sufficiently good choice of type signature
1006 * hashing function, we can limit number of canonical types for each unique type
1007 * signature to a very small number, allowing to find canonical type for any
1008 * duplicated type very quickly.
1009 *
1010 * Struct/union deduplication is the most critical part and algorithm for
1011 * deduplicating structs/unions is described in greater details in comments for
1012 * `btf_dedup_is_equiv` function.
1013 */
1014int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
1015 const struct btf_dedup_opts *opts)
1016{
1017 struct btf_dedup *d = btf_dedup_new(btf, btf_ext, opts);
1018 int err;
1019
1020 if (IS_ERR(d)) {
1021 pr_debug("btf_dedup_new failed: %ld", PTR_ERR(d));
1022 return -EINVAL;
1023 }
1024
1025 err = btf_dedup_strings(d);
1026 if (err < 0) {
1027 pr_debug("btf_dedup_strings failed:%d\n", err);
1028 goto done;
1029 }
1030 err = btf_dedup_prim_types(d);
1031 if (err < 0) {
1032 pr_debug("btf_dedup_prim_types failed:%d\n", err);
1033 goto done;
1034 }
1035 err = btf_dedup_struct_types(d);
1036 if (err < 0) {
1037 pr_debug("btf_dedup_struct_types failed:%d\n", err);
1038 goto done;
1039 }
1040 err = btf_dedup_ref_types(d);
1041 if (err < 0) {
1042 pr_debug("btf_dedup_ref_types failed:%d\n", err);
1043 goto done;
1044 }
1045 err = btf_dedup_compact_types(d);
1046 if (err < 0) {
1047 pr_debug("btf_dedup_compact_types failed:%d\n", err);
1048 goto done;
1049 }
1050 err = btf_dedup_remap_types(d);
1051 if (err < 0) {
1052 pr_debug("btf_dedup_remap_types failed:%d\n", err);
1053 goto done;
1054 }
1055
1056done:
1057 btf_dedup_free(d);
1058 return err;
1059}
1060
1061#define BTF_DEDUP_TABLE_SIZE_LOG 14
1062#define BTF_DEDUP_TABLE_MOD ((1 << BTF_DEDUP_TABLE_SIZE_LOG) - 1)
1063#define BTF_UNPROCESSED_ID ((__u32)-1)
1064#define BTF_IN_PROGRESS_ID ((__u32)-2)
1065
1066struct btf_dedup_node {
1067 struct btf_dedup_node *next;
1068 __u32 type_id;
1069};
1070
1071struct btf_dedup {
1072 /* .BTF section to be deduped in-place */
1073 struct btf *btf;
1074 /*
1075 * Optional .BTF.ext section. When provided, any strings referenced
1076 * from it will be taken into account when deduping strings
1077 */
1078 struct btf_ext *btf_ext;
1079 /*
1080 * This is a map from any type's signature hash to a list of possible
1081 * canonical representative type candidates. Hash collisions are
1082 * ignored, so even types of various kinds can share same list of
1083 * candidates, which is fine because we rely on subsequent
1084 * btf_xxx_equal() checks to authoritatively verify type equality.
1085 */
1086 struct btf_dedup_node **dedup_table;
1087 /* Canonical types map */
1088 __u32 *map;
1089 /* Hypothetical mapping, used during type graph equivalence checks */
1090 __u32 *hypot_map;
1091 __u32 *hypot_list;
1092 size_t hypot_cnt;
1093 size_t hypot_cap;
1094 /* Various option modifying behavior of algorithm */
1095 struct btf_dedup_opts opts;
1096};
1097
1098struct btf_str_ptr {
1099 const char *str;
1100 __u32 new_off;
1101 bool used;
1102};
1103
1104struct btf_str_ptrs {
1105 struct btf_str_ptr *ptrs;
1106 const char *data;
1107 __u32 cnt;
1108 __u32 cap;
1109};
1110
1111static inline __u32 hash_combine(__u32 h, __u32 value)
1112{
1113/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
1114#define GOLDEN_RATIO_PRIME 0x9e370001UL
1115 return h * 37 + value * GOLDEN_RATIO_PRIME;
1116#undef GOLDEN_RATIO_PRIME
1117}
1118
1119#define for_each_hash_node(table, hash, node) \
1120 for (node = table[hash & BTF_DEDUP_TABLE_MOD]; node; node = node->next)
1121
1122static int btf_dedup_table_add(struct btf_dedup *d, __u32 hash, __u32 type_id)
1123{
1124 struct btf_dedup_node *node = malloc(sizeof(struct btf_dedup_node));
1125
1126 if (!node)
1127 return -ENOMEM;
1128 node->type_id = type_id;
1129 node->next = d->dedup_table[hash & BTF_DEDUP_TABLE_MOD];
1130 d->dedup_table[hash & BTF_DEDUP_TABLE_MOD] = node;
1131 return 0;
1132}
1133
1134static int btf_dedup_hypot_map_add(struct btf_dedup *d,
1135 __u32 from_id, __u32 to_id)
1136{
1137 if (d->hypot_cnt == d->hypot_cap) {
1138 __u32 *new_list;
1139
1140 d->hypot_cap += max(16, d->hypot_cap / 2);
1141 new_list = realloc(d->hypot_list, sizeof(__u32) * d->hypot_cap);
1142 if (!new_list)
1143 return -ENOMEM;
1144 d->hypot_list = new_list;
1145 }
1146 d->hypot_list[d->hypot_cnt++] = from_id;
1147 d->hypot_map[from_id] = to_id;
1148 return 0;
1149}
1150
1151static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
1152{
1153 int i;
1154
1155 for (i = 0; i < d->hypot_cnt; i++)
1156 d->hypot_map[d->hypot_list[i]] = BTF_UNPROCESSED_ID;
1157 d->hypot_cnt = 0;
1158}
1159
1160static void btf_dedup_table_free(struct btf_dedup *d)
1161{
1162 struct btf_dedup_node *head, *tmp;
1163 int i;
1164
1165 if (!d->dedup_table)
1166 return;
1167
1168 for (i = 0; i < (1 << BTF_DEDUP_TABLE_SIZE_LOG); i++) {
1169 while (d->dedup_table[i]) {
1170 tmp = d->dedup_table[i];
1171 d->dedup_table[i] = tmp->next;
1172 free(tmp);
1173 }
1174
1175 head = d->dedup_table[i];
1176 while (head) {
1177 tmp = head;
1178 head = head->next;
1179 free(tmp);
1180 }
1181 }
1182
1183 free(d->dedup_table);
1184 d->dedup_table = NULL;
1185}
1186
1187static void btf_dedup_free(struct btf_dedup *d)
1188{
1189 btf_dedup_table_free(d);
1190
1191 free(d->map);
1192 d->map = NULL;
1193
1194 free(d->hypot_map);
1195 d->hypot_map = NULL;
1196
1197 free(d->hypot_list);
1198 d->hypot_list = NULL;
1199
1200 free(d);
1201}
1202
1203static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
1204 const struct btf_dedup_opts *opts)
1205{
1206 struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
1207 int i, err = 0;
1208
1209 if (!d)
1210 return ERR_PTR(-ENOMEM);
1211
1212 d->btf = btf;
1213 d->btf_ext = btf_ext;
1214
1215 d->dedup_table = calloc(1 << BTF_DEDUP_TABLE_SIZE_LOG,
1216 sizeof(struct btf_dedup_node *));
1217 if (!d->dedup_table) {
1218 err = -ENOMEM;
1219 goto done;
1220 }
1221
1222 d->map = malloc(sizeof(__u32) * (1 + btf->nr_types));
1223 if (!d->map) {
1224 err = -ENOMEM;
1225 goto done;
1226 }
1227 /* special BTF "void" type is made canonical immediately */
1228 d->map[0] = 0;
1229 for (i = 1; i <= btf->nr_types; i++)
1230 d->map[i] = BTF_UNPROCESSED_ID;
1231
1232 d->hypot_map = malloc(sizeof(__u32) * (1 + btf->nr_types));
1233 if (!d->hypot_map) {
1234 err = -ENOMEM;
1235 goto done;
1236 }
1237 for (i = 0; i <= btf->nr_types; i++)
1238 d->hypot_map[i] = BTF_UNPROCESSED_ID;
1239
1240 d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
1241
1242done:
1243 if (err) {
1244 btf_dedup_free(d);
1245 return ERR_PTR(err);
1246 }
1247
1248 return d;
1249}
1250
1251typedef int (*str_off_fn_t)(__u32 *str_off_ptr, void *ctx);
1252
1253/*
1254 * Iterate over all possible places in .BTF and .BTF.ext that can reference
1255 * string and pass pointer to it to a provided callback `fn`.
1256 */
1257static int btf_for_each_str_off(struct btf_dedup *d, str_off_fn_t fn, void *ctx)
1258{
1259 void *line_data_cur, *line_data_end;
1260 int i, j, r, rec_size;
1261 struct btf_type *t;
1262
1263 for (i = 1; i <= d->btf->nr_types; i++) {
1264 t = d->btf->types[i];
1265 r = fn(&t->name_off, ctx);
1266 if (r)
1267 return r;
1268
1269 switch (BTF_INFO_KIND(t->info)) {
1270 case BTF_KIND_STRUCT:
1271 case BTF_KIND_UNION: {
1272 struct btf_member *m = (struct btf_member *)(t + 1);
1273 __u16 vlen = BTF_INFO_VLEN(t->info);
1274
1275 for (j = 0; j < vlen; j++) {
1276 r = fn(&m->name_off, ctx);
1277 if (r)
1278 return r;
1279 m++;
1280 }
1281 break;
1282 }
1283 case BTF_KIND_ENUM: {
1284 struct btf_enum *m = (struct btf_enum *)(t + 1);
1285 __u16 vlen = BTF_INFO_VLEN(t->info);
1286
1287 for (j = 0; j < vlen; j++) {
1288 r = fn(&m->name_off, ctx);
1289 if (r)
1290 return r;
1291 m++;
1292 }
1293 break;
1294 }
1295 case BTF_KIND_FUNC_PROTO: {
1296 struct btf_param *m = (struct btf_param *)(t + 1);
1297 __u16 vlen = BTF_INFO_VLEN(t->info);
1298
1299 for (j = 0; j < vlen; j++) {
1300 r = fn(&m->name_off, ctx);
1301 if (r)
1302 return r;
1303 m++;
1304 }
1305 break;
1306 }
1307 default:
1308 break;
1309 }
1310 }
1311
1312 if (!d->btf_ext)
1313 return 0;
1314
1315 line_data_cur = d->btf_ext->line_info.info;
1316 line_data_end = d->btf_ext->line_info.info + d->btf_ext->line_info.len;
1317 rec_size = d->btf_ext->line_info.rec_size;
1318
1319 while (line_data_cur < line_data_end) {
1320 struct btf_ext_info_sec *sec = line_data_cur;
1321 struct bpf_line_info_min *line_info;
1322 __u32 num_info = sec->num_info;
1323
1324 r = fn(&sec->sec_name_off, ctx);
1325 if (r)
1326 return r;
1327
1328 line_data_cur += sizeof(struct btf_ext_info_sec);
1329 for (i = 0; i < num_info; i++) {
1330 line_info = line_data_cur;
1331 r = fn(&line_info->file_name_off, ctx);
1332 if (r)
1333 return r;
1334 r = fn(&line_info->line_off, ctx);
1335 if (r)
1336 return r;
1337 line_data_cur += rec_size;
1338 }
1339 }
1340
1341 return 0;
1342}
1343
1344static int str_sort_by_content(const void *a1, const void *a2)
1345{
1346 const struct btf_str_ptr *p1 = a1;
1347 const struct btf_str_ptr *p2 = a2;
1348
1349 return strcmp(p1->str, p2->str);
1350}
1351
1352static int str_sort_by_offset(const void *a1, const void *a2)
1353{
1354 const struct btf_str_ptr *p1 = a1;
1355 const struct btf_str_ptr *p2 = a2;
1356
1357 if (p1->str != p2->str)
1358 return p1->str < p2->str ? -1 : 1;
1359 return 0;
1360}
1361
1362static int btf_dedup_str_ptr_cmp(const void *str_ptr, const void *pelem)
1363{
1364 const struct btf_str_ptr *p = pelem;
1365
1366 if (str_ptr != p->str)
1367 return (const char *)str_ptr < p->str ? -1 : 1;
1368 return 0;
1369}
1370
1371static int btf_str_mark_as_used(__u32 *str_off_ptr, void *ctx)
1372{
1373 struct btf_str_ptrs *strs;
1374 struct btf_str_ptr *s;
1375
1376 if (*str_off_ptr == 0)
1377 return 0;
1378
1379 strs = ctx;
1380 s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
1381 sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
1382 if (!s)
1383 return -EINVAL;
1384 s->used = true;
1385 return 0;
1386}
1387
1388static int btf_str_remap_offset(__u32 *str_off_ptr, void *ctx)
1389{
1390 struct btf_str_ptrs *strs;
1391 struct btf_str_ptr *s;
1392
1393 if (*str_off_ptr == 0)
1394 return 0;
1395
1396 strs = ctx;
1397 s = bsearch(strs->data + *str_off_ptr, strs->ptrs, strs->cnt,
1398 sizeof(struct btf_str_ptr), btf_dedup_str_ptr_cmp);
1399 if (!s)
1400 return -EINVAL;
1401 *str_off_ptr = s->new_off;
1402 return 0;
1403}
1404
1405/*
1406 * Dedup string and filter out those that are not referenced from either .BTF
1407 * or .BTF.ext (if provided) sections.
1408 *
1409 * This is done by building index of all strings in BTF's string section,
1410 * then iterating over all entities that can reference strings (e.g., type
1411 * names, struct field names, .BTF.ext line info, etc) and marking corresponding
1412 * strings as used. After that all used strings are deduped and compacted into
1413 * sequential blob of memory and new offsets are calculated. Then all the string
1414 * references are iterated again and rewritten using new offsets.
1415 */
1416static int btf_dedup_strings(struct btf_dedup *d)
1417{
1418 const struct btf_header *hdr = d->btf->hdr;
1419 char *start = (char *)d->btf->nohdr_data + hdr->str_off;
1420 char *end = start + d->btf->hdr->str_len;
1421 char *p = start, *tmp_strs = NULL;
1422 struct btf_str_ptrs strs = {
1423 .cnt = 0,
1424 .cap = 0,
1425 .ptrs = NULL,
1426 .data = start,
1427 };
1428 int i, j, err = 0, grp_idx;
1429 bool grp_used;
1430
1431 /* build index of all strings */
1432 while (p < end) {
1433 if (strs.cnt + 1 > strs.cap) {
1434 struct btf_str_ptr *new_ptrs;
1435
1436 strs.cap += max(strs.cnt / 2, 16);
1437 new_ptrs = realloc(strs.ptrs,
1438 sizeof(strs.ptrs[0]) * strs.cap);
1439 if (!new_ptrs) {
1440 err = -ENOMEM;
1441 goto done;
1442 }
1443 strs.ptrs = new_ptrs;
1444 }
1445
1446 strs.ptrs[strs.cnt].str = p;
1447 strs.ptrs[strs.cnt].used = false;
1448
1449 p += strlen(p) + 1;
1450 strs.cnt++;
1451 }
1452
1453 /* temporary storage for deduplicated strings */
1454 tmp_strs = malloc(d->btf->hdr->str_len);
1455 if (!tmp_strs) {
1456 err = -ENOMEM;
1457 goto done;
1458 }
1459
1460 /* mark all used strings */
1461 strs.ptrs[0].used = true;
1462 err = btf_for_each_str_off(d, btf_str_mark_as_used, &strs);
1463 if (err)
1464 goto done;
1465
1466 /* sort strings by context, so that we can identify duplicates */
1467 qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_content);
1468
1469 /*
1470 * iterate groups of equal strings and if any instance in a group was
1471 * referenced, emit single instance and remember new offset
1472 */
1473 p = tmp_strs;
1474 grp_idx = 0;
1475 grp_used = strs.ptrs[0].used;
1476 /* iterate past end to avoid code duplication after loop */
1477 for (i = 1; i <= strs.cnt; i++) {
1478 /*
1479 * when i == strs.cnt, we want to skip string comparison and go
1480 * straight to handling last group of strings (otherwise we'd
1481 * need to handle last group after the loop w/ duplicated code)
1482 */
1483 if (i < strs.cnt &&
1484 !strcmp(strs.ptrs[i].str, strs.ptrs[grp_idx].str)) {
1485 grp_used = grp_used || strs.ptrs[i].used;
1486 continue;
1487 }
1488
1489 /*
1490 * this check would have been required after the loop to handle
1491 * last group of strings, but due to <= condition in a loop
1492 * we avoid that duplication
1493 */
1494 if (grp_used) {
1495 int new_off = p - tmp_strs;
1496 __u32 len = strlen(strs.ptrs[grp_idx].str);
1497
1498 memmove(p, strs.ptrs[grp_idx].str, len + 1);
1499 for (j = grp_idx; j < i; j++)
1500 strs.ptrs[j].new_off = new_off;
1501 p += len + 1;
1502 }
1503
1504 if (i < strs.cnt) {
1505 grp_idx = i;
1506 grp_used = strs.ptrs[i].used;
1507 }
1508 }
1509
1510 /* replace original strings with deduped ones */
1511 d->btf->hdr->str_len = p - tmp_strs;
1512 memmove(start, tmp_strs, d->btf->hdr->str_len);
1513 end = start + d->btf->hdr->str_len;
1514
1515 /* restore original order for further binary search lookups */
1516 qsort(strs.ptrs, strs.cnt, sizeof(strs.ptrs[0]), str_sort_by_offset);
1517
1518 /* remap string offsets */
1519 err = btf_for_each_str_off(d, btf_str_remap_offset, &strs);
1520 if (err)
1521 goto done;
1522
1523 d->btf->hdr->str_len = end - start;
1524
1525done:
1526 free(tmp_strs);
1527 free(strs.ptrs);
1528 return err;
1529}
1530
1531static __u32 btf_hash_common(struct btf_type *t)
1532{
1533 __u32 h;
1534
1535 h = hash_combine(0, t->name_off);
1536 h = hash_combine(h, t->info);
1537 h = hash_combine(h, t->size);
1538 return h;
1539}
1540
1541static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
1542{
1543 return t1->name_off == t2->name_off &&
1544 t1->info == t2->info &&
1545 t1->size == t2->size;
1546}
1547
1548/* Calculate type signature hash of INT. */
1549static __u32 btf_hash_int(struct btf_type *t)
1550{
1551 __u32 info = *(__u32 *)(t + 1);
1552 __u32 h;
1553
1554 h = btf_hash_common(t);
1555 h = hash_combine(h, info);
1556 return h;
1557}
1558
1559/* Check structural equality of two INTs. */
1560static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
1561{
1562 __u32 info1, info2;
1563
1564 if (!btf_equal_common(t1, t2))
1565 return false;
1566 info1 = *(__u32 *)(t1 + 1);
1567 info2 = *(__u32 *)(t2 + 1);
1568 return info1 == info2;
1569}
1570
1571/* Calculate type signature hash of ENUM. */
1572static __u32 btf_hash_enum(struct btf_type *t)
1573{
1574 struct btf_enum *member = (struct btf_enum *)(t + 1);
1575 __u32 vlen = BTF_INFO_VLEN(t->info);
1576 __u32 h = btf_hash_common(t);
1577 int i;
1578
1579 for (i = 0; i < vlen; i++) {
1580 h = hash_combine(h, member->name_off);
1581 h = hash_combine(h, member->val);
1582 member++;
1583 }
1584 return h;
1585}
1586
1587/* Check structural equality of two ENUMs. */
1588static bool btf_equal_enum(struct btf_type *t1, struct btf_type *t2)
1589{
1590 struct btf_enum *m1, *m2;
1591 __u16 vlen;
1592 int i;
1593
1594 if (!btf_equal_common(t1, t2))
1595 return false;
1596
1597 vlen = BTF_INFO_VLEN(t1->info);
1598 m1 = (struct btf_enum *)(t1 + 1);
1599 m2 = (struct btf_enum *)(t2 + 1);
1600 for (i = 0; i < vlen; i++) {
1601 if (m1->name_off != m2->name_off || m1->val != m2->val)
1602 return false;
1603 m1++;
1604 m2++;
1605 }
1606 return true;
1607}
1608
1609/*
1610 * Calculate type signature hash of STRUCT/UNION, ignoring referenced type IDs,
1611 * as referenced type IDs equivalence is established separately during type
1612 * graph equivalence check algorithm.
1613 */
1614static __u32 btf_hash_struct(struct btf_type *t)
1615{
1616 struct btf_member *member = (struct btf_member *)(t + 1);
1617 __u32 vlen = BTF_INFO_VLEN(t->info);
1618 __u32 h = btf_hash_common(t);
1619 int i;
1620
1621 for (i = 0; i < vlen; i++) {
1622 h = hash_combine(h, member->name_off);
1623 h = hash_combine(h, member->offset);
1624 /* no hashing of referenced type ID, it can be unresolved yet */
1625 member++;
1626 }
1627 return h;
1628}
1629
1630/*
1631 * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
1632 * IDs. This check is performed during type graph equivalence check and
1633 * referenced types equivalence is checked separately.
1634 */
1635static bool btf_equal_struct(struct btf_type *t1, struct btf_type *t2)
1636{
1637 struct btf_member *m1, *m2;
1638 __u16 vlen;
1639 int i;
1640
1641 if (!btf_equal_common(t1, t2))
1642 return false;
1643
1644 vlen = BTF_INFO_VLEN(t1->info);
1645 m1 = (struct btf_member *)(t1 + 1);
1646 m2 = (struct btf_member *)(t2 + 1);
1647 for (i = 0; i < vlen; i++) {
1648 if (m1->name_off != m2->name_off || m1->offset != m2->offset)
1649 return false;
1650 m1++;
1651 m2++;
1652 }
1653 return true;
1654}
1655
1656/*
1657 * Calculate type signature hash of ARRAY, including referenced type IDs,
1658 * under assumption that they were already resolved to canonical type IDs and
1659 * are not going to change.
1660 */
1661static __u32 btf_hash_array(struct btf_type *t)
1662{
1663 struct btf_array *info = (struct btf_array *)(t + 1);
1664 __u32 h = btf_hash_common(t);
1665
1666 h = hash_combine(h, info->type);
1667 h = hash_combine(h, info->index_type);
1668 h = hash_combine(h, info->nelems);
1669 return h;
1670}
1671
1672/*
1673 * Check exact equality of two ARRAYs, taking into account referenced
1674 * type IDs, under assumption that they were already resolved to canonical
1675 * type IDs and are not going to change.
1676 * This function is called during reference types deduplication to compare
1677 * ARRAY to potential canonical representative.
1678 */
1679static bool btf_equal_array(struct btf_type *t1, struct btf_type *t2)
1680{
1681 struct btf_array *info1, *info2;
1682
1683 if (!btf_equal_common(t1, t2))
1684 return false;
1685
1686 info1 = (struct btf_array *)(t1 + 1);
1687 info2 = (struct btf_array *)(t2 + 1);
1688 return info1->type == info2->type &&
1689 info1->index_type == info2->index_type &&
1690 info1->nelems == info2->nelems;
1691}
1692
1693/*
1694 * Check structural compatibility of two ARRAYs, ignoring referenced type
1695 * IDs. This check is performed during type graph equivalence check and
1696 * referenced types equivalence is checked separately.
1697 */
1698static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
1699{
1700 struct btf_array *info1, *info2;
1701
1702 if (!btf_equal_common(t1, t2))
1703 return false;
1704
1705 info1 = (struct btf_array *)(t1 + 1);
1706 info2 = (struct btf_array *)(t2 + 1);
1707 return info1->nelems == info2->nelems;
1708}
1709
1710/*
1711 * Calculate type signature hash of FUNC_PROTO, including referenced type IDs,
1712 * under assumption that they were already resolved to canonical type IDs and
1713 * are not going to change.
1714 */
1715static inline __u32 btf_hash_fnproto(struct btf_type *t)
1716{
1717 struct btf_param *member = (struct btf_param *)(t + 1);
1718 __u16 vlen = BTF_INFO_VLEN(t->info);
1719 __u32 h = btf_hash_common(t);
1720 int i;
1721
1722 for (i = 0; i < vlen; i++) {
1723 h = hash_combine(h, member->name_off);
1724 h = hash_combine(h, member->type);
1725 member++;
1726 }
1727 return h;
1728}
1729
1730/*
1731 * Check exact equality of two FUNC_PROTOs, taking into account referenced
1732 * type IDs, under assumption that they were already resolved to canonical
1733 * type IDs and are not going to change.
1734 * This function is called during reference types deduplication to compare
1735 * FUNC_PROTO to potential canonical representative.
1736 */
1737static inline bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
1738{
1739 struct btf_param *m1, *m2;
1740 __u16 vlen;
1741 int i;
1742
1743 if (!btf_equal_common(t1, t2))
1744 return false;
1745
1746 vlen = BTF_INFO_VLEN(t1->info);
1747 m1 = (struct btf_param *)(t1 + 1);
1748 m2 = (struct btf_param *)(t2 + 1);
1749 for (i = 0; i < vlen; i++) {
1750 if (m1->name_off != m2->name_off || m1->type != m2->type)
1751 return false;
1752 m1++;
1753 m2++;
1754 }
1755 return true;
1756}
1757
1758/*
1759 * Check structural compatibility of two FUNC_PROTOs, ignoring referenced type
1760 * IDs. This check is performed during type graph equivalence check and
1761 * referenced types equivalence is checked separately.
1762 */
1763static inline bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
1764{
1765 struct btf_param *m1, *m2;
1766 __u16 vlen;
1767 int i;
1768
1769 /* skip return type ID */
1770 if (t1->name_off != t2->name_off || t1->info != t2->info)
1771 return false;
1772
1773 vlen = BTF_INFO_VLEN(t1->info);
1774 m1 = (struct btf_param *)(t1 + 1);
1775 m2 = (struct btf_param *)(t2 + 1);
1776 for (i = 0; i < vlen; i++) {
1777 if (m1->name_off != m2->name_off)
1778 return false;
1779 m1++;
1780 m2++;
1781 }
1782 return true;
1783}
1784
1785/*
1786 * Deduplicate primitive types, that can't reference other types, by calculating
1787 * their type signature hash and comparing them with any possible canonical
1788 * candidate. If no canonical candidate matches, type itself is marked as
1789 * canonical and is added into `btf_dedup->dedup_table` as another candidate.
1790 */
1791static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
1792{
1793 struct btf_type *t = d->btf->types[type_id];
1794 struct btf_type *cand;
1795 struct btf_dedup_node *cand_node;
1796 /* if we don't find equivalent type, then we are canonical */
1797 __u32 new_id = type_id;
1798 __u32 h;
1799
1800 switch (BTF_INFO_KIND(t->info)) {
1801 case BTF_KIND_CONST:
1802 case BTF_KIND_VOLATILE:
1803 case BTF_KIND_RESTRICT:
1804 case BTF_KIND_PTR:
1805 case BTF_KIND_TYPEDEF:
1806 case BTF_KIND_ARRAY:
1807 case BTF_KIND_STRUCT:
1808 case BTF_KIND_UNION:
1809 case BTF_KIND_FUNC:
1810 case BTF_KIND_FUNC_PROTO:
1811 return 0;
1812
1813 case BTF_KIND_INT:
1814 h = btf_hash_int(t);
1815 for_each_hash_node(d->dedup_table, h, cand_node) {
1816 cand = d->btf->types[cand_node->type_id];
1817 if (btf_equal_int(t, cand)) {
1818 new_id = cand_node->type_id;
1819 break;
1820 }
1821 }
1822 break;
1823
1824 case BTF_KIND_ENUM:
1825 h = btf_hash_enum(t);
1826 for_each_hash_node(d->dedup_table, h, cand_node) {
1827 cand = d->btf->types[cand_node->type_id];
1828 if (btf_equal_enum(t, cand)) {
1829 new_id = cand_node->type_id;
1830 break;
1831 }
1832 }
1833 break;
1834
1835 case BTF_KIND_FWD:
1836 h = btf_hash_common(t);
1837 for_each_hash_node(d->dedup_table, h, cand_node) {
1838 cand = d->btf->types[cand_node->type_id];
1839 if (btf_equal_common(t, cand)) {
1840 new_id = cand_node->type_id;
1841 break;
1842 }
1843 }
1844 break;
1845
1846 default:
1847 return -EINVAL;
1848 }
1849
1850 d->map[type_id] = new_id;
1851 if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
1852 return -ENOMEM;
1853
1854 return 0;
1855}
1856
1857static int btf_dedup_prim_types(struct btf_dedup *d)
1858{
1859 int i, err;
1860
1861 for (i = 1; i <= d->btf->nr_types; i++) {
1862 err = btf_dedup_prim_type(d, i);
1863 if (err)
1864 return err;
1865 }
1866 return 0;
1867}
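
The per-kind dedup above follows one pattern: hash the type, scan the matching bucket for an equal candidate, map to that candidate or to itself, and register new canonicals. A toy, self-contained version of that loop over plain integers (the bucket array here just stands in for btf_dedup->dedup_table; all names and values are illustrative):

#include <stdio.h>
#include <stdint.h>

#define NR_VALS 6
#define NR_BUCKETS 8

static uint32_t vals[NR_VALS] = { 7, 3, 7, 9, 3, 7 };
static uint32_t map[NR_VALS];             /* index -> canonical index */
static int buckets[NR_BUCKETS][NR_VALS];  /* candidate indices per hash bucket */
static int bucket_len[NR_BUCKETS];

static uint32_t hash(uint32_t v) { return v % NR_BUCKETS; }

int main(void)
{
	for (int i = 0; i < NR_VALS; i++) {
		uint32_t h = hash(vals[i]);
		int canon = i; /* if no candidate matches, we are canonical */

		for (int j = 0; j < bucket_len[h]; j++) {
			int cand = buckets[h][j];

			if (vals[cand] == vals[i]) {
				canon = cand;
				break;
			}
		}
		map[i] = canon;
		if (canon == i) /* register self as a new canonical candidate */
			buckets[h][bucket_len[h]++] = i;
		printf("%d -> %d\n", i, canon);
	}
	return 0;
}
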
1868
1869/*
1870 * Check whether type is already mapped into canonical one (could be to itself).
1871 */
1872static inline bool is_type_mapped(struct btf_dedup *d, uint32_t type_id)
1873{
1874 return d->map[type_id] <= BTF_MAX_TYPE;
1875}
1876
1877/*
1878 * Resolve type ID into its canonical type ID, if any; otherwise return original
1879 * type ID. If type is FWD and is resolved into STRUCT/UNION already, follow
1880 * STRUCT/UNION link and resolve it into canonical type ID as well.
1881 */
1882static inline __u32 resolve_type_id(struct btf_dedup *d, __u32 type_id)
1883{
1884 while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
1885 type_id = d->map[type_id];
1886 return type_id;
1887}
1888
1889/*
1890 * Resolve FWD to underlying STRUCT/UNION, if any; otherwise return original
1891 * type ID.
1892 */
1893static uint32_t resolve_fwd_id(struct btf_dedup *d, uint32_t type_id)
1894{
1895 __u32 orig_type_id = type_id;
1896
1897 if (BTF_INFO_KIND(d->btf->types[type_id]->info) != BTF_KIND_FWD)
1898 return type_id;
1899
1900 while (is_type_mapped(d, type_id) && d->map[type_id] != type_id)
1901 type_id = d->map[type_id];
1902
1903 if (BTF_INFO_KIND(d->btf->types[type_id]->info) != BTF_KIND_FWD)
1904 return type_id;
1905
1906 return orig_type_id;
1907}
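
resolve_type_id() is essentially a union-find style walk: follow d->map until an entry points to itself. A sketch with a plain array whose contents are made up for illustration; the real code additionally skips entries still holding sentinel values above BTF_MAX_TYPE:

#include <stdio.h>
#include <stdint.h>

#define NR_TYPES 6

/* map[i] == i means type i is (currently) its own canonical representative */
static uint32_t map[NR_TYPES] = { 0, 1, 1, 2, 4, 4 };

static uint32_t resolve(uint32_t id)
{
	while (map[id] != id)
		id = map[id];
	return id;
}

int main(void)
{
	for (uint32_t i = 1; i < NR_TYPES; i++)
		printf("type %u -> canonical %u\n", i, resolve(i));
	return 0;
}
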
1908
1909
1910static inline __u16 btf_fwd_kind(struct btf_type *t)
1911{
1912 return BTF_INFO_KFLAG(t->info) ? BTF_KIND_UNION : BTF_KIND_STRUCT;
1913}
1914
1915/*
1916 * Check equivalence of BTF type graph formed by candidate struct/union (we'll
1917 * call it "candidate graph" in this description for brevity) to a type graph
1918 * formed by (potential) canonical struct/union ("canonical graph" for brevity
 1919 * here, though keep in mind that not all types in the canonical graph are
 1920 * necessarily canonical representatives themselves; some of them might be
 1921 * duplicates or their uniqueness might not have been established yet).
1922 * Returns:
1923 * - >0, if type graphs are equivalent;
1924 * - 0, if not equivalent;
1925 * - <0, on error.
1926 *
1927 * Algorithm performs side-by-side DFS traversal of both type graphs and checks
1928 * equivalence of BTF types at each step. If at any point BTF types in candidate
1929 * and canonical graphs are not compatible structurally, whole graphs are
1930 * incompatible. If types are structurally equivalent (i.e., all information
1931 * except referenced type IDs is exactly the same), a mapping from `canon_id` to
 1932 * a `cand_id` is recorded in the hypothetical mapping (`btf_dedup->hypot_map`).
1933 * If a type references other types, then those referenced types are checked
1934 * for equivalence recursively.
1935 *
1936 * During DFS traversal, if we find that for current `canon_id` type we
1937 * already have some mapping in hypothetical map, we check for two possible
1938 * situations:
1939 * - `canon_id` is mapped to exactly the same type as `cand_id`. This will
1940 * happen when type graphs have cycles. In this case we assume those two
1941 * types are equivalent.
 1942 * - `canon_id` is mapped to a different type. This is a contradiction in our
 1943 * hypothetical mapping, because the same type in the canonical graph would
 1944 * correspond to two different types in the candidate graph, which shouldn't
 1945 * happen for equivalent type graphs. This condition terminates the
 1946 * equivalence check with a negative result.
1947 *
 1948 * If the type graph traversal exhausts the types to check and finds no
 1949 * contradiction, then the type graphs are equivalent.
1950 *
1951 * When checking types for equivalence, there is one special case: FWD types.
1952 * If FWD type resolution is allowed and one of the types (either from canonical
1953 * or candidate graph) is FWD and other is STRUCT/UNION (depending on FWD's kind
 1954 * flag) and their names match, the hypothetical mapping is updated to point from
 1955 * FWD to STRUCT/UNION. If the graphs are successfully determined to be equivalent,
 1956 * this mapping will be used to record the FWD -> STRUCT/UNION mapping permanently.
1957 *
1958 * Technically, this could lead to incorrect FWD to STRUCT/UNION resolution,
 1959 * if there are two identically named (or anonymous) structs/unions that are
 1960 * structurally compatible, one of which has a FWD field, while the other has a
 1961 * concrete STRUCT/UNION field, but according to the C sources they are different
 1962 * structs/unions referencing different types with the same name. This is
 1963 * extremely unlikely to happen, but the btf_dedup API allows disabling FWD
 1964 * resolution if this logic causes problems.
1965 *
 1966 * Doing FWD resolution means that candidate and/or canonical graphs can
 1967 * consist of portions of the type graph that come from multiple compilation
 1968 * units. This is because types within a single compilation unit are always
 1969 * deduplicated and FWDs are already resolved, if the referenced struct/union
 1970 * definition is available. So, if we had an unresolved FWD and found a corresponding
1971 * STRUCT/UNION, they will be from different compilation units. This
1972 * consequently means that when we "link" FWD to corresponding STRUCT/UNION,
 1973 * the type graph will likely have at least two different BTF types that describe
 1974 * the same type (e.g., most probably there will be two different BTF types for the
 1975 * same 'int' primitive type) and could even have "overlapping" parts of the type
 1976 * graph that describe the same subset of types.
1977 *
1978 * This in turn means that our assumption that each type in canonical graph
1979 * must correspond to exactly one type in candidate graph might not hold
1980 * anymore and will make it harder to detect contradictions using hypothetical
 1981 * map. To handle this problem, we only allow following FWD -> STRUCT/UNION
 1982 * resolution in the canonical graph. FWDs in candidate graphs are never
1983 * resolved. To see why it's OK, let's check all possible situations w.r.t. FWDs
1984 * that can occur:
1985 * - Both types in canonical and candidate graphs are FWDs. If they are
1986 * structurally equivalent, then they can either be both resolved to the
1987 * same STRUCT/UNION or not resolved at all. In both cases they are
1988 * equivalent and there is no need to resolve FWD on candidate side.
1989 * - Both types in canonical and candidate graphs are concrete STRUCT/UNION,
 1990 * so there is nothing to resolve either; the algorithm will check equivalence anyway.
1991 * - Type in canonical graph is FWD, while type in candidate is concrete
1992 * STRUCT/UNION. In this case candidate graph comes from single compilation
1993 * unit, so there is exactly one BTF type for each unique C type. After
1994 * resolving FWD into STRUCT/UNION, there might be more than one BTF type
1995 * in canonical graph mapping to single BTF type in candidate graph, but
1996 * because hypothetical mapping maps from canonical to candidate types, it's
1997 * alright, and we still maintain the property of having single `canon_id`
1998 * mapping to single `cand_id` (there could be two different `canon_id`
1999 * mapped to the same `cand_id`, but it's not contradictory).
2000 * - Type in canonical graph is concrete STRUCT/UNION, while type in candidate
2001 * graph is FWD. In this case we are just going to check compatibility of
2002 * STRUCT/UNION and corresponding FWD, and if they are compatible, we'll
2003 * assume that whatever STRUCT/UNION FWD resolves to must be equivalent to
2004 * a concrete STRUCT/UNION from canonical graph. If the rest of type graphs
2005 * turn out equivalent, we'll re-resolve FWD to concrete STRUCT/UNION from
2006 * canonical graph.
2007 */
2008static int btf_dedup_is_equiv(struct btf_dedup *d, __u32 cand_id,
2009 __u32 canon_id)
2010{
2011 struct btf_type *cand_type;
2012 struct btf_type *canon_type;
2013 __u32 hypot_type_id;
2014 __u16 cand_kind;
2015 __u16 canon_kind;
2016 int i, eq;
2017
2018 /* if both resolve to the same canonical, they must be equivalent */
2019 if (resolve_type_id(d, cand_id) == resolve_type_id(d, canon_id))
2020 return 1;
2021
2022 canon_id = resolve_fwd_id(d, canon_id);
2023
2024 hypot_type_id = d->hypot_map[canon_id];
2025 if (hypot_type_id <= BTF_MAX_TYPE)
2026 return hypot_type_id == cand_id;
2027
2028 if (btf_dedup_hypot_map_add(d, canon_id, cand_id))
2029 return -ENOMEM;
2030
2031 cand_type = d->btf->types[cand_id];
2032 canon_type = d->btf->types[canon_id];
2033 cand_kind = BTF_INFO_KIND(cand_type->info);
2034 canon_kind = BTF_INFO_KIND(canon_type->info);
2035
2036 if (cand_type->name_off != canon_type->name_off)
2037 return 0;
2038
2039 /* FWD <--> STRUCT/UNION equivalence check, if enabled */
2040 if (!d->opts.dont_resolve_fwds
2041 && (cand_kind == BTF_KIND_FWD || canon_kind == BTF_KIND_FWD)
2042 && cand_kind != canon_kind) {
2043 __u16 real_kind;
2044 __u16 fwd_kind;
2045
2046 if (cand_kind == BTF_KIND_FWD) {
2047 real_kind = canon_kind;
2048 fwd_kind = btf_fwd_kind(cand_type);
2049 } else {
2050 real_kind = cand_kind;
2051 fwd_kind = btf_fwd_kind(canon_type);
2052 }
2053 return fwd_kind == real_kind;
2054 }
2055
2056 if (cand_type->info != canon_type->info)
2057 return 0;
2058
2059 switch (cand_kind) {
2060 case BTF_KIND_INT:
2061 return btf_equal_int(cand_type, canon_type);
2062
2063 case BTF_KIND_ENUM:
2064 return btf_equal_enum(cand_type, canon_type);
2065
2066 case BTF_KIND_FWD:
2067 return btf_equal_common(cand_type, canon_type);
2068
2069 case BTF_KIND_CONST:
2070 case BTF_KIND_VOLATILE:
2071 case BTF_KIND_RESTRICT:
2072 case BTF_KIND_PTR:
2073 case BTF_KIND_TYPEDEF:
2074 case BTF_KIND_FUNC:
2075 return btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
2076
2077 case BTF_KIND_ARRAY: {
2078 struct btf_array *cand_arr, *canon_arr;
2079
2080 if (!btf_compat_array(cand_type, canon_type))
2081 return 0;
2082 cand_arr = (struct btf_array *)(cand_type + 1);
2083 canon_arr = (struct btf_array *)(canon_type + 1);
2084 eq = btf_dedup_is_equiv(d,
2085 cand_arr->index_type, canon_arr->index_type);
2086 if (eq <= 0)
2087 return eq;
2088 return btf_dedup_is_equiv(d, cand_arr->type, canon_arr->type);
2089 }
2090
2091 case BTF_KIND_STRUCT:
2092 case BTF_KIND_UNION: {
2093 struct btf_member *cand_m, *canon_m;
2094 __u16 vlen;
2095
2096 if (!btf_equal_struct(cand_type, canon_type))
2097 return 0;
2098 vlen = BTF_INFO_VLEN(cand_type->info);
2099 cand_m = (struct btf_member *)(cand_type + 1);
2100 canon_m = (struct btf_member *)(canon_type + 1);
2101 for (i = 0; i < vlen; i++) {
2102 eq = btf_dedup_is_equiv(d, cand_m->type, canon_m->type);
2103 if (eq <= 0)
2104 return eq;
2105 cand_m++;
2106 canon_m++;
2107 }
2108
2109 return 1;
2110 }
2111
2112 case BTF_KIND_FUNC_PROTO: {
2113 struct btf_param *cand_p, *canon_p;
2114 __u16 vlen;
2115
2116 if (!btf_compat_fnproto(cand_type, canon_type))
2117 return 0;
2118 eq = btf_dedup_is_equiv(d, cand_type->type, canon_type->type);
2119 if (eq <= 0)
2120 return eq;
2121 vlen = BTF_INFO_VLEN(cand_type->info);
2122 cand_p = (struct btf_param *)(cand_type + 1);
2123 canon_p = (struct btf_param *)(canon_type + 1);
2124 for (i = 0; i < vlen; i++) {
2125 eq = btf_dedup_is_equiv(d, cand_p->type, canon_p->type);
2126 if (eq <= 0)
2127 return eq;
2128 cand_p++;
2129 canon_p++;
2130 }
2131 return 1;
2132 }
2133
2134 default:
2135 return -EINVAL;
2136 }
2137 return 0;
2138}
2139
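
The hypothetical-map mechanics described above can be illustrated on a toy graph of made-up nodes. This sketch only shows cycle handling and contradiction detection; FWD resolution and the hypot_list bookkeeping of the real btf_dedup_is_equiv() are deliberately left out:

#include <stdio.h>

#define NR_NODES 4
#define UNMAPPED (-1)

/* toy "type": a label plus up to two referenced node ids (-1 = no edge) */
struct node { char label; int ref[2]; };

/* candidate and canonical graphs, both containing the cycle 1 <-> 2 */
static struct node cand[NR_NODES]  = { {0, {-1, -1}}, {'s', {2, -1}}, {'p', {1, -1}}, {'i', {-1, -1}} };
static struct node canon[NR_NODES] = { {0, {-1, -1}}, {'s', {2, -1}}, {'p', {1, -1}}, {'i', {-1, -1}} };

static int hypot_map[NR_NODES]; /* canonical node id -> hypothesized candidate id */

static int is_equiv(int cand_id, int canon_id)
{
	if (hypot_map[canon_id] != UNMAPPED)
		return hypot_map[canon_id] == cand_id; /* cycle (ok) or contradiction (fail) */
	hypot_map[canon_id] = cand_id;

	if (cand[cand_id].label != canon[canon_id].label)
		return 0;
	for (int i = 0; i < 2; i++) {
		int cr = cand[cand_id].ref[i], nr = canon[canon_id].ref[i];

		if ((cr < 0) != (nr < 0))
			return 0;
		if (cr >= 0 && !is_equiv(cr, nr))
			return 0;
	}
	return 1;
}

int main(void)
{
	for (int i = 0; i < NR_NODES; i++)
		hypot_map[i] = UNMAPPED;
	printf("equivalent: %d\n", is_equiv(1, 1));
	return 0;
}
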
2140/*
2141 * Use hypothetical mapping, produced by successful type graph equivalence
2142 * check, to augment existing struct/union canonical mapping, where possible.
2143 *
2144 * If BTF_KIND_FWD resolution is allowed, this mapping is also used to record
2145 * FWD -> STRUCT/UNION correspondence as well. FWD resolution is bidirectional:
2146 * it doesn't matter if FWD type was part of canonical graph or candidate one,
 2147 * we are recording the mapping anyway. As opposed to the carefulness required
 2148 * for struct/union correspondence mapping (described below), for FWD resolution
 2149 * this is not critical, as by the time that FWD type (a reference type) is
 2150 * deduplicated, all structs/unions will already be deduped anyway.
2151 *
2152 * Recording STRUCT/UNION mapping is purely a performance optimization and is
2153 * not required for correctness. It needs to be done carefully to ensure that
2154 * struct/union from candidate's type graph is not mapped into corresponding
2155 * struct/union from canonical type graph that itself hasn't been resolved into
2156 * canonical representative. The only guarantee we have is that canonical
2157 * struct/union was determined as canonical and that won't change. But any
 2158 * types referenced through that struct/union's fields might not have been
 2159 * resolved yet, so in a case like that it's too early to establish any kind of
2160 * correspondence between structs/unions.
2161 *
2162 * No canonical correspondence is derived for primitive types (they are already
 2163 * deduplicated completely anyway) or reference types (they rely on
2164 * stability of struct/union canonical relationship for equivalence checks).
2165 */
2166static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
2167{
2168 __u32 cand_type_id, targ_type_id;
2169 __u16 t_kind, c_kind;
2170 __u32 t_id, c_id;
2171 int i;
2172
2173 for (i = 0; i < d->hypot_cnt; i++) {
2174 cand_type_id = d->hypot_list[i];
2175 targ_type_id = d->hypot_map[cand_type_id];
2176 t_id = resolve_type_id(d, targ_type_id);
2177 c_id = resolve_type_id(d, cand_type_id);
2178 t_kind = BTF_INFO_KIND(d->btf->types[t_id]->info);
2179 c_kind = BTF_INFO_KIND(d->btf->types[c_id]->info);
2180 /*
2181 * Resolve FWD into STRUCT/UNION.
2182 * It's ok to resolve FWD into STRUCT/UNION that's not yet
2183 * mapped to canonical representative (as opposed to
2184 * STRUCT/UNION <--> STRUCT/UNION mapping logic below), because
2185 * eventually that struct is going to be mapped and all resolved
 2186 * FWDs will automatically resolve to the correct canonical
 2187 * representative. This will happen before ref type deduping,
 2188 * which critically depends on the stability of these mappings. This
2189 * stability is not a requirement for STRUCT/UNION equivalence
2190 * checks, though.
2191 */
2192 if (t_kind != BTF_KIND_FWD && c_kind == BTF_KIND_FWD)
2193 d->map[c_id] = t_id;
2194 else if (t_kind == BTF_KIND_FWD && c_kind != BTF_KIND_FWD)
2195 d->map[t_id] = c_id;
2196
2197 if ((t_kind == BTF_KIND_STRUCT || t_kind == BTF_KIND_UNION) &&
2198 c_kind != BTF_KIND_FWD &&
2199 is_type_mapped(d, c_id) &&
2200 !is_type_mapped(d, t_id)) {
2201 /*
2202 * as a perf optimization, we can map struct/union
2203 * that's part of type graph we just verified for
2204 * equivalence. We can do that for struct/union that has
2205 * canonical representative only, though.
2206 */
2207 d->map[t_id] = c_id;
2208 }
2209 }
2210}
2211
2212/*
2213 * Deduplicate struct/union types.
2214 *
2215 * For each struct/union type its type signature hash is calculated, taking
 2216 * into account the type's name, size, and the number, order and names of fields,
 2217 * but ignoring type IDs referenced from fields, because they might not be deduped
2218 * completely until after reference types deduplication phase. This type hash
2219 * is used to iterate over all potential canonical types, sharing same hash.
2220 * For each canonical candidate we check whether type graphs that they form
 2221 * (through referenced types in fields and so on) are equivalent, using the algorithm
2222 * implemented in `btf_dedup_is_equiv`. If such equivalence is found and
2223 * BTF_KIND_FWD resolution is allowed, then hypothetical mapping
2224 * (btf_dedup->hypot_map) produced by aforementioned type graph equivalence
2225 * algorithm is used to record FWD -> STRUCT/UNION mapping. It's also used to
2226 * potentially map other structs/unions to their canonical representatives,
2227 * if such relationship hasn't yet been established. This speeds up algorithm
2228 * by eliminating some of the duplicate work.
2229 *
 2230 * If no matching canonical representative is found, the struct/union is marked
 2231 * as its own canonical representative and is added into the btf_dedup->dedup_table
 2232 * hash map for further lookups.
2233 */
2234static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
2235{
2236 struct btf_dedup_node *cand_node;
2237 struct btf_type *t;
2238 /* if we don't find equivalent type, then we are canonical */
2239 __u32 new_id = type_id;
2240 __u16 kind;
2241 __u32 h;
2242
2243 /* already deduped or is in process of deduping (loop detected) */
2244 if (d->map[type_id] <= BTF_MAX_TYPE)
2245 return 0;
2246
2247 t = d->btf->types[type_id];
2248 kind = BTF_INFO_KIND(t->info);
2249
2250 if (kind != BTF_KIND_STRUCT && kind != BTF_KIND_UNION)
2251 return 0;
2252
2253 h = btf_hash_struct(t);
2254 for_each_hash_node(d->dedup_table, h, cand_node) {
2255 int eq;
2256
2257 btf_dedup_clear_hypot_map(d);
2258 eq = btf_dedup_is_equiv(d, type_id, cand_node->type_id);
2259 if (eq < 0)
2260 return eq;
2261 if (!eq)
2262 continue;
2263 new_id = cand_node->type_id;
2264 btf_dedup_merge_hypot_map(d);
2265 break;
2266 }
2267
2268 d->map[type_id] = new_id;
2269 if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
2270 return -ENOMEM;
2271
2272 return 0;
2273}
2274
2275static int btf_dedup_struct_types(struct btf_dedup *d)
2276{
2277 int i, err;
2278
2279 for (i = 1; i <= d->btf->nr_types; i++) {
2280 err = btf_dedup_struct_type(d, i);
2281 if (err)
2282 return err;
2283 }
2284 return 0;
2285}
2286
2287/*
2288 * Deduplicate reference type.
2289 *
 2290 * Once all primitive and struct/union types are deduplicated, we can easily
2291 * deduplicate all other (reference) BTF types. This is done in two steps:
2292 *
2293 * 1. Resolve all referenced type IDs into their canonical type IDs. This
2294 * resolution can be done either immediately for primitive or struct/union types
2295 * (because they were deduped in previous two phases) or recursively for
2296 * reference types. Recursion will always terminate at either primitive or
2297 * struct/union type, at which point we can "unwind" chain of reference types
 2298 * one by one. There is no danger of encountering cycles because in the C type
 2299 * system the only way to form a type cycle is through a struct/union, so any chain
2300 * of reference types, even those taking part in a type cycle, will inevitably
2301 * reach struct/union at some point.
2302 *
2303 * 2. Once all referenced type IDs are resolved into canonical ones, BTF type
2304 * becomes "stable", in the sense that no further deduplication will cause
 2305 * any changes to it. With that, it's now possible to calculate the type's signature
 2306 * hash (this time taking into account referenced type IDs) and loop over all
 2307 * potential canonical representatives. If no match is found, the current type
 2308 * becomes the canonical representative of itself and is added into
2309 * btf_dedup->dedup_table as another possible canonical representative.
2310 */
2311static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
2312{
2313 struct btf_dedup_node *cand_node;
2314 struct btf_type *t, *cand;
2315 /* if we don't find equivalent type, then we are representative type */
2316 __u32 new_id = type_id;
 2317	__u32 h;
	int ref_type_id;
2318
2319 if (d->map[type_id] == BTF_IN_PROGRESS_ID)
2320 return -ELOOP;
2321 if (d->map[type_id] <= BTF_MAX_TYPE)
2322 return resolve_type_id(d, type_id);
2323
2324 t = d->btf->types[type_id];
2325 d->map[type_id] = BTF_IN_PROGRESS_ID;
2326
2327 switch (BTF_INFO_KIND(t->info)) {
2328 case BTF_KIND_CONST:
2329 case BTF_KIND_VOLATILE:
2330 case BTF_KIND_RESTRICT:
2331 case BTF_KIND_PTR:
2332 case BTF_KIND_TYPEDEF:
2333 case BTF_KIND_FUNC:
2334 ref_type_id = btf_dedup_ref_type(d, t->type);
2335 if (ref_type_id < 0)
2336 return ref_type_id;
2337 t->type = ref_type_id;
2338
2339 h = btf_hash_common(t);
2340 for_each_hash_node(d->dedup_table, h, cand_node) {
2341 cand = d->btf->types[cand_node->type_id];
2342 if (btf_equal_common(t, cand)) {
2343 new_id = cand_node->type_id;
2344 break;
2345 }
2346 }
2347 break;
2348
2349 case BTF_KIND_ARRAY: {
2350 struct btf_array *info = (struct btf_array *)(t + 1);
2351
2352 ref_type_id = btf_dedup_ref_type(d, info->type);
2353 if (ref_type_id < 0)
2354 return ref_type_id;
2355 info->type = ref_type_id;
2356
2357 ref_type_id = btf_dedup_ref_type(d, info->index_type);
2358 if (ref_type_id < 0)
2359 return ref_type_id;
2360 info->index_type = ref_type_id;
2361
2362 h = btf_hash_array(t);
2363 for_each_hash_node(d->dedup_table, h, cand_node) {
2364 cand = d->btf->types[cand_node->type_id];
2365 if (btf_equal_array(t, cand)) {
2366 new_id = cand_node->type_id;
2367 break;
2368 }
2369 }
2370 break;
2371 }
2372
2373 case BTF_KIND_FUNC_PROTO: {
2374 struct btf_param *param;
2375 __u16 vlen;
2376 int i;
2377
2378 ref_type_id = btf_dedup_ref_type(d, t->type);
2379 if (ref_type_id < 0)
2380 return ref_type_id;
2381 t->type = ref_type_id;
2382
2383 vlen = BTF_INFO_VLEN(t->info);
2384 param = (struct btf_param *)(t + 1);
2385 for (i = 0; i < vlen; i++) {
2386 ref_type_id = btf_dedup_ref_type(d, param->type);
2387 if (ref_type_id < 0)
2388 return ref_type_id;
2389 param->type = ref_type_id;
2390 param++;
2391 }
2392
2393 h = btf_hash_fnproto(t);
2394 for_each_hash_node(d->dedup_table, h, cand_node) {
2395 cand = d->btf->types[cand_node->type_id];
2396 if (btf_equal_fnproto(t, cand)) {
2397 new_id = cand_node->type_id;
2398 break;
2399 }
2400 }
2401 break;
2402 }
2403
2404 default:
2405 return -EINVAL;
2406 }
2407
2408 d->map[type_id] = new_id;
2409 if (type_id == new_id && btf_dedup_table_add(d, h, type_id))
2410 return -ENOMEM;
2411
2412 return new_id;
2413}
2414
2415static int btf_dedup_ref_types(struct btf_dedup *d)
2416{
2417 int i, err;
2418
2419 for (i = 1; i <= d->btf->nr_types; i++) {
2420 err = btf_dedup_ref_type(d, i);
2421 if (err < 0)
2422 return err;
2423 }
2424 btf_dedup_table_free(d);
2425 return 0;
2426}
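
The IN_PROGRESS marker is what turns a pure reference cycle into -ELOOP. A standalone sketch of that recursion; the sentinels and the toy reference array are assumptions that mirror BTF_UNPROCESSED_ID/BTF_IN_PROGRESS_ID only in spirit, and there is no dedup table here, so every valid type simply maps to itself:

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define NR_TYPES 5
#define UNPROCESSED 0xFFFFFFFFu
#define IN_PROGRESS 0xFFFFFFFEu

/* toy reference chain: type i refers to ref[i] (0 = terminal) */
static uint32_t ref[NR_TYPES] = { 0, 0, 1, 4, 3 }; /* 3 <-> 4 form a bad cycle */
static uint32_t map[NR_TYPES] = { 0, UNPROCESSED, UNPROCESSED, UNPROCESSED, UNPROCESSED };

static int dedup_ref(uint32_t id)
{
	int r;

	if (map[id] == IN_PROGRESS)
		return -ELOOP;          /* pure reference cycle: invalid BTF */
	if (map[id] != UNPROCESSED)
		return map[id];         /* already resolved */

	map[id] = IN_PROGRESS;
	if (ref[id]) {
		r = dedup_ref(ref[id]);
		if (r < 0)
			return r;
	}
	map[id] = id;                   /* resolved: map to self */
	return id;
}

int main(void)
{
	for (uint32_t i = 1; i < NR_TYPES; i++)
		printf("type %u -> %d\n", i, dedup_ref(i));
	return 0;
}
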
2427
2428/*
2429 * Compact types.
2430 *
 2431 * After we have established each type's corresponding canonical representative
 2432 * type, we can now eliminate types that are not canonical and leave only
 2433 * canonical ones laid out sequentially in memory by copying them over
2434 * duplicates. During compaction btf_dedup->hypot_map array is reused to store
2435 * a map from original type ID to a new compacted type ID, which will be used
2436 * during next phase to "fix up" type IDs, referenced from struct/union and
2437 * reference types.
2438 */
2439static int btf_dedup_compact_types(struct btf_dedup *d)
2440{
2441 struct btf_type **new_types;
2442 __u32 next_type_id = 1;
2443 char *types_start, *p;
2444 int i, len;
2445
2446 /* we are going to reuse hypot_map to store compaction remapping */
2447 d->hypot_map[0] = 0;
2448 for (i = 1; i <= d->btf->nr_types; i++)
2449 d->hypot_map[i] = BTF_UNPROCESSED_ID;
2450
2451 types_start = d->btf->nohdr_data + d->btf->hdr->type_off;
2452 p = types_start;
2453
2454 for (i = 1; i <= d->btf->nr_types; i++) {
2455 if (d->map[i] != i)
2456 continue;
2457
2458 len = btf_type_size(d->btf->types[i]);
2459 if (len < 0)
2460 return len;
2461
2462 memmove(p, d->btf->types[i], len);
2463 d->hypot_map[i] = next_type_id;
2464 d->btf->types[next_type_id] = (struct btf_type *)p;
2465 p += len;
2466 next_type_id++;
2467 }
2468
2469 /* shrink struct btf's internal types index and update btf_header */
2470 d->btf->nr_types = next_type_id - 1;
2471 d->btf->types_size = d->btf->nr_types;
2472 d->btf->hdr->type_len = p - types_start;
2473 new_types = realloc(d->btf->types,
2474 (1 + d->btf->nr_types) * sizeof(struct btf_type *));
2475 if (!new_types)
2476 return -ENOMEM;
2477 d->btf->types = new_types;
2478
2479 /* make sure string section follows type information without gaps */
2480 d->btf->hdr->str_off = p - (char *)d->btf->nohdr_data;
2481 memmove(p, d->btf->strings, d->btf->hdr->str_len);
2482 d->btf->strings = p;
2483 p += d->btf->hdr->str_len;
2484
2485 d->btf->data_size = p - (char *)d->btf->data;
2486 return 0;
2487}
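
Compaction boils down to: keep the entries that map to themselves, hand them dense new IDs, and record old -> new in a remap table. A small sketch with a map that is assumed to be fully resolved already (the real code also follows mapping chains via resolve_type_id()):

#include <stdio.h>
#include <stdint.h>

#define NR_TYPES 6

int main(void)
{
	/* map[i] == i means type i is canonical and survives compaction */
	uint32_t map[NR_TYPES]   = { 0, 1, 1, 3, 3, 5 };
	uint32_t remap[NR_TYPES] = { 0 };
	uint32_t next_id = 1;

	for (uint32_t i = 1; i < NR_TYPES; i++) {
		if (map[i] != i)
			continue;       /* duplicate: dropped during compaction */
		remap[i] = next_id++;   /* canonical: gets the next dense ID */
	}

	/* every original ID resolves through its canonical to a dense new ID */
	for (uint32_t i = 1; i < NR_TYPES; i++)
		printf("old %u -> new %u\n", i, remap[map[i]]);
	return 0;
}
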
2488
2489/*
2490 * Figure out final (deduplicated and compacted) type ID for provided original
2491 * `type_id` by first resolving it into corresponding canonical type ID and
2492 * then mapping it to a deduplicated type ID, stored in btf_dedup->hypot_map,
2493 * which is populated during compaction phase.
2494 */
2495static int btf_dedup_remap_type_id(struct btf_dedup *d, __u32 type_id)
2496{
2497 __u32 resolved_type_id, new_type_id;
2498
2499 resolved_type_id = resolve_type_id(d, type_id);
2500 new_type_id = d->hypot_map[resolved_type_id];
2501 if (new_type_id > BTF_MAX_TYPE)
2502 return -EINVAL;
2503 return new_type_id;
2504}
2505
2506/*
2507 * Remap referenced type IDs into deduped type IDs.
2508 *
2509 * After BTF types are deduplicated and compacted, their final type IDs may
2510 * differ from original ones. The map from original to a corresponding
2511 * deduped type ID is stored in btf_dedup->hypot_map and is populated during
 2512 * compaction phase. During the remapping phase we rewrite all type IDs
2513 * referenced from any BTF type (e.g., struct fields, func proto args, etc) to
2514 * their final deduped type IDs.
2515 */
2516static int btf_dedup_remap_type(struct btf_dedup *d, __u32 type_id)
2517{
2518 struct btf_type *t = d->btf->types[type_id];
2519 int i, r;
2520
2521 switch (BTF_INFO_KIND(t->info)) {
2522 case BTF_KIND_INT:
2523 case BTF_KIND_ENUM:
2524 break;
2525
2526 case BTF_KIND_FWD:
2527 case BTF_KIND_CONST:
2528 case BTF_KIND_VOLATILE:
2529 case BTF_KIND_RESTRICT:
2530 case BTF_KIND_PTR:
2531 case BTF_KIND_TYPEDEF:
2532 case BTF_KIND_FUNC:
2533 r = btf_dedup_remap_type_id(d, t->type);
2534 if (r < 0)
2535 return r;
2536 t->type = r;
2537 break;
2538
2539 case BTF_KIND_ARRAY: {
2540 struct btf_array *arr_info = (struct btf_array *)(t + 1);
2541
2542 r = btf_dedup_remap_type_id(d, arr_info->type);
2543 if (r < 0)
2544 return r;
2545 arr_info->type = r;
2546 r = btf_dedup_remap_type_id(d, arr_info->index_type);
2547 if (r < 0)
2548 return r;
2549 arr_info->index_type = r;
2550 break;
2551 }
2552
2553 case BTF_KIND_STRUCT:
2554 case BTF_KIND_UNION: {
2555 struct btf_member *member = (struct btf_member *)(t + 1);
2556 __u16 vlen = BTF_INFO_VLEN(t->info);
2557
2558 for (i = 0; i < vlen; i++) {
2559 r = btf_dedup_remap_type_id(d, member->type);
2560 if (r < 0)
2561 return r;
2562 member->type = r;
2563 member++;
2564 }
2565 break;
2566 }
2567
2568 case BTF_KIND_FUNC_PROTO: {
2569 struct btf_param *param = (struct btf_param *)(t + 1);
2570 __u16 vlen = BTF_INFO_VLEN(t->info);
2571
2572 r = btf_dedup_remap_type_id(d, t->type);
2573 if (r < 0)
2574 return r;
2575 t->type = r;
2576
2577 for (i = 0; i < vlen; i++) {
2578 r = btf_dedup_remap_type_id(d, param->type);
2579 if (r < 0)
2580 return r;
2581 param->type = r;
2582 param++;
2583 }
2584 break;
2585 }
2586
2587 default:
2588 return -EINVAL;
2589 }
2590
2591 return 0;
2592}
2593
2594static int btf_dedup_remap_types(struct btf_dedup *d)
2595{
2596 int i, r;
2597
2598 for (i = 1; i <= d->btf->nr_types; i++) {
2599 r = btf_dedup_remap_type(d, i);
2600 if (r < 0)
2601 return r;
2602 }
2603 return 0;
2604}
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index b0610dcdae6b..b393da90cc85 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -55,33 +55,44 @@ struct btf_ext_header {
55 __u32 line_info_len; 55 __u32 line_info_len;
56}; 56};
57 57
58typedef int (*btf_print_fn_t)(const char *, ...)
59 __attribute__((format(printf, 1, 2)));
60
61LIBBPF_API void btf__free(struct btf *btf); 58LIBBPF_API void btf__free(struct btf *btf);
62LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size, btf_print_fn_t err_log); 59LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size);
63LIBBPF_API __s32 btf__find_by_name(const struct btf *btf, 60LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
64 const char *type_name); 61 const char *type_name);
62LIBBPF_API __u32 btf__get_nr_types(const struct btf *btf);
65LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf, 63LIBBPF_API const struct btf_type *btf__type_by_id(const struct btf *btf,
66 __u32 id); 64 __u32 id);
67LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id); 65LIBBPF_API __s64 btf__resolve_size(const struct btf *btf, __u32 type_id);
68LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id); 66LIBBPF_API int btf__resolve_type(const struct btf *btf, __u32 type_id);
69LIBBPF_API int btf__fd(const struct btf *btf); 67LIBBPF_API int btf__fd(const struct btf *btf);
68LIBBPF_API void btf__get_strings(const struct btf *btf, const char **strings,
69 __u32 *str_len);
70LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset); 70LIBBPF_API const char *btf__name_by_offset(const struct btf *btf, __u32 offset);
71LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf); 71LIBBPF_API int btf__get_from_id(__u32 id, struct btf **btf);
72LIBBPF_API int btf__get_map_kv_tids(const struct btf *btf, const char *map_name,
73 __u32 expected_key_size,
74 __u32 expected_value_size,
75 __u32 *key_type_id, __u32 *value_type_id);
76
77LIBBPF_API struct btf_ext *btf_ext__new(__u8 *data, __u32 size);
78LIBBPF_API void btf_ext__free(struct btf_ext *btf_ext);
79LIBBPF_API int btf_ext__reloc_func_info(const struct btf *btf,
80 const struct btf_ext *btf_ext,
81 const char *sec_name, __u32 insns_cnt,
82 void **func_info, __u32 *cnt);
83LIBBPF_API int btf_ext__reloc_line_info(const struct btf *btf,
84 const struct btf_ext *btf_ext,
85 const char *sec_name, __u32 insns_cnt,
86 void **line_info, __u32 *cnt);
87LIBBPF_API __u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext);
88LIBBPF_API __u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext);
89
90struct btf_dedup_opts {
91 bool dont_resolve_fwds;
92};
72 93
73struct btf_ext *btf_ext__new(__u8 *data, __u32 size, btf_print_fn_t err_log); 94LIBBPF_API int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
74void btf_ext__free(struct btf_ext *btf_ext); 95 const struct btf_dedup_opts *opts);
75int btf_ext__reloc_func_info(const struct btf *btf,
76 const struct btf_ext *btf_ext,
77 const char *sec_name, __u32 insns_cnt,
78 void **func_info, __u32 *func_info_len);
79int btf_ext__reloc_line_info(const struct btf *btf,
80 const struct btf_ext *btf_ext,
81 const char *sec_name, __u32 insns_cnt,
82 void **line_info, __u32 *cnt);
83__u32 btf_ext__func_info_rec_size(const struct btf_ext *btf_ext);
84__u32 btf_ext__line_info_rec_size(const struct btf_ext *btf_ext);
85 96
86#ifdef __cplusplus 97#ifdef __cplusplus
87} /* extern "C" */ 98} /* extern "C" */
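
A rough sketch of how the new public dedup API declared above might be driven from user code. The header path, the raw-BTF input file and the ERR_PTR-style error check are assumptions, and error handling is heavily simplified:

/* gcc -o dedup_demo dedup_demo.c -lbpf  (built against this tree's tools/lib/bpf) */
#include <stdio.h>
#include <stdlib.h>
#include <bpf/btf.h>

/* helper assuming libbpf's ERR_PTR-style error encoding of this era */
static int is_err_ptr(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-4095L;
}

int main(int argc, char **argv)
{
	struct btf_dedup_opts opts = { .dont_resolve_fwds = false };
	struct btf *btf;
	__u8 *data;
	long size;
	FILE *f;

	if (argc != 2)
		return 1;
	f = fopen(argv[1], "rb");       /* raw .BTF section dumped to a file */
	if (!f)
		return 1;
	fseek(f, 0, SEEK_END);
	size = ftell(f);
	fseek(f, 0, SEEK_SET);
	data = malloc(size);
	if (!data || fread(data, 1, size, f) != (size_t)size)
		return 1;
	fclose(f);

	btf = btf__new(data, size);
	if (is_err_ptr(btf)) {
		fprintf(stderr, "btf__new failed\n");
		return 1;
	}
	printf("types before dedup: %u\n", btf__get_nr_types(btf));
	if (btf__dedup(btf, NULL, &opts))
		fprintf(stderr, "btf__dedup failed\n");
	else
		printf("types after dedup: %u\n", btf__get_nr_types(btf));

	btf__free(btf);
	free(data);
	return 0;
}
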
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 03bc01ca2577..47969aa0faf8 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -42,6 +42,7 @@
42#include "bpf.h" 42#include "bpf.h"
43#include "btf.h" 43#include "btf.h"
44#include "str_error.h" 44#include "str_error.h"
45#include "libbpf_util.h"
45 46
46#ifndef EM_BPF 47#ifndef EM_BPF
47#define EM_BPF 247 48#define EM_BPF 247
@@ -53,39 +54,33 @@
53 54
54#define __printf(a, b) __attribute__((format(printf, a, b))) 55#define __printf(a, b) __attribute__((format(printf, a, b)))
55 56
56__printf(1, 2) 57static int __base_pr(enum libbpf_print_level level, const char *format,
57static int __base_pr(const char *format, ...) 58 va_list args)
58{ 59{
59 va_list args; 60 if (level == LIBBPF_DEBUG)
60 int err; 61 return 0;
61 62
62 va_start(args, format); 63 return vfprintf(stderr, format, args);
63 err = vfprintf(stderr, format, args);
64 va_end(args);
65 return err;
66} 64}
67 65
68static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr; 66static libbpf_print_fn_t __libbpf_pr = __base_pr;
69static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
70static __printf(1, 2) libbpf_print_fn_t __pr_debug;
71
72#define __pr(func, fmt, ...) \
73do { \
74 if ((func)) \
75 (func)("libbpf: " fmt, ##__VA_ARGS__); \
76} while (0)
77 67
78#define pr_warning(fmt, ...) __pr(__pr_warning, fmt, ##__VA_ARGS__) 68void libbpf_set_print(libbpf_print_fn_t fn)
79#define pr_info(fmt, ...) __pr(__pr_info, fmt, ##__VA_ARGS__) 69{
80#define pr_debug(fmt, ...) __pr(__pr_debug, fmt, ##__VA_ARGS__) 70 __libbpf_pr = fn;
71}
81 72
82void libbpf_set_print(libbpf_print_fn_t warn, 73__printf(2, 3)
83 libbpf_print_fn_t info, 74void libbpf_print(enum libbpf_print_level level, const char *format, ...)
84 libbpf_print_fn_t debug)
85{ 75{
86 __pr_warning = warn; 76 va_list args;
87 __pr_info = info; 77
88 __pr_debug = debug; 78 if (!__libbpf_pr)
79 return;
80
81 va_start(args, format);
82 __libbpf_pr(level, format, args);
83 va_end(args);
89} 84}
90 85
91#define STRERR_BUFSIZE 128 86#define STRERR_BUFSIZE 128
@@ -839,8 +834,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
839 else if (strcmp(name, "maps") == 0) 834 else if (strcmp(name, "maps") == 0)
840 obj->efile.maps_shndx = idx; 835 obj->efile.maps_shndx = idx;
841 else if (strcmp(name, BTF_ELF_SEC) == 0) { 836 else if (strcmp(name, BTF_ELF_SEC) == 0) {
842 obj->btf = btf__new(data->d_buf, data->d_size, 837 obj->btf = btf__new(data->d_buf, data->d_size);
843 __pr_debug);
844 if (IS_ERR(obj->btf)) { 838 if (IS_ERR(obj->btf)) {
845 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n", 839 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
846 BTF_ELF_SEC, PTR_ERR(obj->btf)); 840 BTF_ELF_SEC, PTR_ERR(obj->btf));
@@ -915,8 +909,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
915 BTF_EXT_ELF_SEC, BTF_ELF_SEC); 909 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
916 } else { 910 } else {
917 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, 911 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
918 btf_ext_data->d_size, 912 btf_ext_data->d_size);
919 __pr_debug);
920 if (IS_ERR(obj->btf_ext)) { 913 if (IS_ERR(obj->btf_ext)) {
921 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n", 914 pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
922 BTF_EXT_ELF_SEC, 915 BTF_EXT_ELF_SEC,
@@ -1057,72 +1050,18 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
1057 1050
1058static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf) 1051static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
1059{ 1052{
1060 const struct btf_type *container_type;
1061 const struct btf_member *key, *value;
1062 struct bpf_map_def *def = &map->def; 1053 struct bpf_map_def *def = &map->def;
1063 const size_t max_name = 256; 1054 __u32 key_type_id, value_type_id;
1064 char container_name[max_name]; 1055 int ret;
1065 __s64 key_size, value_size;
1066 __s32 container_id;
1067
1068 if (snprintf(container_name, max_name, "____btf_map_%s", map->name) ==
1069 max_name) {
1070 pr_warning("map:%s length of '____btf_map_%s' is too long\n",
1071 map->name, map->name);
1072 return -EINVAL;
1073 }
1074
1075 container_id = btf__find_by_name(btf, container_name);
1076 if (container_id < 0) {
1077 pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
1078 map->name, container_name);
1079 return container_id;
1080 }
1081
1082 container_type = btf__type_by_id(btf, container_id);
1083 if (!container_type) {
1084 pr_warning("map:%s cannot find BTF type for container_id:%u\n",
1085 map->name, container_id);
1086 return -EINVAL;
1087 }
1088
1089 if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
1090 BTF_INFO_VLEN(container_type->info) < 2) {
1091 pr_warning("map:%s container_name:%s is an invalid container struct\n",
1092 map->name, container_name);
1093 return -EINVAL;
1094 }
1095
1096 key = (struct btf_member *)(container_type + 1);
1097 value = key + 1;
1098
1099 key_size = btf__resolve_size(btf, key->type);
1100 if (key_size < 0) {
1101 pr_warning("map:%s invalid BTF key_type_size\n",
1102 map->name);
1103 return key_size;
1104 }
1105
1106 if (def->key_size != key_size) {
1107 pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
1108 map->name, (__u32)key_size, def->key_size);
1109 return -EINVAL;
1110 }
1111
1112 value_size = btf__resolve_size(btf, value->type);
1113 if (value_size < 0) {
1114 pr_warning("map:%s invalid BTF value_type_size\n", map->name);
1115 return value_size;
1116 }
1117 1056
1118 if (def->value_size != value_size) { 1057 ret = btf__get_map_kv_tids(btf, map->name, def->key_size,
1119 pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n", 1058 def->value_size, &key_type_id,
1120 map->name, (__u32)value_size, def->value_size); 1059 &value_type_id);
1121 return -EINVAL; 1060 if (ret)
1122 } 1061 return ret;
1123 1062
1124 map->btf_key_type_id = key->type; 1063 map->btf_key_type_id = key_type_id;
1125 map->btf_value_type_id = value->type; 1064 map->btf_value_type_id = value_type_id;
1126 1065
1127 return 0; 1066 return 0;
1128} 1067}
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index 43c77e98df6f..69a7c25eaccc 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -47,17 +47,16 @@ enum libbpf_errno {
47 47
48LIBBPF_API int libbpf_strerror(int err, char *buf, size_t size); 48LIBBPF_API int libbpf_strerror(int err, char *buf, size_t size);
49 49
50/* 50enum libbpf_print_level {
51 * __printf is defined in include/linux/compiler-gcc.h. However, 51 LIBBPF_WARN,
52 * it would be better if libbpf.h didn't depend on Linux header files. 52 LIBBPF_INFO,
53 * So instead of __printf, here we use gcc attribute directly. 53 LIBBPF_DEBUG,
54 */ 54};
55typedef int (*libbpf_print_fn_t)(const char *, ...) 55
56 __attribute__((format(printf, 1, 2))); 56typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level,
57 const char *, va_list ap);
57 58
58LIBBPF_API void libbpf_set_print(libbpf_print_fn_t warn, 59LIBBPF_API void libbpf_set_print(libbpf_print_fn_t fn);
59 libbpf_print_fn_t info,
60 libbpf_print_fn_t debug);
61 60
62/* Hide internal to user */ 61/* Hide internal to user */
63struct bpf_object; 62struct bpf_object;
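
With the new single-callback API above, a consumer installs one vprintf-style handler and filters on the level itself. A minimal sketch (header path assumed, error handling omitted):

#include <stdio.h>
#include <stdarg.h>
#include <bpf/libbpf.h>

/* print callback matching the new single-callback API: filter by level */
static int my_print(enum libbpf_print_level level, const char *fmt, va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;                       /* drop debug chatter */
	return vfprintf(stderr, fmt, args);     /* warnings and info to stderr */
}

int main(void)
{
	libbpf_set_print(my_print);

	/* any libbpf call from here on routes its messages through my_print();
	 * opening a nonexistent object is enough to trigger a warning.
	 */
	bpf_object__open("/nonexistent.o");
	return 0;
}
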
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 62c680fb13d1..89c1149e32ee 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -133,4 +133,14 @@ LIBBPF_0.0.2 {
133 bpf_map_lookup_elem_flags; 133 bpf_map_lookup_elem_flags;
134 bpf_object__find_map_fd_by_name; 134 bpf_object__find_map_fd_by_name;
135 bpf_get_link_xdp_id; 135 bpf_get_link_xdp_id;
136 btf__dedup;
137 btf__get_map_kv_tids;
138 btf__get_nr_types;
139 btf__get_strings;
140 btf_ext__free;
141 btf_ext__func_info_rec_size;
142 btf_ext__line_info_rec_size;
143 btf_ext__new;
144 btf_ext__reloc_func_info;
145 btf_ext__reloc_line_info;
136} LIBBPF_0.0.1; 146} LIBBPF_0.0.1;
diff --git a/tools/lib/bpf/libbpf_util.h b/tools/lib/bpf/libbpf_util.h
new file mode 100644
index 000000000000..81ecda0cb9c9
--- /dev/null
+++ b/tools/lib/bpf/libbpf_util.h
@@ -0,0 +1,30 @@
1/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
2/* Copyright (c) 2019 Facebook */
3
4#ifndef __LIBBPF_LIBBPF_UTIL_H
5#define __LIBBPF_LIBBPF_UTIL_H
6
7#include <stdbool.h>
8
9#ifdef __cplusplus
10extern "C" {
11#endif
12
13extern void libbpf_print(enum libbpf_print_level level,
14 const char *format, ...)
15 __attribute__((format(printf, 2, 3)));
16
17#define __pr(level, fmt, ...) \
18do { \
19 libbpf_print(level, "libbpf: " fmt, ##__VA_ARGS__); \
20} while (0)
21
22#define pr_warning(fmt, ...) __pr(LIBBPF_WARN, fmt, ##__VA_ARGS__)
23#define pr_info(fmt, ...) __pr(LIBBPF_INFO, fmt, ##__VA_ARGS__)
24#define pr_debug(fmt, ...) __pr(LIBBPF_DEBUG, fmt, ##__VA_ARGS__)
25
26#ifdef __cplusplus
27} /* extern "C" */
28#endif
29
30#endif
diff --git a/tools/lib/bpf/test_libbpf.cpp b/tools/lib/bpf/test_libbpf.cpp
index abf3fc25c9fa..fc134873bb6d 100644
--- a/tools/lib/bpf/test_libbpf.cpp
+++ b/tools/lib/bpf/test_libbpf.cpp
@@ -8,11 +8,11 @@
8int main(int argc, char *argv[]) 8int main(int argc, char *argv[])
9{ 9{
10 /* libbpf.h */ 10 /* libbpf.h */
11 libbpf_set_print(NULL, NULL, NULL); 11 libbpf_set_print(NULL);
12 12
13 /* bpf.h */ 13 /* bpf.h */
14 bpf_prog_get_fd_by_id(0); 14 bpf_prog_get_fd_by_id(0);
15 15
16 /* btf.h */ 16 /* btf.h */
17 btf__new(NULL, 0, NULL); 17 btf__new(NULL, 0);
18} 18}
diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c
index 2f3eb6d293ee..037d8ff6a634 100644
--- a/tools/perf/util/bpf-loader.c
+++ b/tools/perf/util/bpf-loader.c
@@ -24,22 +24,12 @@
24#include "llvm-utils.h" 24#include "llvm-utils.h"
25#include "c++/clang-c.h" 25#include "c++/clang-c.h"
26 26
27#define DEFINE_PRINT_FN(name, level) \ 27static int libbpf_perf_print(enum libbpf_print_level level __attribute__((unused)),
28static int libbpf_##name(const char *fmt, ...) \ 28 const char *fmt, va_list args)
29{ \ 29{
30 va_list args; \ 30 return veprintf(1, verbose, pr_fmt(fmt), args);
31 int ret; \
32 \
33 va_start(args, fmt); \
34 ret = veprintf(level, verbose, pr_fmt(fmt), args);\
35 va_end(args); \
36 return ret; \
37} 31}
38 32
39DEFINE_PRINT_FN(warning, 1)
40DEFINE_PRINT_FN(info, 1)
41DEFINE_PRINT_FN(debug, 1)
42
43struct bpf_prog_priv { 33struct bpf_prog_priv {
44 bool is_tp; 34 bool is_tp;
45 char *sys_name; 35 char *sys_name;
@@ -59,9 +49,7 @@ bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
59 struct bpf_object *obj; 49 struct bpf_object *obj;
60 50
61 if (!libbpf_initialized) { 51 if (!libbpf_initialized) {
62 libbpf_set_print(libbpf_warning, 52 libbpf_set_print(libbpf_perf_print);
63 libbpf_info,
64 libbpf_debug);
65 libbpf_initialized = true; 53 libbpf_initialized = true;
66 } 54 }
67 55
@@ -79,9 +67,7 @@ struct bpf_object *bpf__prepare_load(const char *filename, bool source)
79 struct bpf_object *obj; 67 struct bpf_object *obj;
80 68
81 if (!libbpf_initialized) { 69 if (!libbpf_initialized) {
82 libbpf_set_print(libbpf_warning, 70 libbpf_set_print(libbpf_perf_print);
83 libbpf_info,
84 libbpf_debug);
85 libbpf_initialized = true; 71 libbpf_initialized = true;
86 } 72 }
87 73
diff --git a/tools/testing/selftests/bpf/tcp_client.py b/tools/testing/selftests/bpf/tcp_client.py
index 7f8200a8702b..a53ed58528d6 100755
--- a/tools/testing/selftests/bpf/tcp_client.py
+++ b/tools/testing/selftests/bpf/tcp_client.py
@@ -30,12 +30,11 @@ def send(sock, s):
30 30
31 31
32serverPort = int(sys.argv[1]) 32serverPort = int(sys.argv[1])
33HostName = socket.gethostname()
34 33
35# create active socket 34# create active socket
36sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) 35sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
37try: 36try:
38 sock.connect((HostName, serverPort)) 37 sock.connect(('localhost', serverPort))
39except socket.error as e: 38except socket.error as e:
40 sys.exit(1) 39 sys.exit(1)
41 40
diff --git a/tools/testing/selftests/bpf/tcp_server.py b/tools/testing/selftests/bpf/tcp_server.py
index b39903fca4c8..0ca60d193bed 100755
--- a/tools/testing/selftests/bpf/tcp_server.py
+++ b/tools/testing/selftests/bpf/tcp_server.py
@@ -35,13 +35,10 @@ MAX_PORTS = 2
35serverPort = SERVER_PORT 35serverPort = SERVER_PORT
36serverSocket = None 36serverSocket = None
37 37
38HostName = socket.gethostname()
39
40# create passive socket 38# create passive socket
41serverSocket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM) 39serverSocket = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
42host = socket.gethostname()
43 40
44try: serverSocket.bind((host, 0)) 41try: serverSocket.bind(('localhost', 0))
45except socket.error as msg: 42except socket.error as msg:
46 print('bind fails: ' + str(msg)) 43 print('bind fails: ' + str(msg))
47 44
diff --git a/tools/testing/selftests/bpf/test_btf.c b/tools/testing/selftests/bpf/test_btf.c
index 179f1d8ec5bf..447acc34db94 100644
--- a/tools/testing/selftests/bpf/test_btf.c
+++ b/tools/testing/selftests/bpf/test_btf.c
@@ -52,18 +52,10 @@ static int count_result(int err)
52 return err; 52 return err;
53} 53}
54 54
55#define __printf(a, b) __attribute__((format(printf, a, b))) 55static int __base_pr(enum libbpf_print_level level __attribute__((unused)),
56 56 const char *format, va_list args)
57__printf(1, 2)
58static int __base_pr(const char *format, ...)
59{ 57{
60 va_list args; 58 return vfprintf(stderr, format, args);
61 int err;
62
63 va_start(args, format);
64 err = vfprintf(stderr, format, args);
65 va_end(args);
66 return err;
67} 59}
68 60
69#define BTF_INFO_ENC(kind, kind_flag, vlen) \ 61#define BTF_INFO_ENC(kind, kind_flag, vlen) \
@@ -78,12 +70,21 @@ static int __base_pr(const char *format, ...)
78 BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \ 70 BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_INT, 0, 0), sz), \
79 BTF_INT_ENC(encoding, bits_offset, bits) 71 BTF_INT_ENC(encoding, bits_offset, bits)
80 72
73#define BTF_FWD_ENC(name, kind_flag) \
74 BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_FWD, kind_flag, 0), 0)
75
81#define BTF_ARRAY_ENC(type, index_type, nr_elems) \ 76#define BTF_ARRAY_ENC(type, index_type, nr_elems) \
82 (type), (index_type), (nr_elems) 77 (type), (index_type), (nr_elems)
83#define BTF_TYPE_ARRAY_ENC(type, index_type, nr_elems) \ 78#define BTF_TYPE_ARRAY_ENC(type, index_type, nr_elems) \
84 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ARRAY, 0, 0), 0), \ 79 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_ARRAY, 0, 0), 0), \
85 BTF_ARRAY_ENC(type, index_type, nr_elems) 80 BTF_ARRAY_ENC(type, index_type, nr_elems)
86 81
82#define BTF_STRUCT_ENC(name, nr_elems, sz) \
83 BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_STRUCT, 0, nr_elems), sz)
84
85#define BTF_UNION_ENC(name, nr_elems, sz) \
86 BTF_TYPE_ENC(name, BTF_INFO_ENC(BTF_KIND_UNION, 0, nr_elems), sz)
87
87#define BTF_MEMBER_ENC(name, type, bits_offset) \ 88#define BTF_MEMBER_ENC(name, type, bits_offset) \
88 (name), (type), (bits_offset) 89 (name), (type), (bits_offset)
89#define BTF_ENUM_ENC(name, val) (name), (val) 90#define BTF_ENUM_ENC(name, val) (name), (val)
@@ -99,6 +100,12 @@ static int __base_pr(const char *format, ...)
99#define BTF_CONST_ENC(type) \ 100#define BTF_CONST_ENC(type) \
100 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), type) 101 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_CONST, 0, 0), type)
101 102
103#define BTF_VOLATILE_ENC(type) \
104 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_VOLATILE, 0, 0), type)
105
106#define BTF_RESTRICT_ENC(type) \
107 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_RESTRICT, 0, 0), type)
108
102#define BTF_FUNC_PROTO_ENC(ret_type, nargs) \ 109#define BTF_FUNC_PROTO_ENC(ret_type, nargs) \
103 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, nargs), ret_type) 110 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, nargs), ret_type)
104 111
@@ -111,6 +118,10 @@ static int __base_pr(const char *format, ...)
111#define BTF_END_RAW 0xdeadbeef 118#define BTF_END_RAW 0xdeadbeef
112#define NAME_TBD 0xdeadb33f 119#define NAME_TBD 0xdeadb33f
113 120
121#define NAME_NTH(N) (0xffff0000 | N)
122#define IS_NAME_NTH(X) ((X & 0xffff0000) == 0xffff0000)
123#define GET_NAME_NTH_IDX(X) (X & 0x0000ffff)
124
114#define MAX_NR_RAW_U32 1024 125#define MAX_NR_RAW_U32 1024
115#define BTF_LOG_BUF_SIZE 65535 126#define BTF_LOG_BUF_SIZE 65535
116 127
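
The NAME_NTH() helpers tag a raw u32 so the test harness can later substitute the offset of the Nth string in the test's string section. A tiny standalone demo of the encoding (macros copied from the hunk above):

#include <stdio.h>

#define NAME_NTH(N)         (0xffff0000 | N)
#define IS_NAME_NTH(X)      ((X & 0xffff0000) == 0xffff0000)
#define GET_NAME_NTH_IDX(X) (X & 0x0000ffff)

int main(void)
{
	unsigned int v = NAME_NTH(2);

	/* 0xffff0002: high half tags "this is the Nth string", low half is N */
	printf("encoded=0x%x is_nth=%d idx=%u\n",
	       v, IS_NAME_NTH(v), GET_NAME_NTH_IDX(v));
	return 0;
}
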
@@ -119,12 +130,14 @@ static struct args {
119 unsigned int file_test_num; 130 unsigned int file_test_num;
120 unsigned int get_info_test_num; 131 unsigned int get_info_test_num;
121 unsigned int info_raw_test_num; 132 unsigned int info_raw_test_num;
133 unsigned int dedup_test_num;
122 bool raw_test; 134 bool raw_test;
123 bool file_test; 135 bool file_test;
124 bool get_info_test; 136 bool get_info_test;
125 bool pprint_test; 137 bool pprint_test;
126 bool always_log; 138 bool always_log;
127 bool info_raw_test; 139 bool info_raw_test;
140 bool dedup_test;
128} args; 141} args;
129 142
130static char btf_log_buf[BTF_LOG_BUF_SIZE]; 143static char btf_log_buf[BTF_LOG_BUF_SIZE];
@@ -1965,7 +1978,7 @@ static struct btf_raw_test raw_tests[] = {
1965 /* void (*)(int a, unsigned int <bad_name_off>) */ 1978 /* void (*)(int a, unsigned int <bad_name_off>) */
1966 BTF_FUNC_PROTO_ENC(0, 2), /* [3] */ 1979 BTF_FUNC_PROTO_ENC(0, 2), /* [3] */
1967 BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1), 1980 BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
1968 BTF_FUNC_PROTO_ARG_ENC(0xffffffff, 2), 1981 BTF_FUNC_PROTO_ARG_ENC(0x0fffffff, 2),
1969 BTF_END_RAW, 1982 BTF_END_RAW,
1970 }, 1983 },
1971 .str_sec = "\0a", 1984 .str_sec = "\0a",
@@ -2835,11 +2848,13 @@ static void *btf_raw_create(const struct btf_header *hdr,
2835 const char **ret_next_str) 2848 const char **ret_next_str)
2836{ 2849{
2837 const char *next_str = str, *end_str = str + str_sec_size; 2850 const char *next_str = str, *end_str = str + str_sec_size;
2851 const char **strs_idx = NULL, **tmp_strs_idx;
2852 int strs_cap = 0, strs_cnt = 0, next_str_idx = 0;
2838 unsigned int size_needed, offset; 2853 unsigned int size_needed, offset;
2839 struct btf_header *ret_hdr; 2854 struct btf_header *ret_hdr;
2840 int i, type_sec_size; 2855 int i, type_sec_size, err = 0;
2841 uint32_t *ret_types; 2856 uint32_t *ret_types;
2842 void *raw_btf; 2857 void *raw_btf = NULL;
2843 2858
2844 type_sec_size = get_raw_sec_size(raw_types); 2859 type_sec_size = get_raw_sec_size(raw_types);
2845 if (CHECK(type_sec_size < 0, "Cannot get nr_raw_types")) 2860 if (CHECK(type_sec_size < 0, "Cannot get nr_raw_types"))
@@ -2854,17 +2869,44 @@ static void *btf_raw_create(const struct btf_header *hdr,
2854 memcpy(raw_btf, hdr, sizeof(*hdr)); 2869 memcpy(raw_btf, hdr, sizeof(*hdr));
2855 offset = sizeof(*hdr); 2870 offset = sizeof(*hdr);
2856 2871
2872 /* Index strings */
2873 while ((next_str = get_next_str(next_str, end_str))) {
2874 if (strs_cnt == strs_cap) {
2875 strs_cap += max(16, strs_cap / 2);
2876 tmp_strs_idx = realloc(strs_idx,
2877 sizeof(*strs_idx) * strs_cap);
2878 if (CHECK(!tmp_strs_idx,
2879 "Cannot allocate memory for strs_idx")) {
2880 err = -1;
2881 goto done;
2882 }
2883 strs_idx = tmp_strs_idx;
2884 }
2885 strs_idx[strs_cnt++] = next_str;
2886 next_str += strlen(next_str);
2887 }
2888
2857 /* Copy type section */ 2889 /* Copy type section */
2858 ret_types = raw_btf + offset; 2890 ret_types = raw_btf + offset;
2859 for (i = 0; i < type_sec_size / sizeof(raw_types[0]); i++) { 2891 for (i = 0; i < type_sec_size / sizeof(raw_types[0]); i++) {
2860 if (raw_types[i] == NAME_TBD) { 2892 if (raw_types[i] == NAME_TBD) {
2861 next_str = get_next_str(next_str, end_str); 2893 if (CHECK(next_str_idx == strs_cnt,
2862 if (CHECK(!next_str, "Error in getting next_str")) { 2894 "Error in getting next_str #%d",
2863 free(raw_btf); 2895 next_str_idx)) {
2864 return NULL; 2896 err = -1;
2897 goto done;
2865 } 2898 }
2866 ret_types[i] = next_str - str; 2899 ret_types[i] = strs_idx[next_str_idx++] - str;
2867 next_str += strlen(next_str); 2900 } else if (IS_NAME_NTH(raw_types[i])) {
2901 int idx = GET_NAME_NTH_IDX(raw_types[i]);
2902
2903 if (CHECK(idx <= 0 || idx > strs_cnt,
2904 "Error getting string #%d, strs_cnt:%d",
2905 idx, strs_cnt)) {
2906 err = -1;
2907 goto done;
2908 }
2909 ret_types[i] = strs_idx[idx-1] - str;
2868 } else { 2910 } else {
2869 ret_types[i] = raw_types[i]; 2911 ret_types[i] = raw_types[i];
2870 } 2912 }
@@ -2881,8 +2923,17 @@ static void *btf_raw_create(const struct btf_header *hdr,
2881 2923
2882 *btf_size = size_needed; 2924 *btf_size = size_needed;
2883 if (ret_next_str) 2925 if (ret_next_str)
2884 *ret_next_str = next_str; 2926 *ret_next_str =
2927 next_str_idx < strs_cnt ? strs_idx[next_str_idx] : NULL;
2885 2928
2929done:
2930 if (err) {
2931 if (raw_btf)
2932 free(raw_btf);
2933 if (strs_idx)
2934 free(strs_idx);
2935 return NULL;
2936 }
2886 return raw_btf; 2937 return raw_btf;
2887} 2938}
2888 2939
@@ -5551,20 +5602,450 @@ static int test_info_raw(void)
5551 return err; 5602 return err;
5552} 5603}
5553 5604
5605struct btf_raw_data {
5606 __u32 raw_types[MAX_NR_RAW_U32];
5607 const char *str_sec;
5608 __u32 str_sec_size;
5609};
5610
5611struct btf_dedup_test {
5612 const char *descr;
5613 struct btf_raw_data input;
5614 struct btf_raw_data expect;
5615 struct btf_dedup_opts opts;
5616};
5617
5618const struct btf_dedup_test dedup_tests[] = {
5619
5620{
5621 .descr = "dedup: unused strings filtering",
5622 .input = {
5623 .raw_types = {
5624 BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 32, 4),
5625 BTF_TYPE_INT_ENC(NAME_NTH(5), BTF_INT_SIGNED, 0, 64, 8),
5626 BTF_END_RAW,
5627 },
5628 BTF_STR_SEC("\0unused\0int\0foo\0bar\0long"),
5629 },
5630 .expect = {
5631 .raw_types = {
5632 BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
5633 BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 64, 8),
5634 BTF_END_RAW,
5635 },
5636 BTF_STR_SEC("\0int\0long"),
5637 },
5638 .opts = {
5639 .dont_resolve_fwds = false,
5640 },
5641},
5642{
5643 .descr = "dedup: strings deduplication",
5644 .input = {
5645 .raw_types = {
5646 BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
5647 BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 64, 8),
5648 BTF_TYPE_INT_ENC(NAME_NTH(3), BTF_INT_SIGNED, 0, 32, 4),
5649 BTF_TYPE_INT_ENC(NAME_NTH(4), BTF_INT_SIGNED, 0, 64, 8),
5650 BTF_TYPE_INT_ENC(NAME_NTH(5), BTF_INT_SIGNED, 0, 32, 4),
5651 BTF_END_RAW,
5652 },
5653 BTF_STR_SEC("\0int\0long int\0int\0long int\0int"),
5654 },
5655 .expect = {
5656 .raw_types = {
5657 BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
5658 BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 64, 8),
5659 BTF_END_RAW,
5660 },
5661 BTF_STR_SEC("\0int\0long int"),
5662 },
5663 .opts = {
5664 .dont_resolve_fwds = false,
5665 },
5666},
5667{
5668 .descr = "dedup: struct example #1",
5669 /*
5670 * struct s {
5671 * struct s *next;
5672 * const int *a;
5673 * int b[16];
5674 * int c;
5675 * }
5676 */
5677 .input = {
5678 .raw_types = {
5679 /* int */
5680 BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), /* [1] */
5681 /* int[16] */
5682 BTF_TYPE_ARRAY_ENC(1, 1, 16), /* [2] */
5683 /* struct s { */
5684 BTF_STRUCT_ENC(NAME_NTH(2), 4, 84), /* [3] */
5685 BTF_MEMBER_ENC(NAME_NTH(3), 4, 0), /* struct s *next; */
5686 BTF_MEMBER_ENC(NAME_NTH(4), 5, 64), /* const int *a; */
5687 BTF_MEMBER_ENC(NAME_NTH(5), 2, 128), /* int b[16]; */
5688 BTF_MEMBER_ENC(NAME_NTH(6), 1, 640), /* int c; */
5689 /* ptr -> [3] struct s */
5690 BTF_PTR_ENC(3), /* [4] */
5691 /* ptr -> [6] const int */
5692 BTF_PTR_ENC(6), /* [5] */
5693 /* const -> [1] int */
5694 BTF_CONST_ENC(1), /* [6] */
5695
5696 /* full copy of the above */
5697 BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4), /* [7] */
5698 BTF_TYPE_ARRAY_ENC(7, 7, 16), /* [8] */
5699 BTF_STRUCT_ENC(NAME_NTH(2), 4, 84), /* [9] */
5700 BTF_MEMBER_ENC(NAME_NTH(3), 10, 0),
5701 BTF_MEMBER_ENC(NAME_NTH(4), 11, 64),
5702 BTF_MEMBER_ENC(NAME_NTH(5), 8, 128),
5703 BTF_MEMBER_ENC(NAME_NTH(6), 7, 640),
5704 BTF_PTR_ENC(9), /* [10] */
5705 BTF_PTR_ENC(12), /* [11] */
5706 BTF_CONST_ENC(7), /* [12] */
5707 BTF_END_RAW,
5708 },
5709 BTF_STR_SEC("\0int\0s\0next\0a\0b\0c\0"),
5710 },
5711 .expect = {
5712 .raw_types = {
5713 /* int */
5714 BTF_TYPE_INT_ENC(NAME_NTH(4), BTF_INT_SIGNED, 0, 32, 4), /* [1] */
5715 /* int[16] */
5716 BTF_TYPE_ARRAY_ENC(1, 1, 16), /* [2] */
5717 /* struct s { */
5718 BTF_STRUCT_ENC(NAME_NTH(6), 4, 84), /* [3] */
5719 BTF_MEMBER_ENC(NAME_NTH(5), 4, 0), /* struct s *next; */
5720 BTF_MEMBER_ENC(NAME_NTH(1), 5, 64), /* const int *a; */
5721 BTF_MEMBER_ENC(NAME_NTH(2), 2, 128), /* int b[16]; */
5722 BTF_MEMBER_ENC(NAME_NTH(3), 1, 640), /* int c; */
5723 /* ptr -> [3] struct s */
5724 BTF_PTR_ENC(3), /* [4] */
5725 /* ptr -> [6] const int */
5726 BTF_PTR_ENC(6), /* [5] */
5727 /* const -> [1] int */
5728 BTF_CONST_ENC(1), /* [6] */
5729 BTF_END_RAW,
5730 },
5731 BTF_STR_SEC("\0a\0b\0c\0int\0next\0s"),
5732 },
5733 .opts = {
5734 .dont_resolve_fwds = false,
5735 },
5736},
5737{
5738 .descr = "dedup: all possible kinds (no duplicates)",
5739 .input = {
5740 .raw_types = {
5741 BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 8), /* [1] int */
5742 BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 2), 4), /* [2] enum */
5743 BTF_ENUM_ENC(NAME_TBD, 0),
5744 BTF_ENUM_ENC(NAME_TBD, 1),
5745 BTF_FWD_ENC(NAME_TBD, 1 /* union kind_flag */), /* [3] fwd */
5746 BTF_TYPE_ARRAY_ENC(2, 1, 7), /* [4] array */
5747 BTF_STRUCT_ENC(NAME_TBD, 1, 4), /* [5] struct */
5748 BTF_MEMBER_ENC(NAME_TBD, 1, 0),
5749 BTF_UNION_ENC(NAME_TBD, 1, 4), /* [6] union */
5750 BTF_MEMBER_ENC(NAME_TBD, 1, 0),
5751 BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [7] typedef */
5752 BTF_PTR_ENC(0), /* [8] ptr */
5753 BTF_CONST_ENC(8), /* [9] const */
5754 BTF_VOLATILE_ENC(8), /* [10] volatile */
5755 BTF_RESTRICT_ENC(8), /* [11] restrict */
5756 BTF_FUNC_PROTO_ENC(1, 2), /* [12] func_proto */
5757 BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
5758 BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8),
5759 BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */
5760 BTF_END_RAW,
5761 },
5762 BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M"),
5763 },
5764 .expect = {
5765 .raw_types = {
5766 BTF_TYPE_INT_ENC(NAME_TBD, BTF_INT_SIGNED, 0, 32, 8), /* [1] int */
5767 BTF_TYPE_ENC(NAME_TBD, BTF_INFO_ENC(BTF_KIND_ENUM, 0, 2), 4), /* [2] enum */
5768 BTF_ENUM_ENC(NAME_TBD, 0),
5769 BTF_ENUM_ENC(NAME_TBD, 1),
5770 BTF_FWD_ENC(NAME_TBD, 1 /* union kind_flag */), /* [3] fwd */
5771 BTF_TYPE_ARRAY_ENC(2, 1, 7), /* [4] array */
5772 BTF_STRUCT_ENC(NAME_TBD, 1, 4), /* [5] struct */
5773 BTF_MEMBER_ENC(NAME_TBD, 1, 0),
5774 BTF_UNION_ENC(NAME_TBD, 1, 4), /* [6] union */
5775 BTF_MEMBER_ENC(NAME_TBD, 1, 0),
5776 BTF_TYPEDEF_ENC(NAME_TBD, 1), /* [7] typedef */
5777 BTF_PTR_ENC(0), /* [8] ptr */
5778 BTF_CONST_ENC(8), /* [9] const */
5779 BTF_VOLATILE_ENC(8), /* [10] volatile */
5780 BTF_RESTRICT_ENC(8), /* [11] restrict */
5781 BTF_FUNC_PROTO_ENC(1, 2), /* [12] func_proto */
5782 BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 1),
5783 BTF_FUNC_PROTO_ARG_ENC(NAME_TBD, 8),
5784 BTF_FUNC_ENC(NAME_TBD, 12), /* [13] func */
5785 BTF_END_RAW,
5786 },
5787 BTF_STR_SEC("\0A\0B\0C\0D\0E\0F\0G\0H\0I\0J\0K\0L\0M"),
5788 },
5789 .opts = {
5790 .dont_resolve_fwds = false,
5791 },
5792},
5793{
5794 .descr = "dedup: no int duplicates",
5795 .input = {
5796 .raw_types = {
5797 BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 8),
5798 /* different name */
5799 BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 32, 8),
5800 /* different encoding */
5801 BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_CHAR, 0, 32, 8),
5802 BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_BOOL, 0, 32, 8),
5803 /* different bit offset */
5804 BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 8, 32, 8),
5805 /* different bit size */
5806 BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 27, 8),
5807 /* different byte size */
5808 BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
5809 BTF_END_RAW,
5810 },
5811 BTF_STR_SEC("\0int\0some other int"),
5812 },
5813 .expect = {
5814 .raw_types = {
5815 BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 8),
5816 /* different name */
5817 BTF_TYPE_INT_ENC(NAME_NTH(2), BTF_INT_SIGNED, 0, 32, 8),
5818 /* different encoding */
5819 BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_CHAR, 0, 32, 8),
5820 BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_BOOL, 0, 32, 8),
5821 /* different bit offset */
5822 BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 8, 32, 8),
5823 /* different bit size */
5824 BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 27, 8),
5825 /* different byte size */
5826 BTF_TYPE_INT_ENC(NAME_NTH(1), BTF_INT_SIGNED, 0, 32, 4),
5827 BTF_END_RAW,
5828 },
5829 BTF_STR_SEC("\0int\0some other int"),
5830 },
5831 .opts = {
5832 .dont_resolve_fwds = false,
5833 },
5834},
5835
5836};
5837
5838static int btf_type_size(const struct btf_type *t)
5839{
5840 int base_size = sizeof(struct btf_type);
5841 __u16 vlen = BTF_INFO_VLEN(t->info);
5842 __u16 kind = BTF_INFO_KIND(t->info);
5843
5844 switch (kind) {
5845 case BTF_KIND_FWD:
5846 case BTF_KIND_CONST:
5847 case BTF_KIND_VOLATILE:
5848 case BTF_KIND_RESTRICT:
5849 case BTF_KIND_PTR:
5850 case BTF_KIND_TYPEDEF:
5851 case BTF_KIND_FUNC:
5852 return base_size;
5853 case BTF_KIND_INT:
5854 return base_size + sizeof(__u32);
5855 case BTF_KIND_ENUM:
5856 return base_size + vlen * sizeof(struct btf_enum);
5857 case BTF_KIND_ARRAY:
5858 return base_size + sizeof(struct btf_array);
5859 case BTF_KIND_STRUCT:
5860 case BTF_KIND_UNION:
5861 return base_size + vlen * sizeof(struct btf_member);
5862 case BTF_KIND_FUNC_PROTO:
5863 return base_size + vlen * sizeof(struct btf_param);
5864 default:
5865 fprintf(stderr, "Unsupported BTF_KIND:%u\n", kind);
5866 return -EINVAL;
5867 }
5868}
5869
5870static void dump_btf_strings(const char *strs, __u32 len)
5871{
5872 const char *cur = strs;
5873 int i = 0;
5874
5875 while (cur < strs + len) {
5876 fprintf(stderr, "string #%d: '%s'\n", i, cur);
5877 cur += strlen(cur) + 1;
5878 i++;
5879 }
5880}
5881
5882static int do_test_dedup(unsigned int test_num)
5883{
5884 const struct btf_dedup_test *test = &dedup_tests[test_num - 1];
5885 int err = 0, i;
5886 __u32 test_nr_types, expect_nr_types, test_str_len, expect_str_len;
5887 void *raw_btf;
5888 unsigned int raw_btf_size;
5889 struct btf *test_btf = NULL, *expect_btf = NULL;
5890 const char *ret_test_next_str, *ret_expect_next_str;
5891 const char *test_strs, *expect_strs;
5892 const char *test_str_cur, *test_str_end;
5893 const char *expect_str_cur, *expect_str_end;
5894
5895 fprintf(stderr, "BTF dedup test[%u] (%s):", test_num, test->descr);
5896
5897 raw_btf = btf_raw_create(&hdr_tmpl, test->input.raw_types,
5898 test->input.str_sec, test->input.str_sec_size,
5899 &raw_btf_size, &ret_test_next_str);
5900 if (!raw_btf)
5901 return -1;
5902 test_btf = btf__new((__u8 *)raw_btf, raw_btf_size);
5903 free(raw_btf);
5904 if (CHECK(IS_ERR(test_btf), "invalid test_btf errno:%ld",
5905 PTR_ERR(test_btf))) {
5906 err = -1;
5907 goto done;
5908 }
5909
5910 raw_btf = btf_raw_create(&hdr_tmpl, test->expect.raw_types,
5911 test->expect.str_sec,
5912 test->expect.str_sec_size,
5913 &raw_btf_size, &ret_expect_next_str);
5914 if (!raw_btf)
5915 return -1;
5916 expect_btf = btf__new((__u8 *)raw_btf, raw_btf_size);
5917 free(raw_btf);
5918 if (CHECK(IS_ERR(expect_btf), "invalid expect_btf errno:%ld",
5919 PTR_ERR(expect_btf))) {
5920 err = -1;
5921 goto done;
5922 }
5923
5924 err = btf__dedup(test_btf, NULL, &test->opts);
5925 if (CHECK(err, "btf_dedup failed errno:%d", err)) {
5926 err = -1;
5927 goto done;
5928 }
5929
5930 btf__get_strings(test_btf, &test_strs, &test_str_len);
5931 btf__get_strings(expect_btf, &expect_strs, &expect_str_len);
5932 if (CHECK(test_str_len != expect_str_len,
5933 "test_str_len:%u != expect_str_len:%u",
5934 test_str_len, expect_str_len)) {
5935 fprintf(stderr, "\ntest strings:\n");
5936 dump_btf_strings(test_strs, test_str_len);
5937 fprintf(stderr, "\nexpected strings:\n");
5938 dump_btf_strings(expect_strs, expect_str_len);
5939 err = -1;
5940 goto done;
5941 }
5942
5943 test_str_cur = test_strs;
5944 test_str_end = test_strs + test_str_len;
5945 expect_str_cur = expect_strs;
5946 expect_str_end = expect_strs + expect_str_len;
5947 while (test_str_cur < test_str_end && expect_str_cur < expect_str_end) {
5948 size_t test_len, expect_len;
5949
5950 test_len = strlen(test_str_cur);
5951 expect_len = strlen(expect_str_cur);
5952 if (CHECK(test_len != expect_len,
5953 "test_len:%zu != expect_len:%zu "
5954 "(test_str:%s, expect_str:%s)",
5955 test_len, expect_len, test_str_cur, expect_str_cur)) {
5956 err = -1;
5957 goto done;
5958 }
5959 if (CHECK(strcmp(test_str_cur, expect_str_cur),
5960 "test_str:%s != expect_str:%s",
5961 test_str_cur, expect_str_cur)) {
5962 err = -1;
5963 goto done;
5964 }
5965 test_str_cur += test_len + 1;
5966 expect_str_cur += expect_len + 1;
5967 }
5968 if (CHECK(test_str_cur != test_str_end,
5969 "test_str_cur:%p != test_str_end:%p",
5970 test_str_cur, test_str_end)) {
5971 err = -1;
5972 goto done;
5973 }
5974
5975 test_nr_types = btf__get_nr_types(test_btf);
5976 expect_nr_types = btf__get_nr_types(expect_btf);
5977 if (CHECK(test_nr_types != expect_nr_types,
5978 "test_nr_types:%u != expect_nr_types:%u",
5979 test_nr_types, expect_nr_types)) {
5980 err = -1;
5981 goto done;
5982 }
5983
5984 for (i = 1; i <= test_nr_types; i++) {
5985 const struct btf_type *test_type, *expect_type;
5986 int test_size, expect_size;
5987
5988 test_type = btf__type_by_id(test_btf, i);
5989 expect_type = btf__type_by_id(expect_btf, i);
5990 test_size = btf_type_size(test_type);
5991 expect_size = btf_type_size(expect_type);
5992
5993 if (CHECK(test_size != expect_size,
5994 "type #%d: test_size:%d != expect_size:%u",
5995 i, test_size, expect_size)) {
5996 err = -1;
5997 goto done;
5998 }
5999 if (CHECK(memcmp((void *)test_type,
6000 (void *)expect_type,
6001 test_size),
6002 "type #%d: contents differ", i)) {
6003 err = -1;
6004 goto done;
6005 }
6006 }
6007
6008done:
6009 if (!err)
6010 fprintf(stderr, "OK");
6011 if (!IS_ERR(test_btf))
6012 btf__free(test_btf);
6013 if (!IS_ERR(expect_btf))
6014 btf__free(expect_btf);
6015
6016 return err;
6017}
6018
6019static int test_dedup(void)
6020{
6021 unsigned int i;
6022 int err = 0;
6023
6024 if (args.dedup_test_num)
6025 return count_result(do_test_dedup(args.dedup_test_num));
6026
6027 for (i = 1; i <= ARRAY_SIZE(dedup_tests); i++)
6028 err |= count_result(do_test_dedup(i));
6029
6030 return err;
6031}
6032
5554static void usage(const char *cmd) 6033static void usage(const char *cmd)
5555{ 6034{
5556 fprintf(stderr, "Usage: %s [-l] [[-r btf_raw_test_num (1 - %zu)] |\n" 6035 fprintf(stderr, "Usage: %s [-l] [[-r btf_raw_test_num (1 - %zu)] |\n"
5557 "\t[-g btf_get_info_test_num (1 - %zu)] |\n" 6036 "\t[-g btf_get_info_test_num (1 - %zu)] |\n"
5558 "\t[-f btf_file_test_num (1 - %zu)] |\n" 6037 "\t[-f btf_file_test_num (1 - %zu)] |\n"
5559 "\t[-k btf_prog_info_raw_test_num (1 - %zu)] |\n" 6038 "\t[-k btf_prog_info_raw_test_num (1 - %zu)] |\n"
5560 "\t[-p (pretty print test)]]\n", 6039 "\t[-p (pretty print test)] |\n"
6040 "\t[-d btf_dedup_test_num (1 - %zu)]]\n",
5561 cmd, ARRAY_SIZE(raw_tests), ARRAY_SIZE(get_info_tests), 6041 cmd, ARRAY_SIZE(raw_tests), ARRAY_SIZE(get_info_tests),
5562 ARRAY_SIZE(file_tests), ARRAY_SIZE(info_raw_tests)); 6042 ARRAY_SIZE(file_tests), ARRAY_SIZE(info_raw_tests),
6043 ARRAY_SIZE(dedup_tests));
5563} 6044}
5564 6045
5565static int parse_args(int argc, char **argv) 6046static int parse_args(int argc, char **argv)
5566{ 6047{
5567 const char *optstr = "lpk:f:r:g:"; 6048 const char *optstr = "hlpk:f:r:g:d:";
5568 int opt; 6049 int opt;
5569 6050
5570 while ((opt = getopt(argc, argv, optstr)) != -1) { 6051 while ((opt = getopt(argc, argv, optstr)) != -1) {
@@ -5591,12 +6072,16 @@ static int parse_args(int argc, char **argv)
5591 args.info_raw_test_num = atoi(optarg); 6072 args.info_raw_test_num = atoi(optarg);
5592 args.info_raw_test = true; 6073 args.info_raw_test = true;
5593 break; 6074 break;
6075 case 'd':
6076 args.dedup_test_num = atoi(optarg);
6077 args.dedup_test = true;
6078 break;
5594 case 'h': 6079 case 'h':
5595 usage(argv[0]); 6080 usage(argv[0]);
5596 exit(0); 6081 exit(0);
5597 default: 6082 default:
5598 usage(argv[0]); 6083 usage(argv[0]);
5599 return -1; 6084 return -1;
5600 } 6085 }
5601 } 6086 }
5602 6087
@@ -5632,6 +6117,14 @@ static int parse_args(int argc, char **argv)
5632 return -1; 6117 return -1;
5633 } 6118 }
5634 6119
6120 if (args.dedup_test_num &&
6121 (args.dedup_test_num < 1 ||
6122 args.dedup_test_num > ARRAY_SIZE(dedup_tests))) {
6123 fprintf(stderr, "BTF dedup test number must be [1 - %zu]\n",
6124 ARRAY_SIZE(dedup_tests));
6125 return -1;
6126 }
6127
5635 return 0; 6128 return 0;
5636} 6129}
5637 6130
@@ -5650,7 +6143,7 @@ int main(int argc, char **argv)
5650 return err; 6143 return err;
5651 6144
5652 if (args.always_log) 6145 if (args.always_log)
5653 libbpf_set_print(__base_pr, __base_pr, __base_pr); 6146 libbpf_set_print(__base_pr);
5654 6147
5655 if (args.raw_test) 6148 if (args.raw_test)
5656 err |= test_raw(); 6149 err |= test_raw();
@@ -5667,14 +6160,18 @@ int main(int argc, char **argv)
5667 if (args.info_raw_test) 6160 if (args.info_raw_test)
5668 err |= test_info_raw(); 6161 err |= test_info_raw();
5669 6162
6163 if (args.dedup_test)
6164 err |= test_dedup();
6165
5670 if (args.raw_test || args.get_info_test || args.file_test || 6166 if (args.raw_test || args.get_info_test || args.file_test ||
5671 args.pprint_test || args.info_raw_test) 6167 args.pprint_test || args.info_raw_test || args.dedup_test)
5672 goto done; 6168 goto done;
5673 6169
5674 err |= test_raw(); 6170 err |= test_raw();
5675 err |= test_get_info(); 6171 err |= test_get_info();
5676 err |= test_file(); 6172 err |= test_file();
5677 err |= test_info_raw(); 6173 err |= test_info_raw();
6174 err |= test_dedup();
5678 6175
5679done: 6176done:
5680 print_summary(); 6177 print_summary();
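
The test_btf.c hunks above drive the new btf__dedup() API (added to tools/lib/bpf/btf.h elsewhere in this series). As a quick orientation aid, here is a minimal sketch, not part of the patch, of how that API can be called from a standalone program; it assumes the updated libbpf headers are installed under a bpf/ include prefix and that the caller already holds a raw .BTF blob in memory.

#include <stdio.h>
#include <bpf/btf.h>     /* btf__new, btf__dedup, btf__get_nr_types, btf__free */
#include <bpf/libbpf.h>  /* libbpf_get_error */

static int dedup_raw_btf(void *raw_btf, __u32 raw_btf_size)
{
	struct btf_dedup_opts opts = { .dont_resolve_fwds = false };
	struct btf *btf;
	int err;

	btf = btf__new((__u8 *)raw_btf, raw_btf_size);
	if (libbpf_get_error(btf))
		return -1;

	/* second argument is optional .BTF.ext data; none here */
	err = btf__dedup(btf, NULL, &opts);
	if (!err)
		printf("deduplicated down to %u types\n",
		       btf__get_nr_types(btf));

	btf__free(btf);
	return err;
}

With the new -d option added to the usage string above, individual table entries can also be run selectively, e.g. ./test_btf -d 1.
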
diff --git a/tools/testing/selftests/bpf/test_libbpf_open.c b/tools/testing/selftests/bpf/test_libbpf_open.c
index 8fcd1c076add..1909ecf4d999 100644
--- a/tools/testing/selftests/bpf/test_libbpf_open.c
+++ b/tools/testing/selftests/bpf/test_libbpf_open.c
@@ -34,23 +34,16 @@ static void usage(char *argv[])
34 printf("\n"); 34 printf("\n");
35} 35}
36 36
37#define DEFINE_PRINT_FN(name, enabled) \ 37static bool debug = 0;
38static int libbpf_##name(const char *fmt, ...) \ 38static int libbpf_debug_print(enum libbpf_print_level level,
39{ \ 39 const char *fmt, va_list args)
40 va_list args; \ 40{
41 int ret; \ 41 if (level == LIBBPF_DEBUG && !debug)
42 \ 42 return 0;
43 va_start(args, fmt); \ 43
44 if (enabled) { \ 44 fprintf(stderr, "[%d] ", level);
45 fprintf(stderr, "[" #name "] "); \ 45 return vfprintf(stderr, fmt, args);
46 ret = vfprintf(stderr, fmt, args); \
47 } \
48 va_end(args); \
49 return ret; \
50} 46}
51DEFINE_PRINT_FN(warning, 1)
52DEFINE_PRINT_FN(info, 1)
53DEFINE_PRINT_FN(debug, 1)
54 47
55#define EXIT_FAIL_LIBBPF EXIT_FAILURE 48#define EXIT_FAIL_LIBBPF EXIT_FAILURE
56#define EXIT_FAIL_OPTION 2 49#define EXIT_FAIL_OPTION 2
@@ -120,15 +113,14 @@ int main(int argc, char **argv)
120 int longindex = 0; 113 int longindex = 0;
121 int opt; 114 int opt;
122 115
123 libbpf_set_print(libbpf_warning, libbpf_info, NULL); 116 libbpf_set_print(libbpf_debug_print);
124 117
125 /* Parse commands line args */ 118 /* Parse commands line args */
126 while ((opt = getopt_long(argc, argv, "hDq", 119 while ((opt = getopt_long(argc, argv, "hDq",
127 long_options, &longindex)) != -1) { 120 long_options, &longindex)) != -1) {
128 switch (opt) { 121 switch (opt) {
129 case 'D': 122 case 'D':
130 libbpf_set_print(libbpf_warning, libbpf_info, 123 debug = 1;
131 libbpf_debug);
132 break; 124 break;
133 case 'q': /* Use in scripting mode */ 125 case 'q': /* Use in scripting mode */
134 verbose = 0; 126 verbose = 0;
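
The conversion above replaces the old trio of per-level print callbacks with a single one taking an explicit level. For context, a minimal sketch (not from this patch) of how a caller is expected to use the new API; the "drop debug unless asked" policy is only an example, and the bpf/ include prefix is an assumption.

#include <stdarg.h>
#include <stdio.h>
#include <bpf/libbpf.h>

static int my_print(enum libbpf_print_level level,
		    const char *fmt, va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;                   /* drop debug output */
	return vfprintf(stderr, fmt, args); /* warnings and info to stderr */
}

int main(void)
{
	libbpf_set_print(my_print);  /* one callback instead of warn/info/debug */
	/* ... bpf_object__open()/load() as usual ... */
	return 0;
}
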
diff --git a/tools/testing/selftests/bpf/test_maps.c b/tools/testing/selftests/bpf/test_maps.c
index e7798dd97f4b..3c627771f965 100644
--- a/tools/testing/selftests/bpf/test_maps.c
+++ b/tools/testing/selftests/bpf/test_maps.c
@@ -45,7 +45,7 @@ static int map_flags;
45 } \ 45 } \
46}) 46})
47 47
48static void test_hashmap(int task, void *data) 48static void test_hashmap(unsigned int task, void *data)
49{ 49{
50 long long key, next_key, first_key, value; 50 long long key, next_key, first_key, value;
51 int fd; 51 int fd;
@@ -135,7 +135,7 @@ static void test_hashmap(int task, void *data)
135 close(fd); 135 close(fd);
136} 136}
137 137
138static void test_hashmap_sizes(int task, void *data) 138static void test_hashmap_sizes(unsigned int task, void *data)
139{ 139{
140 int fd, i, j; 140 int fd, i, j;
141 141
@@ -155,7 +155,7 @@ static void test_hashmap_sizes(int task, void *data)
155 } 155 }
156} 156}
157 157
158static void test_hashmap_percpu(int task, void *data) 158static void test_hashmap_percpu(unsigned int task, void *data)
159{ 159{
160 unsigned int nr_cpus = bpf_num_possible_cpus(); 160 unsigned int nr_cpus = bpf_num_possible_cpus();
161 BPF_DECLARE_PERCPU(long, value); 161 BPF_DECLARE_PERCPU(long, value);
@@ -282,7 +282,7 @@ static int helper_fill_hashmap(int max_entries)
282 return fd; 282 return fd;
283} 283}
284 284
285static void test_hashmap_walk(int task, void *data) 285static void test_hashmap_walk(unsigned int task, void *data)
286{ 286{
287 int fd, i, max_entries = 1000; 287 int fd, i, max_entries = 1000;
288 long long key, value, next_key; 288 long long key, value, next_key;
@@ -353,7 +353,7 @@ static void test_hashmap_zero_seed(void)
353 close(second); 353 close(second);
354} 354}
355 355
356static void test_arraymap(int task, void *data) 356static void test_arraymap(unsigned int task, void *data)
357{ 357{
358 int key, next_key, fd; 358 int key, next_key, fd;
359 long long value; 359 long long value;
@@ -408,7 +408,7 @@ static void test_arraymap(int task, void *data)
408 close(fd); 408 close(fd);
409} 409}
410 410
411static void test_arraymap_percpu(int task, void *data) 411static void test_arraymap_percpu(unsigned int task, void *data)
412{ 412{
413 unsigned int nr_cpus = bpf_num_possible_cpus(); 413 unsigned int nr_cpus = bpf_num_possible_cpus();
414 BPF_DECLARE_PERCPU(long, values); 414 BPF_DECLARE_PERCPU(long, values);
@@ -504,7 +504,7 @@ static void test_arraymap_percpu_many_keys(void)
504 close(fd); 504 close(fd);
505} 505}
506 506
507static void test_devmap(int task, void *data) 507static void test_devmap(unsigned int task, void *data)
508{ 508{
509 int fd; 509 int fd;
510 __u32 key, value; 510 __u32 key, value;
@@ -519,7 +519,7 @@ static void test_devmap(int task, void *data)
519 close(fd); 519 close(fd);
520} 520}
521 521
522static void test_queuemap(int task, void *data) 522static void test_queuemap(unsigned int task, void *data)
523{ 523{
524 const int MAP_SIZE = 32; 524 const int MAP_SIZE = 32;
525 __u32 vals[MAP_SIZE + MAP_SIZE/2], val; 525 __u32 vals[MAP_SIZE + MAP_SIZE/2], val;
@@ -577,7 +577,7 @@ static void test_queuemap(int task, void *data)
577 close(fd); 577 close(fd);
578} 578}
579 579
580static void test_stackmap(int task, void *data) 580static void test_stackmap(unsigned int task, void *data)
581{ 581{
582 const int MAP_SIZE = 32; 582 const int MAP_SIZE = 32;
583 __u32 vals[MAP_SIZE + MAP_SIZE/2], val; 583 __u32 vals[MAP_SIZE + MAP_SIZE/2], val;
@@ -642,7 +642,7 @@ static void test_stackmap(int task, void *data)
642#define SOCKMAP_PARSE_PROG "./sockmap_parse_prog.o" 642#define SOCKMAP_PARSE_PROG "./sockmap_parse_prog.o"
643#define SOCKMAP_VERDICT_PROG "./sockmap_verdict_prog.o" 643#define SOCKMAP_VERDICT_PROG "./sockmap_verdict_prog.o"
644#define SOCKMAP_TCP_MSG_PROG "./sockmap_tcp_msg_prog.o" 644#define SOCKMAP_TCP_MSG_PROG "./sockmap_tcp_msg_prog.o"
645static void test_sockmap(int tasks, void *data) 645static void test_sockmap(unsigned int tasks, void *data)
646{ 646{
647 struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_msg, *bpf_map_break; 647 struct bpf_map *bpf_map_rx, *bpf_map_tx, *bpf_map_msg, *bpf_map_break;
648 int map_fd_msg = 0, map_fd_rx = 0, map_fd_tx = 0, map_fd_break; 648 int map_fd_msg = 0, map_fd_rx = 0, map_fd_tx = 0, map_fd_break;
@@ -1268,10 +1268,11 @@ static void test_map_large(void)
1268} 1268}
1269 1269
1270#define run_parallel(N, FN, DATA) \ 1270#define run_parallel(N, FN, DATA) \
1271 printf("Fork %d tasks to '" #FN "'\n", N); \ 1271 printf("Fork %u tasks to '" #FN "'\n", N); \
1272 __run_parallel(N, FN, DATA) 1272 __run_parallel(N, FN, DATA)
1273 1273
1274static void __run_parallel(int tasks, void (*fn)(int task, void *data), 1274static void __run_parallel(unsigned int tasks,
1275 void (*fn)(unsigned int task, void *data),
1275 void *data) 1276 void *data)
1276{ 1277{
1277 pid_t pid[tasks]; 1278 pid_t pid[tasks];
@@ -1312,7 +1313,7 @@ static void test_map_stress(void)
1312#define DO_UPDATE 1 1313#define DO_UPDATE 1
1313#define DO_DELETE 0 1314#define DO_DELETE 0
1314 1315
1315static void test_update_delete(int fn, void *data) 1316static void test_update_delete(unsigned int fn, void *data)
1316{ 1317{
1317 int do_update = ((int *)data)[1]; 1318 int do_update = ((int *)data)[1];
1318 int fd = ((int *)data)[0]; 1319 int fd = ((int *)data)[0];
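
The test_maps.c changes above only widen the per-task callback parameter from int to unsigned int so the callbacks match the __run_parallel() prototype and gcc 8 stops warning about the mismatch. The self-contained sketch below shows the same pattern with hypothetical names (run_all, worker); it is for illustration and is not taken from the selftest.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>

/* same shape as the updated __run_parallel() in test_maps.c */
static void run_all(unsigned int tasks,
		    void (*fn)(unsigned int task, void *data), void *data)
{
	pid_t pid[tasks];
	unsigned int i;

	for (i = 0; i < tasks; i++) {
		pid[i] = fork();
		if (pid[i] < 0)
			exit(1);
		if (pid[i] == 0) {
			fn(i, data);   /* child runs the callback */
			exit(0);
		}
	}
	for (i = 0; i < tasks; i++) {
		int status;

		waitpid(pid[i], &status, 0);
	}
}

static void worker(unsigned int task, void *data)
{
	(void)data;
	printf("worker %u\n", task);
}

int main(void)
{
	run_all(4, worker, NULL);
	return 0;
}
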
diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py
index d59642e70f56..84bea3985d64 100755
--- a/tools/testing/selftests/bpf/test_offload.py
+++ b/tools/testing/selftests/bpf/test_offload.py
@@ -23,6 +23,7 @@ import string
23import struct 23import struct
24import subprocess 24import subprocess
25import time 25import time
26import traceback
26 27
27logfile = None 28logfile = None
28log_level = 1 29log_level = 1
@@ -78,7 +79,9 @@ def fail(cond, msg):
78 if not cond: 79 if not cond:
79 return 80 return
80 print("FAIL: " + msg) 81 print("FAIL: " + msg)
81 log("FAIL: " + msg, "", level=1) 82 tb = "".join(traceback.extract_stack().format())
83 print(tb)
84 log("FAIL: " + msg, tb, level=1)
82 os.sys.exit(1) 85 os.sys.exit(1)
83 86
84def start_test(msg): 87def start_test(msg):
@@ -589,6 +592,15 @@ def check_verifier_log(output, reference):
589 return 592 return
590 fail(True, "Missing or incorrect message from netdevsim in verifier log") 593 fail(True, "Missing or incorrect message from netdevsim in verifier log")
591 594
595def check_multi_basic(two_xdps):
596 fail(two_xdps["mode"] != 4, "Bad mode reported with multiple programs")
597 fail("prog" in two_xdps, "Base program reported in multi program mode")
598 fail(len(two_xdps["attached"]) != 2,
599 "Wrong attached program count with two programs")
600 fail(two_xdps["attached"][0]["prog"]["id"] ==
601 two_xdps["attached"][1]["prog"]["id"],
602 "Offloaded and other programs have the same id")
603
592def test_spurios_extack(sim, obj, skip_hw, needle): 604def test_spurios_extack(sim, obj, skip_hw, needle):
593 res = sim.cls_bpf_add_filter(obj, prio=1, handle=1, skip_hw=skip_hw, 605 res = sim.cls_bpf_add_filter(obj, prio=1, handle=1, skip_hw=skip_hw,
594 include_stderr=True) 606 include_stderr=True)
@@ -600,6 +612,67 @@ def test_spurios_extack(sim, obj, skip_hw, needle):
600 include_stderr=True) 612 include_stderr=True)
601 check_no_extack(res, needle) 613 check_no_extack(res, needle)
602 614
615def test_multi_prog(sim, obj, modename, modeid):
616 start_test("Test multi-attachment XDP - %s + offload..." %
617 (modename or "default", ))
618 sim.set_xdp(obj, "offload")
619 xdp = sim.ip_link_show(xdp=True)["xdp"]
620 offloaded = sim.dfs_read("bpf_offloaded_id")
621 fail("prog" not in xdp, "Base program not reported in single program mode")
622 fail(len(xdp["attached"]) != 1,
623 "Wrong attached program count with one program")
624
625 sim.set_xdp(obj, modename)
626 two_xdps = sim.ip_link_show(xdp=True)["xdp"]
627
628 fail(xdp["attached"][0] not in two_xdps["attached"],
629 "Offload program not reported after other activated")
630 check_multi_basic(two_xdps)
631
632 offloaded2 = sim.dfs_read("bpf_offloaded_id")
633 fail(offloaded != offloaded2,
634 "Offload ID changed after loading other program")
635
636 start_test("Test multi-attachment XDP - replace...")
637 ret, _, err = sim.set_xdp(obj, "offload", fail=False, include_stderr=True)
638 fail(ret == 0, "Replaced one of programs without -force")
639 check_extack(err, "XDP program already attached.", args)
640
641 if modename == "" or modename == "drv":
642 othermode = "" if modename == "drv" else "drv"
643 start_test("Test multi-attachment XDP - detach...")
644 ret, _, err = sim.unset_xdp(othermode, force=True,
645 fail=False, include_stderr=True)
646 fail(ret == 0, "Removed program with a bad mode")
647 check_extack(err, "program loaded with different flags.", args)
648
649 sim.unset_xdp("offload")
650 xdp = sim.ip_link_show(xdp=True)["xdp"]
651 offloaded = sim.dfs_read("bpf_offloaded_id")
652
653 fail(xdp["mode"] != modeid, "Bad mode reported after multiple programs")
654 fail("prog" not in xdp,
655 "Base program not reported after multi program mode")
656 fail(xdp["attached"][0] not in two_xdps["attached"],
657 "Offload program not reported after other activated")
658 fail(len(xdp["attached"]) != 1,
659 "Wrong attached program count with remaining programs")
660 fail(offloaded != "0", "Offload ID reported with only other program left")
661
662 start_test("Test multi-attachment XDP - reattach...")
663 sim.set_xdp(obj, "offload")
664 two_xdps = sim.ip_link_show(xdp=True)["xdp"]
665
666 fail(xdp["attached"][0] not in two_xdps["attached"],
667 "Other program not reported after offload activated")
668 check_multi_basic(two_xdps)
669
670 start_test("Test multi-attachment XDP - device remove...")
671 sim.remove()
672
673 sim = NetdevSim()
674 sim.set_ethtool_tc_offloads(True)
675 return sim
603 676
604# Parse command line 677# Parse command line
605parser = argparse.ArgumentParser() 678parser = argparse.ArgumentParser()
@@ -842,7 +915,9 @@ try:
842 ret, _, err = sim.set_xdp(obj, "generic", force=True, 915 ret, _, err = sim.set_xdp(obj, "generic", force=True,
843 fail=False, include_stderr=True) 916 fail=False, include_stderr=True)
844 fail(ret == 0, "Replaced XDP program with a program in different mode") 917 fail(ret == 0, "Replaced XDP program with a program in different mode")
845 fail(err.count("File exists") != 1, "Replaced driver XDP with generic") 918 check_extack(err,
919 "native and generic XDP can't be active at the same time.",
920 args)
846 ret, _, err = sim.set_xdp(obj, "", force=True, 921 ret, _, err = sim.set_xdp(obj, "", force=True,
847 fail=False, include_stderr=True) 922 fail=False, include_stderr=True)
848 fail(ret == 0, "Replaced XDP program with a program in different mode") 923 fail(ret == 0, "Replaced XDP program with a program in different mode")
@@ -931,59 +1006,9 @@ try:
931 rm(pin_file) 1006 rm(pin_file)
932 bpftool_prog_list_wait(expected=0) 1007 bpftool_prog_list_wait(expected=0)
933 1008
934 start_test("Test multi-attachment XDP - attach...") 1009 sim = test_multi_prog(sim, obj, "", 1)
935 sim.set_xdp(obj, "offload") 1010 sim = test_multi_prog(sim, obj, "drv", 1)
936 xdp = sim.ip_link_show(xdp=True)["xdp"] 1011 sim = test_multi_prog(sim, obj, "generic", 2)
937 offloaded = sim.dfs_read("bpf_offloaded_id")
938 fail("prog" not in xdp, "Base program not reported in single program mode")
939 fail(len(ipl["xdp"]["attached"]) != 1,
940 "Wrong attached program count with one program")
941
942 sim.set_xdp(obj, "")
943 two_xdps = sim.ip_link_show(xdp=True)["xdp"]
944 offloaded2 = sim.dfs_read("bpf_offloaded_id")
945
946 fail(two_xdps["mode"] != 4, "Bad mode reported with multiple programs")
947 fail("prog" in two_xdps, "Base program reported in multi program mode")
948 fail(xdp["attached"][0] not in two_xdps["attached"],
949 "Offload program not reported after driver activated")
950 fail(len(two_xdps["attached"]) != 2,
951 "Wrong attached program count with two programs")
952 fail(two_xdps["attached"][0]["prog"]["id"] ==
953 two_xdps["attached"][1]["prog"]["id"],
954 "offloaded and drv programs have the same id")
955 fail(offloaded != offloaded2,
956 "offload ID changed after loading driver program")
957
958 start_test("Test multi-attachment XDP - replace...")
959 ret, _, err = sim.set_xdp(obj, "offload", fail=False, include_stderr=True)
960 fail(err.count("busy") != 1, "Replaced one of programs without -force")
961
962 start_test("Test multi-attachment XDP - detach...")
963 ret, _, err = sim.unset_xdp("drv", force=True,
964 fail=False, include_stderr=True)
965 fail(ret == 0, "Removed program with a bad mode")
966 check_extack(err, "program loaded with different flags.", args)
967
968 sim.unset_xdp("offload")
969 xdp = sim.ip_link_show(xdp=True)["xdp"]
970 offloaded = sim.dfs_read("bpf_offloaded_id")
971
972 fail(xdp["mode"] != 1, "Bad mode reported after multiple programs")
973 fail("prog" not in xdp,
974 "Base program not reported after multi program mode")
975 fail(xdp["attached"][0] not in two_xdps["attached"],
976 "Offload program not reported after driver activated")
977 fail(len(ipl["xdp"]["attached"]) != 1,
978 "Wrong attached program count with remaining programs")
979 fail(offloaded != "0", "offload ID reported with only driver program left")
980
981 start_test("Test multi-attachment XDP - device remove...")
982 sim.set_xdp(obj, "offload")
983 sim.remove()
984
985 sim = NetdevSim()
986 sim.set_ethtool_tc_offloads(True)
987 1012
988 start_test("Test mixing of TC and XDP...") 1013 start_test("Test mixing of TC and XDP...")
989 sim.tc_add_ingress() 1014 sim.tc_add_ingress()
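
The new test_multi_prog() helper above exercises the kernel-side change that lets an offloaded XDP program coexist with a driver-native or generic one on the same netdev. A rough C equivalent of the attachment step, using libbpf's bpf_set_link_xdp_fd(), is sketched below; attach_both() and its parameters are invented for illustration, and prog_hw_fd is assumed to have been loaded with prog_ifindex pointing at the target device so that it is actually offloaded.

#include <net/if.h>
#include <linux/if_link.h>
#include <bpf/libbpf.h>

static int attach_both(const char *dev, int prog_hw_fd, int prog_sw_fd)
{
	int ifindex = if_nametoindex(dev);
	int err;

	if (!ifindex)
		return -1;

	/* offloaded program */
	err = bpf_set_link_xdp_fd(ifindex, prog_hw_fd, XDP_FLAGS_HW_MODE);
	if (err)
		return err;

	/* generic (skb-mode) program on the same device */
	return bpf_set_link_xdp_fd(ifindex, prog_sw_fd, XDP_FLAGS_SKB_MODE);
}
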
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index a08d026ac396..c52bd90fbb34 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -10,6 +10,7 @@
10#include <string.h> 10#include <string.h>
11#include <assert.h> 11#include <assert.h>
12#include <stdlib.h> 12#include <stdlib.h>
13#include <stdarg.h>
13#include <time.h> 14#include <time.h>
14 15
15#include <linux/types.h> 16#include <linux/types.h>
@@ -1783,6 +1784,15 @@ static void test_task_fd_query_tp(void)
1783 "sys_enter_read"); 1784 "sys_enter_read");
1784} 1785}
1785 1786
1787static int libbpf_debug_print(enum libbpf_print_level level,
1788 const char *format, va_list args)
1789{
1790 if (level == LIBBPF_DEBUG)
1791 return 0;
1792
1793 return vfprintf(stderr, format, args);
1794}
1795
1786static void test_reference_tracking() 1796static void test_reference_tracking()
1787{ 1797{
1788 const char *file = "./test_sk_lookup_kern.o"; 1798 const char *file = "./test_sk_lookup_kern.o";
@@ -1809,9 +1819,9 @@ static void test_reference_tracking()
1809 1819
1810 /* Expect verifier failure if test name has 'fail' */ 1820 /* Expect verifier failure if test name has 'fail' */
1811 if (strstr(title, "fail") != NULL) { 1821 if (strstr(title, "fail") != NULL) {
1812 libbpf_set_print(NULL, NULL, NULL); 1822 libbpf_set_print(NULL);
1813 err = !bpf_program__load(prog, "GPL", 0); 1823 err = !bpf_program__load(prog, "GPL", 0);
1814 libbpf_set_print(printf, printf, NULL); 1824 libbpf_set_print(libbpf_debug_print);
1815 } else { 1825 } else {
1816 err = bpf_program__load(prog, "GPL", 0); 1826 err = bpf_program__load(prog, "GPL", 0);
1817 } 1827 }
diff --git a/tools/testing/selftests/bpf/verifier/ctx_sk_msg.c b/tools/testing/selftests/bpf/verifier/ctx_sk_msg.c
index b0195770da6a..c6c69220a569 100644
--- a/tools/testing/selftests/bpf/verifier/ctx_sk_msg.c
+++ b/tools/testing/selftests/bpf/verifier/ctx_sk_msg.c
@@ -100,6 +100,7 @@
100 .errstr = "invalid bpf_context access", 100 .errstr = "invalid bpf_context access",
101 .result = REJECT, 101 .result = REJECT,
102 .prog_type = BPF_PROG_TYPE_SK_MSG, 102 .prog_type = BPF_PROG_TYPE_SK_MSG,
103 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
103}, 104},
104{ 105{
105 "invalid read past end of SK_MSG", 106 "invalid read past end of SK_MSG",
diff --git a/tools/testing/selftests/bpf/verifier/ctx_skb.c b/tools/testing/selftests/bpf/verifier/ctx_skb.c
index 881f1c7f57a1..c660deb582f1 100644
--- a/tools/testing/selftests/bpf/verifier/ctx_skb.c
+++ b/tools/testing/selftests/bpf/verifier/ctx_skb.c
@@ -687,6 +687,7 @@
687 }, 687 },
688 .errstr = "invalid bpf_context access", 688 .errstr = "invalid bpf_context access",
689 .result = REJECT, 689 .result = REJECT,
690 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
690}, 691},
691{ 692{
692 "check skb->hash half load not permitted, unaligned 3", 693 "check skb->hash half load not permitted, unaligned 3",
diff --git a/tools/testing/selftests/bpf/verifier/jmp32.c b/tools/testing/selftests/bpf/verifier/jmp32.c
index ceb39ffa0e88..f0961c58581e 100644
--- a/tools/testing/selftests/bpf/verifier/jmp32.c
+++ b/tools/testing/selftests/bpf/verifier/jmp32.c
@@ -27,6 +27,7 @@
27 .data64 = { 1ULL << 63 | 1, } 27 .data64 = { 1ULL << 63 | 1, }
28 }, 28 },
29 }, 29 },
30 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
30}, 31},
31{ 32{
32 "jset32: BPF_X", 33 "jset32: BPF_X",
@@ -58,6 +59,7 @@
58 .data64 = { 1ULL << 63 | 1, } 59 .data64 = { 1ULL << 63 | 1, }
59 }, 60 },
60 }, 61 },
62 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
61}, 63},
62{ 64{
63 "jset32: min/max deduction", 65 "jset32: min/max deduction",
@@ -93,6 +95,7 @@
93 .data64 = { -1, } 95 .data64 = { -1, }
94 }, 96 },
95 }, 97 },
98 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
96}, 99},
97{ 100{
98 "jeq32: BPF_X", 101 "jeq32: BPF_X",
@@ -119,6 +122,7 @@
119 .data64 = { 1ULL << 63 | 1, } 122 .data64 = { 1ULL << 63 | 1, }
120 }, 123 },
121 }, 124 },
125 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
122}, 126},
123{ 127{
124 "jeq32: min/max deduction", 128 "jeq32: min/max deduction",
@@ -154,6 +158,7 @@
154 .data64 = { -1, } 158 .data64 = { -1, }
155 }, 159 },
156 }, 160 },
161 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
157}, 162},
158{ 163{
159 "jne32: BPF_X", 164 "jne32: BPF_X",
@@ -180,6 +185,7 @@
180 .data64 = { 1ULL << 63 | 2, } 185 .data64 = { 1ULL << 63 | 2, }
181 }, 186 },
182 }, 187 },
188 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
183}, 189},
184{ 190{
185 "jne32: min/max deduction", 191 "jne32: min/max deduction",
@@ -218,6 +224,7 @@
218 .data64 = { 0, } 224 .data64 = { 0, }
219 }, 225 },
220 }, 226 },
227 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
221}, 228},
222{ 229{
223 "jge32: BPF_X", 230 "jge32: BPF_X",
@@ -244,6 +251,7 @@
244 .data64 = { (UINT_MAX - 1) | 2ULL << 32, } 251 .data64 = { (UINT_MAX - 1) | 2ULL << 32, }
245 }, 252 },
246 }, 253 },
254 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
247}, 255},
248{ 256{
249 "jge32: min/max deduction", 257 "jge32: min/max deduction",
@@ -284,6 +292,7 @@
284 .data64 = { 0, } 292 .data64 = { 0, }
285 }, 293 },
286 }, 294 },
295 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
287}, 296},
288{ 297{
289 "jgt32: BPF_X", 298 "jgt32: BPF_X",
@@ -310,6 +319,7 @@
310 .data64 = { (UINT_MAX - 1) | 2ULL << 32, } 319 .data64 = { (UINT_MAX - 1) | 2ULL << 32, }
311 }, 320 },
312 }, 321 },
322 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
313}, 323},
314{ 324{
315 "jgt32: min/max deduction", 325 "jgt32: min/max deduction",
@@ -350,6 +360,7 @@
350 .data64 = { INT_MAX, } 360 .data64 = { INT_MAX, }
351 }, 361 },
352 }, 362 },
363 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
353}, 364},
354{ 365{
355 "jle32: BPF_X", 366 "jle32: BPF_X",
@@ -376,6 +387,7 @@
376 .data64 = { UINT_MAX, } 387 .data64 = { UINT_MAX, }
377 }, 388 },
378 }, 389 },
390 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
379}, 391},
380{ 392{
381 "jle32: min/max deduction", 393 "jle32: min/max deduction",
@@ -416,6 +428,7 @@
416 .data64 = { INT_MAX - 1, } 428 .data64 = { INT_MAX - 1, }
417 }, 429 },
418 }, 430 },
431 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
419}, 432},
420{ 433{
421 "jlt32: BPF_X", 434 "jlt32: BPF_X",
@@ -442,6 +455,7 @@
442 .data64 = { (INT_MAX - 1) | 3ULL << 32, } 455 .data64 = { (INT_MAX - 1) | 3ULL << 32, }
443 }, 456 },
444 }, 457 },
458 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
445}, 459},
446{ 460{
447 "jlt32: min/max deduction", 461 "jlt32: min/max deduction",
@@ -482,6 +496,7 @@
482 .data64 = { -2, } 496 .data64 = { -2, }
483 }, 497 },
484 }, 498 },
499 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
485}, 500},
486{ 501{
487 "jsge32: BPF_X", 502 "jsge32: BPF_X",
@@ -508,6 +523,7 @@
508 .data64 = { -2, } 523 .data64 = { -2, }
509 }, 524 },
510 }, 525 },
526 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
511}, 527},
512{ 528{
513 "jsge32: min/max deduction", 529 "jsge32: min/max deduction",
@@ -548,6 +564,7 @@
548 .data64 = { 1, } 564 .data64 = { 1, }
549 }, 565 },
550 }, 566 },
567 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
551}, 568},
552{ 569{
553 "jsgt32: BPF_X", 570 "jsgt32: BPF_X",
@@ -574,6 +591,7 @@
574 .data64 = { 0x7fffffff, } 591 .data64 = { 0x7fffffff, }
575 }, 592 },
576 }, 593 },
594 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
577}, 595},
578{ 596{
579 "jsgt32: min/max deduction", 597 "jsgt32: min/max deduction",
@@ -614,6 +632,7 @@
614 .data64 = { 1, } 632 .data64 = { 1, }
615 }, 633 },
616 }, 634 },
635 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
617}, 636},
618{ 637{
619 "jsle32: BPF_X", 638 "jsle32: BPF_X",
@@ -640,6 +659,7 @@
640 .data64 = { 0x7fffffff | 2ULL << 32, } 659 .data64 = { 0x7fffffff | 2ULL << 32, }
641 }, 660 },
642 }, 661 },
662 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
643}, 663},
644{ 664{
645 "jsle32: min/max deduction", 665 "jsle32: min/max deduction",
@@ -680,6 +700,7 @@
680 .data64 = { 1, } 700 .data64 = { 1, }
681 }, 701 },
682 }, 702 },
703 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
683}, 704},
684{ 705{
685 "jslt32: BPF_X", 706 "jslt32: BPF_X",
@@ -706,6 +727,7 @@
706 .data64 = { 0x7fffffff | 2ULL << 32, } 727 .data64 = { 0x7fffffff | 2ULL << 32, }
707 }, 728 },
708 }, 729 },
730 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
709}, 731},
710{ 732{
711 "jslt32: min/max deduction", 733 "jslt32: min/max deduction",
diff --git a/tools/testing/selftests/bpf/verifier/jset.c b/tools/testing/selftests/bpf/verifier/jset.c
index 7e14037acfaf..8dcd4e0383d5 100644
--- a/tools/testing/selftests/bpf/verifier/jset.c
+++ b/tools/testing/selftests/bpf/verifier/jset.c
@@ -53,6 +53,7 @@
53 .data64 = { ~0ULL, } 53 .data64 = { ~0ULL, }
54 }, 54 },
55 }, 55 },
56 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
56}, 57},
57{ 58{
58 "jset: sign-extend", 59 "jset: sign-extend",
@@ -70,6 +71,7 @@
70 .result = ACCEPT, 71 .result = ACCEPT,
71 .retval = 2, 72 .retval = 2,
72 .data = { 1, 0, 0, 0, 0, 0, 0, 1, }, 73 .data = { 1, 0, 0, 0, 0, 0, 0, 1, },
74 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
73}, 75},
74{ 76{
75 "jset: known const compare", 77 "jset: known const compare",
diff --git a/tools/testing/selftests/bpf/verifier/spill_fill.c b/tools/testing/selftests/bpf/verifier/spill_fill.c
index d58db72fdfe8..45d43bf82f26 100644
--- a/tools/testing/selftests/bpf/verifier/spill_fill.c
+++ b/tools/testing/selftests/bpf/verifier/spill_fill.c
@@ -46,6 +46,7 @@
46 .errstr_unpriv = "attempt to corrupt spilled", 46 .errstr_unpriv = "attempt to corrupt spilled",
47 .errstr = "R0 invalid mem access 'inv", 47 .errstr = "R0 invalid mem access 'inv",
48 .result = REJECT, 48 .result = REJECT,
49 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
49}, 50},
50{ 51{
51 "check corrupted spill/fill, LSB", 52 "check corrupted spill/fill, LSB",
diff --git a/tools/testing/selftests/bpf/verifier/spin_lock.c b/tools/testing/selftests/bpf/verifier/spin_lock.c
index d829eef372a4..781621facae4 100644
--- a/tools/testing/selftests/bpf/verifier/spin_lock.c
+++ b/tools/testing/selftests/bpf/verifier/spin_lock.c
@@ -83,6 +83,7 @@
83 .result_unpriv = REJECT, 83 .result_unpriv = REJECT,
84 .errstr_unpriv = "", 84 .errstr_unpriv = "",
85 .prog_type = BPF_PROG_TYPE_CGROUP_SKB, 85 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
86 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
86}, 87},
87{ 88{
88 "spin_lock: test4 direct ld/st", 89 "spin_lock: test4 direct ld/st",
@@ -112,6 +113,7 @@
112 .result_unpriv = REJECT, 113 .result_unpriv = REJECT,
113 .errstr_unpriv = "", 114 .errstr_unpriv = "",
114 .prog_type = BPF_PROG_TYPE_CGROUP_SKB, 115 .prog_type = BPF_PROG_TYPE_CGROUP_SKB,
116 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
115}, 117},
116{ 118{
117 "spin_lock: test5 call within a locked region", 119 "spin_lock: test5 call within a locked region",
diff --git a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
index 9ab5ace83e02..4b721a77bebb 100644
--- a/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
+++ b/tools/testing/selftests/bpf/verifier/value_ptr_arith.c
@@ -512,6 +512,7 @@
512 .fixup_map_array_48b = { 3 }, 512 .fixup_map_array_48b = { 3 },
513 .result = ACCEPT, 513 .result = ACCEPT,
514 .retval = 0xabcdef12, 514 .retval = 0xabcdef12,
515 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
515}, 516},
516{ 517{
517 "map access: unknown scalar += value_ptr, 3", 518 "map access: unknown scalar += value_ptr, 3",
@@ -537,6 +538,7 @@
537 .result_unpriv = REJECT, 538 .result_unpriv = REJECT,
538 .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range", 539 .errstr_unpriv = "R0 pointer arithmetic of map value goes out of range",
539 .retval = 0xabcdef12, 540 .retval = 0xabcdef12,
541 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
540}, 542},
541{ 543{
542 "map access: unknown scalar += value_ptr, 4", 544 "map access: unknown scalar += value_ptr, 4",
@@ -559,6 +561,7 @@
559 .result = REJECT, 561 .result = REJECT,
560 .errstr = "R1 max value is outside of the array range", 562 .errstr = "R1 max value is outside of the array range",
561 .errstr_unpriv = "R1 pointer arithmetic of map value goes out of range", 563 .errstr_unpriv = "R1 pointer arithmetic of map value goes out of range",
564 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
562}, 565},
563{ 566{
564 "map access: value_ptr += unknown scalar, 1", 567 "map access: value_ptr += unknown scalar, 1",
@@ -598,6 +601,7 @@
598 .fixup_map_array_48b = { 3 }, 601 .fixup_map_array_48b = { 3 },
599 .result = ACCEPT, 602 .result = ACCEPT,
600 .retval = 0xabcdef12, 603 .retval = 0xabcdef12,
604 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
601}, 605},
602{ 606{
603 "map access: value_ptr += unknown scalar, 3", 607 "map access: value_ptr += unknown scalar, 3",