Diffstat (limited to 'tools/lib/bpf')
-rw-r--r--  tools/lib/bpf/Build              |    4
-rw-r--r--  tools/lib/bpf/Makefile           |   12
-rw-r--r--  tools/lib/bpf/bpf.c              |    1
-rw-r--r--  tools/lib/bpf/bpf.h              |    1
-rw-r--r--  tools/lib/bpf/btf.c              |  329
-rw-r--r--  tools/lib/bpf/btf.h              |   19
-rw-r--r--  tools/lib/bpf/btf_dump.c         | 1336
-rw-r--r--  tools/lib/bpf/hashmap.c          |  229
-rw-r--r--  tools/lib/bpf/hashmap.h          |  173
-rw-r--r--  tools/lib/bpf/libbpf.c           |  175
-rw-r--r--  tools/lib/bpf/libbpf.h           |    7
-rw-r--r--  tools/lib/bpf/libbpf.map         |    9
-rw-r--r--  tools/lib/bpf/libbpf_internal.h  |    2
13 files changed, 2085 insertions(+), 212 deletions(-)
diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build
index ee9d5362f35b..e3962cfbc9a6 100644
--- a/tools/lib/bpf/Build
+++ b/tools/lib/bpf/Build
@@ -1 +1,3 @@
-libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o netlink.o bpf_prog_linfo.o libbpf_probes.o xsk.o
+libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \
+	    netlink.o bpf_prog_linfo.o libbpf_probes.o xsk.o hashmap.o \
+	    btf_dump.o
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index f91639bf5650..9312066a1ae3 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -3,7 +3,7 @@
 
 BPF_VERSION = 0
 BPF_PATCHLEVEL = 0
-BPF_EXTRAVERSION = 3
+BPF_EXTRAVERSION = 4
 
 MAKEFLAGS += --no-print-directory
 
@@ -204,6 +204,16 @@ check_abi: $(OUTPUT)libbpf.so
204 "versioned symbols in $^ ($(VERSIONED_SYM_COUNT))." \ 204 "versioned symbols in $^ ($(VERSIONED_SYM_COUNT))." \
205 "Please make sure all LIBBPF_API symbols are" \ 205 "Please make sure all LIBBPF_API symbols are" \
206 "versioned in $(VERSION_SCRIPT)." >&2; \ 206 "versioned in $(VERSION_SCRIPT)." >&2; \
207 readelf -s --wide $(OUTPUT)libbpf-in.o | \
208 awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$8}'| \
209 sort -u > $(OUTPUT)libbpf_global_syms.tmp; \
210 readelf -s --wide $(OUTPUT)libbpf.so | \
211 grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | \
212 sort -u > $(OUTPUT)libbpf_versioned_syms.tmp; \
213 diff -u $(OUTPUT)libbpf_global_syms.tmp \
214 $(OUTPUT)libbpf_versioned_syms.tmp; \
215 rm $(OUTPUT)libbpf_global_syms.tmp \
216 $(OUTPUT)libbpf_versioned_syms.tmp; \
207 exit 1; \ 217 exit 1; \
208 fi 218 fi
209 219
diff --git a/tools/lib/bpf/bpf.c b/tools/lib/bpf/bpf.c
index c4a48086dc9a..0d4b4fe10a84 100644
--- a/tools/lib/bpf/bpf.c
+++ b/tools/lib/bpf/bpf.c
@@ -256,6 +256,7 @@ int bpf_load_program_xattr(const struct bpf_load_program_attr *load_attr,
 	if (load_attr->name)
 		memcpy(attr.prog_name, load_attr->name,
 		       min(strlen(load_attr->name), BPF_OBJ_NAME_LEN - 1));
+	attr.prog_flags = load_attr->prog_flags;
 
 	fd = sys_bpf_prog_load(&attr, sizeof(attr));
 	if (fd >= 0)
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 9593fec75652..ff42ca043dc8 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -87,6 +87,7 @@ struct bpf_load_program_attr {
 	const void *line_info;
 	__u32 line_info_cnt;
 	__u32 log_level;
+	__u32 prog_flags;
 };
 
 /* Flags to direct loading requirements */
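With prog_flags now plumbed through to the kernel, callers can request load-time behavior via flags. A minimal usage sketch (insns/insns_cnt are assumed to be prepared by the caller; BPF_F_STRICT_ALIGNMENT is an existing UAPI flag used purely for illustration):

	struct bpf_load_program_attr load_attr = {};
	char log_buf[4096];
	int prog_fd;

	load_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	load_attr.insns = insns;	/* caller-prepared instructions */
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = "GPL";
	/* forwarded verbatim into bpf_attr.prog_flags by the hunk above */
	load_attr.prog_flags = BPF_F_STRICT_ALIGNMENT;

	prog_fd = bpf_load_program_xattr(&load_attr, log_buf, sizeof(log_buf));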
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 03348c4d6bd4..b2478e98c367 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -4,14 +4,17 @@
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
+#include <fcntl.h>
 #include <unistd.h>
 #include <errno.h>
 #include <linux/err.h>
 #include <linux/btf.h>
+#include <gelf.h>
 #include "btf.h"
 #include "bpf.h"
 #include "libbpf.h"
 #include "libbpf_internal.h"
+#include "hashmap.h"
 
 #define max(a, b) ((a) > (b) ? (a) : (b))
 #define min(a, b) ((a) < (b) ? (a) : (b))
@@ -417,6 +420,132 @@ done:
 	return btf;
 }
 
+static bool btf_check_endianness(const GElf_Ehdr *ehdr)
+{
+#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+	return ehdr->e_ident[EI_DATA] == ELFDATA2LSB;
+#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+	return ehdr->e_ident[EI_DATA] == ELFDATA2MSB;
+#else
+# error "Unrecognized __BYTE_ORDER__"
+#endif
+}
+
+struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext)
+{
+	Elf_Data *btf_data = NULL, *btf_ext_data = NULL;
+	int err = 0, fd = -1, idx = 0;
+	struct btf *btf = NULL;
+	Elf_Scn *scn = NULL;
+	Elf *elf = NULL;
+	GElf_Ehdr ehdr;
+
+	if (elf_version(EV_CURRENT) == EV_NONE) {
+		pr_warning("failed to init libelf for %s\n", path);
+		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
+	}
+
+	fd = open(path, O_RDONLY);
+	if (fd < 0) {
+		err = -errno;
+		pr_warning("failed to open %s: %s\n", path, strerror(errno));
+		return ERR_PTR(err);
+	}
+
+	err = -LIBBPF_ERRNO__FORMAT;
+
+	elf = elf_begin(fd, ELF_C_READ, NULL);
+	if (!elf) {
+		pr_warning("failed to open %s as ELF file\n", path);
+		goto done;
+	}
+	if (!gelf_getehdr(elf, &ehdr)) {
+		pr_warning("failed to get EHDR from %s\n", path);
+		goto done;
+	}
+	if (!btf_check_endianness(&ehdr)) {
+		pr_warning("non-native ELF endianness is not supported\n");
+		goto done;
+	}
+	if (!elf_rawdata(elf_getscn(elf, ehdr.e_shstrndx), NULL)) {
+		pr_warning("failed to get e_shstrndx from %s\n", path);
+		goto done;
+	}
+
+	while ((scn = elf_nextscn(elf, scn)) != NULL) {
+		GElf_Shdr sh;
+		char *name;
+
+		idx++;
+		if (gelf_getshdr(scn, &sh) != &sh) {
+			pr_warning("failed to get section(%d) header from %s\n",
+				   idx, path);
+			goto done;
+		}
+		name = elf_strptr(elf, ehdr.e_shstrndx, sh.sh_name);
+		if (!name) {
+			pr_warning("failed to get section(%d) name from %s\n",
+				   idx, path);
+			goto done;
+		}
+		if (strcmp(name, BTF_ELF_SEC) == 0) {
+			btf_data = elf_getdata(scn, 0);
+			if (!btf_data) {
+				pr_warning("failed to get section(%d, %s) data from %s\n",
+					   idx, name, path);
+				goto done;
+			}
+			continue;
+		} else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) {
+			btf_ext_data = elf_getdata(scn, 0);
+			if (!btf_ext_data) {
+				pr_warning("failed to get section(%d, %s) data from %s\n",
+					   idx, name, path);
+				goto done;
+			}
+			continue;
+		}
+	}
+
+	err = 0;
+
+	if (!btf_data) {
+		err = -ENOENT;
+		goto done;
+	}
+	btf = btf__new(btf_data->d_buf, btf_data->d_size);
+	if (IS_ERR(btf))
+		goto done;
+
+	if (btf_ext && btf_ext_data) {
+		*btf_ext = btf_ext__new(btf_ext_data->d_buf,
+					btf_ext_data->d_size);
+		if (IS_ERR(*btf_ext))
+			goto done;
+	} else if (btf_ext) {
+		*btf_ext = NULL;
+	}
+done:
+	if (elf)
+		elf_end(elf);
+	close(fd);
+
+	if (err)
+		return ERR_PTR(err);
+	/*
+	 * btf is always parsed before btf_ext, so there is no need to clean
+	 * up btf_ext if btf loading failed
+	 */
+	if (IS_ERR(btf))
+		return btf;
+	if (btf_ext && IS_ERR(*btf_ext)) {
+		btf__free(btf);
+		err = PTR_ERR(*btf_ext);
+		return ERR_PTR(err);
+	}
+	return btf;
+}
+
 static int compare_vsi_off(const void *_a, const void *_b)
 {
 	const struct btf_var_secinfo *a = _a;
@@ -1165,16 +1294,9 @@ done:
 	return err;
 }
 
-#define BTF_DEDUP_TABLE_DEFAULT_SIZE (1 << 14)
-#define BTF_DEDUP_TABLE_MAX_SIZE_LOG 31
 #define BTF_UNPROCESSED_ID ((__u32)-1)
 #define BTF_IN_PROGRESS_ID ((__u32)-2)
 
-struct btf_dedup_node {
-	struct btf_dedup_node *next;
-	__u32 type_id;
-};
-
 struct btf_dedup {
 	/* .BTF section to be deduped in-place */
 	struct btf *btf;
@@ -1190,7 +1312,7 @@ struct btf_dedup {
 	 * candidates, which is fine because we rely on subsequent
 	 * btf_xxx_equal() checks to authoritatively verify type equality.
 	 */
-	struct btf_dedup_node **dedup_table;
+	struct hashmap *dedup_table;
 	/* Canonical types map */
 	__u32 *map;
 	/* Hypothetical mapping, used during type graph equivalence checks */
@@ -1215,30 +1337,18 @@ struct btf_str_ptrs {
 	__u32 cap;
 };
 
-static inline __u32 hash_combine(__u32 h, __u32 value)
+static long hash_combine(long h, long value)
 {
-/* 2^31 + 2^29 - 2^25 + 2^22 - 2^19 - 2^16 + 1 */
-#define GOLDEN_RATIO_PRIME 0x9e370001UL
-	return h * 37 + value * GOLDEN_RATIO_PRIME;
-#undef GOLDEN_RATIO_PRIME
+	return h * 31 + value;
 }
 
-#define for_each_dedup_cand(d, hash, node)				\
-	for (node = d->dedup_table[hash & (d->opts.dedup_table_size - 1)]; \
-	     node;							\
-	     node = node->next)
+#define for_each_dedup_cand(d, node, hash) \
+	hashmap__for_each_key_entry(d->dedup_table, node, (void *)hash)
 
-static int btf_dedup_table_add(struct btf_dedup *d, __u32 hash, __u32 type_id)
+static int btf_dedup_table_add(struct btf_dedup *d, long hash, __u32 type_id)
 {
-	struct btf_dedup_node *node = malloc(sizeof(struct btf_dedup_node));
-	int bucket = hash & (d->opts.dedup_table_size - 1);
-
-	if (!node)
-		return -ENOMEM;
-	node->type_id = type_id;
-	node->next = d->dedup_table[bucket];
-	d->dedup_table[bucket] = node;
-	return 0;
+	return hashmap__append(d->dedup_table,
+			       (void *)hash, (void *)(long)type_id);
 }
 
 static int btf_dedup_hypot_map_add(struct btf_dedup *d,
@@ -1267,36 +1377,10 @@ static void btf_dedup_clear_hypot_map(struct btf_dedup *d)
 	d->hypot_cnt = 0;
 }
 
-static void btf_dedup_table_free(struct btf_dedup *d)
-{
-	struct btf_dedup_node *head, *tmp;
-	int i;
-
-	if (!d->dedup_table)
-		return;
-
-	for (i = 0; i < d->opts.dedup_table_size; i++) {
-		while (d->dedup_table[i]) {
-			tmp = d->dedup_table[i];
-			d->dedup_table[i] = tmp->next;
-			free(tmp);
-		}
-
-		head = d->dedup_table[i];
-		while (head) {
-			tmp = head;
-			head = head->next;
-			free(tmp);
-		}
-	}
-
-	free(d->dedup_table);
-	d->dedup_table = NULL;
-}
-
 static void btf_dedup_free(struct btf_dedup *d)
 {
-	btf_dedup_table_free(d);
+	hashmap__free(d->dedup_table);
+	d->dedup_table = NULL;
 
 	free(d->map);
 	d->map = NULL;
@@ -1310,40 +1394,43 @@ static void btf_dedup_free(struct btf_dedup *d)
 	free(d);
 }
 
-/* Find closest power of two >= to size, capped at 2^max_size_log */
-static __u32 roundup_pow2_max(__u32 size, int max_size_log)
-{
-	int i;
-
-	for (i = 0; i < max_size_log && (1U << i) < size; i++)
-		;
-	return 1U << i;
-}
+static size_t btf_dedup_identity_hash_fn(const void *key, void *ctx)
+{
+	return (size_t)key;
+}
+
+static size_t btf_dedup_collision_hash_fn(const void *key, void *ctx)
+{
+	return 0;
+}
 
+static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx)
+{
+	return k1 == k2;
+}
 
 static struct btf_dedup *btf_dedup_new(struct btf *btf, struct btf_ext *btf_ext,
 				       const struct btf_dedup_opts *opts)
 {
 	struct btf_dedup *d = calloc(1, sizeof(struct btf_dedup));
+	hashmap_hash_fn hash_fn = btf_dedup_identity_hash_fn;
 	int i, err = 0;
-	__u32 sz;
 
 	if (!d)
 		return ERR_PTR(-ENOMEM);
 
 	d->opts.dont_resolve_fwds = opts && opts->dont_resolve_fwds;
-	sz = opts && opts->dedup_table_size ? opts->dedup_table_size
-					    : BTF_DEDUP_TABLE_DEFAULT_SIZE;
-	sz = roundup_pow2_max(sz, BTF_DEDUP_TABLE_MAX_SIZE_LOG);
-	d->opts.dedup_table_size = sz;
+	/* dedup_table_size is now used only to force collisions in tests */
+	if (opts && opts->dedup_table_size == 1)
+		hash_fn = btf_dedup_collision_hash_fn;
 
 	d->btf = btf;
 	d->btf_ext = btf_ext;
 
-	d->dedup_table = calloc(d->opts.dedup_table_size,
-				sizeof(struct btf_dedup_node *));
-	if (!d->dedup_table) {
-		err = -ENOMEM;
+	d->dedup_table = hashmap__new(hash_fn, btf_dedup_equal_fn, NULL);
+	if (IS_ERR(d->dedup_table)) {
+		err = PTR_ERR(d->dedup_table);
+		d->dedup_table = NULL;
 		goto done;
 	}
 
@@ -1662,9 +1749,9 @@ done:
 	return err;
 }
 
-static __u32 btf_hash_common(struct btf_type *t)
+static long btf_hash_common(struct btf_type *t)
 {
-	__u32 h;
+	long h;
 
 	h = hash_combine(0, t->name_off);
 	h = hash_combine(h, t->info);
@@ -1680,10 +1767,10 @@ static bool btf_equal_common(struct btf_type *t1, struct btf_type *t2)
 }
 
 /* Calculate type signature hash of INT. */
-static __u32 btf_hash_int(struct btf_type *t)
+static long btf_hash_int(struct btf_type *t)
 {
 	__u32 info = *(__u32 *)(t + 1);
-	__u32 h;
+	long h;
 
 	h = btf_hash_common(t);
 	h = hash_combine(h, info);
@@ -1703,9 +1790,9 @@ static bool btf_equal_int(struct btf_type *t1, struct btf_type *t2)
 }
 
 /* Calculate type signature hash of ENUM. */
-static __u32 btf_hash_enum(struct btf_type *t)
+static long btf_hash_enum(struct btf_type *t)
 {
-	__u32 h;
+	long h;
 
 	/* don't hash vlen and enum members to support enum fwd resolving */
 	h = hash_combine(0, t->name_off);
@@ -1757,11 +1844,11 @@ static bool btf_compat_enum(struct btf_type *t1, struct btf_type *t2)
  * as referenced type IDs equivalence is established separately during type
  * graph equivalence check algorithm.
  */
-static __u32 btf_hash_struct(struct btf_type *t)
+static long btf_hash_struct(struct btf_type *t)
 {
 	struct btf_member *member = (struct btf_member *)(t + 1);
 	__u32 vlen = BTF_INFO_VLEN(t->info);
-	__u32 h = btf_hash_common(t);
+	long h = btf_hash_common(t);
 	int i;
 
 	for (i = 0; i < vlen; i++) {
@@ -1804,10 +1891,10 @@ static bool btf_shallow_equal_struct(struct btf_type *t1, struct btf_type *t2)
  * under assumption that they were already resolved to canonical type IDs and
  * are not going to change.
  */
-static __u32 btf_hash_array(struct btf_type *t)
+static long btf_hash_array(struct btf_type *t)
 {
 	struct btf_array *info = (struct btf_array *)(t + 1);
-	__u32 h = btf_hash_common(t);
+	long h = btf_hash_common(t);
 
 	h = hash_combine(h, info->type);
 	h = hash_combine(h, info->index_type);
@@ -1858,11 +1945,11 @@ static bool btf_compat_array(struct btf_type *t1, struct btf_type *t2)
  * under assumption that they were already resolved to canonical type IDs and
  * are not going to change.
  */
-static inline __u32 btf_hash_fnproto(struct btf_type *t)
+static long btf_hash_fnproto(struct btf_type *t)
 {
 	struct btf_param *member = (struct btf_param *)(t + 1);
 	__u16 vlen = BTF_INFO_VLEN(t->info);
-	__u32 h = btf_hash_common(t);
+	long h = btf_hash_common(t);
 	int i;
 
 	for (i = 0; i < vlen; i++) {
@@ -1880,7 +1967,7 @@ static inline __u32 btf_hash_fnproto(struct btf_type *t)
  * This function is called during reference types deduplication to compare
  * FUNC_PROTO to potential canonical representative.
  */
-static inline bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
+static bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
 {
 	struct btf_param *m1, *m2;
 	__u16 vlen;
@@ -1906,7 +1993,7 @@ static inline bool btf_equal_fnproto(struct btf_type *t1, struct btf_type *t2)
  * IDs. This check is performed during type graph equivalence check and
  * referenced types equivalence is checked separately.
  */
-static inline bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
+static bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
 {
 	struct btf_param *m1, *m2;
 	__u16 vlen;
@@ -1937,11 +2024,12 @@ static inline bool btf_compat_fnproto(struct btf_type *t1, struct btf_type *t2)
 static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
 {
 	struct btf_type *t = d->btf->types[type_id];
+	struct hashmap_entry *hash_entry;
 	struct btf_type *cand;
-	struct btf_dedup_node *cand_node;
 	/* if we don't find equivalent type, then we are canonical */
 	__u32 new_id = type_id;
-	__u32 h;
+	__u32 cand_id;
+	long h;
 
 	switch (BTF_INFO_KIND(t->info)) {
 	case BTF_KIND_CONST:
@@ -1960,10 +2048,11 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
 
 	case BTF_KIND_INT:
 		h = btf_hash_int(t);
-		for_each_dedup_cand(d, h, cand_node) {
-			cand = d->btf->types[cand_node->type_id];
+		for_each_dedup_cand(d, hash_entry, h) {
+			cand_id = (__u32)(long)hash_entry->value;
+			cand = d->btf->types[cand_id];
 			if (btf_equal_int(t, cand)) {
-				new_id = cand_node->type_id;
+				new_id = cand_id;
 				break;
 			}
 		}
@@ -1971,10 +2060,11 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
 
 	case BTF_KIND_ENUM:
 		h = btf_hash_enum(t);
-		for_each_dedup_cand(d, h, cand_node) {
-			cand = d->btf->types[cand_node->type_id];
+		for_each_dedup_cand(d, hash_entry, h) {
+			cand_id = (__u32)(long)hash_entry->value;
+			cand = d->btf->types[cand_id];
 			if (btf_equal_enum(t, cand)) {
-				new_id = cand_node->type_id;
+				new_id = cand_id;
 				break;
 			}
 			if (d->opts.dont_resolve_fwds)
@@ -1982,21 +2072,22 @@ static int btf_dedup_prim_type(struct btf_dedup *d, __u32 type_id)
 			if (btf_compat_enum(t, cand)) {
 				if (btf_is_enum_fwd(t)) {
 					/* resolve fwd to full enum */
-					new_id = cand_node->type_id;
+					new_id = cand_id;
 					break;
 				}
 				/* resolve canonical enum fwd to full enum */
-				d->map[cand_node->type_id] = type_id;
+				d->map[cand_id] = type_id;
 			}
 		}
 		break;
 
 	case BTF_KIND_FWD:
 		h = btf_hash_common(t);
-		for_each_dedup_cand(d, h, cand_node) {
-			cand = d->btf->types[cand_node->type_id];
+		for_each_dedup_cand(d, hash_entry, h) {
+			cand_id = (__u32)(long)hash_entry->value;
+			cand = d->btf->types[cand_id];
 			if (btf_equal_common(t, cand)) {
-				new_id = cand_node->type_id;
+				new_id = cand_id;
 				break;
 			}
 		}
@@ -2397,12 +2488,12 @@ static void btf_dedup_merge_hypot_map(struct btf_dedup *d)
  */
 static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
 {
-	struct btf_dedup_node *cand_node;
 	struct btf_type *cand_type, *t;
+	struct hashmap_entry *hash_entry;
 	/* if we don't find equivalent type, then we are canonical */
 	__u32 new_id = type_id;
 	__u16 kind;
-	__u32 h;
+	long h;
 
 	/* already deduped or is in process of deduping (loop detected) */
 	if (d->map[type_id] <= BTF_MAX_NR_TYPES)
@@ -2415,7 +2506,8 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
 		return 0;
 
 	h = btf_hash_struct(t);
-	for_each_dedup_cand(d, h, cand_node) {
+	for_each_dedup_cand(d, hash_entry, h) {
+		__u32 cand_id = (__u32)(long)hash_entry->value;
 		int eq;
 
 		/*
@@ -2428,17 +2520,17 @@ static int btf_dedup_struct_type(struct btf_dedup *d, __u32 type_id)
 		 * creating a loop (FWD -> STRUCT and STRUCT -> FWD), because
 		 * FWD and compatible STRUCT/UNION are considered equivalent.
 		 */
-		cand_type = d->btf->types[cand_node->type_id];
+		cand_type = d->btf->types[cand_id];
 		if (!btf_shallow_equal_struct(t, cand_type))
 			continue;
 
 		btf_dedup_clear_hypot_map(d);
-		eq = btf_dedup_is_equiv(d, type_id, cand_node->type_id);
+		eq = btf_dedup_is_equiv(d, type_id, cand_id);
 		if (eq < 0)
 			return eq;
 		if (!eq)
 			continue;
-		new_id = cand_node->type_id;
+		new_id = cand_id;
 		btf_dedup_merge_hypot_map(d);
 		break;
 	}
@@ -2488,12 +2580,12 @@ static int btf_dedup_struct_types(struct btf_dedup *d)
  */
 static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
 {
-	struct btf_dedup_node *cand_node;
+	struct hashmap_entry *hash_entry;
+	__u32 new_id = type_id, cand_id;
 	struct btf_type *t, *cand;
 	/* if we don't find equivalent type, then we are representative type */
-	__u32 new_id = type_id;
 	int ref_type_id;
-	__u32 h;
+	long h;
 
 	if (d->map[type_id] == BTF_IN_PROGRESS_ID)
 		return -ELOOP;
@@ -2516,10 +2608,11 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
 	t->type = ref_type_id;
 
 	h = btf_hash_common(t);
-	for_each_dedup_cand(d, h, cand_node) {
-		cand = d->btf->types[cand_node->type_id];
+	for_each_dedup_cand(d, hash_entry, h) {
+		cand_id = (__u32)(long)hash_entry->value;
+		cand = d->btf->types[cand_id];
 		if (btf_equal_common(t, cand)) {
-			new_id = cand_node->type_id;
+			new_id = cand_id;
 			break;
 		}
 	}
@@ -2539,10 +2632,11 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
 		info->index_type = ref_type_id;
 
 		h = btf_hash_array(t);
-		for_each_dedup_cand(d, h, cand_node) {
-			cand = d->btf->types[cand_node->type_id];
+		for_each_dedup_cand(d, hash_entry, h) {
+			cand_id = (__u32)(long)hash_entry->value;
+			cand = d->btf->types[cand_id];
 			if (btf_equal_array(t, cand)) {
-				new_id = cand_node->type_id;
+				new_id = cand_id;
 				break;
 			}
 		}
@@ -2570,10 +2664,11 @@ static int btf_dedup_ref_type(struct btf_dedup *d, __u32 type_id)
 		}
 
 		h = btf_hash_fnproto(t);
-		for_each_dedup_cand(d, h, cand_node) {
-			cand = d->btf->types[cand_node->type_id];
+		for_each_dedup_cand(d, hash_entry, h) {
+			cand_id = (__u32)(long)hash_entry->value;
+			cand = d->btf->types[cand_id];
 			if (btf_equal_fnproto(t, cand)) {
-				new_id = cand_node->type_id;
+				new_id = cand_id;
 				break;
 			}
 		}
@@ -2600,7 +2695,9 @@ static int btf_dedup_ref_types(struct btf_dedup *d)
 		if (err < 0)
 			return err;
 	}
-	btf_dedup_table_free(d);
+	/* we won't need d->dedup_table anymore */
+	hashmap__free(d->dedup_table);
+	d->dedup_table = NULL;
 	return 0;
 }
 
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index c7b399e81fce..ba4ffa831aa4 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -4,6 +4,7 @@
 #ifndef __LIBBPF_BTF_H
 #define __LIBBPF_BTF_H
 
+#include <stdarg.h>
 #include <linux/types.h>
 
 #ifdef __cplusplus
@@ -59,6 +60,8 @@ struct btf_ext_header {
 
 LIBBPF_API void btf__free(struct btf *btf);
 LIBBPF_API struct btf *btf__new(__u8 *data, __u32 size);
+LIBBPF_API struct btf *btf__parse_elf(const char *path,
+				      struct btf_ext **btf_ext);
 LIBBPF_API int btf__finalize_data(struct bpf_object *obj, struct btf *btf);
 LIBBPF_API int btf__load(struct btf *btf);
 LIBBPF_API __s32 btf__find_by_name(const struct btf *btf,
@@ -100,6 +103,22 @@ struct btf_dedup_opts {
 LIBBPF_API int btf__dedup(struct btf *btf, struct btf_ext *btf_ext,
 			  const struct btf_dedup_opts *opts);
 
+struct btf_dump;
+
+struct btf_dump_opts {
+	void *ctx;
+};
+
+typedef void (*btf_dump_printf_fn_t)(void *ctx, const char *fmt, va_list args);
+
+LIBBPF_API struct btf_dump *btf_dump__new(const struct btf *btf,
+					  const struct btf_ext *btf_ext,
+					  const struct btf_dump_opts *opts,
+					  btf_dump_printf_fn_t printf_fn);
+LIBBPF_API void btf_dump__free(struct btf_dump *d);
+
+LIBBPF_API int btf_dump__dump_type(struct btf_dump *d, __u32 id);
+
 #ifdef __cplusplus
 } /* extern "C" */
 #endif
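Taken together, the new interface is: construct a dumper around an existing struct btf with a printf-style callback, then feed it type IDs; each type's dependencies are emitted once, in topologically sorted order. A minimal sketch (the vprintf-based callback and the dump-all loop are illustrative, not part of this patch):

	static void print_cb(void *ctx, const char *fmt, va_list args)
	{
		vprintf(fmt, args);
	}

	struct btf_dump *d;
	__u32 id;
	int err = 0;

	d = btf_dump__new(btf, NULL, NULL, print_cb);
	if (IS_ERR(d))
		return PTR_ERR(d);

	for (id = 1; id <= btf__get_nr_types(btf) && !err; id++)
		err = btf_dump__dump_type(d, id);

	btf_dump__free(d);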
diff --git a/tools/lib/bpf/btf_dump.c b/tools/lib/bpf/btf_dump.c
new file mode 100644
index 000000000000..4b22db77e2cc
--- /dev/null
+++ b/tools/lib/bpf/btf_dump.c
@@ -0,0 +1,1336 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+/*
+ * BTF-to-C type converter.
+ *
+ * Copyright (c) 2019 Facebook
+ */
+
+#include <stdbool.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <linux/err.h>
+#include <linux/btf.h>
+#include "btf.h"
+#include "hashmap.h"
+#include "libbpf.h"
+#include "libbpf_internal.h"
+
+#define min(x, y) ((x) < (y) ? (x) : (y))
+#define max(x, y) ((x) < (y) ? (y) : (x))
+
+static const char PREFIXES[] = "\t\t\t\t\t\t\t\t\t\t\t\t\t";
+static const size_t PREFIX_CNT = sizeof(PREFIXES) - 1;
+
+static const char *pfx(int lvl)
+{
+	return lvl >= PREFIX_CNT ? PREFIXES : &PREFIXES[PREFIX_CNT - lvl];
+}
+
+enum btf_dump_type_order_state {
+	NOT_ORDERED,
+	ORDERING,
+	ORDERED,
+};
+
+enum btf_dump_type_emit_state {
+	NOT_EMITTED,
+	EMITTING,
+	EMITTED,
+};
+
+/* per-type auxiliary state */
+struct btf_dump_type_aux_state {
+	/* topological sorting state */
+	enum btf_dump_type_order_state order_state: 2;
+	/* emitting state used to determine the need for forward declaration */
+	enum btf_dump_type_emit_state emit_state: 2;
+	/* whether forward declaration was already emitted */
+	__u8 fwd_emitted: 1;
+	/* whether unique non-duplicate name was already assigned */
+	__u8 name_resolved: 1;
+};
+
+struct btf_dump {
+	const struct btf *btf;
+	const struct btf_ext *btf_ext;
+	btf_dump_printf_fn_t printf_fn;
+	struct btf_dump_opts opts;
+
+	/* per-type auxiliary state */
+	struct btf_dump_type_aux_state *type_states;
+	/* per-type optional cached unique name, must be freed, if present */
+	const char **cached_names;
+
+	/* topo-sorted list of dependent type definitions */
+	__u32 *emit_queue;
+	int emit_queue_cap;
+	int emit_queue_cnt;
+
+	/*
+	 * stack of type declarations (e.g., chain of modifiers, arrays,
+	 * funcs, etc)
+	 */
+	__u32 *decl_stack;
+	int decl_stack_cap;
+	int decl_stack_cnt;
+
+	/* maps struct/union/enum name to a number of name occurrences */
+	struct hashmap *type_names;
+	/*
+	 * maps typedef identifiers and enum value names to a number of such
+	 * name occurrences
+	 */
+	struct hashmap *ident_names;
+};
+
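+/*
+ * Simple multiplicative string hash (h = h * 31 + c) and matching equality
+ * callback; both the type_names and ident_names hashmaps use this pair.
+ */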
+static size_t str_hash_fn(const void *key, void *ctx)
+{
+	const char *s = key;
+	size_t h = 0;
+
+	while (*s) {
+		h = h * 31 + *s;
+		s++;
+	}
+	return h;
+}
+
+static bool str_equal_fn(const void *a, const void *b, void *ctx)
+{
+	return strcmp(a, b) == 0;
+}
+
+static __u16 btf_kind_of(const struct btf_type *t)
+{
+	return BTF_INFO_KIND(t->info);
+}
+
+static __u16 btf_vlen_of(const struct btf_type *t)
+{
+	return BTF_INFO_VLEN(t->info);
+}
+
+static bool btf_kflag_of(const struct btf_type *t)
+{
+	return BTF_INFO_KFLAG(t->info);
+}
+
+static const char *btf_name_of(const struct btf_dump *d, __u32 name_off)
+{
+	return btf__name_by_offset(d->btf, name_off);
+}
+
+static void btf_dump_printf(const struct btf_dump *d, const char *fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	d->printf_fn(d->opts.ctx, fmt, args);
+	va_end(args);
+}
+
+struct btf_dump *btf_dump__new(const struct btf *btf,
+			       const struct btf_ext *btf_ext,
+			       const struct btf_dump_opts *opts,
+			       btf_dump_printf_fn_t printf_fn)
+{
+	struct btf_dump *d;
+	int err;
+
+	d = calloc(1, sizeof(struct btf_dump));
+	if (!d)
+		return ERR_PTR(-ENOMEM);
+
+	d->btf = btf;
+	d->btf_ext = btf_ext;
+	d->printf_fn = printf_fn;
+	d->opts.ctx = opts ? opts->ctx : NULL;
+
+	d->type_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
+	if (IS_ERR(d->type_names)) {
+		err = PTR_ERR(d->type_names);
+		d->type_names = NULL;
+		btf_dump__free(d);
+		return ERR_PTR(err);
+	}
+	d->ident_names = hashmap__new(str_hash_fn, str_equal_fn, NULL);
+	if (IS_ERR(d->ident_names)) {
+		err = PTR_ERR(d->ident_names);
+		d->ident_names = NULL;
+		btf_dump__free(d);
+		return ERR_PTR(err);
+	}
+
+	return d;
+}
+
+void btf_dump__free(struct btf_dump *d)
+{
+	int i, cnt;
+
+	if (!d)
+		return;
+
+	free(d->type_states);
+	if (d->cached_names) {
+		/* any set cached name is owned by us and should be freed */
+		for (i = 0, cnt = btf__get_nr_types(d->btf); i <= cnt; i++) {
+			if (d->cached_names[i])
+				free((void *)d->cached_names[i]);
+		}
+	}
+	free(d->cached_names);
+	free(d->emit_queue);
+	free(d->decl_stack);
+	hashmap__free(d->type_names);
+	hashmap__free(d->ident_names);
+
+	free(d);
+}
+
+static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr);
+static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id);
+
+/*
+ * Dump BTF type in compilable C syntax, including all the necessary
+ * dependent types. If some of the dependent types were already emitted as
+ * part of a previous btf_dump__dump_type() invocation for another type, they
+ * won't be emitted again. This API allows callers to filter out BTF types
+ * according to user-defined criteria and emit only the minimal subset of
+ * types necessary to compile everything. Full struct/union definitions will
+ * still be emitted, even if the only usage is through a pointer and could be
+ * satisfied with just a forward declaration.
+ *
+ * Dumping is done in two high-level passes:
+ *   1. Topologically sort type definitions to satisfy C rules of compilation.
+ *   2. Emit type definitions in C syntax.
+ *
+ * Returns 0 on success; <0, otherwise.
+ */
+int btf_dump__dump_type(struct btf_dump *d, __u32 id)
+{
+	int err, i;
+
+	if (id > btf__get_nr_types(d->btf))
+		return -EINVAL;
+
+	/* type states are lazily allocated, as they might not be needed */
+	if (!d->type_states) {
+		d->type_states = calloc(1 + btf__get_nr_types(d->btf),
+					sizeof(d->type_states[0]));
+		if (!d->type_states)
+			return -ENOMEM;
+		d->cached_names = calloc(1 + btf__get_nr_types(d->btf),
+					 sizeof(d->cached_names[0]));
+		if (!d->cached_names)
+			return -ENOMEM;
+
+		/* VOID is special */
+		d->type_states[0].order_state = ORDERED;
+		d->type_states[0].emit_state = EMITTED;
+	}
+
+	d->emit_queue_cnt = 0;
+	err = btf_dump_order_type(d, id, false);
+	if (err < 0)
+		return err;
+
+	for (i = 0; i < d->emit_queue_cnt; i++)
+		btf_dump_emit_type(d, d->emit_queue[i], 0 /*top-level*/);
+
+	return 0;
+}
+
+static int btf_dump_add_emit_queue_id(struct btf_dump *d, __u32 id)
+{
+	__u32 *new_queue;
+	size_t new_cap;
+
+	if (d->emit_queue_cnt >= d->emit_queue_cap) {
+		new_cap = max(16, d->emit_queue_cap * 3 / 2);
+		new_queue = realloc(d->emit_queue,
+				    new_cap * sizeof(new_queue[0]));
+		if (!new_queue)
+			return -ENOMEM;
+		d->emit_queue = new_queue;
+		d->emit_queue_cap = new_cap;
+	}
+
+	d->emit_queue[d->emit_queue_cnt++] = id;
+	return 0;
+}
+
+/*
+ * Determine order of emitting dependent types and specified type to satisfy
+ * C compilation rules. This is done through topological sorting with an
+ * additional complication which comes from C rules. The main idea for C is
+ * that if some type is "embedded" into a struct/union, its size needs to be
+ * known at the time of definition of the containing type. E.g., for:
+ *
+ *	struct A {};
+ *	struct B { struct A x; }
+ *
+ * struct A *HAS* to be defined before struct B, because it's "embedded",
+ * i.e., it is part of struct B layout. But in the following case:
+ *
+ *	struct A;
+ *	struct B { struct A *x; }
+ *	struct A {};
+ *
+ * it's enough to just have a forward declaration of struct A at the time of
+ * struct B definition, as struct B has a pointer to struct A, so the size of
+ * field x is known without knowing struct A size: it's sizeof(void *).
+ *
+ * Unfortunately, there are some trickier cases we need to handle, e.g.:
+ *
+ *	struct A {}; // if this was forward-declaration: compilation error
+ *	struct B {
+ *		struct { // anonymous struct
+ *			struct A y;
+ *		} *x;
+ *	};
+ *
+ * In this case, struct B's field x is a pointer, so its size is known
+ * regardless of the size of the (anonymous) struct it points to. But because
+ * this struct is anonymous and thus defined inline inside struct B, *and* it
+ * embeds struct A, the compiler requires the full definition of struct A to
+ * be known before struct B can be defined. This creates a transitive
+ * dependency between struct A and struct B. If struct A was forward-declared
+ * before struct B definition and fully defined after struct B definition,
+ * that would trigger a compilation error.
+ *
+ * All this means that while we are doing topological sorting on the BTF type
+ * graph, we need to determine relationships between different types (graph
+ * nodes):
+ *   - weak link (relationship) between X and Y, if Y *CAN* be
+ *     forward-declared at the point of X definition;
+ *   - strong link, if Y *HAS* to be fully-defined before X can be defined.
+ *
+ * The rule is as follows. Given a chain of BTF types from X to Y, if there is
+ * a BTF_KIND_PTR type in the chain and at least one non-anonymous type
+ * Z (excluding X, including Y), then the link is weak. Otherwise, it's
+ * strong. Weak/strong relationship is determined recursively during DFS
+ * traversal and is returned as a result from btf_dump_order_type().
+ *
+ * btf_dump_order_type() is trying to avoid unnecessary forward declarations,
+ * but it is not guaranteeing that no extraneous forward declarations will be
+ * emitted.
+ *
+ * To avoid extra work, the algorithm marks some BTF types as ORDERED, when
+ * it's done with them, but not for all (e.g., VOLATILE, CONST, RESTRICT,
+ * ARRAY, FUNC_PROTO), as weak/strong semantics for those depends on the
+ * entire graph path, so depending on where one came from to reach that BTF
+ * type, it might cause weak or strong ordering. For types like
+ * STRUCT/UNION/INT/ENUM, once they are processed, there is no need to do it
+ * again, so they are marked as ORDERED. We can mark PTR as ORDERED as well,
+ * as it semi-forces a weak link, unless the subsequently referenced
+ * STRUCT/UNION/ENUM is anonymous. But in any case, once those are processed,
+ * there is no need to do it again, as the result won't change.
+ *
+ * Returns:
+ *   - 1, if type is part of strong link (so there is strong topological
+ *     ordering requirements);
+ *   - 0, if type is part of weak link (so can be satisfied through forward
+ *     declaration);
+ *   - <0, on error (e.g., unsatisfiable type loop detected).
+ */
+static int btf_dump_order_type(struct btf_dump *d, __u32 id, bool through_ptr)
+{
+	/*
+	 * Order state is used to detect strong link cycles, but only for BTF
+	 * kinds that are or could be an independent definition (i.e.,
+	 * stand-alone fwd decl, enum, typedef, struct, union). Ptrs, arrays,
+	 * func_protos, modifiers are just means to get to these definitions.
+	 * Int/void don't need definitions, they are assumed to be always
+	 * properly defined. We also ignore datasec, var, and funcs for now.
+	 * So for all non-defining kinds, we never even set ordering state;
+	 * for defining kinds we set ORDERING and subsequently ORDERED if it
+	 * forms a strong link.
+	 */
+	struct btf_dump_type_aux_state *tstate = &d->type_states[id];
+	const struct btf_type *t;
+	__u16 kind, vlen;
+	int err, i;
+
+	/* return true, letting typedefs know that it's ok to be emitted */
+	if (tstate->order_state == ORDERED)
+		return 1;
+
+	t = btf__type_by_id(d->btf, id);
+	kind = btf_kind_of(t);
+
+	if (tstate->order_state == ORDERING) {
+		/* type loop, but resolvable through fwd declaration */
+		if ((kind == BTF_KIND_STRUCT || kind == BTF_KIND_UNION) &&
+		    through_ptr && t->name_off != 0)
+			return 0;
+		pr_warning("unsatisfiable type cycle, id:[%u]\n", id);
+		return -ELOOP;
+	}
+
+	switch (kind) {
+	case BTF_KIND_INT:
+		tstate->order_state = ORDERED;
+		return 0;
+
+	case BTF_KIND_PTR:
+		err = btf_dump_order_type(d, t->type, true);
+		tstate->order_state = ORDERED;
+		return err;
+
+	case BTF_KIND_ARRAY: {
+		const struct btf_array *a = (void *)(t + 1);
+
+		return btf_dump_order_type(d, a->type, through_ptr);
+	}
+	case BTF_KIND_STRUCT:
+	case BTF_KIND_UNION: {
+		const struct btf_member *m = (void *)(t + 1);
+		/*
+		 * struct/union is part of strong link, only if it's embedded
+		 * (so no ptr in a path) or it's anonymous (so has to be
+		 * defined inline, even if declared through ptr)
+		 */
+		if (through_ptr && t->name_off != 0)
+			return 0;
+
+		tstate->order_state = ORDERING;
+
+		vlen = btf_vlen_of(t);
+		for (i = 0; i < vlen; i++, m++) {
+			err = btf_dump_order_type(d, m->type, false);
+			if (err < 0)
+				return err;
+		}
+
+		if (t->name_off != 0) {
+			err = btf_dump_add_emit_queue_id(d, id);
+			if (err < 0)
+				return err;
+		}
+
+		tstate->order_state = ORDERED;
+		return 1;
+	}
+	case BTF_KIND_ENUM:
+	case BTF_KIND_FWD:
+		if (t->name_off != 0) {
+			err = btf_dump_add_emit_queue_id(d, id);
+			if (err)
+				return err;
+		}
+		tstate->order_state = ORDERED;
+		return 1;
+
+	case BTF_KIND_TYPEDEF: {
+		int is_strong;
+
+		is_strong = btf_dump_order_type(d, t->type, through_ptr);
+		if (is_strong < 0)
+			return is_strong;
+
+		/* typedef is similar to struct/union w.r.t. fwd-decls */
+		if (through_ptr && !is_strong)
+			return 0;
+
+		/* typedef is always a named definition */
+		err = btf_dump_add_emit_queue_id(d, id);
+		if (err)
+			return err;
+
+		d->type_states[id].order_state = ORDERED;
+		return 1;
+	}
+	case BTF_KIND_VOLATILE:
+	case BTF_KIND_CONST:
+	case BTF_KIND_RESTRICT:
+		return btf_dump_order_type(d, t->type, through_ptr);
+
+	case BTF_KIND_FUNC_PROTO: {
+		const struct btf_param *p = (void *)(t + 1);
+		bool is_strong;
+
+		err = btf_dump_order_type(d, t->type, through_ptr);
+		if (err < 0)
+			return err;
+		is_strong = err > 0;
+
+		vlen = btf_vlen_of(t);
+		for (i = 0; i < vlen; i++, p++) {
+			err = btf_dump_order_type(d, p->type, through_ptr);
+			if (err < 0)
+				return err;
+			if (err > 0)
+				is_strong = true;
+		}
+		return is_strong;
+	}
+	case BTF_KIND_FUNC:
+	case BTF_KIND_VAR:
+	case BTF_KIND_DATASEC:
+		d->type_states[id].order_state = ORDERED;
+		return 0;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id,
+				     const struct btf_type *t);
+static void btf_dump_emit_struct_def(struct btf_dump *d, __u32 id,
+				     const struct btf_type *t, int lvl);
+
+static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id,
+				   const struct btf_type *t);
+static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
+				   const struct btf_type *t, int lvl);
+
+static void btf_dump_emit_fwd_def(struct btf_dump *d, __u32 id,
+				  const struct btf_type *t);
+
+static void btf_dump_emit_typedef_def(struct btf_dump *d, __u32 id,
+				      const struct btf_type *t, int lvl);
+
+/* a local view into a shared stack */
+struct id_stack {
+	const __u32 *ids;
+	int cnt;
+};
+
+static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
+				    const char *fname, int lvl);
+static void btf_dump_emit_type_chain(struct btf_dump *d,
+				     struct id_stack *decl_stack,
+				     const char *fname, int lvl);
+
+static const char *btf_dump_type_name(struct btf_dump *d, __u32 id);
+static const char *btf_dump_ident_name(struct btf_dump *d, __u32 id);
+static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map,
+				 const char *orig_name);
+
+static bool btf_dump_is_blacklisted(struct btf_dump *d, __u32 id)
+{
+	const struct btf_type *t = btf__type_by_id(d->btf, id);
+
+	/* __builtin_va_list is a compiler built-in, which causes compilation
+	 * errors when the compiler consuming the generated C header differs
+	 * from the one used to compile the original code (e.g., GCC to
+	 * compile the kernel, Clang to consume the C header generated from
+	 * BTF). As it is a built-in, it should already be defined properly
+	 * inside the compiler.
+	 */
+	if (t->name_off == 0)
+		return false;
+	return strcmp(btf_name_of(d, t->name_off), "__builtin_va_list") == 0;
+}
+
+/*
+ * Emit C-syntax definitions of types from chains of BTF types.
+ *
+ * High-level handling of determining necessary forward declarations is
+ * handled by btf_dump_emit_type() itself, but all the nitty-gritty details
+ * of emitting type declarations/definitions in C syntax are handled by a
+ * combo of btf_dump_emit_type_decl()/btf_dump_emit_type_chain() w/
+ * delegation to corresponding btf_dump_emit_*_{def,fwd}() functions.
+ *
+ * We also keep track of "containing struct/union type ID" to determine when
+ * we reference it from inside and thus can avoid emitting an unnecessary
+ * forward declaration.
+ *
+ * This algorithm is designed in such a way that even if some error occurs
+ * (either technical, e.g., out of memory, or logical, i.e., malformed BTF
+ * that doesn't comply with C rules completely), the algorithm will try to
+ * proceed and produce as much meaningful output as possible.
+ */
+static void btf_dump_emit_type(struct btf_dump *d, __u32 id, __u32 cont_id)
+{
+	struct btf_dump_type_aux_state *tstate = &d->type_states[id];
+	bool top_level_def = cont_id == 0;
+	const struct btf_type *t;
+	__u16 kind;
+
+	if (tstate->emit_state == EMITTED)
+		return;
+
+	t = btf__type_by_id(d->btf, id);
+	kind = btf_kind_of(t);
+
+	if (top_level_def && t->name_off == 0) {
+		pr_warning("unexpected nameless definition, id:[%u]\n", id);
+		return;
+	}
+
+	if (tstate->emit_state == EMITTING) {
+		if (tstate->fwd_emitted)
+			return;
+
+		switch (kind) {
+		case BTF_KIND_STRUCT:
+		case BTF_KIND_UNION:
+			/*
+			 * if we are referencing a struct/union that we are
+			 * part of - then no need for fwd declaration
+			 */
+			if (id == cont_id)
+				return;
+			if (t->name_off == 0) {
+				pr_warning("anonymous struct/union loop, id:[%u]\n",
+					   id);
+				return;
+			}
+			btf_dump_emit_struct_fwd(d, id, t);
+			btf_dump_printf(d, ";\n\n");
+			tstate->fwd_emitted = 1;
+			break;
+		case BTF_KIND_TYPEDEF:
+			/*
+			 * for typedefs, fwd_emitted means the typedef
+			 * definition was emitted, but it can be used only
+			 * for "weak" references through a pointer, not for
+			 * embedding
+			 */
+			if (!btf_dump_is_blacklisted(d, id)) {
+				btf_dump_emit_typedef_def(d, id, t, 0);
+				btf_dump_printf(d, ";\n\n");
+			}
+			tstate->fwd_emitted = 1;
+			break;
+		default:
+			break;
+		}
+
+		return;
+	}
+
+	switch (kind) {
+	case BTF_KIND_INT:
+		tstate->emit_state = EMITTED;
+		break;
+	case BTF_KIND_ENUM:
+		if (top_level_def) {
+			btf_dump_emit_enum_def(d, id, t, 0);
+			btf_dump_printf(d, ";\n\n");
+		}
+		tstate->emit_state = EMITTED;
+		break;
+	case BTF_KIND_PTR:
+	case BTF_KIND_VOLATILE:
+	case BTF_KIND_CONST:
+	case BTF_KIND_RESTRICT:
+		btf_dump_emit_type(d, t->type, cont_id);
+		break;
+	case BTF_KIND_ARRAY: {
+		const struct btf_array *a = (void *)(t + 1);
+
+		btf_dump_emit_type(d, a->type, cont_id);
+		break;
+	}
+	case BTF_KIND_FWD:
+		btf_dump_emit_fwd_def(d, id, t);
+		btf_dump_printf(d, ";\n\n");
+		tstate->emit_state = EMITTED;
+		break;
+	case BTF_KIND_TYPEDEF:
+		tstate->emit_state = EMITTING;
+		btf_dump_emit_type(d, t->type, id);
+		/*
+		 * typedef can serve as both a definition and a forward
+		 * declaration; at this stage someone depends on this typedef
+		 * as a forward declaration (refers to it through a pointer),
+		 * so unless we already did it, emit the typedef as a forward
+		 * declaration
+		 */
+		if (!tstate->fwd_emitted && !btf_dump_is_blacklisted(d, id)) {
+			btf_dump_emit_typedef_def(d, id, t, 0);
+			btf_dump_printf(d, ";\n\n");
+		}
+		tstate->emit_state = EMITTED;
+		break;
+	case BTF_KIND_STRUCT:
+	case BTF_KIND_UNION:
+		tstate->emit_state = EMITTING;
+		/* if it's a top-level struct/union definition or struct/union
+		 * is anonymous, then in C we'll be emitting all fields and
+		 * their types (as opposed to just `struct X`), so we need to
+		 * make sure that all types, referenced from struct/union
+		 * members, have the necessary forward-declarations, where
+		 * applicable
+		 */
+		if (top_level_def || t->name_off == 0) {
+			const struct btf_member *m = (void *)(t + 1);
+			__u16 vlen = btf_vlen_of(t);
+			int i, new_cont_id;
+
+			new_cont_id = t->name_off == 0 ? cont_id : id;
+			for (i = 0; i < vlen; i++, m++)
+				btf_dump_emit_type(d, m->type, new_cont_id);
+		} else if (!tstate->fwd_emitted && id != cont_id) {
+			btf_dump_emit_struct_fwd(d, id, t);
+			btf_dump_printf(d, ";\n\n");
+			tstate->fwd_emitted = 1;
+		}
+
+		if (top_level_def) {
+			btf_dump_emit_struct_def(d, id, t, 0);
+			btf_dump_printf(d, ";\n\n");
+			tstate->emit_state = EMITTED;
+		} else {
+			tstate->emit_state = NOT_EMITTED;
+		}
+		break;
+	case BTF_KIND_FUNC_PROTO: {
+		const struct btf_param *p = (void *)(t + 1);
+		__u16 vlen = btf_vlen_of(t);
+		int i;
+
+		btf_dump_emit_type(d, t->type, cont_id);
+		for (i = 0; i < vlen; i++, p++)
+			btf_dump_emit_type(d, p->type, cont_id);
+
+		break;
+	}
+	default:
+		break;
+	}
+}
+
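+/*
+ * Natural alignment of a type, in bytes: scalars align to
+ * min(sizeof(void *), size), pointers to sizeof(void *), arrays and
+ * modifiers to their element/target type, and structs/unions to the
+ * maximum alignment across their members.
+ */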
+static int btf_align_of(const struct btf *btf, __u32 id)
+{
+	const struct btf_type *t = btf__type_by_id(btf, id);
+	__u16 kind = btf_kind_of(t);
+
+	switch (kind) {
+	case BTF_KIND_INT:
+	case BTF_KIND_ENUM:
+		return min(sizeof(void *), t->size);
+	case BTF_KIND_PTR:
+		return sizeof(void *);
+	case BTF_KIND_TYPEDEF:
+	case BTF_KIND_VOLATILE:
+	case BTF_KIND_CONST:
+	case BTF_KIND_RESTRICT:
+		return btf_align_of(btf, t->type);
+	case BTF_KIND_ARRAY: {
+		const struct btf_array *a = (void *)(t + 1);
+
+		return btf_align_of(btf, a->type);
+	}
+	case BTF_KIND_STRUCT:
+	case BTF_KIND_UNION: {
+		const struct btf_member *m = (void *)(t + 1);
+		__u16 vlen = btf_vlen_of(t);
+		int i, align = 1;
+
+		for (i = 0; i < vlen; i++, m++)
+			align = max(align, btf_align_of(btf, m->type));
+
+		return align;
+	}
+	default:
+		pr_warning("unsupported BTF_KIND:%u\n", btf_kind_of(t));
+		return 1;
+	}
+}
+
+static bool btf_is_struct_packed(const struct btf *btf, __u32 id,
+				 const struct btf_type *t)
+{
+	const struct btf_member *m;
+	int align, i, bit_sz;
+	__u16 vlen;
+	bool kflag;
+
+	align = btf_align_of(btf, id);
+	/* size of a non-packed struct has to be a multiple of its alignment */
+	if (t->size % align)
+		return true;
+
+	m = (void *)(t + 1);
+	kflag = btf_kflag_of(t);
+	vlen = btf_vlen_of(t);
+	/* all non-bitfield fields have to be naturally aligned */
+	for (i = 0; i < vlen; i++, m++) {
+		align = btf_align_of(btf, m->type);
+		bit_sz = kflag ? BTF_MEMBER_BITFIELD_SIZE(m->offset) : 0;
+		if (bit_sz == 0 && m->offset % (8 * align) != 0)
+			return true;
+	}
+
+	/*
+	 * if the original struct was marked as packed, but its layout is
+	 * naturally aligned, we'll detect that it's not packed
+	 */
+	return false;
+}
+
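+/*
+ * Pick the size of the next padding chunk, at most at_most bits: the
+ * remainder first, then full chunks; e.g., an 18-bit gap padded in 8-bit
+ * chunks yields 2, then 8, then 8.
+ */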
+static int chip_away_bits(int total, int at_most)
+{
+	return total % at_most ? : at_most;
+}
+
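+/*
+ * Emit explicit bit-field padding (e.g., "long: 26;") to cover the gap
+ * between the end of the previous member (cur_off, in bits) and the start
+ * of the next one (m_off), whenever natural alignment alone wouldn't
+ * reproduce the original layout.
+ */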
773static void btf_dump_emit_bit_padding(const struct btf_dump *d,
774 int cur_off, int m_off, int m_bit_sz,
775 int align, int lvl)
776{
777 int off_diff = m_off - cur_off;
778 int ptr_bits = sizeof(void *) * 8;
779
780 if (off_diff <= 0)
781 /* no gap */
782 return;
783 if (m_bit_sz == 0 && off_diff < align * 8)
784 /* natural padding will take care of a gap */
785 return;
786
787 while (off_diff > 0) {
788 const char *pad_type;
789 int pad_bits;
790
791 if (ptr_bits > 32 && off_diff > 32) {
792 pad_type = "long";
793 pad_bits = chip_away_bits(off_diff, ptr_bits);
794 } else if (off_diff > 16) {
795 pad_type = "int";
796 pad_bits = chip_away_bits(off_diff, 32);
797 } else if (off_diff > 8) {
798 pad_type = "short";
799 pad_bits = chip_away_bits(off_diff, 16);
800 } else {
801 pad_type = "char";
802 pad_bits = chip_away_bits(off_diff, 8);
803 }
804 btf_dump_printf(d, "\n%s%s: %d;", pfx(lvl), pad_type, pad_bits);
805 off_diff -= pad_bits;
806 }
807}
808
809static void btf_dump_emit_struct_fwd(struct btf_dump *d, __u32 id,
810 const struct btf_type *t)
811{
812 btf_dump_printf(d, "%s %s",
813 btf_kind_of(t) == BTF_KIND_STRUCT ? "struct" : "union",
814 btf_dump_type_name(d, id));
815}
816
817static void btf_dump_emit_struct_def(struct btf_dump *d,
818 __u32 id,
819 const struct btf_type *t,
820 int lvl)
821{
822 const struct btf_member *m = (void *)(t + 1);
823 bool kflag = btf_kflag_of(t), is_struct;
824 int align, i, packed, off = 0;
825 __u16 vlen = btf_vlen_of(t);
826
827 is_struct = btf_kind_of(t) == BTF_KIND_STRUCT;
828 packed = is_struct ? btf_is_struct_packed(d->btf, id, t) : 0;
829 align = packed ? 1 : btf_align_of(d->btf, id);
830
831 btf_dump_printf(d, "%s%s%s {",
832 is_struct ? "struct" : "union",
833 t->name_off ? " " : "",
834 btf_dump_type_name(d, id));
835
836 for (i = 0; i < vlen; i++, m++) {
837 const char *fname;
838 int m_off, m_sz;
839
840 fname = btf_name_of(d, m->name_off);
841 m_sz = kflag ? BTF_MEMBER_BITFIELD_SIZE(m->offset) : 0;
842 m_off = kflag ? BTF_MEMBER_BIT_OFFSET(m->offset) : m->offset;
843 align = packed ? 1 : btf_align_of(d->btf, m->type);
844
845 btf_dump_emit_bit_padding(d, off, m_off, m_sz, align, lvl + 1);
846 btf_dump_printf(d, "\n%s", pfx(lvl + 1));
847 btf_dump_emit_type_decl(d, m->type, fname, lvl + 1);
848
849 if (m_sz) {
850 btf_dump_printf(d, ": %d", m_sz);
851 off = m_off + m_sz;
852 } else {
853 m_sz = max(0, btf__resolve_size(d->btf, m->type));
854 off = m_off + m_sz * 8;
855 }
856 btf_dump_printf(d, ";");
857 }
858
859 if (vlen)
860 btf_dump_printf(d, "\n");
861 btf_dump_printf(d, "%s}", pfx(lvl));
862 if (packed)
863 btf_dump_printf(d, " __attribute__((packed))");
864}
865
866static void btf_dump_emit_enum_fwd(struct btf_dump *d, __u32 id,
867 const struct btf_type *t)
868{
869 btf_dump_printf(d, "enum %s", btf_dump_type_name(d, id));
870}
871
872static void btf_dump_emit_enum_def(struct btf_dump *d, __u32 id,
873 const struct btf_type *t,
874 int lvl)
875{
876 const struct btf_enum *v = (void *)(t+1);
877 __u16 vlen = btf_vlen_of(t);
878 const char *name;
879 size_t dup_cnt;
880 int i;
881
882 btf_dump_printf(d, "enum%s%s",
883 t->name_off ? " " : "",
884 btf_dump_type_name(d, id));
885
886 if (vlen) {
887 btf_dump_printf(d, " {");
888 for (i = 0; i < vlen; i++, v++) {
889 name = btf_name_of(d, v->name_off);
890 /* enumerators share namespace with typedef idents */
891 dup_cnt = btf_dump_name_dups(d, d->ident_names, name);
892 if (dup_cnt > 1) {
893 btf_dump_printf(d, "\n%s%s___%zu = %d,",
894 pfx(lvl + 1), name, dup_cnt,
895 (__s32)v->val);
896 } else {
897 btf_dump_printf(d, "\n%s%s = %d,",
898 pfx(lvl + 1), name,
899 (__s32)v->val);
900 }
901 }
902 btf_dump_printf(d, "\n%s}", pfx(lvl));
903 }
904}
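Note the dup counting above: when a later, unrelated enum reuses an enumerator
name already present in the shared ident namespace, the second occurrence is
emitted with a ___<N> suffix. A hypothetical sketch:

	enum first {
		VALUE = 1,
	};
	enum second {
		VALUE___2 = 2,
	};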
905
906static void btf_dump_emit_fwd_def(struct btf_dump *d, __u32 id,
907 const struct btf_type *t)
908{
909 const char *name = btf_dump_type_name(d, id);
910
911 if (btf_kflag_of(t))
912 btf_dump_printf(d, "union %s", name);
913 else
914 btf_dump_printf(d, "struct %s", name);
915}
916
917static void btf_dump_emit_typedef_def(struct btf_dump *d, __u32 id,
918 const struct btf_type *t, int lvl)
919{
920 const char *name = btf_dump_ident_name(d, id);
921
922 btf_dump_printf(d, "typedef ");
923 btf_dump_emit_type_decl(d, t->type, name, lvl);
924}
925
926static int btf_dump_push_decl_stack_id(struct btf_dump *d, __u32 id)
927{
928 __u32 *new_stack;
929 size_t new_cap;
930
931 if (d->decl_stack_cnt >= d->decl_stack_cap) {
932 new_cap = max(16, d->decl_stack_cap * 3 / 2);
933 new_stack = realloc(d->decl_stack,
934 new_cap * sizeof(new_stack[0]));
935 if (!new_stack)
936 return -ENOMEM;
937 d->decl_stack = new_stack;
938 d->decl_stack_cap = new_cap;
939 }
940
941 d->decl_stack[d->decl_stack_cnt++] = id;
942
943 return 0;
944}
945
946/*
947 * Emit type declaration (e.g., field type declaration in a struct or argument
948 * declaration in function prototype) in correct C syntax.
949 *
 950 * For most types it's trivial, but there are a few quirky type declaration
951 * cases worth mentioning:
952 * - function prototypes (especially nesting of function prototypes);
953 * - arrays;
954 * - const/volatile/restrict for pointers vs other types.
955 *
956 * For a good discussion of *PARSING* C syntax (as a human), see
957 * Peter van der Linden's "Expert C Programming: Deep C Secrets",
958 * Ch.3 "Unscrambling Declarations in C".
959 *
 960 * It won't help much with BTF-to-C conversion, though, as that is the
 961 * opposite problem. So we came up with an algorithm that is, in essence, the
 962 * reverse of van der Linden's parsing algorithm. It goes from the structured
 963 * BTF representation of a type declaration to valid, compilable C syntax.
964 *
965 * For instance, consider this C typedef:
 966 * typedef const int * const *arr_t[10];
967 * It will be represented in BTF with this chain of BTF types:
968 * [typedef] -> [array] -> [ptr] -> [const] -> [ptr] -> [const] -> [int]
969 *
 970 * Notice how the [const] modifier always precedes the type it modifies in the
 971 * BTF type graph, while in C syntax const/volatile/restrict modifiers are
 972 * written to the right of pointers, but to the left of other types. There are
 973 * also other quirks, like function pointers, arrays of them, and functions
 974 * returning other functions.
975 *
 976 * We handle that by pushing all the types onto a stack, until we hit a
 977 * "terminal" type (int/enum/struct/union/fwd). Then, depending on the kind of
 978 * type on top of the stack, modifiers are handled differently. Arrays and
 979 * function pointers also have wildly different syntax, as does the way their
 980 * nesting is expressed. See the code for the authoritative definition.
981 *
 982 * To avoid allocating a new stack for each independent chain of BTF types, we
 983 * share one bigger stack, with each chain working only on its own local view
 984 * of a stack frame. Some care is required to "pop" stack frames after
 985 * processing a type declaration chain.
986 */
987static void btf_dump_emit_type_decl(struct btf_dump *d, __u32 id,
988 const char *fname, int lvl)
989{
990 struct id_stack decl_stack;
991 const struct btf_type *t;
992 int err, stack_start;
993 __u16 kind;
994
995 stack_start = d->decl_stack_cnt;
996 for (;;) {
997 err = btf_dump_push_decl_stack_id(d, id);
998 if (err < 0) {
999 /*
1000 * if we don't have enough memory for entire type decl
1001 * chain, restore stack, emit warning, and try to
1002 * proceed nevertheless
1003 */
1004			pr_warning("not enough memory for decl stack: %d\n", err);
1005 d->decl_stack_cnt = stack_start;
1006 return;
1007 }
1008
1009 /* VOID */
1010 if (id == 0)
1011 break;
1012
1013 t = btf__type_by_id(d->btf, id);
1014 kind = btf_kind_of(t);
1015 switch (kind) {
1016 case BTF_KIND_PTR:
1017 case BTF_KIND_VOLATILE:
1018 case BTF_KIND_CONST:
1019 case BTF_KIND_RESTRICT:
1020 case BTF_KIND_FUNC_PROTO:
1021 id = t->type;
1022 break;
1023 case BTF_KIND_ARRAY: {
1024 const struct btf_array *a = (void *)(t + 1);
1025
1026 id = a->type;
1027 break;
1028 }
1029 case BTF_KIND_INT:
1030 case BTF_KIND_ENUM:
1031 case BTF_KIND_FWD:
1032 case BTF_KIND_STRUCT:
1033 case BTF_KIND_UNION:
1034 case BTF_KIND_TYPEDEF:
1035 goto done;
1036 default:
1037 pr_warning("unexpected type in decl chain, kind:%u, id:[%u]\n",
1038 kind, id);
1039 goto done;
1040 }
1041 }
1042done:
1043 /*
1044 * We might be inside a chain of declarations (e.g., array of function
1045 * pointers returning anonymous (so inlined) structs, having another
1046 * array field). Each of those needs its own "stack frame" to handle
1047 * emitting of declarations. Those stack frames are non-overlapping
1048 * portions of shared btf_dump->decl_stack. To make it a bit nicer to
1049 * handle this set of nested stacks, we create a view corresponding to
1050 * our own "stack frame" and work with it as an independent stack.
1051 * We'll need to clean up after emit_type_chain() returns, though.
1052 */
1053 decl_stack.ids = d->decl_stack + stack_start;
1054 decl_stack.cnt = d->decl_stack_cnt - stack_start;
1055 btf_dump_emit_type_chain(d, &decl_stack, fname, lvl);
1056 /*
1057 * emit_type_chain() guarantees that it will pop its entire decl_stack
1058 * frame before returning. But it works with a read-only view into
1059 * decl_stack, so it doesn't actually pop anything from the
1060 * perspective of shared btf_dump->decl_stack, per se. We need to
1061 * reset decl_stack state to how it was before us to avoid it growing
1062 * all the time.
1063 */
1064 d->decl_stack_cnt = stack_start;
1065}
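Walking the typedef example from the comment above: starting from the [array]
type, the loop pushes [array], [ptr], [const], [ptr], [const], [int] and stops
at the terminal int. btf_dump_emit_type_chain() then pops that frame from the
top: the inner const folds into "const int", each pointer adds a "*", the
outer const lands to the right of its pointer, and the array brackets go after
the name, producing:

	typedef const int * const *arr_t[10];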
1066
1067static void btf_dump_emit_mods(struct btf_dump *d, struct id_stack *decl_stack)
1068{
1069 const struct btf_type *t;
1070 __u32 id;
1071
1072 while (decl_stack->cnt) {
1073 id = decl_stack->ids[decl_stack->cnt - 1];
1074 t = btf__type_by_id(d->btf, id);
1075
1076 switch (btf_kind_of(t)) {
1077 case BTF_KIND_VOLATILE:
1078 btf_dump_printf(d, "volatile ");
1079 break;
1080 case BTF_KIND_CONST:
1081 btf_dump_printf(d, "const ");
1082 break;
1083 case BTF_KIND_RESTRICT:
1084 btf_dump_printf(d, "restrict ");
1085 break;
1086 default:
1087 return;
1088 }
1089 decl_stack->cnt--;
1090 }
1091}
1092
1093static bool btf_is_mod_kind(const struct btf *btf, __u32 id)
1094{
1095 const struct btf_type *t = btf__type_by_id(btf, id);
1096
1097 switch (btf_kind_of(t)) {
1098 case BTF_KIND_VOLATILE:
1099 case BTF_KIND_CONST:
1100 case BTF_KIND_RESTRICT:
1101 return true;
1102 default:
1103 return false;
1104 }
1105}
1106
1107static void btf_dump_emit_name(const struct btf_dump *d,
1108 const char *name, bool last_was_ptr)
1109{
1110 bool separate = name[0] && !last_was_ptr;
1111
1112 btf_dump_printf(d, "%s%s", separate ? " " : "", name);
1113}
1114
1115static void btf_dump_emit_type_chain(struct btf_dump *d,
1116 struct id_stack *decls,
1117 const char *fname, int lvl)
1118{
1119 /*
1120 * last_was_ptr is used to determine if we need to separate pointer
1121 * asterisk (*) from previous part of type signature with space, so
1122 * that we get `int ***`, instead of `int * * *`. We default to true
1123	 * for cases where we have a single pointer in a chain. E.g., in ptr ->
1124 * func_proto case. func_proto will start a new emit_type_chain call
1125 * with just ptr, which should be emitted as (*) or (*<fname>), so we
1126 * don't want to prepend space for that last pointer.
1127 */
1128 bool last_was_ptr = true;
1129 const struct btf_type *t;
1130 const char *name;
1131 __u16 kind;
1132 __u32 id;
1133
1134 while (decls->cnt) {
1135 id = decls->ids[--decls->cnt];
1136 if (id == 0) {
1137 /* VOID is a special snowflake */
1138 btf_dump_emit_mods(d, decls);
1139 btf_dump_printf(d, "void");
1140 last_was_ptr = false;
1141 continue;
1142 }
1143
1144 t = btf__type_by_id(d->btf, id);
1145 kind = btf_kind_of(t);
1146
1147 switch (kind) {
1148 case BTF_KIND_INT:
1149 btf_dump_emit_mods(d, decls);
1150 name = btf_name_of(d, t->name_off);
1151 btf_dump_printf(d, "%s", name);
1152 break;
1153 case BTF_KIND_STRUCT:
1154 case BTF_KIND_UNION:
1155 btf_dump_emit_mods(d, decls);
1156 /* inline anonymous struct/union */
1157 if (t->name_off == 0)
1158 btf_dump_emit_struct_def(d, id, t, lvl);
1159 else
1160 btf_dump_emit_struct_fwd(d, id, t);
1161 break;
1162 case BTF_KIND_ENUM:
1163 btf_dump_emit_mods(d, decls);
1164 /* inline anonymous enum */
1165 if (t->name_off == 0)
1166 btf_dump_emit_enum_def(d, id, t, lvl);
1167 else
1168 btf_dump_emit_enum_fwd(d, id, t);
1169 break;
1170 case BTF_KIND_FWD:
1171 btf_dump_emit_mods(d, decls);
1172 btf_dump_emit_fwd_def(d, id, t);
1173 break;
1174 case BTF_KIND_TYPEDEF:
1175 btf_dump_emit_mods(d, decls);
1176 btf_dump_printf(d, "%s", btf_dump_ident_name(d, id));
1177 break;
1178 case BTF_KIND_PTR:
1179 btf_dump_printf(d, "%s", last_was_ptr ? "*" : " *");
1180 break;
1181 case BTF_KIND_VOLATILE:
1182 btf_dump_printf(d, " volatile");
1183 break;
1184 case BTF_KIND_CONST:
1185 btf_dump_printf(d, " const");
1186 break;
1187 case BTF_KIND_RESTRICT:
1188 btf_dump_printf(d, " restrict");
1189 break;
1190 case BTF_KIND_ARRAY: {
1191 const struct btf_array *a = (void *)(t + 1);
1192 const struct btf_type *next_t;
1193 __u32 next_id;
1194 bool multidim;
1195 /*
1196 * GCC has a bug
1197 * (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=8354)
1198 * which causes it to emit extra const/volatile
1199 * modifiers for an array, if array's element type has
1200 * const/volatile modifiers. Clang doesn't do that.
1201			 * In general, it doesn't seem very meaningful to have
1202			 * a const/volatile modifier for an array, so we are
1203			 * going to silently skip them here.
1204 */
1205 while (decls->cnt) {
1206 next_id = decls->ids[decls->cnt - 1];
1207 if (btf_is_mod_kind(d->btf, next_id))
1208 decls->cnt--;
1209 else
1210 break;
1211 }
1212
1213 if (decls->cnt == 0) {
1214 btf_dump_emit_name(d, fname, last_was_ptr);
1215 btf_dump_printf(d, "[%u]", a->nelems);
1216 return;
1217 }
1218
1219 next_t = btf__type_by_id(d->btf, next_id);
1220 multidim = btf_kind_of(next_t) == BTF_KIND_ARRAY;
1221			/* we need a space if we have a named non-pointer */
1222 if (fname[0] && !last_was_ptr)
1223 btf_dump_printf(d, " ");
1224 /* no parentheses for multi-dimensional array */
1225 if (!multidim)
1226 btf_dump_printf(d, "(");
1227 btf_dump_emit_type_chain(d, decls, fname, lvl);
1228 if (!multidim)
1229 btf_dump_printf(d, ")");
1230 btf_dump_printf(d, "[%u]", a->nelems);
1231 return;
1232 }
1233 case BTF_KIND_FUNC_PROTO: {
1234 const struct btf_param *p = (void *)(t + 1);
1235 __u16 vlen = btf_vlen_of(t);
1236 int i;
1237
1238 btf_dump_emit_mods(d, decls);
1239 if (decls->cnt) {
1240 btf_dump_printf(d, " (");
1241 btf_dump_emit_type_chain(d, decls, fname, lvl);
1242 btf_dump_printf(d, ")");
1243 } else {
1244 btf_dump_emit_name(d, fname, last_was_ptr);
1245 }
1246 btf_dump_printf(d, "(");
1247 /*
1248 * Clang for BPF target generates func_proto with no
1249 * args as a func_proto with a single void arg (e.g.,
1250 * `int (*f)(void)` vs just `int (*f)()`). We are
1251			 * going to pretend there are no args in such a case.
1252 */
1253 if (vlen == 1 && p->type == 0) {
1254 btf_dump_printf(d, ")");
1255 return;
1256 }
1257
1258 for (i = 0; i < vlen; i++, p++) {
1259 if (i > 0)
1260 btf_dump_printf(d, ", ");
1261
1262 /* last arg of type void is vararg */
1263 if (i == vlen - 1 && p->type == 0) {
1264 btf_dump_printf(d, "...");
1265 break;
1266 }
1267
1268 name = btf_name_of(d, p->name_off);
1269 btf_dump_emit_type_decl(d, p->type, name, lvl);
1270 }
1271
1272 btf_dump_printf(d, ")");
1273 return;
1274 }
1275 default:
1276 pr_warning("unexpected type in decl chain, kind:%u, id:[%u]\n",
1277 kind, id);
1278 return;
1279 }
1280
1281 last_was_ptr = kind == BTF_KIND_PTR;
1282 }
1283
1284 btf_dump_emit_name(d, fname, last_was_ptr);
1285}
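A few declaration shapes the chain logic above has to tell apart (hedged
sketches of expected output, with hypothetical names and unnamed parameters):

	int *arr[10];			/* array of 10 pointers to int */
	int (*parr)[10];		/* pointer to an array of 10 ints */
	void (*fp)(int, char *);	/* pointer to a function prototype */
	void (*fparr[4])();		/* array of 4 function pointers; a lone
					 * void arg collapses to () as per the
					 * clang quirk handled above */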
1286
1287/* return number of duplicates (occurrences) of a given name */
1288static size_t btf_dump_name_dups(struct btf_dump *d, struct hashmap *name_map,
1289 const char *orig_name)
1290{
1291 size_t dup_cnt = 0;
1292
1293 hashmap__find(name_map, orig_name, (void **)&dup_cnt);
1294 dup_cnt++;
1295 hashmap__set(name_map, orig_name, (void *)dup_cnt, NULL, NULL);
1296
1297 return dup_cnt;
1298}
1299
1300static const char *btf_dump_resolve_name(struct btf_dump *d, __u32 id,
1301 struct hashmap *name_map)
1302{
1303 struct btf_dump_type_aux_state *s = &d->type_states[id];
1304 const struct btf_type *t = btf__type_by_id(d->btf, id);
1305 const char *orig_name = btf_name_of(d, t->name_off);
1306 const char **cached_name = &d->cached_names[id];
1307 size_t dup_cnt;
1308
1309 if (t->name_off == 0)
1310 return "";
1311
1312 if (s->name_resolved)
1313 return *cached_name ? *cached_name : orig_name;
1314
1315 dup_cnt = btf_dump_name_dups(d, name_map, orig_name);
1316 if (dup_cnt > 1) {
1317 const size_t max_len = 256;
1318 char new_name[max_len];
1319
1320 snprintf(new_name, max_len, "%s___%zu", orig_name, dup_cnt);
1321 *cached_name = strdup(new_name);
1322 }
1323
1324 s->name_resolved = 1;
1325 return *cached_name ? *cached_name : orig_name;
1326}
1327
1328static const char *btf_dump_type_name(struct btf_dump *d, __u32 id)
1329{
1330 return btf_dump_resolve_name(d, id, d->type_names);
1331}
1332
1333static const char *btf_dump_ident_name(struct btf_dump *d, __u32 id)
1334{
1335 return btf_dump_resolve_name(d, id, d->ident_names);
1336}
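Because type names (struct/union/enum tags) and identifiers (typedefs, enum
values) are tracked in two separate hashmaps, a struct and a typedef may
legitimately share a name, while two structs with the same name collide and
get disambiguated. A hypothetical sketch of the emitted result:

	struct conflict {
		int x;
	};
	struct conflict___2 {
		long y;
	};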
diff --git a/tools/lib/bpf/hashmap.c b/tools/lib/bpf/hashmap.c
new file mode 100644
index 000000000000..6122272943e6
--- /dev/null
+++ b/tools/lib/bpf/hashmap.c
@@ -0,0 +1,229 @@
1// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2
3/*
4 * Generic, non-thread-safe hash map implementation.
5 *
6 * Copyright (c) 2019 Facebook
7 */
8#include <stdint.h>
9#include <stdlib.h>
10#include <stdio.h>
11#include <errno.h>
12#include <linux/err.h>
13#include "hashmap.h"
14
15/* start with 4 buckets */
16#define HASHMAP_MIN_CAP_BITS 2
17
18static void hashmap_add_entry(struct hashmap_entry **pprev,
19 struct hashmap_entry *entry)
20{
21 entry->next = *pprev;
22 *pprev = entry;
23}
24
25static void hashmap_del_entry(struct hashmap_entry **pprev,
26 struct hashmap_entry *entry)
27{
28 *pprev = entry->next;
29 entry->next = NULL;
30}
31
32void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn,
33 hashmap_equal_fn equal_fn, void *ctx)
34{
35 map->hash_fn = hash_fn;
36 map->equal_fn = equal_fn;
37 map->ctx = ctx;
38
39 map->buckets = NULL;
40 map->cap = 0;
41 map->cap_bits = 0;
42 map->sz = 0;
43}
44
45struct hashmap *hashmap__new(hashmap_hash_fn hash_fn,
46 hashmap_equal_fn equal_fn,
47 void *ctx)
48{
49 struct hashmap *map = malloc(sizeof(struct hashmap));
50
51 if (!map)
52 return ERR_PTR(-ENOMEM);
53 hashmap__init(map, hash_fn, equal_fn, ctx);
54 return map;
55}
56
57void hashmap__clear(struct hashmap *map)
58{
59	free(map->buckets);
60	map->buckets = NULL; map->cap = map->cap_bits = map->sz = 0;
61}
62
63void hashmap__free(struct hashmap *map)
64{
65 if (!map)
66 return;
67
68 hashmap__clear(map);
69 free(map);
70}
71
72size_t hashmap__size(const struct hashmap *map)
73{
74 return map->sz;
75}
76
77size_t hashmap__capacity(const struct hashmap *map)
78{
79 return map->cap;
80}
81
82static bool hashmap_needs_to_grow(struct hashmap *map)
83{
84 /* grow if empty or more than 75% filled */
85 return (map->cap == 0) || ((map->sz + 1) * 4 / 3 > map->cap);
86}
87
88static int hashmap_grow(struct hashmap *map)
89{
90 struct hashmap_entry **new_buckets;
91 struct hashmap_entry *cur, *tmp;
92 size_t new_cap_bits, new_cap;
93 size_t h;
94 int bkt;
95
96 new_cap_bits = map->cap_bits + 1;
97 if (new_cap_bits < HASHMAP_MIN_CAP_BITS)
98 new_cap_bits = HASHMAP_MIN_CAP_BITS;
99
100 new_cap = 1UL << new_cap_bits;
101 new_buckets = calloc(new_cap, sizeof(new_buckets[0]));
102 if (!new_buckets)
103 return -ENOMEM;
104
105 hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
106 h = hash_bits(map->hash_fn(cur->key, map->ctx), new_cap_bits);
107 hashmap_add_entry(&new_buckets[h], cur);
108 }
109
110 map->cap = new_cap;
111 map->cap_bits = new_cap_bits;
112 free(map->buckets);
113 map->buckets = new_buckets;
114
115 return 0;
116}
117
118static bool hashmap_find_entry(const struct hashmap *map,
119 const void *key, size_t hash,
120 struct hashmap_entry ***pprev,
121 struct hashmap_entry **entry)
122{
123 struct hashmap_entry *cur, **prev_ptr;
124
125 if (!map->buckets)
126 return false;
127
128 for (prev_ptr = &map->buckets[hash], cur = *prev_ptr;
129 cur;
130 prev_ptr = &cur->next, cur = cur->next) {
131 if (map->equal_fn(cur->key, key, map->ctx)) {
132 if (pprev)
133 *pprev = prev_ptr;
134 *entry = cur;
135 return true;
136 }
137 }
138
139 return false;
140}
141
142int hashmap__insert(struct hashmap *map, const void *key, void *value,
143 enum hashmap_insert_strategy strategy,
144 const void **old_key, void **old_value)
145{
146 struct hashmap_entry *entry;
147 size_t h;
148 int err;
149
150 if (old_key)
151 *old_key = NULL;
152 if (old_value)
153 *old_value = NULL;
154
155 h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
156 if (strategy != HASHMAP_APPEND &&
157 hashmap_find_entry(map, key, h, NULL, &entry)) {
158 if (old_key)
159 *old_key = entry->key;
160 if (old_value)
161 *old_value = entry->value;
162
163 if (strategy == HASHMAP_SET || strategy == HASHMAP_UPDATE) {
164 entry->key = key;
165 entry->value = value;
166 return 0;
167 } else if (strategy == HASHMAP_ADD) {
168 return -EEXIST;
169 }
170 }
171
172 if (strategy == HASHMAP_UPDATE)
173 return -ENOENT;
174
175 if (hashmap_needs_to_grow(map)) {
176 err = hashmap_grow(map);
177 if (err)
178 return err;
179 h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
180 }
181
182 entry = malloc(sizeof(struct hashmap_entry));
183 if (!entry)
184 return -ENOMEM;
185
186 entry->key = key;
187 entry->value = value;
188 hashmap_add_entry(&map->buckets[h], entry);
189 map->sz++;
190
191 return 0;
192}
193
194bool hashmap__find(const struct hashmap *map, const void *key, void **value)
195{
196 struct hashmap_entry *entry;
197 size_t h;
198
199 h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
200 if (!hashmap_find_entry(map, key, h, NULL, &entry))
201 return false;
202
203 if (value)
204 *value = entry->value;
205 return true;
206}
207
208bool hashmap__delete(struct hashmap *map, const void *key,
209 const void **old_key, void **old_value)
210{
211 struct hashmap_entry **pprev, *entry;
212 size_t h;
213
214 h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
215 if (!hashmap_find_entry(map, key, h, &pprev, &entry))
216 return false;
217
218 if (old_key)
219 *old_key = entry->key;
220 if (old_value)
221 *old_value = entry->value;
222
223 hashmap_del_entry(pprev, entry);
224 free(entry);
225 map->sz--;
226
227 return true;
228}
229
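For orientation, a minimal caller of this API (hypothetical, not part of the
patch), using pointer-sized integers as keys and values:

	#include <stdio.h>
	#include "hashmap.h"

	static size_t hash_int(const void *key, void *ctx)
	{
		return (size_t)key;	/* hash_bits() scrambles this further */
	}

	static bool equal_int(const void *k1, const void *k2, void *ctx)
	{
		return k1 == k2;
	}

	int main(void)
	{
		struct hashmap *m = hashmap__new(hash_int, equal_int, NULL);
		void *val;

		/* error check (IS_ERR(m)) elided for brevity */
		hashmap__add(m, (void *)1, (void *)100);	/* returns 0 */
		hashmap__add(m, (void *)1, (void *)111);	/* -EEXIST */
		hashmap__set(m, (void *)1, (void *)200, NULL, NULL);
		if (hashmap__find(m, (void *)1, &val))
			printf("1 -> %ld\n", (long)val);	/* 1 -> 200 */
		hashmap__free(m);
		return 0;
	}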
diff --git a/tools/lib/bpf/hashmap.h b/tools/lib/bpf/hashmap.h
new file mode 100644
index 000000000000..03748a742146
--- /dev/null
+++ b/tools/lib/bpf/hashmap.h
@@ -0,0 +1,173 @@
1/* SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) */
2
3/*
4 * Generic, non-thread-safe hash map implementation.
5 *
6 * Copyright (c) 2019 Facebook
7 */
8#ifndef __LIBBPF_HASHMAP_H
9#define __LIBBPF_HASHMAP_H
10
11#include <stdbool.h>
12#include <stddef.h>
13#include "libbpf_internal.h"
14
15static inline size_t hash_bits(size_t h, int bits)
16{
17 /* shuffle bits and return requested number of upper bits */
18 return (h * 11400714819323198485llu) >> (__WORDSIZE - bits);
19}
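The multiplier is the 64-bit Fibonacci hashing constant (roughly 2^64 divided
by the golden ratio): the multiplication spreads entropy from the low bits of
h into the high bits, and the right shift keeps the top `bits` bits as the
bucket index. A hedged illustration, assuming __WORDSIZE == 64:

	/* bits = 2 (4 buckets): the bucket index is the hash's top 2 bits */
	size_t idx = (h * 11400714819323198485llu) >> 62;	/* 0..3 */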
20
21typedef size_t (*hashmap_hash_fn)(const void *key, void *ctx);
22typedef bool (*hashmap_equal_fn)(const void *key1, const void *key2, void *ctx);
23
24struct hashmap_entry {
25 const void *key;
26 void *value;
27 struct hashmap_entry *next;
28};
29
30struct hashmap {
31 hashmap_hash_fn hash_fn;
32 hashmap_equal_fn equal_fn;
33 void *ctx;
34
35 struct hashmap_entry **buckets;
36 size_t cap;
37 size_t cap_bits;
38 size_t sz;
39};
40
41#define HASHMAP_INIT(hash_fn, equal_fn, ctx) { \
42 .hash_fn = (hash_fn), \
43 .equal_fn = (equal_fn), \
44 .ctx = (ctx), \
45 .buckets = NULL, \
46 .cap = 0, \
47 .cap_bits = 0, \
48 .sz = 0, \
49}
50
51void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn,
52 hashmap_equal_fn equal_fn, void *ctx);
53struct hashmap *hashmap__new(hashmap_hash_fn hash_fn,
54 hashmap_equal_fn equal_fn,
55 void *ctx);
56void hashmap__clear(struct hashmap *map);
57void hashmap__free(struct hashmap *map);
58
59size_t hashmap__size(const struct hashmap *map);
60size_t hashmap__capacity(const struct hashmap *map);
61
62/*
63 * Hashmap insertion strategy:
64 * - HASHMAP_ADD - only add key/value if key doesn't exist yet;
65 * - HASHMAP_SET - add key/value pair if key doesn't exist yet; otherwise,
66 * update value;
67 * - HASHMAP_UPDATE - update value, if key already exists; otherwise, do
68 * nothing and return -ENOENT;
69 * - HASHMAP_APPEND - always add key/value pair, even if key already exists.
70 * This turns hashmap into a multimap by allowing multiple values to be
71 * associated with the same key. The most useful read API for such a
72 * hashmap is hashmap__for_each_key_entry() iteration. If hashmap__find()
73 * is still used, it will return the last inserted key/value entry (first
74 * in a bucket chain).
75 */
76enum hashmap_insert_strategy {
77 HASHMAP_ADD,
78 HASHMAP_SET,
79 HASHMAP_UPDATE,
80 HASHMAP_APPEND,
81};
82
83/*
84 * hashmap__insert() adds a key/value entry with various semantics, depending
85 * on the provided strategy value. If a given key/value pair replaces an
86 * already existing key/value pair, both the old key and the old value will
87 * be returned through old_key and old_value to allow the calling code to do
88 * proper memory management.
89 */
90int hashmap__insert(struct hashmap *map, const void *key, void *value,
91 enum hashmap_insert_strategy strategy,
92 const void **old_key, void **old_value);
93
94static inline int hashmap__add(struct hashmap *map,
95 const void *key, void *value)
96{
97 return hashmap__insert(map, key, value, HASHMAP_ADD, NULL, NULL);
98}
99
100static inline int hashmap__set(struct hashmap *map,
101 const void *key, void *value,
102 const void **old_key, void **old_value)
103{
104 return hashmap__insert(map, key, value, HASHMAP_SET,
105 old_key, old_value);
106}
107
108static inline int hashmap__update(struct hashmap *map,
109 const void *key, void *value,
110 const void **old_key, void **old_value)
111{
112 return hashmap__insert(map, key, value, HASHMAP_UPDATE,
113 old_key, old_value);
114}
115
116static inline int hashmap__append(struct hashmap *map,
117 const void *key, void *value)
118{
119 return hashmap__insert(map, key, value, HASHMAP_APPEND, NULL, NULL);
120}
121
122bool hashmap__delete(struct hashmap *map, const void *key,
123 const void **old_key, void **old_value);
124
125bool hashmap__find(const struct hashmap *map, const void *key, void **value);
126
127/*
128 * hashmap__for_each_entry - iterate over all entries in hashmap
129 * @map: hashmap to iterate
130 * @cur: struct hashmap_entry * used as a loop cursor
131 * @bkt: integer used as a bucket loop cursor
132 */
133#define hashmap__for_each_entry(map, cur, bkt) \
134 for (bkt = 0; bkt < map->cap; bkt++) \
135 for (cur = map->buckets[bkt]; cur; cur = cur->next)
136
137/*
138 * hashmap__for_each_entry_safe - iterate over all entries in hashmap, safe
139 * against removals
140 * @map: hashmap to iterate
141 * @cur: struct hashmap_entry * used as a loop cursor
142 * @tmp: struct hashmap_entry * used as a temporary next cursor storage
143 * @bkt: integer used as a bucket loop cursor
144 */
145#define hashmap__for_each_entry_safe(map, cur, tmp, bkt) \
146 for (bkt = 0; bkt < map->cap; bkt++) \
147 for (cur = map->buckets[bkt]; \
148 cur && ({tmp = cur->next; true; }); \
149 cur = tmp)
150
151/*
152 * hashmap__for_each_key_entry - iterate over entries associated with given key
153 * @map: hashmap to iterate
154 * @cur: struct hashmap_entry * used as a loop cursor
155 * @key: key to iterate entries for
156 */
157#define hashmap__for_each_key_entry(map, cur, _key) \
158 for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\
159 map->cap_bits); \
160 map->buckets ? map->buckets[bkt] : NULL; }); \
161 cur; \
162 cur = cur->next) \
163 if (map->equal_fn(cur->key, (_key), map->ctx))
164
165#define hashmap__for_each_key_entry_safe(map, cur, tmp, _key) \
166 for (cur = ({ size_t bkt = hash_bits(map->hash_fn((_key), map->ctx),\
167 map->cap_bits); \
168			    map->buckets ? map->buckets[bkt] : NULL; }); \
169 cur && ({ tmp = cur->next; true; }); \
170 cur = tmp) \
171 if (map->equal_fn(cur->key, (_key), map->ctx))
172
173#endif /* __LIBBPF_HASHMAP_H */
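A short sketch of the iteration macros in use (continuing the hypothetical
integer-keyed map from the hashmap.c example above):

	struct hashmap_entry *cur, *tmp;
	int bkt;

	/* read-only walk over every entry */
	hashmap__for_each_entry(m, cur, bkt)
		printf("%ld -> %ld\n", (long)cur->key, (long)cur->value);

	/* deletion-safe walk: tmp caches cur->next before the body runs */
	hashmap__for_each_entry_safe(m, cur, tmp, bkt)
		hashmap__delete(m, cur->key, NULL, NULL);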
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 197b574406b3..ba89d9727137 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -188,6 +188,7 @@ struct bpf_program {
188 void *line_info; 188 void *line_info;
189 __u32 line_info_rec_size; 189 __u32 line_info_rec_size;
190 __u32 line_info_cnt; 190 __u32 line_info_cnt;
191 __u32 prog_flags;
191}; 192};
192 193
193enum libbpf_map_type { 194enum libbpf_map_type {
@@ -348,8 +349,11 @@ static int
348bpf_program__init(void *data, size_t size, char *section_name, int idx, 349bpf_program__init(void *data, size_t size, char *section_name, int idx,
349 struct bpf_program *prog) 350 struct bpf_program *prog)
350{ 351{
351 if (size < sizeof(struct bpf_insn)) { 352 const size_t bpf_insn_sz = sizeof(struct bpf_insn);
352 pr_warning("corrupted section '%s'\n", section_name); 353
354 if (size == 0 || size % bpf_insn_sz) {
355 pr_warning("corrupted section '%s', size: %zu\n",
356 section_name, size);
353 return -EINVAL; 357 return -EINVAL;
354 } 358 }
355 359
@@ -375,9 +379,8 @@ bpf_program__init(void *data, size_t size, char *section_name, int idx,
375 section_name); 379 section_name);
376 goto errout; 380 goto errout;
377 } 381 }
378 prog->insns_cnt = size / sizeof(struct bpf_insn); 382 prog->insns_cnt = size / bpf_insn_sz;
379 memcpy(prog->insns, data, 383 memcpy(prog->insns, data, size);
380 prog->insns_cnt * sizeof(struct bpf_insn));
381 prog->idx = idx; 384 prog->idx = idx;
382 prog->instances.fds = NULL; 385 prog->instances.fds = NULL;
383 prog->instances.nr = -1; 386 prog->instances.nr = -1;
@@ -494,15 +497,14 @@ static struct bpf_object *bpf_object__new(const char *path,
494 497
495 strcpy(obj->path, path); 498 strcpy(obj->path, path);
496 /* Using basename() GNU version which doesn't modify arg. */ 499 /* Using basename() GNU version which doesn't modify arg. */
497 strncpy(obj->name, basename((void *)path), 500 strncpy(obj->name, basename((void *)path), sizeof(obj->name) - 1);
498 sizeof(obj->name) - 1);
499 end = strchr(obj->name, '.'); 501 end = strchr(obj->name, '.');
500 if (end) 502 if (end)
501 *end = 0; 503 *end = 0;
502 504
503 obj->efile.fd = -1; 505 obj->efile.fd = -1;
504 /* 506 /*
505 * Caller of this function should also calls 507 * Caller of this function should also call
506 * bpf_object__elf_finish() after data collection to return 508 * bpf_object__elf_finish() after data collection to return
507 * obj_buf to user. If not, we should duplicate the buffer to 509 * obj_buf to user. If not, we should duplicate the buffer to
508 * avoid user freeing them before elf finish. 510 * avoid user freeing them before elf finish.
@@ -562,38 +564,35 @@ static int bpf_object__elf_init(struct bpf_object *obj)
562 } else { 564 } else {
563 obj->efile.fd = open(obj->path, O_RDONLY); 565 obj->efile.fd = open(obj->path, O_RDONLY);
564 if (obj->efile.fd < 0) { 566 if (obj->efile.fd < 0) {
565 char errmsg[STRERR_BUFSIZE]; 567 char errmsg[STRERR_BUFSIZE], *cp;
566 char *cp = libbpf_strerror_r(errno, errmsg,
567 sizeof(errmsg));
568 568
569 err = -errno;
570 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
569 pr_warning("failed to open %s: %s\n", obj->path, cp); 571 pr_warning("failed to open %s: %s\n", obj->path, cp);
570 return -errno; 572 return err;
571 } 573 }
572 574
573 obj->efile.elf = elf_begin(obj->efile.fd, 575 obj->efile.elf = elf_begin(obj->efile.fd,
574 LIBBPF_ELF_C_READ_MMAP, 576 LIBBPF_ELF_C_READ_MMAP, NULL);
575 NULL);
576 } 577 }
577 578
578 if (!obj->efile.elf) { 579 if (!obj->efile.elf) {
579 pr_warning("failed to open %s as ELF file\n", 580 pr_warning("failed to open %s as ELF file\n", obj->path);
580 obj->path);
581 err = -LIBBPF_ERRNO__LIBELF; 581 err = -LIBBPF_ERRNO__LIBELF;
582 goto errout; 582 goto errout;
583 } 583 }
584 584
585 if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) { 585 if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
586 pr_warning("failed to get EHDR from %s\n", 586 pr_warning("failed to get EHDR from %s\n", obj->path);
587 obj->path);
588 err = -LIBBPF_ERRNO__FORMAT; 587 err = -LIBBPF_ERRNO__FORMAT;
589 goto errout; 588 goto errout;
590 } 589 }
591 ep = &obj->efile.ehdr; 590 ep = &obj->efile.ehdr;
592 591
593 /* Old LLVM set e_machine to EM_NONE */ 592 /* Old LLVM set e_machine to EM_NONE */
594 if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) { 593 if (ep->e_type != ET_REL ||
595 pr_warning("%s is not an eBPF object file\n", 594 (ep->e_machine && ep->e_machine != EM_BPF)) {
596 obj->path); 595 pr_warning("%s is not an eBPF object file\n", obj->path);
597 err = -LIBBPF_ERRNO__FORMAT; 596 err = -LIBBPF_ERRNO__FORMAT;
598 goto errout; 597 goto errout;
599 } 598 }
@@ -604,47 +603,31 @@ errout:
604 return err; 603 return err;
605} 604}
606 605
607static int 606static int bpf_object__check_endianness(struct bpf_object *obj)
608bpf_object__check_endianness(struct bpf_object *obj) 607{
609{ 608#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
610 static unsigned int const endian = 1; 609 if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
611 610 return 0;
612 switch (obj->efile.ehdr.e_ident[EI_DATA]) { 611#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
613 case ELFDATA2LSB: 612 if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
614 /* We are big endian, BPF obj is little endian. */ 613 return 0;
615 if (*(unsigned char const *)&endian != 1) 614#else
616 goto mismatch; 615# error "Unrecognized __BYTE_ORDER__"
617 break; 616#endif
618 617 pr_warning("endianness mismatch.\n");
619 case ELFDATA2MSB:
620 /* We are little endian, BPF obj is big endian. */
621 if (*(unsigned char const *)&endian != 0)
622 goto mismatch;
623 break;
624 default:
625 return -LIBBPF_ERRNO__ENDIAN;
626 }
627
628 return 0;
629
630mismatch:
631 pr_warning("Error: endianness mismatch.\n");
632 return -LIBBPF_ERRNO__ENDIAN; 618 return -LIBBPF_ERRNO__ENDIAN;
633} 619}
634 620
635static int 621static int
636bpf_object__init_license(struct bpf_object *obj, 622bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
637 void *data, size_t size)
638{ 623{
639 memcpy(obj->license, data, 624 memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
640 min(size, sizeof(obj->license) - 1));
641 pr_debug("license of %s is %s\n", obj->path, obj->license); 625 pr_debug("license of %s is %s\n", obj->path, obj->license);
642 return 0; 626 return 0;
643} 627}
644 628
645static int 629static int
646bpf_object__init_kversion(struct bpf_object *obj, 630bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
647 void *data, size_t size)
648{ 631{
649 __u32 kver; 632 __u32 kver;
650 633
@@ -654,8 +637,7 @@ bpf_object__init_kversion(struct bpf_object *obj,
654 } 637 }
655 memcpy(&kver, data, sizeof(kver)); 638 memcpy(&kver, data, sizeof(kver));
656 obj->kern_version = kver; 639 obj->kern_version = kver;
657 pr_debug("kernel version of %s is %x\n", obj->path, 640 pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
658 obj->kern_version);
659 return 0; 641 return 0;
660} 642}
661 643
@@ -811,8 +793,7 @@ bpf_object__init_internal_map(struct bpf_object *obj, struct bpf_map *map,
811 def->key_size = sizeof(int); 793 def->key_size = sizeof(int);
812 def->value_size = data->d_size; 794 def->value_size = data->d_size;
813 def->max_entries = 1; 795 def->max_entries = 1;
814 def->map_flags = type == LIBBPF_MAP_RODATA ? 796 def->map_flags = type == LIBBPF_MAP_RODATA ? BPF_F_RDONLY_PROG : 0;
815 BPF_F_RDONLY_PROG : 0;
816 if (data_buff) { 797 if (data_buff) {
817 *data_buff = malloc(data->d_size); 798 *data_buff = malloc(data->d_size);
818 if (!*data_buff) { 799 if (!*data_buff) {
@@ -827,8 +808,7 @@ bpf_object__init_internal_map(struct bpf_object *obj, struct bpf_map *map,
827 return 0; 808 return 0;
828} 809}
829 810
830static int 811static int bpf_object__init_maps(struct bpf_object *obj, int flags)
831bpf_object__init_maps(struct bpf_object *obj, int flags)
832{ 812{
833 int i, map_idx, map_def_sz = 0, nr_syms, nr_maps = 0, nr_maps_glob = 0; 813 int i, map_idx, map_def_sz = 0, nr_syms, nr_maps = 0, nr_maps_glob = 0;
834 bool strict = !(flags & MAPS_RELAX_COMPAT); 814 bool strict = !(flags & MAPS_RELAX_COMPAT);
@@ -930,6 +910,11 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
930 map_name = elf_strptr(obj->efile.elf, 910 map_name = elf_strptr(obj->efile.elf,
931 obj->efile.strtabidx, 911 obj->efile.strtabidx,
932 sym.st_name); 912 sym.st_name);
913 if (!map_name) {
914 pr_warning("failed to get map #%d name sym string for obj %s\n",
915 map_idx, obj->path);
916 return -LIBBPF_ERRNO__FORMAT;
917 }
933 918
934 obj->maps[map_idx].libbpf_type = LIBBPF_MAP_UNSPEC; 919 obj->maps[map_idx].libbpf_type = LIBBPF_MAP_UNSPEC;
935 obj->maps[map_idx].offset = sym.st_value; 920 obj->maps[map_idx].offset = sym.st_value;
@@ -1104,8 +1089,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
1104 1089
1105 /* Elf is corrupted/truncated, avoid calling elf_strptr. */ 1090 /* Elf is corrupted/truncated, avoid calling elf_strptr. */
1106 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) { 1091 if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
1107 pr_warning("failed to get e_shstrndx from %s\n", 1092 pr_warning("failed to get e_shstrndx from %s\n", obj->path);
1108 obj->path);
1109 return -LIBBPF_ERRNO__FORMAT; 1093 return -LIBBPF_ERRNO__FORMAT;
1110 } 1094 }
1111 1095
@@ -1226,7 +1210,7 @@ static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
1226 1210
1227 if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) { 1211 if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
1228 pr_warning("Corrupted ELF file: index of strtab invalid\n"); 1212 pr_warning("Corrupted ELF file: index of strtab invalid\n");
1229 return LIBBPF_ERRNO__FORMAT; 1213 return -LIBBPF_ERRNO__FORMAT;
1230 } 1214 }
1231 if (btf_data) { 1215 if (btf_data) {
1232 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size); 1216 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
@@ -1346,8 +1330,7 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
1346 size_t nr_maps = obj->nr_maps; 1330 size_t nr_maps = obj->nr_maps;
1347 int i, nrels; 1331 int i, nrels;
1348 1332
1349 pr_debug("collecting relocating info for: '%s'\n", 1333 pr_debug("collecting relocating info for: '%s'\n", prog->section_name);
1350 prog->section_name);
1351 nrels = shdr->sh_size / shdr->sh_entsize; 1334 nrels = shdr->sh_size / shdr->sh_entsize;
1352 1335
1353 prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels); 1336 prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
@@ -1372,9 +1355,7 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
1372 return -LIBBPF_ERRNO__FORMAT; 1355 return -LIBBPF_ERRNO__FORMAT;
1373 } 1356 }
1374 1357
1375 if (!gelf_getsym(symbols, 1358 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
1376 GELF_R_SYM(rel.r_info),
1377 &sym)) {
1378 pr_warning("relocation: symbol %"PRIx64" not found\n", 1359 pr_warning("relocation: symbol %"PRIx64" not found\n",
1379 GELF_R_SYM(rel.r_info)); 1360 GELF_R_SYM(rel.r_info));
1380 return -LIBBPF_ERRNO__FORMAT; 1361 return -LIBBPF_ERRNO__FORMAT;
@@ -1435,8 +1416,7 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
1435 if (maps[map_idx].libbpf_type != type) 1416 if (maps[map_idx].libbpf_type != type)
1436 continue; 1417 continue;
1437 if (type != LIBBPF_MAP_UNSPEC || 1418 if (type != LIBBPF_MAP_UNSPEC ||
1438 (type == LIBBPF_MAP_UNSPEC && 1419 maps[map_idx].offset == sym.st_value) {
1439 maps[map_idx].offset == sym.st_value)) {
1440 pr_debug("relocation: find map %zd (%s) for insn %u\n", 1420 pr_debug("relocation: find map %zd (%s) for insn %u\n",
1441 map_idx, maps[map_idx].name, insn_idx); 1421 map_idx, maps[map_idx].name, insn_idx);
1442 break; 1422 break;
@@ -1444,7 +1424,7 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
1444 } 1424 }
1445 1425
1446 if (map_idx >= nr_maps) { 1426 if (map_idx >= nr_maps) {
1447 pr_warning("bpf relocation: map_idx %d large than %d\n", 1427 pr_warning("bpf relocation: map_idx %d larger than %d\n",
1448 (int)map_idx, (int)nr_maps - 1); 1428 (int)map_idx, (int)nr_maps - 1);
1449 return -LIBBPF_ERRNO__RELOC; 1429 return -LIBBPF_ERRNO__RELOC;
1450 } 1430 }
@@ -1756,7 +1736,7 @@ bpf_object__create_maps(struct bpf_object *obj)
1756 create_attr.key_size = def->key_size; 1736 create_attr.key_size = def->key_size;
1757 create_attr.value_size = def->value_size; 1737 create_attr.value_size = def->value_size;
1758 create_attr.max_entries = def->max_entries; 1738 create_attr.max_entries = def->max_entries;
1759 create_attr.btf_fd = 0; 1739 create_attr.btf_fd = -1;
1760 create_attr.btf_key_type_id = 0; 1740 create_attr.btf_key_type_id = 0;
1761 create_attr.btf_value_type_id = 0; 1741 create_attr.btf_value_type_id = 0;
1762 if (bpf_map_type__is_map_in_map(def->type) && 1742 if (bpf_map_type__is_map_in_map(def->type) &&
@@ -1770,11 +1750,11 @@ bpf_object__create_maps(struct bpf_object *obj)
1770 } 1750 }
1771 1751
1772 *pfd = bpf_create_map_xattr(&create_attr); 1752 *pfd = bpf_create_map_xattr(&create_attr);
1773 if (*pfd < 0 && create_attr.btf_key_type_id) { 1753 if (*pfd < 0 && create_attr.btf_fd >= 0) {
1774 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); 1754 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1775 pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n", 1755 pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
1776 map->name, cp, errno); 1756 map->name, cp, errno);
1777 create_attr.btf_fd = 0; 1757 create_attr.btf_fd = -1;
1778 create_attr.btf_key_type_id = 0; 1758 create_attr.btf_key_type_id = 0;
1779 create_attr.btf_value_type_id = 0; 1759 create_attr.btf_value_type_id = 0;
1780 map->btf_key_type_id = 0; 1760 map->btf_key_type_id = 0;
@@ -1803,7 +1783,7 @@ err_out:
1803 } 1783 }
1804 } 1784 }
1805 1785
1806 pr_debug("create map %s: fd=%d\n", map->name, *pfd); 1786 pr_debug("created map %s: fd=%d\n", map->name, *pfd);
1807 } 1787 }
1808 1788
1809 return 0; 1789 return 0;
@@ -1824,18 +1804,14 @@ check_btf_ext_reloc_err(struct bpf_program *prog, int err,
1824 if (btf_prog_info) { 1804 if (btf_prog_info) {
1825 /* 1805 /*
1826 * Some info has already been found but has problem 1806 * Some info has already been found but has problem
1827 * in the last btf_ext reloc. Must have to error 1807 * in the last btf_ext reloc; we must error out.
1828 * out.
1829 */ 1808 */
1830 pr_warning("Error in relocating %s for sec %s.\n", 1809 pr_warning("Error in relocating %s for sec %s.\n",
1831 info_name, prog->section_name); 1810 info_name, prog->section_name);
1832 return err; 1811 return err;
1833 } 1812 }
1834 1813
1835 /* 1814 /* Failed to load the very first info. Ignore the rest. */
1836 * Have problem loading the very first info. Ignore
1837 * the rest.
1838 */
1839 pr_warning("Cannot find %s for main program sec %s. Ignore all %s.\n", 1815 pr_warning("Cannot find %s for main program sec %s. Ignore all %s.\n",
1840 info_name, prog->section_name, info_name); 1816 info_name, prog->section_name, info_name);
1841 return 0; 1817 return 0;
@@ -2039,9 +2015,7 @@ static int bpf_object__collect_reloc(struct bpf_object *obj)
2039 return -LIBBPF_ERRNO__RELOC; 2015 return -LIBBPF_ERRNO__RELOC;
2040 } 2016 }
2041 2017
2042 err = bpf_program__collect_reloc(prog, 2018 err = bpf_program__collect_reloc(prog, shdr, data, obj);
2043 shdr, data,
2044 obj);
2045 if (err) 2019 if (err)
2046 return err; 2020 return err;
2047 } 2021 }
@@ -2058,6 +2032,9 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
2058 char *log_buf; 2032 char *log_buf;
2059 int ret; 2033 int ret;
2060 2034
2035 if (!insns || !insns_cnt)
2036 return -EINVAL;
2037
2061 memset(&load_attr, 0, sizeof(struct bpf_load_program_attr)); 2038 memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
2062 load_attr.prog_type = prog->type; 2039 load_attr.prog_type = prog->type;
2063 load_attr.expected_attach_type = prog->expected_attach_type; 2040 load_attr.expected_attach_type = prog->expected_attach_type;
@@ -2068,7 +2045,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
2068 load_attr.license = license; 2045 load_attr.license = license;
2069 load_attr.kern_version = kern_version; 2046 load_attr.kern_version = kern_version;
2070 load_attr.prog_ifindex = prog->prog_ifindex; 2047 load_attr.prog_ifindex = prog->prog_ifindex;
2071 load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0; 2048 load_attr.prog_btf_fd = prog->btf_fd;
2072 load_attr.func_info = prog->func_info; 2049 load_attr.func_info = prog->func_info;
2073 load_attr.func_info_rec_size = prog->func_info_rec_size; 2050 load_attr.func_info_rec_size = prog->func_info_rec_size;
2074 load_attr.func_info_cnt = prog->func_info_cnt; 2051 load_attr.func_info_cnt = prog->func_info_cnt;
@@ -2076,8 +2053,7 @@ load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
2076 load_attr.line_info_rec_size = prog->line_info_rec_size; 2053 load_attr.line_info_rec_size = prog->line_info_rec_size;
2077 load_attr.line_info_cnt = prog->line_info_cnt; 2054 load_attr.line_info_cnt = prog->line_info_cnt;
2078 load_attr.log_level = prog->log_level; 2055 load_attr.log_level = prog->log_level;
2079 if (!load_attr.insns || !load_attr.insns_cnt) 2056 load_attr.prog_flags = prog->prog_flags;
2080 return -EINVAL;
2081 2057
2082retry_load: 2058retry_load:
2083 log_buf = malloc(log_buf_size); 2059 log_buf = malloc(log_buf_size);
@@ -2222,7 +2198,7 @@ static bool bpf_program__is_function_storage(struct bpf_program *prog,
2222} 2198}
2223 2199
2224static int 2200static int
2225bpf_object__load_progs(struct bpf_object *obj) 2201bpf_object__load_progs(struct bpf_object *obj, int log_level)
2226{ 2202{
2227 size_t i; 2203 size_t i;
2228 int err; 2204 int err;
@@ -2230,6 +2206,7 @@ bpf_object__load_progs(struct bpf_object *obj)
2230 for (i = 0; i < obj->nr_programs; i++) { 2206 for (i = 0; i < obj->nr_programs; i++) {
2231 if (bpf_program__is_function_storage(&obj->programs[i], obj)) 2207 if (bpf_program__is_function_storage(&obj->programs[i], obj))
2232 continue; 2208 continue;
2209 obj->programs[i].log_level |= log_level;
2233 err = bpf_program__load(&obj->programs[i], 2210 err = bpf_program__load(&obj->programs[i],
2234 obj->license, 2211 obj->license,
2235 obj->kern_version); 2212 obj->kern_version);
@@ -2356,11 +2333,9 @@ struct bpf_object *bpf_object__open_buffer(void *obj_buf,
2356 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx", 2333 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
2357 (unsigned long)obj_buf, 2334 (unsigned long)obj_buf,
2358 (unsigned long)obj_buf_sz); 2335 (unsigned long)obj_buf_sz);
2359 tmp_name[sizeof(tmp_name) - 1] = '\0';
2360 name = tmp_name; 2336 name = tmp_name;
2361 } 2337 }
2362 pr_debug("loading object '%s' from buffer\n", 2338 pr_debug("loading object '%s' from buffer\n", name);
2363 name);
2364 2339
2365 return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true); 2340 return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true);
2366} 2341}
@@ -2381,10 +2356,14 @@ int bpf_object__unload(struct bpf_object *obj)
2381 return 0; 2356 return 0;
2382} 2357}
2383 2358
2384int bpf_object__load(struct bpf_object *obj) 2359int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
2385{ 2360{
2361 struct bpf_object *obj;
2386 int err; 2362 int err;
2387 2363
2364 if (!attr)
2365 return -EINVAL;
2366 obj = attr->obj;
2388 if (!obj) 2367 if (!obj)
2389 return -EINVAL; 2368 return -EINVAL;
2390 2369
@@ -2397,7 +2376,7 @@ int bpf_object__load(struct bpf_object *obj)
2397 2376
2398 CHECK_ERR(bpf_object__create_maps(obj), err, out); 2377 CHECK_ERR(bpf_object__create_maps(obj), err, out);
2399 CHECK_ERR(bpf_object__relocate(obj), err, out); 2378 CHECK_ERR(bpf_object__relocate(obj), err, out);
2400 CHECK_ERR(bpf_object__load_progs(obj), err, out); 2379 CHECK_ERR(bpf_object__load_progs(obj, attr->log_level), err, out);
2401 2380
2402 return 0; 2381 return 0;
2403out: 2382out:
@@ -2406,6 +2385,15 @@ out:
2406 return err; 2385 return err;
2407} 2386}
2408 2387
2388int bpf_object__load(struct bpf_object *obj)
2389{
2390 struct bpf_object_load_attr attr = {
2391 .obj = obj,
2392 };
2393
2394 return bpf_object__load_xattr(&attr);
2395}
2396
2409static int check_path(const char *path) 2397static int check_path(const char *path)
2410{ 2398{
2411 char *cp, errmsg[STRERR_BUFSIZE]; 2399 char *cp, errmsg[STRERR_BUFSIZE];
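The new bpf_object__load_xattr() path above ORs a caller-supplied log_level
into every program's own log_level before loading. A minimal caller sketch
(hypothetical object file name; error handling elided):

	struct bpf_object *obj = bpf_object__open("prog.o");
	struct bpf_object_load_attr attr = {
		.obj = obj,
		.log_level = 1,	/* request basic verifier log */
	};

	if (bpf_object__load_xattr(&attr))
		fprintf(stderr, "load failed\n");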
@@ -3458,9 +3446,7 @@ bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
3458 3446
3459long libbpf_get_error(const void *ptr) 3447long libbpf_get_error(const void *ptr)
3460{ 3448{
3461 if (IS_ERR(ptr)) 3449 return PTR_ERR_OR_ZERO(ptr);
3462 return PTR_ERR(ptr);
3463 return 0;
3464} 3450}
3465 3451
3466int bpf_prog_load(const char *file, enum bpf_prog_type type, 3452int bpf_prog_load(const char *file, enum bpf_prog_type type,
@@ -3521,6 +3507,7 @@ int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
3521 expected_attach_type); 3507 expected_attach_type);
3522 3508
3523 prog->log_level = attr->log_level; 3509 prog->log_level = attr->log_level;
3510 prog->prog_flags = attr->prog_flags;
3524 if (!first_prog) 3511 if (!first_prog)
3525 first_prog = prog; 3512 first_prog = prog;
3526 } 3513 }
diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h
index c5ff00515ce7..1af0d48178c8 100644
--- a/tools/lib/bpf/libbpf.h
+++ b/tools/lib/bpf/libbpf.h
@@ -89,8 +89,14 @@ LIBBPF_API int bpf_object__unpin_programs(struct bpf_object *obj,
89LIBBPF_API int bpf_object__pin(struct bpf_object *object, const char *path); 89LIBBPF_API int bpf_object__pin(struct bpf_object *object, const char *path);
90LIBBPF_API void bpf_object__close(struct bpf_object *object); 90LIBBPF_API void bpf_object__close(struct bpf_object *object);
91 91
92struct bpf_object_load_attr {
93 struct bpf_object *obj;
94 int log_level;
95};
96
92/* Load/unload object into/from kernel */ 97/* Load/unload object into/from kernel */
93LIBBPF_API int bpf_object__load(struct bpf_object *obj); 98LIBBPF_API int bpf_object__load(struct bpf_object *obj);
99LIBBPF_API int bpf_object__load_xattr(struct bpf_object_load_attr *attr);
94LIBBPF_API int bpf_object__unload(struct bpf_object *obj); 100LIBBPF_API int bpf_object__unload(struct bpf_object *obj);
95LIBBPF_API const char *bpf_object__name(struct bpf_object *obj); 101LIBBPF_API const char *bpf_object__name(struct bpf_object *obj);
96LIBBPF_API unsigned int bpf_object__kversion(struct bpf_object *obj); 102LIBBPF_API unsigned int bpf_object__kversion(struct bpf_object *obj);
@@ -320,6 +326,7 @@ struct bpf_prog_load_attr {
320 enum bpf_attach_type expected_attach_type; 326 enum bpf_attach_type expected_attach_type;
321 int ifindex; 327 int ifindex;
322 int log_level; 328 int log_level;
329 int prog_flags;
323}; 330};
324 331
325LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr, 332LIBBPF_API int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map
index 673001787cba..46dcda89df21 100644
--- a/tools/lib/bpf/libbpf.map
+++ b/tools/lib/bpf/libbpf.map
@@ -164,3 +164,12 @@ LIBBPF_0.0.3 {
164 bpf_map_freeze; 164 bpf_map_freeze;
165 btf__finalize_data; 165 btf__finalize_data;
166} LIBBPF_0.0.2; 166} LIBBPF_0.0.2;
167
168LIBBPF_0.0.4 {
169 global:
170 btf_dump__dump_type;
171 btf_dump__free;
172 btf_dump__new;
173 btf__parse_elf;
174 bpf_object__load_xattr;
175} LIBBPF_0.0.3;
diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h
index f3025b4d90e1..850f7bdec5cb 100644
--- a/tools/lib/bpf/libbpf_internal.h
+++ b/tools/lib/bpf/libbpf_internal.h
@@ -9,6 +9,8 @@
9#ifndef __LIBBPF_LIBBPF_INTERNAL_H 9#ifndef __LIBBPF_LIBBPF_INTERNAL_H
10#define __LIBBPF_LIBBPF_INTERNAL_H 10#define __LIBBPF_LIBBPF_INTERNAL_H
11 11
12#include "libbpf.h"
13
12#define BTF_INFO_ENC(kind, kind_flag, vlen) \ 14#define BTF_INFO_ENC(kind, kind_flag, vlen) \
13 ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN)) 15 ((!!(kind_flag) << 31) | ((kind) << 24) | ((vlen) & BTF_MAX_VLEN))
14#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type) 16#define BTF_TYPE_ENC(name, info, size_or_type) (name), (info), (size_or_type)