author    Daniel Borkmann <daniel@iogearbox.net>    2019-04-23 18:45:56 -0400
committer Alexei Starovoitov <ast@kernel.org>       2019-04-25 16:47:29 -0400
commit    8837fe5dd09bd0331b3e2d1b6e400b7fcda8963a (patch)
tree      7582c3fbf5373b27befd235747ac44a58d083514
parent    a21b48a2f2afa53bbc989cce6fc81edbed39eab0 (diff)
bpf, libbpf: handle old kernels more graceful wrt global data sections
Andrii reported a corner case where global static data is present in the BPF
ELF file in the form of a .data/.bss/.rodata section, but without any
relocations to it. Such programs could be loaded before commit d859900c4c56
("bpf, libbpf: support global data/bss/rodata sections"), whereas afterwards
loading fails if the kernel lacks support for global data sections.

Add a probing mechanism which skips setting up the libbpf internal maps when
kernel support is missing. If relocation entries into those sections are
present, the load attempt is aborted instead.

Fixes: d859900c4c56 ("bpf, libbpf: support global data/bss/rodata sections")
Reported-by: Andrii Nakryiko <andriin@fb.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
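For illustration only (not part of the patch), a hypothetical restricted-C
program that hits this corner case could look as follows: it keeps an
otherwise unreferenced static object alive in .bss, so the ELF ends up with a
global data section but no relocation entries pointing into it.

  /* Hypothetical example: an unreferenced static kept in .bss via the
   * "used" attribute, so the object file contains a global data section
   * without any relocations against it. */
  #include <linux/bpf.h>

  static int scratch __attribute__((used));  /* emitted into .bss, never accessed */

  __attribute__((section("socket"), used))
  int dummy_filter(struct __sk_buff *skb)
  {
          return 0;
  }

  char _license[] __attribute__((section("license"), used)) = "GPL";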
 tools/lib/bpf/libbpf.c | 99 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 86 insertions(+), 13 deletions(-)
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index d817bf20f3d6..85315dedbde4 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -126,6 +126,8 @@ static inline __u64 ptr_to_u64(const void *ptr)
 struct bpf_capabilities {
 	/* v4.14: kernel support for program & map names. */
 	__u32 name:1;
+	/* v5.2: kernel support for global data sections. */
+	__u32 global_data:1;
 };
 
 /*
@@ -854,12 +856,15 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
 	 *
 	 * TODO: Detect array of map and report error.
 	 */
-	if (obj->efile.data_shndx >= 0)
-		nr_maps_glob++;
-	if (obj->efile.rodata_shndx >= 0)
-		nr_maps_glob++;
-	if (obj->efile.bss_shndx >= 0)
-		nr_maps_glob++;
+	if (obj->caps.global_data) {
+		if (obj->efile.data_shndx >= 0)
+			nr_maps_glob++;
+		if (obj->efile.rodata_shndx >= 0)
+			nr_maps_glob++;
+		if (obj->efile.bss_shndx >= 0)
+			nr_maps_glob++;
+	}
+
 	for (i = 0; data && i < nr_syms; i++) {
 		GElf_Sym sym;
 
@@ -971,6 +976,9 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
 		map_idx++;
 	}
 
+	if (!obj->caps.global_data)
+		goto finalize;
+
 	/*
 	 * Populate rest of obj->maps with libbpf internal maps.
 	 */
@@ -988,6 +996,7 @@ bpf_object__init_maps(struct bpf_object *obj, int flags)
 	ret = bpf_object__init_internal_map(obj, &obj->maps[map_idx++],
 					    LIBBPF_MAP_BSS,
 					    obj->efile.bss, NULL);
+finalize:
 	if (!ret)
 		qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]),
 		      compare_bpf_map);
@@ -1333,11 +1342,17 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
 		if (bpf_object__shndx_is_maps(obj, shdr_idx) ||
 		    bpf_object__shndx_is_data(obj, shdr_idx)) {
 			type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
-			if (type != LIBBPF_MAP_UNSPEC &&
-			    GELF_ST_BIND(sym.st_info) == STB_GLOBAL) {
-				pr_warning("bpf: relocation: not yet supported relo for non-static global \'%s\' variable found in insns[%d].code 0x%x\n",
-					   name, insn_idx, insns[insn_idx].code);
-				return -LIBBPF_ERRNO__RELOC;
+			if (type != LIBBPF_MAP_UNSPEC) {
+				if (GELF_ST_BIND(sym.st_info) == STB_GLOBAL) {
+					pr_warning("bpf: relocation: not yet supported relo for non-static global \'%s\' variable found in insns[%d].code 0x%x\n",
+						   name, insn_idx, insns[insn_idx].code);
+					return -LIBBPF_ERRNO__RELOC;
+				}
+				if (!obj->caps.global_data) {
+					pr_warning("bpf: relocation: kernel does not support global \'%s\' variable access in insns[%d]\n",
+						   name, insn_idx);
+					return -LIBBPF_ERRNO__RELOC;
+				}
 			}
 
 			for (map_idx = 0; map_idx < nr_maps; map_idx++) {
@@ -1496,9 +1511,67 @@ bpf_object__probe_name(struct bpf_object *obj)
 }
 
 static int
+bpf_object__probe_global_data(struct bpf_object *obj)
+{
+	struct bpf_load_program_attr prg_attr;
+	struct bpf_create_map_attr map_attr;
+	char *cp, errmsg[STRERR_BUFSIZE];
+	struct bpf_insn insns[] = {
+		BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
+		BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
+		BPF_MOV64_IMM(BPF_REG_0, 0),
+		BPF_EXIT_INSN(),
+	};
+	int ret, map;
+
+	memset(&map_attr, 0, sizeof(map_attr));
+	map_attr.map_type = BPF_MAP_TYPE_ARRAY;
+	map_attr.key_size = sizeof(int);
+	map_attr.value_size = 32;
+	map_attr.max_entries = 1;
+
+	map = bpf_create_map_xattr(&map_attr);
+	if (map < 0) {
+		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
+		pr_warning("Error in %s():%s(%d). Couldn't create simple array map.\n",
+			   __func__, cp, errno);
+		return -errno;
+	}
+
+	insns[0].imm = map;
+
+	memset(&prg_attr, 0, sizeof(prg_attr));
+	prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
+	prg_attr.insns = insns;
+	prg_attr.insns_cnt = ARRAY_SIZE(insns);
+	prg_attr.license = "GPL";
+
+	ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
+	if (ret >= 0) {
+		obj->caps.global_data = 1;
+		close(ret);
+	}
+
+	close(map);
+	return 0;
+}
+
+static int
 bpf_object__probe_caps(struct bpf_object *obj)
 {
-	return bpf_object__probe_name(obj);
+	int (*probe_fn[])(struct bpf_object *obj) = {
+		bpf_object__probe_name,
+		bpf_object__probe_global_data,
+	};
+	int i, ret;
+
+	for (i = 0; i < ARRAY_SIZE(probe_fn); i++) {
+		ret = probe_fn[i](obj);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
 }
 
 static int
@@ -2100,6 +2173,7 @@ __bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
 
 	CHECK_ERR(bpf_object__elf_init(obj), err, out);
 	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
+	CHECK_ERR(bpf_object__probe_caps(obj), err, out);
 	CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
 	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
 	CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);
@@ -2193,7 +2267,6 @@ int bpf_object__load(struct bpf_object *obj)
 
 	obj->loaded = true;
 
-	CHECK_ERR(bpf_object__probe_caps(obj), err, out);
 	CHECK_ERR(bpf_object__create_maps(obj), err, out);
 	CHECK_ERR(bpf_object__relocate(obj), err, out);
 	CHECK_ERR(bpf_object__load_progs(obj), err, out);
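
As a usage-level sketch (not part of the patch; "prog_global_data.o" is a
hypothetical object file, and the calls assume the libbpf API of this era),
the effect of moving the probe to open time can be observed from an ordinary
loader: on a kernel without global data support, open and load succeed again
as long as the object carries no relocations into .data/.bss/.rodata.

  /* Sketch only; "prog_global_data.o" is a hypothetical object file. */
  #include <stdio.h>
  #include <bpf/libbpf.h>

  int main(void)
  {
          struct bpf_object *obj = bpf_object__open("prog_global_data.o");

          if (libbpf_get_error(obj)) {
                  fprintf(stderr, "failed to open object\n");
                  return 1;
          }

          /* With this patch the capability probe runs at open time; on an
           * old kernel the internal .data/.bss/.rodata maps are skipped and
           * the load below succeeds, matching pre-d859900c4c56 behaviour. */
          if (bpf_object__load(obj)) {
                  fprintf(stderr, "failed to load object\n");
                  bpf_object__close(obj);
                  return 1;
          }

          bpf_object__close(obj);
          return 0;
  }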