author     David S. Miller <davem@davemloft.net>  2017-12-27 20:40:32 -0500
committer  David S. Miller <davem@davemloft.net>  2017-12-27 20:40:32 -0500
commit     fcffe2edbd390cad499b27d20512ef000d7ecf54 (patch)
tree       13120e1efcf0ad226785b721f4b38272ffdd2028
parent     4f83435ad777358d9cdc138868feebbe2a23f577 (diff)
parent     624588d9d6cc0a1a270a65fb4d5220f1ceddcf38 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
pull-request: bpf-next 2017-12-28

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) Fix incorrect state pruning related to recognition of zero initialized
   stack slots, where stacksafe exploration would mistakenly return a
   positive pruning verdict too early ignoring other slots, from Gianluca.

2) Various BPF to BPF calls related follow-up fixes. Fix an off-by-one
   in maximum call depth check, and rework maximum stack depth tracking
   logic to fix a bypass of the total stack size check reported by Jann.
   Also fix a bug in arm64 JIT where prog->jited_len was uninitialized.
   Addition of various test cases to BPF selftests, from Alexei.

3) Addition of a BPF selftest to test_verifier that is related to BPF to
   BPF calls which demonstrates a late caller stack size increase and
   thus out of bounds access. Fixed above in 2). Test case from Jann.

4) Addition of correlating BPF helper calls, BPF to BPF calls as well as
   BPF maps to bpftool xlated dump in order to allow for better BPF
   program introspection and debugging, from Daniel.

5) Fixing several bugs in BPF to BPF calls kallsyms handling in order to
   get it actually to work for subprogs, from Daniel.

6) Extending sparc64 JIT support for BPF to BPF calls and fix a couple
   of build errors for libbpf on sparc64, from David.

7) Allow narrower context access for BPF dev cgroup typed programs in
   order to adapt to LLVM code generation. Also adjust memlock rlimit
   in the test_dev_cgroup BPF selftest, from Yonghong.

8) Add netdevsim Kconfig entry to BPF selftests since test_offload.py
   relies on netdevsim device being available, from Jakub.

9) Reduce scope of xdp_do_generic_redirect_map() to being static,
   from Xiongwei.

10) Minor cleanups and spelling fixes in BPF verifier, from Colin.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
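For item 4), the effect is easiest to see in a bpftool dump. The transcript below is illustrative only: the program id, tag, map id and call offsets are invented, but the line shapes follow the print_call()/print_imm() callbacks added in this series (bpf-to-bpf calls resolved to kallsyms names, helpers printed by name, maps by id):

  $ bpftool prog dump xlated id 1
     0: (85) call pc+7#bpf_prog_5f76847930402518_F
     1: (b7) r0 = 0
     2: (95) exit
     ...
     8: (18) r1 = map[id:5]
    10: (85) call bpf_map_lookup_elem#-71120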
-rw-r--r--  arch/arm64/net/bpf_jit_comp.c                    1
-rw-r--r--  arch/sparc/net/bpf_jit_comp_64.c                44
-rw-r--r--  include/linux/bpf_verifier.h                     1
-rw-r--r--  include/linux/filter.h                           9
-rw-r--r--  include/uapi/linux/bpf.h                         3
-rw-r--r--  kernel/bpf/cgroup.c                             15
-rw-r--r--  kernel/bpf/core.c                                4
-rw-r--r--  kernel/bpf/disasm.c                             65
-rw-r--r--  kernel/bpf/disasm.h                             29
-rw-r--r--  kernel/bpf/syscall.c                            93
-rw-r--r--  kernel/bpf/verifier.c                          126
-rw-r--r--  net/core/filter.c                                5
-rw-r--r--  tools/bpf/bpftool/prog.c                       181
-rw-r--r--  tools/lib/bpf/libbpf.c                           5
-rw-r--r--  tools/testing/selftests/bpf/config               1
-rw-r--r--  tools/testing/selftests/bpf/test_dev_cgroup.c    9
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c    241
17 files changed, 764 insertions, 68 deletions
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 396490cf7316..acaa935ed977 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -897,6 +897,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		image_ptr = jit_data->image;
 		header = jit_data->header;
 		extra_pass = true;
+		image_size = sizeof(u32) * ctx.idx;
 		goto skip_init_ctx;
 	}
 	memset(&ctx, 0, sizeof(ctx));
diff --git a/arch/sparc/net/bpf_jit_comp_64.c b/arch/sparc/net/bpf_jit_comp_64.c
index 22aff21fa44d..635fdefd4ae2 100644
--- a/arch/sparc/net/bpf_jit_comp_64.c
+++ b/arch/sparc/net/bpf_jit_comp_64.c
@@ -1509,11 +1509,19 @@ static void jit_fill_hole(void *area, unsigned int size)
 	*ptr++ = 0x91d02005; /* ta 5 */
 }
 
+struct sparc64_jit_data {
+	struct bpf_binary_header *header;
+	u8 *image;
+	struct jit_ctx ctx;
+};
+
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
 	struct bpf_prog *tmp, *orig_prog = prog;
+	struct sparc64_jit_data *jit_data;
 	struct bpf_binary_header *header;
 	bool tmp_blinded = false;
+	bool extra_pass = false;
 	struct jit_ctx ctx;
 	u32 image_size;
 	u8 *image_ptr;
@@ -1533,13 +1541,31 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		prog = tmp;
 	}
 
+	jit_data = prog->aux->jit_data;
+	if (!jit_data) {
+		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+		if (!jit_data) {
+			prog = orig_prog;
+			goto out;
+		}
+		prog->aux->jit_data = jit_data;
+	}
+	if (jit_data->ctx.offset) {
+		ctx = jit_data->ctx;
+		image_ptr = jit_data->image;
+		header = jit_data->header;
+		extra_pass = true;
+		image_size = sizeof(u32) * ctx.idx;
+		goto skip_init_ctx;
+	}
+
 	memset(&ctx, 0, sizeof(ctx));
 	ctx.prog = prog;
 
 	ctx.offset = kcalloc(prog->len, sizeof(unsigned int), GFP_KERNEL);
 	if (ctx.offset == NULL) {
 		prog = orig_prog;
-		goto out;
+		goto out_off;
 	}
 
 	/* Fake pass to detect features used, and get an accurate assessment
@@ -1562,7 +1588,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	}
 
 	ctx.image = (u32 *)image_ptr;
-
+skip_init_ctx:
 	for (pass = 1; pass < 3; pass++) {
 		ctx.idx = 0;
 
@@ -1593,14 +1619,24 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 
 	bpf_flush_icache(header, (u8 *)header + (header->pages * PAGE_SIZE));
 
-	bpf_jit_binary_lock_ro(header);
+	if (!prog->is_func || extra_pass) {
+		bpf_jit_binary_lock_ro(header);
+	} else {
+		jit_data->ctx = ctx;
+		jit_data->image = image_ptr;
+		jit_data->header = header;
+	}
 
 	prog->bpf_func = (void *)ctx.image;
 	prog->jited = 1;
 	prog->jited_len = image_size;
 
+	if (!prog->is_func || extra_pass) {
 out_off:
 	kfree(ctx.offset);
+	kfree(jit_data);
+	prog->aux->jit_data = NULL;
+	}
 out:
 	if (tmp_blinded)
 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
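Both JIT hunks above implement the same two-pass contract that bpf-to-bpf calls impose: JIT once while call addresses are still provisional, then re-emit after the verifier has JITed every subprogram. A self-contained toy model of that flow (every name here is invented for illustration, none of these are kernel APIs):

/* Pass one: emit, keep the image writable, cache the context.
 * Extra pass: re-emit over the same image with final call targets,
 * then seal it read-only.
 */
#include <stdbool.h>
#include <stdlib.h>

struct toy_ctx {
	int	 idx;		/* number of emitted insns */
	int	*offset;	/* non-NULL marks "pass one already done" */
};

struct toy_prog {
	int		 len;
	bool		 is_func;	/* subprog of a multi-func prog? */
	struct toy_ctx	 ctx;
	unsigned char	*image;
};

static void toy_emit(struct toy_prog *p)
{
	for (p->ctx.idx = 0; p->ctx.idx < p->len; p->ctx.idx++)
		p->image[p->ctx.idx] = 0;	/* stand-in for real insns */
}

static void toy_lock_ro(unsigned char *image)
{
	(void)image;	/* set_memory_ro()/mprotect() in real life */
}

unsigned char *toy_jit(struct toy_prog *p, bool extra_pass)
{
	if (extra_pass && p->ctx.offset) {
		toy_emit(p);		/* call targets are final now */
		toy_lock_ro(p->image);	/* seal only on the last pass */
		return p->image;
	}

	p->ctx.offset = calloc(p->len, sizeof(int));
	if (!p->ctx.offset)
		return NULL;
	p->image = malloc(p->len);
	if (!p->image)
		return NULL;
	toy_emit(p);
	if (!p->is_func)	/* single function: no extra pass coming */
		toy_lock_ro(p->image);
	return p->image;
}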
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index c009e472f647..883a35d50cd5 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -194,6 +194,7 @@ struct bpf_verifier_env {
 	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
 	struct bpf_verifer_log log;
 	u32 subprog_starts[BPF_MAX_SUBPROGS];
+	/* computes the stack depth of each bpf function */
 	u16 subprog_stack_depth[BPF_MAX_SUBPROGS + 1];
 	u32 subprog_cnt;
 };
diff --git a/include/linux/filter.h b/include/linux/filter.h
index e872b4ebaa57..2b0df2703671 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -18,6 +18,7 @@
 #include <linux/capability.h>
 #include <linux/cryptohash.h>
 #include <linux/set_memory.h>
+#include <linux/kallsyms.h>
 
 #include <net/sch_generic.h>
 
@@ -724,6 +725,14 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
 void bpf_jit_compile(struct bpf_prog *prog);
 bool bpf_helper_changes_pkt_data(void *func);
 
+static inline bool bpf_dump_raw_ok(void)
+{
+	/* Reconstruction of call-sites is dependent on kallsyms,
+	 * thus make dump the same restriction.
+	 */
+	return kallsyms_show_value() == 1;
+}
+
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 				       const struct bpf_insn *patch, u32 len);
 
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index d01f1cb3cfc0..69eabfcb9bdb 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1012,7 +1012,8 @@ struct bpf_perf_event_value {
 #define BPF_DEVCG_DEV_CHAR	(1ULL << 1)
 
 struct bpf_cgroup_dev_ctx {
-	__u32 access_type; /* (access << 16) | type */
+	/* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
+	__u32 access_type;
 	__u32 major;
 	__u32 minor;
 };
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index b789ab78d28f..c1c0b60d3f2f 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -568,6 +568,8 @@ static bool cgroup_dev_is_valid_access(int off, int size,
 				       enum bpf_access_type type,
 				       struct bpf_insn_access_aux *info)
 {
+	const int size_default = sizeof(__u32);
+
 	if (type == BPF_WRITE)
 		return false;
 
@@ -576,8 +578,17 @@ static bool cgroup_dev_is_valid_access(int off, int size,
 	/* The verifier guarantees that size > 0. */
 	if (off % size != 0)
 		return false;
-	if (size != sizeof(__u32))
-		return false;
+
+	switch (off) {
+	case bpf_ctx_range(struct bpf_cgroup_dev_ctx, access_type):
+		bpf_ctx_record_field_size(info, size_default);
+		if (!bpf_ctx_narrow_access_ok(off, size, size_default))
+			return false;
+		break;
+	default:
+		if (size != size_default)
+			return false;
+	}
 
 	return true;
 }
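Seen from the program side, the change above permits what LLVM likes to generate for the packed access_type field: sub-word context loads. A minimal sketch, modeled on the dev cgroup selftest rather than copied from this series:

#include <linux/bpf.h>

/* The two masked reads below are typically compiled into 2-byte loads
 * of bpf_cgroup_dev_ctx::access_type; with this patch the verifier
 * accepts them instead of insisting on full 4-byte loads.
 */
int bpf_prog(struct bpf_cgroup_dev_ctx *ctx)
{
	short type = ctx->access_type & 0xFFFF;	/* BPF_DEVCG_DEV_* */
	short access = ctx->access_type >> 16;	/* BPF_DEVCG_ACC_* */

	if (type == BPF_DEVCG_DEV_CHAR && (access & BPF_DEVCG_ACC_READ))
		return 1;	/* allow */
	return 0;		/* deny */
}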
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 768e0a02d8c8..70a534549cd3 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -771,7 +771,9 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
 
 /* Base function for offset calculation. Needs to go into .text section,
  * therefore keeping it non-static as well; will also be used by JITs
- * anyway later on, so do not let the compiler omit it.
+ * anyway later on, so do not let the compiler omit it. This also needs
+ * to go into kallsyms for correlation from e.g. bpftool, so naming
+ * must not change.
  */
 noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
 {
diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c
index 883f88fa5bfc..8740406df2cd 100644
--- a/kernel/bpf/disasm.c
+++ b/kernel/bpf/disasm.c
@@ -21,10 +21,39 @@ static const char * const func_id_str[] = {
 };
 #undef __BPF_FUNC_STR_FN
 
-const char *func_id_name(int id)
+static const char *__func_get_name(const struct bpf_insn_cbs *cbs,
+				   const struct bpf_insn *insn,
+				   char *buff, size_t len)
 {
 	BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID);
 
+	if (insn->src_reg != BPF_PSEUDO_CALL &&
+	    insn->imm >= 0 && insn->imm < __BPF_FUNC_MAX_ID &&
+	    func_id_str[insn->imm])
+		return func_id_str[insn->imm];
+
+	if (cbs && cbs->cb_call)
+		return cbs->cb_call(cbs->private_data, insn);
+
+	if (insn->src_reg == BPF_PSEUDO_CALL)
+		snprintf(buff, len, "%+d", insn->imm);
+
+	return buff;
+}
+
+static const char *__func_imm_name(const struct bpf_insn_cbs *cbs,
+				   const struct bpf_insn *insn,
+				   u64 full_imm, char *buff, size_t len)
+{
+	if (cbs && cbs->cb_imm)
+		return cbs->cb_imm(cbs->private_data, insn, full_imm);
+
+	snprintf(buff, len, "0x%llx", (unsigned long long)full_imm);
+	return buff;
+}
+
+const char *func_id_name(int id)
+{
 	if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id])
 		return func_id_str[id];
 	else
@@ -83,7 +112,7 @@ static const char *const bpf_jmp_string[16] = {
 	[BPF_EXIT >> 4] = "exit",
 };
 
-static void print_bpf_end_insn(bpf_insn_print_cb verbose,
+static void print_bpf_end_insn(bpf_insn_print_t verbose,
 			       struct bpf_verifier_env *env,
 			       const struct bpf_insn *insn)
 {
@@ -92,9 +121,12 @@ static void print_bpf_end_insn(bpf_insn_print_cb verbose,
 		insn->imm, insn->dst_reg);
 }
 
-void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
-		    const struct bpf_insn *insn, bool allow_ptr_leaks)
+void print_bpf_insn(const struct bpf_insn_cbs *cbs,
+		    struct bpf_verifier_env *env,
+		    const struct bpf_insn *insn,
+		    bool allow_ptr_leaks)
 {
+	const bpf_insn_print_t verbose = cbs->cb_print;
 	u8 class = BPF_CLASS(insn->code);
 
 	if (class == BPF_ALU || class == BPF_ALU64) {
@@ -175,12 +207,15 @@ void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
 		 */
 		u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
 		bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD;
+		char tmp[64];
 
 		if (map_ptr && !allow_ptr_leaks)
 			imm = 0;
 
-		verbose(env, "(%02x) r%d = 0x%llx\n", insn->code,
-			insn->dst_reg, (unsigned long long)imm);
+		verbose(env, "(%02x) r%d = %s\n",
+			insn->code, insn->dst_reg,
+			__func_imm_name(cbs, insn, imm,
+					tmp, sizeof(tmp)));
 	} else {
 		verbose(env, "BUG_ld_%02x\n", insn->code);
 		return;
@@ -189,12 +224,20 @@ void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
 		u8 opcode = BPF_OP(insn->code);
 
 		if (opcode == BPF_CALL) {
-			if (insn->src_reg == BPF_PSEUDO_CALL)
-				verbose(env, "(%02x) call pc%+d\n", insn->code,
-					insn->imm);
-			else
+			char tmp[64];
+
+			if (insn->src_reg == BPF_PSEUDO_CALL) {
+				verbose(env, "(%02x) call pc%s\n",
+					insn->code,
+					__func_get_name(cbs, insn,
+							tmp, sizeof(tmp)));
+			} else {
+				strcpy(tmp, "unknown");
 				verbose(env, "(%02x) call %s#%d\n", insn->code,
-					func_id_name(insn->imm), insn->imm);
+					__func_get_name(cbs, insn,
+							tmp, sizeof(tmp)),
+					insn->imm);
+			}
 		} else if (insn->code == (BPF_JMP | BPF_JA)) {
 			verbose(env, "(%02x) goto pc%+d\n",
 				insn->code, insn->off);
diff --git a/kernel/bpf/disasm.h b/kernel/bpf/disasm.h
index 8de977e420b6..e0857d016f89 100644
--- a/kernel/bpf/disasm.h
+++ b/kernel/bpf/disasm.h
@@ -17,16 +17,35 @@
 #include <linux/bpf.h>
 #include <linux/kernel.h>
 #include <linux/stringify.h>
+#ifndef __KERNEL__
+#include <stdio.h>
+#include <string.h>
+#endif
+
+struct bpf_verifier_env;
 
 extern const char *const bpf_alu_string[16];
 extern const char *const bpf_class_string[8];
 
 const char *func_id_name(int id);
 
-struct bpf_verifier_env;
-typedef void (*bpf_insn_print_cb)(struct bpf_verifier_env *env,
-				  const char *, ...);
-void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
-		    const struct bpf_insn *insn, bool allow_ptr_leaks);
+typedef void (*bpf_insn_print_t)(struct bpf_verifier_env *env,
+				 const char *, ...);
+typedef const char *(*bpf_insn_revmap_call_t)(void *private_data,
+					      const struct bpf_insn *insn);
+typedef const char *(*bpf_insn_print_imm_t)(void *private_data,
+					    const struct bpf_insn *insn,
+					    __u64 full_imm);
+
+struct bpf_insn_cbs {
+	bpf_insn_print_t	cb_print;
+	bpf_insn_revmap_call_t	cb_call;
+	bpf_insn_print_imm_t	cb_imm;
+	void			*private_data;
+};
 
+void print_bpf_insn(const struct bpf_insn_cbs *cbs,
+		    struct bpf_verifier_env *env,
+		    const struct bpf_insn *insn,
+		    bool allow_ptr_leaks);
 #endif
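A minimal consumer of this callback interface (a hypothetical user-space snippet, not part of the patch): only cb_print is required; with cb_call and cb_imm left NULL, print_bpf_insn() falls back to the raw "pc%+d" targets and hex immediates implemented in __func_get_name()/__func_imm_name() above.

#include <stdarg.h>
#include <stdio.h>
#include "disasm.h"

static void stdout_print(struct bpf_verifier_env *env, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	vprintf(fmt, args);	/* env stays NULL outside the kernel */
	va_end(args);
}

static void dump_insns(const struct bpf_insn *insn, unsigned int cnt)
{
	const struct bpf_insn_cbs cbs = {
		.cb_print = stdout_print,
	};
	unsigned int i;

	for (i = 0; i < cnt; i++) {
		print_bpf_insn(&cbs, NULL, &insn[i], true);
		/* ld_imm64 spans two insn slots; skip the second half */
		if (insn[i].code == (BPF_LD | BPF_IMM | BPF_DW))
			i++;
	}
}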
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index e2e1c78ce1dc..007802c5ca7d 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -937,10 +937,16 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 {
 	if (atomic_dec_and_test(&prog->aux->refcnt)) {
+		int i;
+
 		trace_bpf_prog_put_rcu(prog);
 		/* bpf_prog_free_id() must be called first */
 		bpf_prog_free_id(prog, do_idr_lock);
+
+		for (i = 0; i < prog->aux->func_cnt; i++)
+			bpf_prog_kallsyms_del(prog->aux->func[i]);
 		bpf_prog_kallsyms_del(prog);
+
 		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
 	}
 }
@@ -1552,6 +1558,67 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
 	return fd;
 }
 
+static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
+					      unsigned long addr)
+{
+	int i;
+
+	for (i = 0; i < prog->aux->used_map_cnt; i++)
+		if (prog->aux->used_maps[i] == (void *)addr)
+			return prog->aux->used_maps[i];
+	return NULL;
+}
+
+static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
+{
+	const struct bpf_map *map;
+	struct bpf_insn *insns;
+	u64 imm;
+	int i;
+
+	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
+			GFP_USER);
+	if (!insns)
+		return insns;
+
+	for (i = 0; i < prog->len; i++) {
+		if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) {
+			insns[i].code = BPF_JMP | BPF_CALL;
+			insns[i].imm = BPF_FUNC_tail_call;
+			/* fall-through */
+		}
+		if (insns[i].code == (BPF_JMP | BPF_CALL) ||
+		    insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) {
+			if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS))
+				insns[i].code = BPF_JMP | BPF_CALL;
+			if (!bpf_dump_raw_ok())
+				insns[i].imm = 0;
+			continue;
+		}
+
+		if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW))
+			continue;
+
+		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
+		map = bpf_map_from_imm(prog, imm);
+		if (map) {
+			insns[i].src_reg = BPF_PSEUDO_MAP_FD;
+			insns[i].imm = map->id;
+			insns[i + 1].imm = 0;
+			continue;
+		}
+
+		if (!bpf_dump_raw_ok() &&
+		    imm == (unsigned long)prog->aux) {
+			insns[i].imm = 0;
+			insns[i + 1].imm = 0;
+			continue;
+		}
+	}
+
+	return insns;
+}
+
 static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 				   const union bpf_attr *attr,
 				   union bpf_attr __user *uattr)
@@ -1602,18 +1669,34 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 	ulen = info.jited_prog_len;
 	info.jited_prog_len = prog->jited_len;
 	if (info.jited_prog_len && ulen) {
-		uinsns = u64_to_user_ptr(info.jited_prog_insns);
-		ulen = min_t(u32, info.jited_prog_len, ulen);
-		if (copy_to_user(uinsns, prog->bpf_func, ulen))
-			return -EFAULT;
+		if (bpf_dump_raw_ok()) {
+			uinsns = u64_to_user_ptr(info.jited_prog_insns);
+			ulen = min_t(u32, info.jited_prog_len, ulen);
+			if (copy_to_user(uinsns, prog->bpf_func, ulen))
+				return -EFAULT;
+		} else {
+			info.jited_prog_insns = 0;
+		}
 	}
 
 	ulen = info.xlated_prog_len;
 	info.xlated_prog_len = bpf_prog_insn_size(prog);
 	if (info.xlated_prog_len && ulen) {
+		struct bpf_insn *insns_sanitized;
+		bool fault;
+
+		if (prog->blinded && !bpf_dump_raw_ok()) {
+			info.xlated_prog_insns = 0;
+			goto done;
+		}
+		insns_sanitized = bpf_insn_prepare_dump(prog);
+		if (!insns_sanitized)
+			return -ENOMEM;
 		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
 		ulen = min_t(u32, info.xlated_prog_len, ulen);
-		if (copy_to_user(uinsns, prog->insnsi, ulen))
+		fault = copy_to_user(uinsns, insns_sanitized, ulen);
+		kfree(insns_sanitized);
+		if (fault)
 			return -EFAULT;
 	}
 
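The other side of the sanitized dump, sketched from user space (assumes libbpf's bpf_obj_get_info_by_fd() and a valid prog_fd; error paths trimmed to the essentials). A zeroed xlated_prog_insns on return is how the kernel code above signals that the dump was withheld:

#include <stdlib.h>
#include <string.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>

static struct bpf_insn *fetch_xlated(int prog_fd, __u32 *cnt)
{
	struct bpf_prog_info info = {};
	__u32 len = sizeof(info);
	struct bpf_insn *buf;
	__u32 xlated_len;

	/* first call: only learn how large the xlated image is */
	if (bpf_obj_get_info_by_fd(prog_fd, &info, &len))
		return NULL;
	xlated_len = info.xlated_prog_len;
	if (!xlated_len)
		return NULL;

	buf = malloc(xlated_len);
	if (!buf)
		return NULL;

	/* second call: kernel fills buf via bpf_insn_prepare_dump() */
	memset(&info, 0, sizeof(info));
	info.xlated_prog_len = xlated_len;
	info.xlated_prog_insns = (__u64)(unsigned long)buf;
	len = sizeof(info);
	if (bpf_obj_get_info_by_fd(prog_fd, &info, &len) ||
	    !info.xlated_prog_insns) {	/* zeroed: dump was denied */
		free(buf);
		return NULL;
	}

	*cnt = xlated_len / sizeof(struct bpf_insn);
	return buf;
}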
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 1cd2c2d28fc3..98d8637cf70d 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -772,7 +772,7 @@ static int check_subprogs(struct bpf_verifier_env *env)
 			return -EPERM;
 		}
 		if (bpf_prog_is_dev_bound(env->prog->aux)) {
-			verbose(env, "funcation calls in offloaded programs are not supported yet\n");
+			verbose(env, "function calls in offloaded programs are not supported yet\n");
 			return -EINVAL;
 		}
 		ret = add_subprog(env, i + insn[i].imm + 1);
@@ -823,6 +823,7 @@ next:
 	return 0;
 }
 
+static
 struct bpf_verifier_state *skip_callee(struct bpf_verifier_env *env,
 				       const struct bpf_verifier_state *state,
 				       struct bpf_verifier_state *parent,
@@ -867,7 +868,7 @@ bug:
 	verbose(env, "verifier bug regno %d tmp %p\n", regno, tmp);
 	verbose(env, "regno %d parent frame %d current frame %d\n",
 		regno, parent->curframe, state->curframe);
-	return 0;
+	return NULL;
 }
 
 static int mark_reg_read(struct bpf_verifier_env *env,
@@ -1434,33 +1435,80 @@ static int update_stack_depth(struct bpf_verifier_env *env,
 				      const struct bpf_func_state *func,
 				      int off)
 {
-	u16 stack = env->subprog_stack_depth[func->subprogno], total = 0;
-	struct bpf_verifier_state *cur = env->cur_state;
-	int i;
+	u16 stack = env->subprog_stack_depth[func->subprogno];
 
 	if (stack >= -off)
 		return 0;
 
 	/* update known max for given subprogram */
 	env->subprog_stack_depth[func->subprogno] = -off;
+	return 0;
+}
 
-	/* compute the total for current call chain */
-	for (i = 0; i <= cur->curframe; i++) {
-		u32 depth = env->subprog_stack_depth[cur->frame[i]->subprogno];
-
-		/* round up to 32-bytes, since this is granularity
-		 * of interpreter stack sizes
-		 */
-		depth = round_up(depth, 32);
-		total += depth;
-	}
+/* starting from main bpf function walk all instructions of the function
+ * and recursively walk all callees that given function can call.
+ * Ignore jump and exit insns.
+ * Since recursion is prevented by check_cfg() this algorithm
+ * only needs a local stack of MAX_CALL_FRAMES to remember callsites
+ */
+static int check_max_stack_depth(struct bpf_verifier_env *env)
+{
+	int depth = 0, frame = 0, subprog = 0, i = 0, subprog_end;
+	struct bpf_insn *insn = env->prog->insnsi;
+	int insn_cnt = env->prog->len;
+	int ret_insn[MAX_CALL_FRAMES];
+	int ret_prog[MAX_CALL_FRAMES];
 
-	if (total > MAX_BPF_STACK) {
+process_func:
+	/* round up to 32-bytes, since this is granularity
+	 * of interpreter stack size
+	 */
+	depth += round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32);
+	if (depth > MAX_BPF_STACK) {
 		verbose(env, "combined stack size of %d calls is %d. Too large\n",
-			cur->curframe, total);
+			frame + 1, depth);
 		return -EACCES;
 	}
-	return 0;
+continue_func:
+	if (env->subprog_cnt == subprog)
+		subprog_end = insn_cnt;
+	else
+		subprog_end = env->subprog_starts[subprog];
+	for (; i < subprog_end; i++) {
+		if (insn[i].code != (BPF_JMP | BPF_CALL))
+			continue;
+		if (insn[i].src_reg != BPF_PSEUDO_CALL)
+			continue;
+		/* remember insn and function to return to */
+		ret_insn[frame] = i + 1;
+		ret_prog[frame] = subprog;
+
+		/* find the callee */
+		i = i + insn[i].imm + 1;
+		subprog = find_subprog(env, i);
+		if (subprog < 0) {
+			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
+				  i);
+			return -EFAULT;
+		}
+		subprog++;
+		frame++;
+		if (frame >= MAX_CALL_FRAMES) {
+			WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
+			return -EFAULT;
+		}
+		goto process_func;
+	}
+	/* end of for() loop means the last insn of the 'subprog'
+	 * was reached. Doesn't matter whether it was JA or EXIT
+	 */
+	if (frame == 0)
+		return 0;
+	depth -= round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32);
+	frame--;
+	i = ret_insn[frame];
+	subprog = ret_prog[frame];
+	goto continue_func;
 }
 
 static int get_callee_stack_depth(struct bpf_verifier_env *env,
@@ -2105,9 +2153,9 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 	struct bpf_func_state *caller, *callee;
 	int i, subprog, target_insn;
 
-	if (state->curframe >= MAX_CALL_FRAMES) {
+	if (state->curframe + 1 >= MAX_CALL_FRAMES) {
 		verbose(env, "the call stack of %d frames is too deep\n",
-			state->curframe);
+			state->curframe + 2);
 		return -E2BIG;
 	}
 
@@ -4155,7 +4203,7 @@ static bool stacksafe(struct bpf_func_state *old,
 
 		if (!(old->stack[spi].spilled_ptr.live & REG_LIVE_READ))
 			/* explored state didn't use this */
-			return true;
+			continue;
 
 		if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
 			continue;
@@ -4475,9 +4523,12 @@ static int do_check(struct bpf_verifier_env *env)
 		}
 
 		if (env->log.level) {
+			const struct bpf_insn_cbs cbs = {
+				.cb_print	= verbose,
+			};
+
 			verbose(env, "%d: ", insn_idx);
-			print_bpf_insn(verbose, env, insn,
-				       env->allow_ptr_leaks);
+			print_bpf_insn(&cbs, env, insn, env->allow_ptr_leaks);
 		}
 
 		err = ext_analyzer_insn_hook(env, insn_idx, prev_insn_idx);
@@ -5065,14 +5116,14 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 {
 	struct bpf_prog *prog = env->prog, **func, *tmp;
 	int i, j, subprog_start, subprog_end = 0, len, subprog;
-	struct bpf_insn *insn = prog->insnsi;
+	struct bpf_insn *insn;
 	void *old_bpf_func;
 	int err = -ENOMEM;
 
 	if (env->subprog_cnt == 0)
 		return 0;
 
-	for (i = 0; i < prog->len; i++, insn++) {
+	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
 		if (insn->code != (BPF_JMP | BPF_CALL) ||
 		    insn->src_reg != BPF_PSEUDO_CALL)
 			continue;
@@ -5111,7 +5162,10 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 			goto out_free;
 		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
 		       len * sizeof(struct bpf_insn));
+		func[i]->type = prog->type;
 		func[i]->len = len;
+		if (bpf_prog_calc_tag(func[i]))
+			goto out_free;
 		func[i]->is_func = 1;
 		/* Use bpf_prog_F_tag to indicate functions in stack traces.
 		 * Long term would need debug info to populate names
@@ -5161,6 +5215,25 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 		bpf_prog_lock_ro(func[i]);
 		bpf_prog_kallsyms_add(func[i]);
 	}
+
+	/* Last step: make now unused interpreter insns from main
+	 * prog consistent for later dump requests, so they can
+	 * later look the same as if they were interpreted only.
+	 */
+	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
+		unsigned long addr;
+
+		if (insn->code != (BPF_JMP | BPF_CALL) ||
+		    insn->src_reg != BPF_PSEUDO_CALL)
+			continue;
+		insn->off = env->insn_aux_data[i].call_imm;
+		subprog = find_subprog(env, i + insn->off + 1);
+		addr = (unsigned long)func[subprog + 1]->bpf_func;
+		addr &= PAGE_MASK;
+		insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
+			    addr - __bpf_call_base;
+	}
+
 	prog->jited = 1;
 	prog->bpf_func = func[0]->bpf_func;
 	prog->aux->func = func;
@@ -5427,6 +5500,9 @@ skip_full_check:
 		sanitize_dead_code(env);
 
 	if (ret == 0)
+		ret = check_max_stack_depth(env);
+
+	if (ret == 0)
 		/* program is valid, convert *(u32*)(ctx + off) accesses */
 		ret = convert_ctx_accesses(env);
 
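A worked example of the new accounting, with the numbers from the "stack depth check using three frames. test3" selftest later in this patch:

/* stack_main = 64, stack_A = 224, stack_B = 256; each is already a
 * multiple of the 32-byte rounding granularity, MAX_BPF_STACK = 512.
 *
 *   chain main -> A:      64 + 224       = 288 <= 512  OK
 *   chain main -> B -> A: 64 + 256 + 224 = 544 >  512  reject
 *
 * The replaced code summed only the frames of the path currently being
 * explored, so a chain that got deeper later could escape the limit
 * (the bypass reported by Jann); check_max_stack_depth() instead walks
 * every reachable call chain up front.
 */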
diff --git a/net/core/filter.c b/net/core/filter.c
index 754abe1041b7..130b842c3a15 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -2684,8 +2684,9 @@ static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd)
 	return 0;
 }
 
-int xdp_do_generic_redirect_map(struct net_device *dev, struct sk_buff *skb,
-				struct bpf_prog *xdp_prog)
+static int xdp_do_generic_redirect_map(struct net_device *dev,
+				       struct sk_buff *skb,
+				       struct bpf_prog *xdp_prog)
 {
 	struct redirect_info *ri = this_cpu_ptr(&redirect_info);
 	unsigned long map_owner = ri->map_owner;
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 037484ceaeaf..42ee8892549c 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -401,6 +401,88 @@ static int do_show(int argc, char **argv)
 	return err;
 }
 
+#define SYM_MAX_NAME	256
+
+struct kernel_sym {
+	unsigned long address;
+	char name[SYM_MAX_NAME];
+};
+
+struct dump_data {
+	unsigned long address_call_base;
+	struct kernel_sym *sym_mapping;
+	__u32 sym_count;
+	char scratch_buff[SYM_MAX_NAME];
+};
+
+static int kernel_syms_cmp(const void *sym_a, const void *sym_b)
+{
+	return ((struct kernel_sym *)sym_a)->address -
+	       ((struct kernel_sym *)sym_b)->address;
+}
+
+static void kernel_syms_load(struct dump_data *dd)
+{
+	struct kernel_sym *sym;
+	char buff[256];
+	void *tmp, *address;
+	FILE *fp;
+
+	fp = fopen("/proc/kallsyms", "r");
+	if (!fp)
+		return;
+
+	while (!feof(fp)) {
+		if (!fgets(buff, sizeof(buff), fp))
+			break;
+		tmp = realloc(dd->sym_mapping,
+			      (dd->sym_count + 1) *
+			      sizeof(*dd->sym_mapping));
+		if (!tmp) {
+out:
+			free(dd->sym_mapping);
+			dd->sym_mapping = NULL;
+			fclose(fp);
+			return;
+		}
+		dd->sym_mapping = tmp;
+		sym = &dd->sym_mapping[dd->sym_count];
+		if (sscanf(buff, "%p %*c %s", &address, sym->name) != 2)
+			continue;
+		sym->address = (unsigned long)address;
+		if (!strcmp(sym->name, "__bpf_call_base")) {
+			dd->address_call_base = sym->address;
+			/* sysctl kernel.kptr_restrict was set */
+			if (!sym->address)
+				goto out;
+		}
+		if (sym->address)
+			dd->sym_count++;
+	}
+
+	fclose(fp);
+
+	qsort(dd->sym_mapping, dd->sym_count,
+	      sizeof(*dd->sym_mapping), kernel_syms_cmp);
+}
+
+static void kernel_syms_destroy(struct dump_data *dd)
+{
+	free(dd->sym_mapping);
+}
+
+static struct kernel_sym *kernel_syms_search(struct dump_data *dd,
+					     unsigned long key)
+{
+	struct kernel_sym sym = {
+		.address = key,
+	};
+
+	return dd->sym_mapping ?
+	       bsearch(&sym, dd->sym_mapping, dd->sym_count,
+		       sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL;
+}
+
 static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
 {
 	va_list args;
@@ -410,8 +492,71 @@ static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
 	va_end(args);
 }
 
-static void dump_xlated_plain(void *buf, unsigned int len, bool opcodes)
+static const char *print_call_pcrel(struct dump_data *dd,
+				    struct kernel_sym *sym,
+				    unsigned long address,
+				    const struct bpf_insn *insn)
 {
+	if (sym)
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "%+d#%s", insn->off, sym->name);
+	else
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "%+d#0x%lx", insn->off, address);
+	return dd->scratch_buff;
+}
+
+static const char *print_call_helper(struct dump_data *dd,
+				     struct kernel_sym *sym,
+				     unsigned long address)
+{
+	if (sym)
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "%s", sym->name);
+	else
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "0x%lx", address);
+	return dd->scratch_buff;
+}
+
+static const char *print_call(void *private_data,
+			      const struct bpf_insn *insn)
+{
+	struct dump_data *dd = private_data;
+	unsigned long address = dd->address_call_base + insn->imm;
+	struct kernel_sym *sym;
+
+	sym = kernel_syms_search(dd, address);
+	if (insn->src_reg == BPF_PSEUDO_CALL)
+		return print_call_pcrel(dd, sym, address, insn);
+	else
+		return print_call_helper(dd, sym, address);
+}
+
+static const char *print_imm(void *private_data,
+			     const struct bpf_insn *insn,
+			     __u64 full_imm)
+{
+	struct dump_data *dd = private_data;
+
+	if (insn->src_reg == BPF_PSEUDO_MAP_FD)
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "map[id:%u]", insn->imm);
+	else
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "0x%llx", (unsigned long long)full_imm);
+	return dd->scratch_buff;
+}
+
+static void dump_xlated_plain(struct dump_data *dd, void *buf,
+			      unsigned int len, bool opcodes)
+{
+	const struct bpf_insn_cbs cbs = {
+		.cb_print	= print_insn,
+		.cb_call	= print_call,
+		.cb_imm		= print_imm,
+		.private_data	= dd,
+	};
 	struct bpf_insn *insn = buf;
 	bool double_insn = false;
 	unsigned int i;
@@ -425,7 +570,7 @@ static void dump_xlated_plain(void *buf, unsigned int len, bool opcodes)
 		double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
 
 		printf("% 4d: ", i);
-		print_bpf_insn(print_insn, NULL, insn + i, true);
+		print_bpf_insn(&cbs, NULL, insn + i, true);
 
 		if (opcodes) {
 			printf("       ");
@@ -454,8 +599,15 @@ static void print_insn_json(struct bpf_verifier_env *env, const char *fmt, ...)
 	va_end(args);
 }
 
-static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
+static void dump_xlated_json(struct dump_data *dd, void *buf,
+			     unsigned int len, bool opcodes)
 {
+	const struct bpf_insn_cbs cbs = {
+		.cb_print	= print_insn_json,
+		.cb_call	= print_call,
+		.cb_imm		= print_imm,
+		.private_data	= dd,
+	};
 	struct bpf_insn *insn = buf;
 	bool double_insn = false;
 	unsigned int i;
@@ -470,7 +622,7 @@ static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
 
 		jsonw_start_object(json_wtr);
 		jsonw_name(json_wtr, "disasm");
-		print_bpf_insn(print_insn_json, NULL, insn + i, true);
+		print_bpf_insn(&cbs, NULL, insn + i, true);
 
 		if (opcodes) {
 			jsonw_name(json_wtr, "opcodes");
@@ -505,6 +657,7 @@ static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
 static int do_dump(int argc, char **argv)
 {
 	struct bpf_prog_info info = {};
+	struct dump_data dd = {};
 	__u32 len = sizeof(info);
 	unsigned int buf_size;
 	char *filepath = NULL;
@@ -592,6 +745,14 @@ static int do_dump(int argc, char **argv)
 		goto err_free;
 	}
 
+	if ((member_len == &info.jited_prog_len &&
+	     info.jited_prog_insns == 0) ||
+	    (member_len == &info.xlated_prog_len &&
+	     info.xlated_prog_insns == 0)) {
+		p_err("error retrieving insn dump: kernel.kptr_restrict set?");
+		goto err_free;
+	}
+
 	if (filepath) {
 		fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
 		if (fd < 0) {
@@ -608,17 +769,19 @@ static int do_dump(int argc, char **argv)
 			goto err_free;
 		}
 	} else {
-		if (member_len == &info.jited_prog_len)
+		if (member_len == &info.jited_prog_len) {
 			disasm_print_insn(buf, *member_len, opcodes);
-		else
+		} else {
+			kernel_syms_load(&dd);
 			if (json_output)
-				dump_xlated_json(buf, *member_len, opcodes);
+				dump_xlated_json(&dd, buf, *member_len, opcodes);
 			else
-				dump_xlated_plain(buf, *member_len, opcodes);
+				dump_xlated_plain(&dd, buf, *member_len, opcodes);
+			kernel_syms_destroy(&dd);
+		}
 	}
 
 	free(buf);
-
 	return 0;
 
 err_free:
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 5b83875b3594..e9c4b7cabcf2 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -910,8 +910,9 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
 				   GELF_R_SYM(rel.r_info));
 			return -LIBBPF_ERRNO__FORMAT;
 		}
-		pr_debug("relo for %ld value %ld name %d\n",
-			 rel.r_info >> 32, sym.st_value, sym.st_name);
+		pr_debug("relo for %lld value %lld name %d\n",
+			 (long long) (rel.r_info >> 32),
+			 (long long) sym.st_value, sym.st_name);
 
 		if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
 			pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index 9d4897317c77..983dd25d49f4 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -4,3 +4,4 @@ CONFIG_NET_CLS_BPF=m
 CONFIG_BPF_EVENTS=y
 CONFIG_TEST_BPF=m
 CONFIG_CGROUP_BPF=y
+CONFIG_NETDEVSIM=m
diff --git a/tools/testing/selftests/bpf/test_dev_cgroup.c b/tools/testing/selftests/bpf/test_dev_cgroup.c
index 02c85d6c89b0..c1535b34f14f 100644
--- a/tools/testing/selftests/bpf/test_dev_cgroup.c
+++ b/tools/testing/selftests/bpf/test_dev_cgroup.c
@@ -10,6 +10,8 @@
 #include <string.h>
 #include <errno.h>
 #include <assert.h>
+#include <sys/time.h>
+#include <sys/resource.h>
 
 #include <linux/bpf.h>
 #include <bpf/bpf.h>
@@ -23,15 +25,19 @@
 
 int main(int argc, char **argv)
 {
+	struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
 	struct bpf_object *obj;
 	int error = EXIT_FAILURE;
 	int prog_fd, cgroup_fd;
 	__u32 prog_cnt;
 
+	if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0)
+		perror("Unable to lift memlock rlimit");
+
 	if (bpf_prog_load(DEV_CGROUP_PROG, BPF_PROG_TYPE_CGROUP_DEVICE,
 			  &obj, &prog_fd)) {
 		printf("Failed to load DEV_CGROUP program\n");
-		goto err;
+		goto out;
 	}
 
 	if (setup_cgroup_environment()) {
@@ -89,5 +95,6 @@ int main(int argc, char **argv)
 err:
 	cleanup_cgroup_environment();
 
+out:
 	return error;
 }
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index d38334abb990..543847957fdd 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -9273,6 +9273,196 @@ static struct bpf_test tests[] = {
 		.result = ACCEPT,
 	},
 	{
+		"calls: stack overflow using two frames (pre-call access)",
+		.insns = {
+			/* prog 1 */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+
+			/* prog 2 */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.errstr = "combined stack size",
+		.result = REJECT,
+	},
+	{
+		"calls: stack overflow using two frames (post-call access)",
+		.insns = {
+			/* prog 1 */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+			BPF_EXIT_INSN(),
+
+			/* prog 2 */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.errstr = "combined stack size",
+		.result = REJECT,
+	},
+	{
+		"calls: stack depth check using three frames. test1",
+		.insns = {
+			/* main */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			/* A */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+			BPF_EXIT_INSN(),
+			/* B */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		/* stack_main=32, stack_A=256, stack_B=64
+		 * and max(main+A, main+A+B) < 512
+		 */
+		.result = ACCEPT,
+	},
+	{
+		"calls: stack depth check using three frames. test2",
+		.insns = {
+			/* main */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			/* A */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+			BPF_EXIT_INSN(),
+			/* B */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		/* stack_main=32, stack_A=64, stack_B=256
+		 * and max(main+A, main+A+B) < 512
+		 */
+		.result = ACCEPT,
+	},
+	{
+		"calls: stack depth check using three frames. test3",
+		.insns = {
+			/* main */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
+			BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			/* A */
+			BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
+			BPF_EXIT_INSN(),
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+			/* B */
+			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		/* stack_main=64, stack_A=224, stack_B=256
+		 * and max(main+A, main+A+B) > 512
+		 */
+		.errstr = "combined stack",
+		.result = REJECT,
+	},
+	{
+		"calls: stack depth check using three frames. test4",
+		/* void main(void) {
+		 *   func1(0);
+		 *   func1(1);
+		 *   func2(1);
+		 * }
+		 * void func1(int alloc_or_recurse) {
+		 *   if (alloc_or_recurse) {
+		 *     frame_pointer[-300] = 1;
+		 *   } else {
+		 *     func2(alloc_or_recurse);
+		 *   }
+		 * }
+		 * void func2(int alloc_or_recurse) {
+		 *   if (alloc_or_recurse) {
+		 *     frame_pointer[-300] = 1;
+		 *   }
+		 * }
+		 */
+		.insns = {
+			/* main */
+			BPF_MOV64_IMM(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
+			BPF_MOV64_IMM(BPF_REG_1, 1),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+			BPF_MOV64_IMM(BPF_REG_1, 1),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			/* A */
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+			BPF_EXIT_INSN(),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
+			BPF_EXIT_INSN(),
+			/* B */
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.result = REJECT,
+		.errstr = "combined stack",
+	},
+	{
+		"calls: stack depth check using three frames. test5",
+		.insns = {
+			/* main */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
+			BPF_EXIT_INSN(),
+			/* A */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
+			BPF_EXIT_INSN(),
+			/* B */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
+			BPF_EXIT_INSN(),
+			/* C */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
+			BPF_EXIT_INSN(),
+			/* D */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
+			BPF_EXIT_INSN(),
+			/* E */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
+			BPF_EXIT_INSN(),
+			/* F */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
+			BPF_EXIT_INSN(),
+			/* G */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
+			BPF_EXIT_INSN(),
+			/* H */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.errstr = "call stack",
+		.result = REJECT,
+	},
+	{
 		"calls: spill into caller stack frame",
 		.insns = {
 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
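A decoding aid for the raw insns in the tests above (this restates the verifier's own arithmetic, no new semantics):

/* BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, N) encodes a bpf-to-bpf call:
 * src_reg = 1 is BPF_PSEUDO_CALL and imm = N is pc-relative, so the
 * callee starts at insn_idx + N + 1, the same "i + insn[i].imm + 1"
 * used by check_max_stack_depth(). In "test1" above, main's first
 * call sits at index 0 with imm = 4 and thus targets index 5, the
 * first insn of A; B's "call A" at index 7 has imm = -3 and targets
 * index 5 as well.
 */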
@@ -10258,6 +10448,57 @@ static struct bpf_test tests[] = {
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_XDP,
 	},
+	{
+		"search pruning: all branches should be verified (nop operation)",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_JMP_A(1),
+			BPF_MOV64_IMM(BPF_REG_4, 1),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
+			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_6, 0),
+			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr = "R6 invalid mem access 'inv'",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"search pruning: all branches should be verified (invalid stack access)",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
+			BPF_JMP_A(1),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
+			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr = "invalid read from stack off -16+0 size 8",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)