path: root/kernel/bpf/disasm.c
author    Daniel Borkmann <daniel@iogearbox.net>    2017-12-20 07:42:57 -0500
committer Alexei Starovoitov <ast@kernel.org>       2017-12-20 21:09:40 -0500
commit    7105e828c087de970fcb5a9509db51bfe6bd7894 (patch)
tree      47cca432779b910dab12ee5cc81b792f6e432a76 /kernel/bpf/disasm.c
parent    4f74d80971bce93d9e608c40324d662c70eb4664 (diff)
bpf: allow for correlation of maps and helpers in dump
Currently a dump of an xlated prog (post verifier stage) doesn't
correlate used helpers as well as maps. The prog info lists involved
map ids, however there's no correlation of where in the program they
are used as of today. Likewise, bpftool does not correlate helper
calls with the target functions.

The latter can be done w/o any kernel changes through kallsyms, and
also has the advantage that this works with inlined helpers and BPF
calls.

Example, via interpreter:

  # tc filter show dev foo ingress
  filter protocol all pref 49152 bpf chain 0
  filter protocol all pref 49152 bpf chain 0 handle 0x1 foo.o:[ingress] \
                      direct-action not_in_hw id 1 tag c74773051b364165   <-- prog id:1

  * Output before patch (calls/maps remain unclear):

  # bpftool prog dump xlated id 1             <-- dump prog id:1
   0: (b7) r1 = 2
   1: (63) *(u32 *)(r10 -4) = r1
   2: (bf) r2 = r10
   3: (07) r2 += -4
   4: (18) r1 = 0xffff95c47a8d4800
   6: (85) call unknown#73040
   7: (15) if r0 == 0x0 goto pc+18
   8: (bf) r2 = r10
   9: (07) r2 += -4
  10: (bf) r1 = r0
  11: (85) call unknown#73040
  12: (15) if r0 == 0x0 goto pc+23
  [...]

  * Output after patch:

  # bpftool prog dump xlated id 1
   0: (b7) r1 = 2
   1: (63) *(u32 *)(r10 -4) = r1
   2: (bf) r2 = r10
   3: (07) r2 += -4
   4: (18) r1 = map[id:2]                     <-- map id:2
   6: (85) call bpf_map_lookup_elem#73424     <-- helper call
   7: (15) if r0 == 0x0 goto pc+18
   8: (bf) r2 = r10
   9: (07) r2 += -4
  10: (bf) r1 = r0
  11: (85) call bpf_map_lookup_elem#73424
  12: (15) if r0 == 0x0 goto pc+23
  [...]

  # bpftool map show id 2                     <-- show/dump/etc map id:2
  2: hash_of_maps  flags 0x0
     key 4B  value 4B  max_entries 3  memlock 4096B

Example, JITed, same prog:

  # tc filter show dev foo ingress
  filter protocol all pref 49152 bpf chain 0
  filter protocol all pref 49152 bpf chain 0 handle 0x1 foo.o:[ingress] \
                      direct-action not_in_hw id 3 tag c74773051b364165 jited

  # bpftool prog show id 3
  3: sched_cls  tag c74773051b364165
     loaded_at Dec 19/13:48  uid 0
     xlated 384B  jited 257B  memlock 4096B  map_ids 2

  # bpftool prog dump xlated id 3
   0: (b7) r1 = 2
   1: (63) *(u32 *)(r10 -4) = r1
   2: (bf) r2 = r10
   3: (07) r2 += -4
   4: (18) r1 = map[id:2]                      <-- map id:2
   6: (85) call __htab_map_lookup_elem#77408   <-+ inlined rewrite
   7: (15) if r0 == 0x0 goto pc+2                |
   8: (07) r0 += 56                              |
   9: (79) r0 = *(u64 *)(r0 +0)                <-+
  10: (15) if r0 == 0x0 goto pc+24
  11: (bf) r2 = r10
  12: (07) r2 += -4
  [...]

Example, same prog, but kallsyms disabled (in that case we are also
not allowed to pass any relative offsets, etc, so prog becomes
pointer sanitized on dump):

  # sysctl kernel.kptr_restrict=2
  kernel.kptr_restrict = 2

  # bpftool prog dump xlated id 3
   0: (b7) r1 = 2
   1: (63) *(u32 *)(r10 -4) = r1
   2: (bf) r2 = r10
   3: (07) r2 += -4
   4: (18) r1 = map[id:2]
   6: (85) call bpf_unspec#0
   7: (15) if r0 == 0x0 goto pc+2
  [...]

Example, BPF calls via interpreter:

  # bpftool prog dump xlated id 1
   0: (85) call pc+2#__bpf_prog_run_args32
   1: (b7) r0 = 1
   2: (95) exit
   3: (b7) r0 = 2
   4: (95) exit

Example, BPF calls via JIT:

  # sysctl net.core.bpf_jit_enable=1
  net.core.bpf_jit_enable = 1
  # sysctl net.core.bpf_jit_kallsyms=1
  net.core.bpf_jit_kallsyms = 1

  # bpftool prog dump xlated id 1
   0: (85) call pc+2#bpf_prog_3b185187f1855c4c_F
   1: (b7) r0 = 1
   2: (95) exit
   3: (b7) r0 = 2
   4: (95) exit

And finally, an example for tail calls that is now working as well
wrt correlation:

  # bpftool prog dump xlated id 2
  [...]
  10: (b7) r2 = 8
  11: (85) call bpf_trace_printk#-41312
  12: (bf) r1 = r6
  13: (18) r2 = map[id:1]
  15: (b7) r3 = 0
  16: (85) call bpf_tail_call#12
  17: (b7) r1 = 42
  18: (6b) *(u16 *)(r6 +46) = r1
  19: (b7) r0 = 0
  20: (95) exit

  # bpftool map show id 1
  1: prog_array  flags 0x0
     key 4B  value 4B  max_entries 1  memlock 4096B

Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
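Side note: the user-space, kallsyms-based correlation mentioned above boils down
to scanning /proc/kallsyms for the closest preceding symbol of a resolved call
address. A minimal sketch of such a lookup follows; it is illustrative only
(ksym_lookup is a hypothetical name, not part of this patch), and it assumes
kptr_restrict allows reading non-zero addresses:

  #include <stdio.h>

  /* Hypothetical helper: find the symbol with the highest start address
   * that is still <= addr. A dumper can feed it the resolved call target
   * (e.g. the address of __bpf_call_base plus the call's imm) to obtain
   * the helper name for display.
   */
  static int ksym_lookup(unsigned long long addr, char *name, size_t len)
  {
          unsigned long long sym_addr, best = 0;
          char sym_name[256];
          int found = 0;
          FILE *fp;

          fp = fopen("/proc/kallsyms", "r");
          if (!fp)
                  return -1;
          /* each line: "<addr> <type> <symbol> [module]" */
          while (fscanf(fp, "%llx %*c %255s%*[^\n]",
                        &sym_addr, sym_name) == 2) {
                  if (sym_addr <= addr && sym_addr >= best) {
                          best = sym_addr;
                          snprintf(name, len, "%s", sym_name);
                          found = 1;
                  }
          }
          fclose(fp);
          return found ? 0 : -1;
  }

A real tool would read and sort the symbol table once and binary-search it per
instruction instead of rescanning the file as this sketch does.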
Diffstat (limited to 'kernel/bpf/disasm.c')
-rw-r--r--  kernel/bpf/disasm.c  65
1 file changed, 54 insertions(+), 11 deletions(-)
diff --git a/kernel/bpf/disasm.c b/kernel/bpf/disasm.c
index 883f88fa5bfc..8740406df2cd 100644
--- a/kernel/bpf/disasm.c
+++ b/kernel/bpf/disasm.c
@@ -21,10 +21,39 @@ static const char * const func_id_str[] = {
 };
 #undef __BPF_FUNC_STR_FN
 
-const char *func_id_name(int id)
+static const char *__func_get_name(const struct bpf_insn_cbs *cbs,
+                                   const struct bpf_insn *insn,
+                                   char *buff, size_t len)
 {
         BUILD_BUG_ON(ARRAY_SIZE(func_id_str) != __BPF_FUNC_MAX_ID);
 
+        if (insn->src_reg != BPF_PSEUDO_CALL &&
+            insn->imm >= 0 && insn->imm < __BPF_FUNC_MAX_ID &&
+            func_id_str[insn->imm])
+                return func_id_str[insn->imm];
+
+        if (cbs && cbs->cb_call)
+                return cbs->cb_call(cbs->private_data, insn);
+
+        if (insn->src_reg == BPF_PSEUDO_CALL)
+                snprintf(buff, len, "%+d", insn->imm);
+
+        return buff;
+}
+
+static const char *__func_imm_name(const struct bpf_insn_cbs *cbs,
+                                   const struct bpf_insn *insn,
+                                   u64 full_imm, char *buff, size_t len)
+{
+        if (cbs && cbs->cb_imm)
+                return cbs->cb_imm(cbs->private_data, insn, full_imm);
+
+        snprintf(buff, len, "0x%llx", (unsigned long long)full_imm);
+        return buff;
+}
+
+const char *func_id_name(int id)
+{
         if (id >= 0 && id < __BPF_FUNC_MAX_ID && func_id_str[id])
                 return func_id_str[id];
         else
@@ -83,7 +112,7 @@ static const char *const bpf_jmp_string[16] = {
         [BPF_EXIT >> 4] = "exit",
 };
 
-static void print_bpf_end_insn(bpf_insn_print_cb verbose,
+static void print_bpf_end_insn(bpf_insn_print_t verbose,
                                struct bpf_verifier_env *env,
                                const struct bpf_insn *insn)
 {
@@ -92,9 +121,12 @@ static void print_bpf_end_insn(bpf_insn_print_cb verbose,
                 insn->imm, insn->dst_reg);
 }
 
-void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
-                    const struct bpf_insn *insn, bool allow_ptr_leaks)
+void print_bpf_insn(const struct bpf_insn_cbs *cbs,
+                    struct bpf_verifier_env *env,
+                    const struct bpf_insn *insn,
+                    bool allow_ptr_leaks)
 {
+        const bpf_insn_print_t verbose = cbs->cb_print;
         u8 class = BPF_CLASS(insn->code);
 
         if (class == BPF_ALU || class == BPF_ALU64) {
@@ -175,12 +207,15 @@ void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
                  */
                 u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
                 bool map_ptr = insn->src_reg == BPF_PSEUDO_MAP_FD;
+                char tmp[64];
 
                 if (map_ptr && !allow_ptr_leaks)
                         imm = 0;
 
-                verbose(env, "(%02x) r%d = 0x%llx\n", insn->code,
-                        insn->dst_reg, (unsigned long long)imm);
+                verbose(env, "(%02x) r%d = %s\n",
+                        insn->code, insn->dst_reg,
+                        __func_imm_name(cbs, insn, imm,
+                                        tmp, sizeof(tmp)));
         } else {
                 verbose(env, "BUG_ld_%02x\n", insn->code);
                 return;
@@ -189,12 +224,20 @@ void print_bpf_insn(bpf_insn_print_cb verbose, struct bpf_verifier_env *env,
                 u8 opcode = BPF_OP(insn->code);
 
                 if (opcode == BPF_CALL) {
-                        if (insn->src_reg == BPF_PSEUDO_CALL)
-                                verbose(env, "(%02x) call pc%+d\n", insn->code,
-                                        insn->imm);
-                        else
+                        char tmp[64];
+
+                        if (insn->src_reg == BPF_PSEUDO_CALL) {
+                                verbose(env, "(%02x) call pc%s\n",
+                                        insn->code,
+                                        __func_get_name(cbs, insn,
+                                                        tmp, sizeof(tmp)));
+                        } else {
+                                strcpy(tmp, "unknown");
                                 verbose(env, "(%02x) call %s#%d\n", insn->code,
-                                        func_id_name(insn->imm), insn->imm);
+                                        __func_get_name(cbs, insn,
+                                                        tmp, sizeof(tmp)),
+                                        insn->imm);
+                        }
                 } else if (insn->code == (BPF_JMP | BPF_JA)) {
                         verbose(env, "(%02x) goto pc%+d\n",
                                 insn->code, insn->off);
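For context, the new interface is callback driven: print_bpf_insn() asks the
caller, via struct bpf_insn_cbs, how to render call targets (cb_call) and
64-bit immediates such as map pointers (cb_imm), and falls back to plain
numeric output when no callback is set. A rough consumer-side sketch follows;
the my_* callbacks and their return strings are hypothetical, their parameter
types are inferred from the cbs->cb_call()/cbs->cb_imm() uses in the hunks
above, and none of this is part of the patch itself:

  /* Hypothetical callbacks: resolve names instead of raw immediates. */
  static const char *my_cb_call(void *private_data,
                                const struct bpf_insn *insn)
  {
          /* e.g. map insn->imm back to a helper or subprog name */
          return "bpf_map_lookup_elem";           /* placeholder */
  }

  static const char *my_cb_imm(void *private_data,
                               const struct bpf_insn *insn, u64 full_imm)
  {
          /* e.g. translate a map address into "map[id:N]" */
          return "map[id:2]";                     /* placeholder */
  }

  static void my_dump_insn(struct bpf_verifier_env *env,
                           const struct bpf_insn *insn)
  {
          const struct bpf_insn_cbs cbs = {
                  .cb_print       = verbose,      /* printf-style sink */
                  .cb_call        = my_cb_call,
                  .cb_imm         = my_cb_imm,
                  .private_data   = NULL,
          };

          print_bpf_insn(&cbs, env, insn, env->allow_ptr_leaks);
  }

A caller such as the verifier can set only .cb_print and leave the other
callbacks unset; __func_get_name() and __func_imm_name() then fall back to
func_id_str[], a relative pc offset, or the raw 0x%llx immediate.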