aboutsummaryrefslogtreecommitdiffstats
path: root/tools/bpf/bpftool/prog.c
diff options
context:
space:
mode:
authorDaniel Borkmann <daniel@iogearbox.net>2017-12-20 07:42:57 -0500
committerAlexei Starovoitov <ast@kernel.org>2017-12-20 21:09:40 -0500
commit7105e828c087de970fcb5a9509db51bfe6bd7894 (patch)
tree47cca432779b910dab12ee5cc81b792f6e432a76 /tools/bpf/bpftool/prog.c
parent4f74d80971bce93d9e608c40324d662c70eb4664 (diff)
bpf: allow for correlation of maps and helpers in dump
Currently a dump of an xlated prog (post verifier stage) doesn't correlate used helpers as well as maps. The prog info lists involved map ids, however there's no correlation of where in the program they are used as of today. Likewise, bpftool does not correlate helper calls with the target functions. The latter can be done w/o any kernel changes through kallsyms, and also has the advantage that this works with inlined helpers and BPF calls. Example, via interpreter: # tc filter show dev foo ingress filter protocol all pref 49152 bpf chain 0 filter protocol all pref 49152 bpf chain 0 handle 0x1 foo.o:[ingress] \ direct-action not_in_hw id 1 tag c74773051b364165 <-- prog id:1 * Output before patch (calls/maps remain unclear): # bpftool prog dump xlated id 1 <-- dump prog id:1 0: (b7) r1 = 2 1: (63) *(u32 *)(r10 -4) = r1 2: (bf) r2 = r10 3: (07) r2 += -4 4: (18) r1 = 0xffff95c47a8d4800 6: (85) call unknown#73040 7: (15) if r0 == 0x0 goto pc+18 8: (bf) r2 = r10 9: (07) r2 += -4 10: (bf) r1 = r0 11: (85) call unknown#73040 12: (15) if r0 == 0x0 goto pc+23 [...] * Output after patch: # bpftool prog dump xlated id 1 0: (b7) r1 = 2 1: (63) *(u32 *)(r10 -4) = r1 2: (bf) r2 = r10 3: (07) r2 += -4 4: (18) r1 = map[id:2] <-- map id:2 6: (85) call bpf_map_lookup_elem#73424 <-- helper call 7: (15) if r0 == 0x0 goto pc+18 8: (bf) r2 = r10 9: (07) r2 += -4 10: (bf) r1 = r0 11: (85) call bpf_map_lookup_elem#73424 12: (15) if r0 == 0x0 goto pc+23 [...] 
# bpftool map show id 2 <-- show/dump/etc map id:2 2: hash_of_maps flags 0x0 key 4B value 4B max_entries 3 memlock 4096B Example, JITed, same prog: # tc filter show dev foo ingress filter protocol all pref 49152 bpf chain 0 filter protocol all pref 49152 bpf chain 0 handle 0x1 foo.o:[ingress] \ direct-action not_in_hw id 3 tag c74773051b364165 jited # bpftool prog show id 3 3: sched_cls tag c74773051b364165 loaded_at Dec 19/13:48 uid 0 xlated 384B jited 257B memlock 4096B map_ids 2 # bpftool prog dump xlated id 3 0: (b7) r1 = 2 1: (63) *(u32 *)(r10 -4) = r1 2: (bf) r2 = r10 3: (07) r2 += -4 4: (18) r1 = map[id:2] <-- map id:2 6: (85) call __htab_map_lookup_elem#77408 <-+ inlined rewrite 7: (15) if r0 == 0x0 goto pc+2 | 8: (07) r0 += 56 | 9: (79) r0 = *(u64 *)(r0 +0) <-+ 10: (15) if r0 == 0x0 goto pc+24 11: (bf) r2 = r10 12: (07) r2 += -4 [...] Example, same prog, but kallsyms disabled (in that case we are also not allowed to pass any relative offsets, etc, so prog becomes pointer sanitized on dump): # sysctl kernel.kptr_restrict=2 kernel.kptr_restrict = 2 # bpftool prog dump xlated id 3 0: (b7) r1 = 2 1: (63) *(u32 *)(r10 -4) = r1 2: (bf) r2 = r10 3: (07) r2 += -4 4: (18) r1 = map[id:2] 6: (85) call bpf_unspec#0 7: (15) if r0 == 0x0 goto pc+2 [...] Example, BPF calls via interpreter: # bpftool prog dump xlated id 1 0: (85) call pc+2#__bpf_prog_run_args32 1: (b7) r0 = 1 2: (95) exit 3: (b7) r0 = 2 4: (95) exit Example, BPF calls via JIT: # sysctl net.core.bpf_jit_enable=1 net.core.bpf_jit_enable = 1 # sysctl net.core.bpf_jit_kallsyms=1 net.core.bpf_jit_kallsyms = 1 # bpftool prog dump xlated id 1 0: (85) call pc+2#bpf_prog_3b185187f1855c4c_F 1: (b7) r0 = 1 2: (95) exit 3: (b7) r0 = 2 4: (95) exit And finally, an example for tail calls that is now working as well wrt correlation: # bpftool prog dump xlated id 2 [...] 
10: (b7) r2 = 8 11: (85) call bpf_trace_printk#-41312 12: (bf) r1 = r6 13: (18) r2 = map[id:1] 15: (b7) r3 = 0 16: (85) call bpf_tail_call#12 17: (b7) r1 = 42 18: (6b) *(u16 *)(r6 +46) = r1 19: (b7) r0 = 0 20: (95) exit # bpftool map show id 1 1: prog_array flags 0x0 key 4B value 4B max_entries 1 memlock 4096B Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Acked-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'tools/bpf/bpftool/prog.c')
-rw-r--r--tools/bpf/bpftool/prog.c181
1 files changed, 172 insertions, 9 deletions
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 037484ceaeaf..42ee8892549c 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -401,6 +401,88 @@ static int do_show(int argc, char **argv)
401 return err; 401 return err;
402} 402}
403 403
/* Maximum length (including NUL) of a symbol name read from kallsyms. */
404#define SYM_MAX_NAME 256
405
/* One /proc/kallsyms entry: symbol start address and its name. */
406struct kernel_sym {
407 unsigned long address;
408 char name[SYM_MAX_NAME];
409};
410
/* Per-dump state: loaded symbol table plus formatting scratch space. */
411struct dump_data {
	/* address of __bpf_call_base; helper ids are offsets from it */
412 unsigned long address_call_base;
	/* sorted-by-address array of kallsyms entries, or NULL */
413 struct kernel_sym *sym_mapping;
414 __u32 sym_count;
	/* reused buffer for the strings returned by the print callbacks */
415 char scratch_buff[SYM_MAX_NAME];
416};
417
417
418static int kernel_syms_cmp(const void *sym_a, const void *sym_b)
419{
420 return ((struct kernel_sym *)sym_a)->address -
421 ((struct kernel_sym *)sym_b)->address;
422}
423
/*
 * Populate dd->sym_mapping from /proc/kallsyms, sorted by address so
 * kernel_syms_search() can bsearch() it.  Best effort: on any failure
 * (no kallsyms, OOM, kptr_restrict hiding addresses) the table is left
 * NULL/empty and callers simply print raw addresses instead of names.
 */
424static void kernel_syms_load(struct dump_data *dd)
425{
426 struct kernel_sym *sym;
427 char buff[256];
428 void *tmp, *address;
429 FILE *fp;
430
431 fp = fopen("/proc/kallsyms", "r");
432 if (!fp)
433 return;
434
435 while (!feof(fp)) {
436 if (!fgets(buff, sizeof(buff), fp))
437 break;
	/* grow by one entry per line; tmp keeps the old block alive on failure */
438 tmp = realloc(dd->sym_mapping,
439 (dd->sym_count + 1) *
440 sizeof(*dd->sym_mapping));
441 if (!tmp) {
	/* shared error path: drop the whole table and bail out */
442out:
443 free(dd->sym_mapping);
444 dd->sym_mapping = NULL;
445 fclose(fp);
446 return;
447 }
448 dd->sym_mapping = tmp;
449 sym = &dd->sym_mapping[dd->sym_count];
	/* kallsyms line format: "<address> <type> <name>[ <module>]" */
450 if (sscanf(buff, "%p %*c %s", &address, sym->name) != 2)
451 continue;
452 sym->address = (unsigned long)address;
453 if (!strcmp(sym->name, "__bpf_call_base")) {
454 dd->address_call_base = sym->address;
455 /* sysctl kernel.kptr_restrict was set */
456 if (!sym->address)
457 goto out;
458 }
	/* skip zeroed (restricted) addresses; they cannot be resolved anyway */
459 if (sym->address)
460 dd->sym_count++;
461 }
462
463 fclose(fp);
464
465 qsort(dd->sym_mapping, dd->sym_count,
466 sizeof(*dd->sym_mapping), kernel_syms_cmp);
467}
468
469static void kernel_syms_destroy(struct dump_data *dd)
470{
471 free(dd->sym_mapping);
472}
473
474static struct kernel_sym *kernel_syms_search(struct dump_data *dd,
475 unsigned long key)
476{
477 struct kernel_sym sym = {
478 .address = key,
479 };
480
481 return dd->sym_mapping ?
482 bsearch(&sym, dd->sym_mapping, dd->sym_count,
483 sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL;
484}
485
404static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...) 486static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
405{ 487{
406 va_list args; 488 va_list args;
@@ -410,8 +492,71 @@ static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
410 va_end(args); 492 va_end(args);
411} 493}
412 494
413static void dump_xlated_plain(void *buf, unsigned int len, bool opcodes) 495static const char *print_call_pcrel(struct dump_data *dd,
496 struct kernel_sym *sym,
497 unsigned long address,
498 const struct bpf_insn *insn)
414{ 499{
500 if (sym)
501 snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
502 "%+d#%s", insn->off, sym->name);
503 else
504 snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
505 "%+d#0x%lx", insn->off, address);
506 return dd->scratch_buff;
507}
508
509static const char *print_call_helper(struct dump_data *dd,
510 struct kernel_sym *sym,
511 unsigned long address)
512{
513 if (sym)
514 snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
515 "%s", sym->name);
516 else
517 snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
518 "0x%lx", address);
519 return dd->scratch_buff;
520}
521
522static const char *print_call(void *private_data,
523 const struct bpf_insn *insn)
524{
525 struct dump_data *dd = private_data;
526 unsigned long address = dd->address_call_base + insn->imm;
527 struct kernel_sym *sym;
528
529 sym = kernel_syms_search(dd, address);
530 if (insn->src_reg == BPF_PSEUDO_CALL)
531 return print_call_pcrel(dd, sym, address, insn);
532 else
533 return print_call_helper(dd, sym, address);
534}
535
536static const char *print_imm(void *private_data,
537 const struct bpf_insn *insn,
538 __u64 full_imm)
539{
540 struct dump_data *dd = private_data;
541
542 if (insn->src_reg == BPF_PSEUDO_MAP_FD)
543 snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
544 "map[id:%u]", insn->imm);
545 else
546 snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
547 "0x%llx", (unsigned long long)full_imm);
548 return dd->scratch_buff;
549}
550
551static void dump_xlated_plain(struct dump_data *dd, void *buf,
552 unsigned int len, bool opcodes)
553{
554 const struct bpf_insn_cbs cbs = {
555 .cb_print = print_insn,
556 .cb_call = print_call,
557 .cb_imm = print_imm,
558 .private_data = dd,
559 };
415 struct bpf_insn *insn = buf; 560 struct bpf_insn *insn = buf;
416 bool double_insn = false; 561 bool double_insn = false;
417 unsigned int i; 562 unsigned int i;
@@ -425,7 +570,7 @@ static void dump_xlated_plain(void *buf, unsigned int len, bool opcodes)
425 double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW); 570 double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
426 571
427 printf("% 4d: ", i); 572 printf("% 4d: ", i);
428 print_bpf_insn(print_insn, NULL, insn + i, true); 573 print_bpf_insn(&cbs, NULL, insn + i, true);
429 574
430 if (opcodes) { 575 if (opcodes) {
431 printf(" "); 576 printf(" ");
@@ -454,8 +599,15 @@ static void print_insn_json(struct bpf_verifier_env *env, const char *fmt, ...)
454 va_end(args); 599 va_end(args);
455} 600}
456 601
457static void dump_xlated_json(void *buf, unsigned int len, bool opcodes) 602static void dump_xlated_json(struct dump_data *dd, void *buf,
603 unsigned int len, bool opcodes)
458{ 604{
605 const struct bpf_insn_cbs cbs = {
606 .cb_print = print_insn_json,
607 .cb_call = print_call,
608 .cb_imm = print_imm,
609 .private_data = dd,
610 };
459 struct bpf_insn *insn = buf; 611 struct bpf_insn *insn = buf;
460 bool double_insn = false; 612 bool double_insn = false;
461 unsigned int i; 613 unsigned int i;
@@ -470,7 +622,7 @@ static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
470 622
471 jsonw_start_object(json_wtr); 623 jsonw_start_object(json_wtr);
472 jsonw_name(json_wtr, "disasm"); 624 jsonw_name(json_wtr, "disasm");
473 print_bpf_insn(print_insn_json, NULL, insn + i, true); 625 print_bpf_insn(&cbs, NULL, insn + i, true);
474 626
475 if (opcodes) { 627 if (opcodes) {
476 jsonw_name(json_wtr, "opcodes"); 628 jsonw_name(json_wtr, "opcodes");
@@ -505,6 +657,7 @@ static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
505static int do_dump(int argc, char **argv) 657static int do_dump(int argc, char **argv)
506{ 658{
507 struct bpf_prog_info info = {}; 659 struct bpf_prog_info info = {};
660 struct dump_data dd = {};
508 __u32 len = sizeof(info); 661 __u32 len = sizeof(info);
509 unsigned int buf_size; 662 unsigned int buf_size;
510 char *filepath = NULL; 663 char *filepath = NULL;
@@ -592,6 +745,14 @@ static int do_dump(int argc, char **argv)
592 goto err_free; 745 goto err_free;
593 } 746 }
594 747
748 if ((member_len == &info.jited_prog_len &&
749 info.jited_prog_insns == 0) ||
750 (member_len == &info.xlated_prog_len &&
751 info.xlated_prog_insns == 0)) {
752 p_err("error retrieving insn dump: kernel.kptr_restrict set?");
753 goto err_free;
754 }
755
595 if (filepath) { 756 if (filepath) {
596 fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600); 757 fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
597 if (fd < 0) { 758 if (fd < 0) {
@@ -608,17 +769,19 @@ static int do_dump(int argc, char **argv)
608 goto err_free; 769 goto err_free;
609 } 770 }
610 } else { 771 } else {
611 if (member_len == &info.jited_prog_len) 772 if (member_len == &info.jited_prog_len) {
612 disasm_print_insn(buf, *member_len, opcodes); 773 disasm_print_insn(buf, *member_len, opcodes);
613 else 774 } else {
775 kernel_syms_load(&dd);
614 if (json_output) 776 if (json_output)
615 dump_xlated_json(buf, *member_len, opcodes); 777 dump_xlated_json(&dd, buf, *member_len, opcodes);
616 else 778 else
617 dump_xlated_plain(buf, *member_len, opcodes); 779 dump_xlated_plain(&dd, buf, *member_len, opcodes);
780 kernel_syms_destroy(&dd);
781 }
618 } 782 }
619 783
620 free(buf); 784 free(buf);
621
622 return 0; 785 return 0;
623 786
624err_free: 787err_free: