author	David S. Miller <davem@davemloft.net>	2017-12-27 20:40:32 -0500
committer	David S. Miller <davem@davemloft.net>	2017-12-27 20:40:32 -0500
commit	fcffe2edbd390cad499b27d20512ef000d7ecf54 (patch)
tree	13120e1efcf0ad226785b721f4b38272ffdd2028 /tools
parent	4f83435ad777358d9cdc138868feebbe2a23f577 (diff)
parent	624588d9d6cc0a1a270a65fb4d5220f1ceddcf38 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
pull-request: bpf-next 2017-12-28

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) Fix incorrect state pruning related to recognition of zero initialized
   stack slots, where stacksafe exploration would mistakenly return a
   positive pruning verdict too early ignoring other slots, from Gianluca.

2) Various BPF to BPF calls related follow-up fixes. Fix an off-by-one
   in maximum call depth check, and rework maximum stack depth tracking
   logic to fix a bypass of the total stack size check reported by Jann.
   Also fix a bug in arm64 JIT where prog->jited_len was uninitialized.
   Addition of various test cases to BPF selftests, from Alexei.

3) Addition of a BPF selftest to test_verifier that is related to BPF to
   BPF calls which demonstrates a late caller stack size increase and
   thus out of bounds access. Fixed above in 2). Test case from Jann.

4) Addition of correlating BPF helper calls, BPF to BPF calls as well as
   BPF maps to bpftool xlated dump in order to allow for better BPF
   program introspection and debugging, from Daniel.

5) Fixing several bugs in BPF to BPF calls kallsyms handling in order to
   get it actually to work for subprogs, from Daniel.

6) Extending sparc64 JIT support for BPF to BPF calls and fix a couple
   of build errors for libbpf on sparc64, from David.

7) Allow narrower context access for BPF dev cgroup typed programs in
   order to adapt to LLVM code generation. Also adjust memlock rlimit
   in the test_dev_cgroup BPF selftest, from Yonghong.

8) Add netdevsim Kconfig entry to BPF selftests since test_offload.py
   relies on netdevsim device being available, from Jakub.

9) Reduce scope of xdp_do_generic_redirect_map() to being static, from
   Xiongwei.

10) Minor cleanups and spelling fixes in BPF verifier, from Colin.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
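Background for the call-related changes below: a BPF-to-BPF call is a BPF_JMP|BPF_CALL instruction whose src_reg is BPF_PSEUDO_CALL (value 1) and whose immediate is a relative instruction offset rather than a helper ID. A minimal sketch of such a two-frame program, built with the same insn macros the selftests in this diff use (the include line is an assumption; in-tree, the macros come from tools/include/linux/filter.h):

#include <linux/bpf.h>
#include <linux/filter.h>	/* assumed: BPF_RAW_INSN() etc. as provided
				 * by tools/include/linux/filter.h */

/* Two frames: main calls a subprogram which just returns 0.
 * For a BPF-to-BPF call, src_reg is BPF_PSEUDO_CALL (1) and the
 * immediate is a relative instruction offset, not a helper ID.
 */
static const struct bpf_insn two_frame_prog[] = {
	BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),	/* call subprog */
	BPF_EXIT_INSN(),
	/* subprog */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
};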
Diffstat (limited to 'tools')
-rw-r--r--  tools/bpf/bpftool/prog.c                        181
-rw-r--r--  tools/lib/bpf/libbpf.c                            5
-rw-r--r--  tools/testing/selftests/bpf/config                1
-rw-r--r--  tools/testing/selftests/bpf/test_dev_cgroup.c     9
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c     241
5 files changed, 425 insertions(+), 12 deletions(-)
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 037484ceaeaf..42ee8892549c 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -401,6 +401,88 @@ static int do_show(int argc, char **argv)
 	return err;
 }
 
+#define SYM_MAX_NAME	256
+
+struct kernel_sym {
+	unsigned long address;
+	char name[SYM_MAX_NAME];
+};
+
+struct dump_data {
+	unsigned long address_call_base;
+	struct kernel_sym *sym_mapping;
+	__u32 sym_count;
+	char scratch_buff[SYM_MAX_NAME];
+};
+
+static int kernel_syms_cmp(const void *sym_a, const void *sym_b)
+{
+	return ((struct kernel_sym *)sym_a)->address -
+	       ((struct kernel_sym *)sym_b)->address;
+}
+
+static void kernel_syms_load(struct dump_data *dd)
+{
+	struct kernel_sym *sym;
+	char buff[256];
+	void *tmp, *address;
+	FILE *fp;
+
+	fp = fopen("/proc/kallsyms", "r");
+	if (!fp)
+		return;
+
+	while (!feof(fp)) {
+		if (!fgets(buff, sizeof(buff), fp))
+			break;
+		tmp = realloc(dd->sym_mapping,
+			      (dd->sym_count + 1) *
+			      sizeof(*dd->sym_mapping));
+		if (!tmp) {
+out:
+			free(dd->sym_mapping);
+			dd->sym_mapping = NULL;
+			fclose(fp);
+			return;
+		}
+		dd->sym_mapping = tmp;
+		sym = &dd->sym_mapping[dd->sym_count];
+		if (sscanf(buff, "%p %*c %s", &address, sym->name) != 2)
+			continue;
+		sym->address = (unsigned long)address;
+		if (!strcmp(sym->name, "__bpf_call_base")) {
+			dd->address_call_base = sym->address;
+			/* sysctl kernel.kptr_restrict was set */
+			if (!sym->address)
+				goto out;
+		}
+		if (sym->address)
+			dd->sym_count++;
+	}
+
+	fclose(fp);
+
+	qsort(dd->sym_mapping, dd->sym_count,
+	      sizeof(*dd->sym_mapping), kernel_syms_cmp);
+}
+
+static void kernel_syms_destroy(struct dump_data *dd)
+{
+	free(dd->sym_mapping);
+}
+
+static struct kernel_sym *kernel_syms_search(struct dump_data *dd,
+					     unsigned long key)
+{
+	struct kernel_sym sym = {
+		.address = key,
+	};
+
+	return dd->sym_mapping ?
+	       bsearch(&sym, dd->sym_mapping, dd->sym_count,
+		       sizeof(*dd->sym_mapping), kernel_syms_cmp) : NULL;
+}
+
 static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
 {
 	va_list args;
@@ -410,8 +492,71 @@ static void print_insn(struct bpf_verifier_env *env, const char *fmt, ...)
 	va_end(args);
 }
 
-static void dump_xlated_plain(void *buf, unsigned int len, bool opcodes)
+static const char *print_call_pcrel(struct dump_data *dd,
+				    struct kernel_sym *sym,
+				    unsigned long address,
+				    const struct bpf_insn *insn)
 {
+	if (sym)
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "%+d#%s", insn->off, sym->name);
+	else
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "%+d#0x%lx", insn->off, address);
+	return dd->scratch_buff;
+}
+
+static const char *print_call_helper(struct dump_data *dd,
+				     struct kernel_sym *sym,
+				     unsigned long address)
+{
+	if (sym)
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "%s", sym->name);
+	else
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "0x%lx", address);
+	return dd->scratch_buff;
+}
+
+static const char *print_call(void *private_data,
+			      const struct bpf_insn *insn)
+{
+	struct dump_data *dd = private_data;
+	unsigned long address = dd->address_call_base + insn->imm;
+	struct kernel_sym *sym;
+
+	sym = kernel_syms_search(dd, address);
+	if (insn->src_reg == BPF_PSEUDO_CALL)
+		return print_call_pcrel(dd, sym, address, insn);
+	else
+		return print_call_helper(dd, sym, address);
+}
+
+static const char *print_imm(void *private_data,
+			     const struct bpf_insn *insn,
+			     __u64 full_imm)
+{
+	struct dump_data *dd = private_data;
+
+	if (insn->src_reg == BPF_PSEUDO_MAP_FD)
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "map[id:%u]", insn->imm);
+	else
+		snprintf(dd->scratch_buff, sizeof(dd->scratch_buff),
+			 "0x%llx", (unsigned long long)full_imm);
+	return dd->scratch_buff;
+}
+
+static void dump_xlated_plain(struct dump_data *dd, void *buf,
+			      unsigned int len, bool opcodes)
+{
+	const struct bpf_insn_cbs cbs = {
+		.cb_print	= print_insn,
+		.cb_call	= print_call,
+		.cb_imm		= print_imm,
+		.private_data	= dd,
+	};
 	struct bpf_insn *insn = buf;
 	bool double_insn = false;
 	unsigned int i;
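The struct above is the extension point of this series: print_bpf_insn() no longer takes a bare print function but a struct bpf_insn_cbs, so the same disassembler core can serve plain, JSON, and in-kernel verifier output with different call and immediate resolvers. A sketch of how a caller wires it up, mirroring the hunk above (the dump_insns() wrapper itself is illustrative, not part of bpftool):

static void dump_insns(struct dump_data *dd, struct bpf_insn *insn,
		       unsigned int cnt)
{
	/* Callbacks as set up in dump_xlated_plain() above: print_call
	 * resolves helper and pc-relative call targets via kallsyms,
	 * print_imm turns pseudo map fds into "map[id:N]".
	 */
	const struct bpf_insn_cbs cbs = {
		.cb_print	= print_insn,
		.cb_call	= print_call,
		.cb_imm		= print_imm,
		.private_data	= dd,
	};
	unsigned int i;

	for (i = 0; i < cnt; i++)
		print_bpf_insn(&cbs, NULL, insn + i, true);
}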
@@ -425,7 +570,7 @@ static void dump_xlated_plain(void *buf, unsigned int len, bool opcodes)
 		double_insn = insn[i].code == (BPF_LD | BPF_IMM | BPF_DW);
 
 		printf("% 4d: ", i);
-		print_bpf_insn(print_insn, NULL, insn + i, true);
+		print_bpf_insn(&cbs, NULL, insn + i, true);
 
 		if (opcodes) {
 			printf("       ");
@@ -454,8 +599,15 @@ static void print_insn_json(struct bpf_verifier_env *env, const char *fmt, ...)
 	va_end(args);
 }
 
-static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
+static void dump_xlated_json(struct dump_data *dd, void *buf,
+			     unsigned int len, bool opcodes)
 {
+	const struct bpf_insn_cbs cbs = {
+		.cb_print	= print_insn_json,
+		.cb_call	= print_call,
+		.cb_imm		= print_imm,
+		.private_data	= dd,
+	};
 	struct bpf_insn *insn = buf;
 	bool double_insn = false;
 	unsigned int i;
@@ -470,7 +622,7 @@ static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
 
 		jsonw_start_object(json_wtr);
 		jsonw_name(json_wtr, "disasm");
-		print_bpf_insn(print_insn_json, NULL, insn + i, true);
+		print_bpf_insn(&cbs, NULL, insn + i, true);
 
 		if (opcodes) {
 			jsonw_name(json_wtr, "opcodes");
@@ -505,6 +657,7 @@ static void dump_xlated_json(void *buf, unsigned int len, bool opcodes)
 static int do_dump(int argc, char **argv)
 {
 	struct bpf_prog_info info = {};
+	struct dump_data dd = {};
 	__u32 len = sizeof(info);
 	unsigned int buf_size;
 	char *filepath = NULL;
@@ -592,6 +745,14 @@ static int do_dump(int argc, char **argv)
 		goto err_free;
 	}
 
+	if ((member_len == &info.jited_prog_len &&
+	     info.jited_prog_insns == 0) ||
+	    (member_len == &info.xlated_prog_len &&
+	     info.xlated_prog_insns == 0)) {
+		p_err("error retrieving insn dump: kernel.kptr_restrict set?");
+		goto err_free;
+	}
+
 	if (filepath) {
 		fd = open(filepath, O_WRONLY | O_CREAT | O_TRUNC, 0600);
 		if (fd < 0) {
@@ -608,17 +769,19 @@ static int do_dump(int argc, char **argv)
 			goto err_free;
 		}
 	} else {
-		if (member_len == &info.jited_prog_len)
+		if (member_len == &info.jited_prog_len) {
 			disasm_print_insn(buf, *member_len, opcodes);
-		else
+		} else {
+			kernel_syms_load(&dd);
 			if (json_output)
-				dump_xlated_json(buf, *member_len, opcodes);
+				dump_xlated_json(&dd, buf, *member_len, opcodes);
 			else
-				dump_xlated_plain(buf, *member_len, opcodes);
+				dump_xlated_plain(&dd, buf, *member_len, opcodes);
+			kernel_syms_destroy(&dd);
+		}
 	}
 
 	free(buf);
-
 	return 0;
 
 err_free:
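The kallsyms handling added above is self-contained enough to exercise in isolation: read /proc/kallsyms, keep symbols with non-zero addresses (kernel.kptr_restrict makes them read as 0), sort, then binary-search by address. A standalone sketch of the same load-sort-bsearch pattern; all names here are local to the sketch, not part of bpftool:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SYM_MAX_NAME 256

struct ksym {
	unsigned long addr;
	char name[SYM_MAX_NAME];
};

/* Same comparator shape as kernel_syms_cmp() in the hunk above. */
static int ksym_cmp(const void *a, const void *b)
{
	return ((const struct ksym *)a)->addr -
	       ((const struct ksym *)b)->addr;
}

int main(void)
{
	struct ksym *syms = NULL, *tmp, key = { 0 };
	size_t cnt = 0;
	char buff[256];
	void *address;
	FILE *fp = fopen("/proc/kallsyms", "r");

	if (!fp)
		return 1;
	while (fgets(buff, sizeof(buff), fp)) {
		tmp = realloc(syms, (cnt + 1) * sizeof(*syms));
		if (!tmp)
			break;
		syms = tmp;
		/* same line format as the bpftool hunk: address, type, name */
		if (sscanf(buff, "%p %*c %s", &address, syms[cnt].name) != 2)
			continue;
		syms[cnt].addr = (unsigned long)address;
		/* with kernel.kptr_restrict set, addresses read back as 0 */
		if (syms[cnt].addr)
			cnt++;
	}
	fclose(fp);
	qsort(syms, cnt, sizeof(*syms), ksym_cmp);

	/* look up an address we know is present: the middle entry */
	key.addr = cnt ? syms[cnt / 2].addr : 0;
	tmp = bsearch(&key, syms, cnt, sizeof(*syms), ksym_cmp);
	if (tmp)
		printf("0x%lx -> %s\n", tmp->addr, tmp->name);
	free(syms);
	return 0;
}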
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 5b83875b3594..e9c4b7cabcf2 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -910,8 +910,9 @@ bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
 				   GELF_R_SYM(rel.r_info));
 			return -LIBBPF_ERRNO__FORMAT;
 		}
-		pr_debug("relo for %ld value %ld name %d\n",
-			 rel.r_info >> 32, sym.st_value, sym.st_name);
+		pr_debug("relo for %lld value %lld name %d\n",
+			 (long long) (rel.r_info >> 32),
+			 (long long) sym.st_value, sym.st_name);
 
 		if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
 			pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
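This is the libbpf-on-sparc64 build fix from point 6) of the pull request, and it is a plain printf-format portability issue: GElf_Rel.r_info and GElf_Sym.st_value are 64-bit ELF types whose underlying C type differs across platforms (unsigned long on some 64-bit ABIs, unsigned long long elsewhere), so %ld is not portable. Casting to long long and printing with %lld is the standard remedy; a reduced illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t r_info = 0x123456789abcdef0ULL;

	/* uint64_t may be long on one platform and long long on
	 * another, so %ld can warn or misprint.  Cast explicitly
	 * to a known type instead, as the libbpf hunk does:
	 */
	printf("relo sym %lld\n", (long long)(r_info >> 32));
	return 0;
}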
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index 9d4897317c77..983dd25d49f4 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -4,3 +4,4 @@ CONFIG_NET_CLS_BPF=m
 CONFIG_BPF_EVENTS=y
 CONFIG_TEST_BPF=m
 CONFIG_CGROUP_BPF=y
+CONFIG_NETDEVSIM=m
diff --git a/tools/testing/selftests/bpf/test_dev_cgroup.c b/tools/testing/selftests/bpf/test_dev_cgroup.c
index 02c85d6c89b0..c1535b34f14f 100644
--- a/tools/testing/selftests/bpf/test_dev_cgroup.c
+++ b/tools/testing/selftests/bpf/test_dev_cgroup.c
@@ -10,6 +10,8 @@
 #include <string.h>
 #include <errno.h>
 #include <assert.h>
+#include <sys/time.h>
+#include <sys/resource.h>
 
 #include <linux/bpf.h>
 #include <bpf/bpf.h>
@@ -23,15 +25,19 @@
 
 int main(int argc, char **argv)
 {
+	struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
 	struct bpf_object *obj;
 	int error = EXIT_FAILURE;
 	int prog_fd, cgroup_fd;
 	__u32 prog_cnt;
 
+	if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0)
+		perror("Unable to lift memlock rlimit");
+
 	if (bpf_prog_load(DEV_CGROUP_PROG, BPF_PROG_TYPE_CGROUP_DEVICE,
 			  &obj, &prog_fd)) {
 		printf("Failed to load DEV_CGROUP program\n");
-		goto err;
+		goto out;
 	}
 
 	if (setup_cgroup_environment()) {
@@ -89,5 +95,6 @@ int main(int argc, char **argv)
 err:
 	cleanup_cgroup_environment();
 
+out:
 	return error;
 }
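Lifting RLIMIT_MEMLOCK before loading is a recurring pattern in BPF selftests, since program and map memory is charged against the locked-memory limit and default limits are often too low. A minimal standalone sketch of the same pattern as the hunk above:

#include <stdio.h>
#include <sys/time.h>
#include <sys/resource.h>

int main(void)
{
	/* Raise the locked-memory limit so subsequent
	 * bpf(BPF_PROG_LOAD) / map creation is not rejected with
	 * EPERM under a low default RLIMIT_MEMLOCK.
	 */
	struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };

	if (setrlimit(RLIMIT_MEMLOCK, &limit) < 0) {
		perror("Unable to lift memlock rlimit");
		return 1;
	}
	/* ... load BPF programs and maps here ... */
	return 0;
}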
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index d38334abb990..543847957fdd 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -9273,6 +9273,196 @@ static struct bpf_test tests[] = {
 		.result = ACCEPT,
 	},
 	{
+		"calls: stack overflow using two frames (pre-call access)",
+		.insns = {
+			/* prog 1 */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1),
+			BPF_EXIT_INSN(),
+
+			/* prog 2 */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.errstr = "combined stack size",
+		.result = REJECT,
+	},
+	{
+		"calls: stack overflow using two frames (post-call access)",
+		.insns = {
+			/* prog 1 */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 2),
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+			BPF_EXIT_INSN(),
+
+			/* prog 2 */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.errstr = "combined stack size",
+		.result = REJECT,
+	},
+	{
+		"calls: stack depth check using three frames. test1",
+		.insns = {
+			/* main */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			/* A */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+			BPF_EXIT_INSN(),
+			/* B */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		/* stack_main=32, stack_A=256, stack_B=64
+		 * and max(main+A, main+A+B) < 512
+		 */
+		.result = ACCEPT,
+	},
+	{
+		"calls: stack depth check using three frames. test2",
+		.insns = {
+			/* main */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 5), /* call B */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -32, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			/* A */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+			BPF_EXIT_INSN(),
+			/* B */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -3), /* call A */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		/* stack_main=32, stack_A=64, stack_B=256
+		 * and max(main+A, main+A+B) < 512
+		 */
+		.result = ACCEPT,
+	},
+	{
+		"calls: stack depth check using three frames. test3",
+		.insns = {
+			/* main */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 8), /* call B */
+			BPF_JMP_IMM(BPF_JGE, BPF_REG_6, 0, 1),
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -64, 0),
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			/* A */
+			BPF_JMP_IMM(BPF_JLT, BPF_REG_1, 10, 1),
+			BPF_EXIT_INSN(),
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -224, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, -3),
+			/* B */
+			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 1),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, -6), /* call A */
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -256, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		/* stack_main=64, stack_A=224, stack_B=256
+		 * and max(main+A, main+A+B) > 512
+		 */
+		.errstr = "combined stack",
+		.result = REJECT,
+	},
+	{
+		"calls: stack depth check using three frames. test4",
+		/* void main(void) {
+		 *	func1(0);
+		 *	func1(1);
+		 *	func2(1);
+		 * }
+		 * void func1(int alloc_or_recurse) {
+		 *	if (alloc_or_recurse) {
+		 *		frame_pointer[-300] = 1;
+		 *	} else {
+		 *		func2(alloc_or_recurse);
+		 *	}
+		 * }
+		 * void func2(int alloc_or_recurse) {
+		 *	if (alloc_or_recurse) {
+		 *		frame_pointer[-300] = 1;
+		 *	}
+		 * }
+		 */
+		.insns = {
+			/* main */
+			BPF_MOV64_IMM(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 6), /* call A */
+			BPF_MOV64_IMM(BPF_REG_1, 1),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 4), /* call A */
+			BPF_MOV64_IMM(BPF_REG_1, 1),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 7), /* call B */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+			/* A */
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 2),
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+			BPF_EXIT_INSN(),
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
+			BPF_EXIT_INSN(),
+			/* B */
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1),
+			BPF_ST_MEM(BPF_B, BPF_REG_10, -300, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.result = REJECT,
+		.errstr = "combined stack",
+	},
+	{
+		"calls: stack depth check using three frames. test5",
+		.insns = {
+			/* main */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call A */
+			BPF_EXIT_INSN(),
+			/* A */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call B */
+			BPF_EXIT_INSN(),
+			/* B */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call C */
+			BPF_EXIT_INSN(),
+			/* C */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call D */
+			BPF_EXIT_INSN(),
+			/* D */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call E */
+			BPF_EXIT_INSN(),
+			/* E */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call F */
+			BPF_EXIT_INSN(),
+			/* F */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call G */
+			BPF_EXIT_INSN(),
+			/* G */
+			BPF_RAW_INSN(BPF_JMP|BPF_CALL, 0, 1, 0, 1), /* call H */
+			BPF_EXIT_INSN(),
+			/* H */
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.prog_type = BPF_PROG_TYPE_XDP,
+		.errstr = "call stack",
+		.result = REJECT,
+	},
+	{
 		"calls: spill into caller stack frame",
 		.insns = {
 			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
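The accept/reject verdicts above all follow from one rule: across every possible call chain, the summed frame sizes must stay under the 512-byte combined stack limit (and the call depth under the maximum, which test5's eight-deep chain exceeds). A sketch of the arithmetic for test1 and test3, with frame sizes taken directly from the test comments (the verifier additionally rounds frame sizes, so the figures are illustrative):

#include <stdio.h>

int main(void)
{
	int main_fr, a_fr, b_fr, chain1, chain2, worst;

	/* test1: stack_main=32, stack_A=256, stack_B=64 */
	main_fr = 32; a_fr = 256; b_fr = 64;
	chain1 = main_fr + a_fr;		/* main -> A      */
	chain2 = main_fr + b_fr + a_fr;		/* main -> B -> A */
	worst = chain1 > chain2 ? chain1 : chain2;
	printf("test1 worst case %d (< 512 => ACCEPT)\n", worst);	/* 352 */

	/* test3: stack_main=64, stack_A=224, stack_B=256 */
	main_fr = 64; a_fr = 224; b_fr = 256;
	chain1 = main_fr + a_fr;
	chain2 = main_fr + b_fr + a_fr;
	worst = chain1 > chain2 ? chain1 : chain2;
	printf("test3 worst case %d (> 512 => REJECT)\n", worst);	/* 544 */
	return 0;
}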
@@ -10258,6 +10448,57 @@ static struct bpf_test tests[] = {
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_XDP,
 	},
+	{
+		"search pruning: all branches should be verified (nop operation)",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 11),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_JMP_A(1),
+			BPF_MOV64_IMM(BPF_REG_4, 1),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
+			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_5, 0, 2),
+			BPF_MOV64_IMM(BPF_REG_6, 0),
+			BPF_ST_MEM(BPF_DW, BPF_REG_6, 0, 0xdead),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr = "R6 invalid mem access 'inv'",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
+	{
+		"search pruning: all branches should be verified (invalid stack access)",
+		.insns = {
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_ST_MEM(BPF_DW, BPF_REG_2, 0, 0),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0),
+			BPF_MOV64_IMM(BPF_REG_4, 0),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0xbeef, 2),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -16),
+			BPF_JMP_A(1),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_4, -24),
+			BPF_EMIT_CALL(BPF_FUNC_ktime_get_ns),
+			BPF_LDX_MEM(BPF_DW, BPF_REG_5, BPF_REG_10, -16),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 3 },
+		.errstr = "invalid read from stack off -16+0 size 8",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_TRACEPOINT,
+	},
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
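The two pruning tests close the hole described in point 1) of the pull request: when comparing a new verifier state against a cached one, the stack-slot walk must examine every slot before declaring the states equivalent, rather than issuing a positive verdict early and ignoring the remaining slots. A schematic of the wrong and right loop shapes, based only on that description (this is not the kernel's actual stacksafe() code):

#include <stdbool.h>
#include <stdio.h>

/* ok[i] stands for "stack slot i of the cached state subsumes
 * slot i of the current state".  Slot 1 mismatches.
 */
static const bool ok[] = { true, false, true };
static const int nslots = 3;

/* Buggy shape: a positive verdict is returned as soon as one slot
 * matches, so the mismatching slot 1 is never examined.
 */
static bool stacksafe_buggy(void)
{
	for (int i = 0; i < nslots; i++)
		if (ok[i])
			return true;	/* premature pruning verdict */
	return false;
}

/* Fixed shape: every slot must pass; any mismatch rejects pruning. */
static bool stacksafe_fixed(void)
{
	for (int i = 0; i < nslots; i++)
		if (!ok[i])
			return false;
	return true;
}

int main(void)
{
	/* prints "buggy: 1, fixed: 0" */
	printf("buggy: %d, fixed: %d\n", stacksafe_buggy(), stacksafe_fixed());
	return 0;
}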