author     Daniel Borkmann <daniel@iogearbox.net>  2017-12-17 14:34:37 -0500
committer  Daniel Borkmann <daniel@iogearbox.net>  2017-12-17 14:34:37 -0500
commit     ef9fde06a259f5da660ada63214addf8cd86a7b9 (patch)
tree       8b0d109f49281f68709343f72c5c3c89549ab9af /tools
parent     0bce7c9a607f1dbf8d83dd2865e1657096dbce59 (diff)
parent     28ab173e96b3971842414bf88eb02eca6ea3f018 (diff)
Merge branch 'bpf-to-bpf-function-calls'
Alexei Starovoitov says:
====================
First of all, a huge thank you to Daniel, John, Jakub, Edward and others who
reviewed multiple iterations of this patch set over the last several months,
and to Dave and others who gave critical feedback during netconf/netdev.
The patch set is solid enough and we have thought through numerous corner cases,
but it's not the end. More follow-ups with code reorganization and features are planned.
TLDR: Allow arbitrary function calls from one bpf function to another bpf function.
Since the beginning of bpf, all bpf programs have been represented as a single function,
and program authors were forced to use always_inline for all functions
in their C code. That caused llvm to unnecessarily inflate the code size
and forced developers to move code into header files with little code reuse.
With a bit of additional complexity, teach the verifier to recognize
arbitrary function calls from one bpf function to another, as long as
all of the functions are presented to the verifier as a single bpf program.
Extended program layout:
..
r1 = .. // arg1
r2 = .. // arg2
call pc+1 // function call pc-relative
exit
.. = r1 // access arg1
.. = r2 // access arg2
..
call pc+20 // second level of function call
...
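As a C-level illustration, here is a hypothetical sketch in the spirit of the
new test_l4lb_noinline.c / test_xdp_noinline.c selftests (which are built with
-fno-inline): a plain static helper now compiles into such a bpf-to-bpf call
instead of having to be always_inline. Names below are illustrative only:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include "bpf_helpers.h"

/* Not __always_inline: llvm emits this as a separate bpf function,
 * reached from the caller via 'call pc+N'.
 */
static int drop_if_short(struct __sk_buff *skb, __u32 min_len)
{
	return skb->len < min_len ? TC_ACT_SHOT : TC_ACT_OK;
}

SEC("classifier")
int prog(struct __sk_buff *skb)
{
	/* skb goes in r1, min_len in r2, the result comes back in r0 */
	return drop_if_short(skb, 64);
}

char _license[] SEC("license") = "GPL";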
It allows for better optimized code and finally allows introducing
the core bpf libraries that can be reused in different projects,
since programs are no longer limited to a single elf file.
With function calls, bpf can be compiled into multiple .o files.
This patch is the first step. It detects programs that contain
multiple functions and checks that calls between them are valid.
It splits the sequence of bpf instructions (one program) into a set
of bpf functions that call each other. Only calls to known
functions are allowed. Since all functions are presented to
the verifier at once, conceptually this is 'static linking'.
Future plans:
- introduce BPF_PROG_TYPE_LIBRARY and allow a set of bpf functions
to be loaded into the kernel that can later be linked to other
programs with concrete program types. Aka 'dynamic linking'.
- introduce a function pointer type and indirect calls to allow
bpf functions to call other dynamically loaded bpf functions while
the caller bpf function is already executing. Aka 'runtime linking'.
This will be a more generic and more flexible alternative
to bpf_tail_calls.
FAQ:
Q: Do the interpreter and JIT changes mean that a new instruction is introduced?
A: No. The call instruction technically stays the same. Now it can call
both kernel helpers and other bpf functions.
The calling convention stays the same as well.
From the uapi point of view, the call insn got a new 'relocation' BPF_PSEUDO_CALL,
similar to the BPF_PSEUDO_MAP_FD 'relocation' of the bpf_ldimm64 insn.
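For example, the 'calls: basic sanity' test added to test_verifier.c below
encodes such a call as a raw instruction (the tests spell the constant 1
directly rather than BPF_PSEUDO_CALL); imm holds the pc-relative offset to
the callee:

BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_CALL, 0, 2), /* call the callee below */
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2), /* callee: return 2 */
BPF_EXIT_INSN(),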
Q: What had to change on the LLVM side?
A: A trivial LLVM patch to allow calls was applied to the upcoming 6.0 release:
https://reviews.llvm.org/rL318614
along with a few bugfixes.
Make sure to build the latest llvm to have bpf_call support.
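On the user-space side, the libbpf changes below keep the existing
bpf_prog_load() interface. A minimal, illustrative sketch (file name, program
type and include path are just examples) of loading one of the new
multi-function objects:

#include <linux/bpf.h>
#include <bpf/libbpf.h> /* tools/lib/bpf; adjust the include path as needed */

int load_l4lb(struct bpf_object **obj)
{
	int prog_fd, err;

	/* libbpf appends the .text sub-functions to each program that calls
	 * into them during relocation; prog_fd refers to the first program
	 * that is not the .text section itself.
	 */
	err = bpf_prog_load("./test_l4lb_noinline.o", BPF_PROG_TYPE_SCHED_CLS,
			    obj, &prog_fd);
	return err ? err : prog_fd;
}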
More details in the patches.
====================
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Diffstat (limited to 'tools')
-rw-r--r--  tools/include/uapi/linux/bpf.h                       6
-rw-r--r--  tools/lib/bpf/bpf.h                                  2
-rw-r--r--  tools/lib/bpf/libbpf.c                             170
-rw-r--r--  tools/testing/selftests/bpf/Makefile                12
-rw-r--r--  tools/testing/selftests/bpf/test_l4lb_noinline.c   473
-rw-r--r--  tools/testing/selftests/bpf/test_progs.c            95
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c       1624
-rw-r--r--  tools/testing/selftests/bpf/test_xdp_noinline.c    833
8 files changed, 3162 insertions, 53 deletions
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index cf446c25c0ec..db1b0923a308 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -197,8 +197,14 @@ enum bpf_attach_type { | |||
197 | */ | 197 | */ |
198 | #define BPF_F_STRICT_ALIGNMENT (1U << 0) | 198 | #define BPF_F_STRICT_ALIGNMENT (1U << 0) |
199 | 199 | ||
200 | /* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */ | ||
200 | #define BPF_PSEUDO_MAP_FD 1 | 201 | #define BPF_PSEUDO_MAP_FD 1 |
201 | 202 | ||
203 | /* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative | ||
204 | * offset to another bpf function | ||
205 | */ | ||
206 | #define BPF_PSEUDO_CALL 1 | ||
207 | |||
202 | /* flags for BPF_MAP_UPDATE_ELEM command */ | 208 | /* flags for BPF_MAP_UPDATE_ELEM command */ |
203 | #define BPF_ANY 0 /* create new element or update existing */ | 209 | #define BPF_ANY 0 /* create new element or update existing */ |
204 | #define BPF_NOEXIST 1 /* create new element if it didn't exist */ | 210 | #define BPF_NOEXIST 1 /* create new element if it didn't exist */ |
diff --git a/tools/lib/bpf/bpf.h b/tools/lib/bpf/bpf.h
index 6534889e2b2f..9f44c196931e 100644
--- a/tools/lib/bpf/bpf.h
+++ b/tools/lib/bpf/bpf.h
@@ -40,7 +40,7 @@ int bpf_create_map_in_map(enum bpf_map_type map_type, const char *name, | |||
40 | __u32 map_flags); | 40 | __u32 map_flags); |
41 | 41 | ||
42 | /* Recommend log buffer size */ | 42 | /* Recommend log buffer size */ |
43 | #define BPF_LOG_BUF_SIZE 65536 | 43 | #define BPF_LOG_BUF_SIZE (256 * 1024) |
44 | int bpf_load_program_name(enum bpf_prog_type type, const char *name, | 44 | int bpf_load_program_name(enum bpf_prog_type type, const char *name, |
45 | const struct bpf_insn *insns, | 45 | const struct bpf_insn *insns, |
46 | size_t insns_cnt, const char *license, | 46 | size_t insns_cnt, const char *license, |
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 65d0d0aff4fa..5b83875b3594 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -174,12 +174,19 @@ struct bpf_program { | |||
174 | char *name; | 174 | char *name; |
175 | char *section_name; | 175 | char *section_name; |
176 | struct bpf_insn *insns; | 176 | struct bpf_insn *insns; |
177 | size_t insns_cnt; | 177 | size_t insns_cnt, main_prog_cnt; |
178 | enum bpf_prog_type type; | 178 | enum bpf_prog_type type; |
179 | 179 | ||
180 | struct { | 180 | struct reloc_desc { |
181 | enum { | ||
182 | RELO_LD64, | ||
183 | RELO_CALL, | ||
184 | } type; | ||
181 | int insn_idx; | 185 | int insn_idx; |
182 | int map_idx; | 186 | union { |
187 | int map_idx; | ||
188 | int text_off; | ||
189 | }; | ||
183 | } *reloc_desc; | 190 | } *reloc_desc; |
184 | int nr_reloc; | 191 | int nr_reloc; |
185 | 192 | ||
@@ -234,6 +241,7 @@ struct bpf_object { | |||
234 | } *reloc; | 241 | } *reloc; |
235 | int nr_reloc; | 242 | int nr_reloc; |
236 | int maps_shndx; | 243 | int maps_shndx; |
244 | int text_shndx; | ||
237 | } efile; | 245 | } efile; |
238 | /* | 246 | /* |
239 | * All loaded bpf_object is linked in a list, which is | 247 | * All loaded bpf_object is linked in a list, which is |
@@ -375,9 +383,13 @@ bpf_object__init_prog_names(struct bpf_object *obj) | |||
375 | size_t pi, si; | 383 | size_t pi, si; |
376 | 384 | ||
377 | for (pi = 0; pi < obj->nr_programs; pi++) { | 385 | for (pi = 0; pi < obj->nr_programs; pi++) { |
378 | char *name = NULL; | 386 | const char *name = NULL; |
379 | 387 | ||
380 | prog = &obj->programs[pi]; | 388 | prog = &obj->programs[pi]; |
389 | if (prog->idx == obj->efile.text_shndx) { | ||
390 | name = ".text"; | ||
391 | goto skip_search; | ||
392 | } | ||
381 | 393 | ||
382 | for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name; | 394 | for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name; |
383 | si++) { | 395 | si++) { |
@@ -405,7 +417,7 @@ bpf_object__init_prog_names(struct bpf_object *obj) | |||
405 | prog->section_name); | 417 | prog->section_name); |
406 | return -EINVAL; | 418 | return -EINVAL; |
407 | } | 419 | } |
408 | 420 | skip_search: | |
409 | prog->name = strdup(name); | 421 | prog->name = strdup(name); |
410 | if (!prog->name) { | 422 | if (!prog->name) { |
411 | pr_warning("failed to allocate memory for prog sym %s\n", | 423 | pr_warning("failed to allocate memory for prog sym %s\n", |
@@ -795,6 +807,8 @@ static int bpf_object__elf_collect(struct bpf_object *obj) | |||
795 | } else if ((sh.sh_type == SHT_PROGBITS) && | 807 | } else if ((sh.sh_type == SHT_PROGBITS) && |
796 | (sh.sh_flags & SHF_EXECINSTR) && | 808 | (sh.sh_flags & SHF_EXECINSTR) && |
797 | (data->d_size > 0)) { | 809 | (data->d_size > 0)) { |
810 | if (strcmp(name, ".text") == 0) | ||
811 | obj->efile.text_shndx = idx; | ||
798 | err = bpf_object__add_program(obj, data->d_buf, | 812 | err = bpf_object__add_program(obj, data->d_buf, |
799 | data->d_size, name, idx); | 813 | data->d_size, name, idx); |
800 | if (err) { | 814 | if (err) { |
@@ -856,11 +870,14 @@ bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx) | |||
856 | } | 870 | } |
857 | 871 | ||
858 | static int | 872 | static int |
859 | bpf_program__collect_reloc(struct bpf_program *prog, | 873 | bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr, |
860 | size_t nr_maps, GElf_Shdr *shdr, | 874 | Elf_Data *data, struct bpf_object *obj) |
861 | Elf_Data *data, Elf_Data *symbols, | ||
862 | int maps_shndx, struct bpf_map *maps) | ||
863 | { | 875 | { |
876 | Elf_Data *symbols = obj->efile.symbols; | ||
877 | int text_shndx = obj->efile.text_shndx; | ||
878 | int maps_shndx = obj->efile.maps_shndx; | ||
879 | struct bpf_map *maps = obj->maps; | ||
880 | size_t nr_maps = obj->nr_maps; | ||
864 | int i, nrels; | 881 | int i, nrels; |
865 | 882 | ||
866 | pr_debug("collecting relocating info for: '%s'\n", | 883 | pr_debug("collecting relocating info for: '%s'\n", |
@@ -893,8 +910,10 @@ bpf_program__collect_reloc(struct bpf_program *prog, | |||
893 | GELF_R_SYM(rel.r_info)); | 910 | GELF_R_SYM(rel.r_info)); |
894 | return -LIBBPF_ERRNO__FORMAT; | 911 | return -LIBBPF_ERRNO__FORMAT; |
895 | } | 912 | } |
913 | pr_debug("relo for %ld value %ld name %d\n", | ||
914 | rel.r_info >> 32, sym.st_value, sym.st_name); | ||
896 | 915 | ||
897 | if (sym.st_shndx != maps_shndx) { | 916 | if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) { |
898 | pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n", | 917 | pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n", |
899 | prog->section_name, sym.st_shndx); | 918 | prog->section_name, sym.st_shndx); |
900 | return -LIBBPF_ERRNO__RELOC; | 919 | return -LIBBPF_ERRNO__RELOC; |
@@ -903,6 +922,17 @@ bpf_program__collect_reloc(struct bpf_program *prog, | |||
903 | insn_idx = rel.r_offset / sizeof(struct bpf_insn); | 922 | insn_idx = rel.r_offset / sizeof(struct bpf_insn); |
904 | pr_debug("relocation: insn_idx=%u\n", insn_idx); | 923 | pr_debug("relocation: insn_idx=%u\n", insn_idx); |
905 | 924 | ||
925 | if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) { | ||
926 | if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) { | ||
927 | pr_warning("incorrect bpf_call opcode\n"); | ||
928 | return -LIBBPF_ERRNO__RELOC; | ||
929 | } | ||
930 | prog->reloc_desc[i].type = RELO_CALL; | ||
931 | prog->reloc_desc[i].insn_idx = insn_idx; | ||
932 | prog->reloc_desc[i].text_off = sym.st_value; | ||
933 | continue; | ||
934 | } | ||
935 | |||
906 | if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) { | 936 | if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) { |
907 | pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n", | 937 | pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n", |
908 | insn_idx, insns[insn_idx].code); | 938 | insn_idx, insns[insn_idx].code); |
@@ -924,6 +954,7 @@ bpf_program__collect_reloc(struct bpf_program *prog, | |||
924 | return -LIBBPF_ERRNO__RELOC; | 954 | return -LIBBPF_ERRNO__RELOC; |
925 | } | 955 | } |
926 | 956 | ||
957 | prog->reloc_desc[i].type = RELO_LD64; | ||
927 | prog->reloc_desc[i].insn_idx = insn_idx; | 958 | prog->reloc_desc[i].insn_idx = insn_idx; |
928 | prog->reloc_desc[i].map_idx = map_idx; | 959 | prog->reloc_desc[i].map_idx = map_idx; |
929 | } | 960 | } |
@@ -963,27 +994,76 @@ bpf_object__create_maps(struct bpf_object *obj) | |||
963 | } | 994 | } |
964 | 995 | ||
965 | static int | 996 | static int |
997 | bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj, | ||
998 | struct reloc_desc *relo) | ||
999 | { | ||
1000 | struct bpf_insn *insn, *new_insn; | ||
1001 | struct bpf_program *text; | ||
1002 | size_t new_cnt; | ||
1003 | |||
1004 | if (relo->type != RELO_CALL) | ||
1005 | return -LIBBPF_ERRNO__RELOC; | ||
1006 | |||
1007 | if (prog->idx == obj->efile.text_shndx) { | ||
1008 | pr_warning("relo in .text insn %d into off %d\n", | ||
1009 | relo->insn_idx, relo->text_off); | ||
1010 | return -LIBBPF_ERRNO__RELOC; | ||
1011 | } | ||
1012 | |||
1013 | if (prog->main_prog_cnt == 0) { | ||
1014 | text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx); | ||
1015 | if (!text) { | ||
1016 | pr_warning("no .text section found yet relo into text exist\n"); | ||
1017 | return -LIBBPF_ERRNO__RELOC; | ||
1018 | } | ||
1019 | new_cnt = prog->insns_cnt + text->insns_cnt; | ||
1020 | new_insn = realloc(prog->insns, new_cnt * sizeof(*insn)); | ||
1021 | if (!new_insn) { | ||
1022 | pr_warning("oom in prog realloc\n"); | ||
1023 | return -ENOMEM; | ||
1024 | } | ||
1025 | memcpy(new_insn + prog->insns_cnt, text->insns, | ||
1026 | text->insns_cnt * sizeof(*insn)); | ||
1027 | prog->insns = new_insn; | ||
1028 | prog->main_prog_cnt = prog->insns_cnt; | ||
1029 | prog->insns_cnt = new_cnt; | ||
1030 | } | ||
1031 | insn = &prog->insns[relo->insn_idx]; | ||
1032 | insn->imm += prog->main_prog_cnt - relo->insn_idx; | ||
1033 | pr_debug("added %zd insn from %s to prog %s\n", | ||
1034 | text->insns_cnt, text->section_name, prog->section_name); | ||
1035 | return 0; | ||
1036 | } | ||
1037 | |||
1038 | static int | ||
966 | bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj) | 1039 | bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj) |
967 | { | 1040 | { |
968 | int i; | 1041 | int i, err; |
969 | 1042 | ||
970 | if (!prog || !prog->reloc_desc) | 1043 | if (!prog || !prog->reloc_desc) |
971 | return 0; | 1044 | return 0; |
972 | 1045 | ||
973 | for (i = 0; i < prog->nr_reloc; i++) { | 1046 | for (i = 0; i < prog->nr_reloc; i++) { |
974 | int insn_idx, map_idx; | 1047 | if (prog->reloc_desc[i].type == RELO_LD64) { |
975 | struct bpf_insn *insns = prog->insns; | 1048 | struct bpf_insn *insns = prog->insns; |
1049 | int insn_idx, map_idx; | ||
976 | 1050 | ||
977 | insn_idx = prog->reloc_desc[i].insn_idx; | 1051 | insn_idx = prog->reloc_desc[i].insn_idx; |
978 | map_idx = prog->reloc_desc[i].map_idx; | 1052 | map_idx = prog->reloc_desc[i].map_idx; |
979 | 1053 | ||
980 | if (insn_idx >= (int)prog->insns_cnt) { | 1054 | if (insn_idx >= (int)prog->insns_cnt) { |
981 | pr_warning("relocation out of range: '%s'\n", | 1055 | pr_warning("relocation out of range: '%s'\n", |
982 | prog->section_name); | 1056 | prog->section_name); |
983 | return -LIBBPF_ERRNO__RELOC; | 1057 | return -LIBBPF_ERRNO__RELOC; |
1058 | } | ||
1059 | insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD; | ||
1060 | insns[insn_idx].imm = obj->maps[map_idx].fd; | ||
1061 | } else { | ||
1062 | err = bpf_program__reloc_text(prog, obj, | ||
1063 | &prog->reloc_desc[i]); | ||
1064 | if (err) | ||
1065 | return err; | ||
984 | } | 1066 | } |
985 | insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD; | ||
986 | insns[insn_idx].imm = obj->maps[map_idx].fd; | ||
987 | } | 1067 | } |
988 | 1068 | ||
989 | zfree(&prog->reloc_desc); | 1069 | zfree(&prog->reloc_desc); |
@@ -1026,7 +1106,6 @@ static int bpf_object__collect_reloc(struct bpf_object *obj) | |||
1026 | Elf_Data *data = obj->efile.reloc[i].data; | 1106 | Elf_Data *data = obj->efile.reloc[i].data; |
1027 | int idx = shdr->sh_info; | 1107 | int idx = shdr->sh_info; |
1028 | struct bpf_program *prog; | 1108 | struct bpf_program *prog; |
1029 | size_t nr_maps = obj->nr_maps; | ||
1030 | 1109 | ||
1031 | if (shdr->sh_type != SHT_REL) { | 1110 | if (shdr->sh_type != SHT_REL) { |
1032 | pr_warning("internal error at %d\n", __LINE__); | 1111 | pr_warning("internal error at %d\n", __LINE__); |
@@ -1040,11 +1119,9 @@ static int bpf_object__collect_reloc(struct bpf_object *obj) | |||
1040 | return -LIBBPF_ERRNO__RELOC; | 1119 | return -LIBBPF_ERRNO__RELOC; |
1041 | } | 1120 | } |
1042 | 1121 | ||
1043 | err = bpf_program__collect_reloc(prog, nr_maps, | 1122 | err = bpf_program__collect_reloc(prog, |
1044 | shdr, data, | 1123 | shdr, data, |
1045 | obj->efile.symbols, | 1124 | obj); |
1046 | obj->efile.maps_shndx, | ||
1047 | obj->maps); | ||
1048 | if (err) | 1125 | if (err) |
1049 | return err; | 1126 | return err; |
1050 | } | 1127 | } |
@@ -1197,6 +1274,8 @@ bpf_object__load_progs(struct bpf_object *obj) | |||
1197 | int err; | 1274 | int err; |
1198 | 1275 | ||
1199 | for (i = 0; i < obj->nr_programs; i++) { | 1276 | for (i = 0; i < obj->nr_programs; i++) { |
1277 | if (obj->programs[i].idx == obj->efile.text_shndx) | ||
1278 | continue; | ||
1200 | err = bpf_program__load(&obj->programs[i], | 1279 | err = bpf_program__load(&obj->programs[i], |
1201 | obj->license, | 1280 | obj->license, |
1202 | obj->kern_version); | 1281 | obj->kern_version); |
@@ -1859,7 +1938,7 @@ long libbpf_get_error(const void *ptr) | |||
1859 | int bpf_prog_load(const char *file, enum bpf_prog_type type, | 1938 | int bpf_prog_load(const char *file, enum bpf_prog_type type, |
1860 | struct bpf_object **pobj, int *prog_fd) | 1939 | struct bpf_object **pobj, int *prog_fd) |
1861 | { | 1940 | { |
1862 | struct bpf_program *prog; | 1941 | struct bpf_program *prog, *first_prog = NULL; |
1863 | struct bpf_object *obj; | 1942 | struct bpf_object *obj; |
1864 | int err; | 1943 | int err; |
1865 | 1944 | ||
@@ -1867,25 +1946,30 @@ int bpf_prog_load(const char *file, enum bpf_prog_type type, | |||
1867 | if (IS_ERR(obj)) | 1946 | if (IS_ERR(obj)) |
1868 | return -ENOENT; | 1947 | return -ENOENT; |
1869 | 1948 | ||
1870 | prog = bpf_program__next(NULL, obj); | 1949 | bpf_object__for_each_program(prog, obj) { |
1871 | if (!prog) { | 1950 | /* |
1872 | bpf_object__close(obj); | 1951 | * If type is not specified, try to guess it based on |
1873 | return -ENOENT; | 1952 | * section name. |
1874 | } | 1953 | */ |
1875 | |||
1876 | /* | ||
1877 | * If type is not specified, try to guess it based on | ||
1878 | * section name. | ||
1879 | */ | ||
1880 | if (type == BPF_PROG_TYPE_UNSPEC) { | ||
1881 | type = bpf_program__guess_type(prog); | ||
1882 | if (type == BPF_PROG_TYPE_UNSPEC) { | 1954 | if (type == BPF_PROG_TYPE_UNSPEC) { |
1883 | bpf_object__close(obj); | 1955 | type = bpf_program__guess_type(prog); |
1884 | return -EINVAL; | 1956 | if (type == BPF_PROG_TYPE_UNSPEC) { |
1957 | bpf_object__close(obj); | ||
1958 | return -EINVAL; | ||
1959 | } | ||
1885 | } | 1960 | } |
1961 | |||
1962 | bpf_program__set_type(prog, type); | ||
1963 | if (prog->idx != obj->efile.text_shndx && !first_prog) | ||
1964 | first_prog = prog; | ||
1965 | } | ||
1966 | |||
1967 | if (!first_prog) { | ||
1968 | pr_warning("object file doesn't contain bpf program\n"); | ||
1969 | bpf_object__close(obj); | ||
1970 | return -ENOENT; | ||
1886 | } | 1971 | } |
1887 | 1972 | ||
1888 | bpf_program__set_type(prog, type); | ||
1889 | err = bpf_object__load(obj); | 1973 | err = bpf_object__load(obj); |
1890 | if (err) { | 1974 | if (err) { |
1891 | bpf_object__close(obj); | 1975 | bpf_object__close(obj); |
@@ -1893,6 +1977,6 @@ int bpf_prog_load(const char *file, enum bpf_prog_type type, | |||
1893 | } | 1977 | } |
1894 | 1978 | ||
1895 | *pobj = obj; | 1979 | *pobj = obj; |
1896 | *prog_fd = bpf_program__fd(prog); | 1980 | *prog_fd = bpf_program__fd(first_prog); |
1897 | return 0; | 1981 | return 0; |
1898 | } | 1982 | } |
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 255fb1f50f6b..7ef9601d04bf 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -17,7 +17,8 @@ TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test | |||
17 | 17 | ||
18 | TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \ | 18 | TEST_GEN_FILES = test_pkt_access.o test_xdp.o test_l4lb.o test_tcp_estats.o test_obj_id.o \ |
19 | test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \ | 19 | test_pkt_md_access.o test_xdp_redirect.o test_xdp_meta.o sockmap_parse_prog.o \ |
20 | sockmap_verdict_prog.o dev_cgroup.o sample_ret0.o test_tracepoint.o | 20 | sockmap_verdict_prog.o dev_cgroup.o sample_ret0.o test_tracepoint.o \ |
21 | test_l4lb_noinline.o test_xdp_noinline.o | ||
21 | 22 | ||
22 | TEST_PROGS := test_kmod.sh test_xdp_redirect.sh test_xdp_meta.sh \ | 23 | TEST_PROGS := test_kmod.sh test_xdp_redirect.sh test_xdp_meta.sh \ |
23 | test_offload.py | 24 | test_offload.py |
@@ -49,8 +50,13 @@ else | |||
49 | CPU ?= generic | 50 | CPU ?= generic |
50 | endif | 51 | endif |
51 | 52 | ||
53 | CLANG_FLAGS = -I. -I./include/uapi -I../../../include/uapi \ | ||
54 | -Wno-compare-distinct-pointer-types | ||
55 | |||
56 | $(OUTPUT)/test_l4lb_noinline.o: CLANG_FLAGS += -fno-inline | ||
57 | $(OUTPUT)/test_xdp_noinline.o: CLANG_FLAGS += -fno-inline | ||
58 | |||
52 | %.o: %.c | 59 | %.o: %.c |
53 | $(CLANG) -I. -I./include/uapi -I../../../include/uapi \ | 60 | $(CLANG) $(CLANG_FLAGS) \ |
54 | -Wno-compare-distinct-pointer-types \ | ||
55 | -O2 -target bpf -emit-llvm -c $< -o - | \ | 61 | -O2 -target bpf -emit-llvm -c $< -o - | \ |
56 | $(LLC) -march=bpf -mcpu=$(CPU) -filetype=obj -o $@ | 62 | $(LLC) -march=bpf -mcpu=$(CPU) -filetype=obj -o $@ |
diff --git a/tools/testing/selftests/bpf/test_l4lb_noinline.c b/tools/testing/selftests/bpf/test_l4lb_noinline.c
new file mode 100644
index 000000000000..ba44a14e6dc4
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_l4lb_noinline.c
@@ -0,0 +1,473 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | // Copyright (c) 2017 Facebook | ||
3 | #include <stddef.h> | ||
4 | #include <stdbool.h> | ||
5 | #include <string.h> | ||
6 | #include <linux/pkt_cls.h> | ||
7 | #include <linux/bpf.h> | ||
8 | #include <linux/in.h> | ||
9 | #include <linux/if_ether.h> | ||
10 | #include <linux/ip.h> | ||
11 | #include <linux/ipv6.h> | ||
12 | #include <linux/icmp.h> | ||
13 | #include <linux/icmpv6.h> | ||
14 | #include <linux/tcp.h> | ||
15 | #include <linux/udp.h> | ||
16 | #include "bpf_helpers.h" | ||
17 | #include "test_iptunnel_common.h" | ||
18 | #include "bpf_endian.h" | ||
19 | |||
20 | int _version SEC("version") = 1; | ||
21 | |||
22 | static __u32 rol32(__u32 word, unsigned int shift) | ||
23 | { | ||
24 | return (word << shift) | (word >> ((-shift) & 31)); | ||
25 | } | ||
26 | |||
27 | /* copy paste of jhash from kernel sources to make sure llvm | ||
28 | * can compile it into valid sequence of bpf instructions | ||
29 | */ | ||
30 | #define __jhash_mix(a, b, c) \ | ||
31 | { \ | ||
32 | a -= c; a ^= rol32(c, 4); c += b; \ | ||
33 | b -= a; b ^= rol32(a, 6); a += c; \ | ||
34 | c -= b; c ^= rol32(b, 8); b += a; \ | ||
35 | a -= c; a ^= rol32(c, 16); c += b; \ | ||
36 | b -= a; b ^= rol32(a, 19); a += c; \ | ||
37 | c -= b; c ^= rol32(b, 4); b += a; \ | ||
38 | } | ||
39 | |||
40 | #define __jhash_final(a, b, c) \ | ||
41 | { \ | ||
42 | c ^= b; c -= rol32(b, 14); \ | ||
43 | a ^= c; a -= rol32(c, 11); \ | ||
44 | b ^= a; b -= rol32(a, 25); \ | ||
45 | c ^= b; c -= rol32(b, 16); \ | ||
46 | a ^= c; a -= rol32(c, 4); \ | ||
47 | b ^= a; b -= rol32(a, 14); \ | ||
48 | c ^= b; c -= rol32(b, 24); \ | ||
49 | } | ||
50 | |||
51 | #define JHASH_INITVAL 0xdeadbeef | ||
52 | |||
53 | typedef unsigned int u32; | ||
54 | |||
55 | static u32 jhash(const void *key, u32 length, u32 initval) | ||
56 | { | ||
57 | u32 a, b, c; | ||
58 | const unsigned char *k = key; | ||
59 | |||
60 | a = b = c = JHASH_INITVAL + length + initval; | ||
61 | |||
62 | while (length > 12) { | ||
63 | a += *(u32 *)(k); | ||
64 | b += *(u32 *)(k + 4); | ||
65 | c += *(u32 *)(k + 8); | ||
66 | __jhash_mix(a, b, c); | ||
67 | length -= 12; | ||
68 | k += 12; | ||
69 | } | ||
70 | switch (length) { | ||
71 | case 12: c += (u32)k[11]<<24; | ||
72 | case 11: c += (u32)k[10]<<16; | ||
73 | case 10: c += (u32)k[9]<<8; | ||
74 | case 9: c += k[8]; | ||
75 | case 8: b += (u32)k[7]<<24; | ||
76 | case 7: b += (u32)k[6]<<16; | ||
77 | case 6: b += (u32)k[5]<<8; | ||
78 | case 5: b += k[4]; | ||
79 | case 4: a += (u32)k[3]<<24; | ||
80 | case 3: a += (u32)k[2]<<16; | ||
81 | case 2: a += (u32)k[1]<<8; | ||
82 | case 1: a += k[0]; | ||
83 | __jhash_final(a, b, c); | ||
84 | case 0: /* Nothing left to add */ | ||
85 | break; | ||
86 | } | ||
87 | |||
88 | return c; | ||
89 | } | ||
90 | |||
91 | static u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval) | ||
92 | { | ||
93 | a += initval; | ||
94 | b += initval; | ||
95 | c += initval; | ||
96 | __jhash_final(a, b, c); | ||
97 | return c; | ||
98 | } | ||
99 | |||
100 | static u32 jhash_2words(u32 a, u32 b, u32 initval) | ||
101 | { | ||
102 | return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2)); | ||
103 | } | ||
104 | |||
105 | #define PCKT_FRAGMENTED 65343 | ||
106 | #define IPV4_HDR_LEN_NO_OPT 20 | ||
107 | #define IPV4_PLUS_ICMP_HDR 28 | ||
108 | #define IPV6_PLUS_ICMP_HDR 48 | ||
109 | #define RING_SIZE 2 | ||
110 | #define MAX_VIPS 12 | ||
111 | #define MAX_REALS 5 | ||
112 | #define CTL_MAP_SIZE 16 | ||
113 | #define CH_RINGS_SIZE (MAX_VIPS * RING_SIZE) | ||
114 | #define F_IPV6 (1 << 0) | ||
115 | #define F_HASH_NO_SRC_PORT (1 << 0) | ||
116 | #define F_ICMP (1 << 0) | ||
117 | #define F_SYN_SET (1 << 1) | ||
118 | |||
119 | struct packet_description { | ||
120 | union { | ||
121 | __be32 src; | ||
122 | __be32 srcv6[4]; | ||
123 | }; | ||
124 | union { | ||
125 | __be32 dst; | ||
126 | __be32 dstv6[4]; | ||
127 | }; | ||
128 | union { | ||
129 | __u32 ports; | ||
130 | __u16 port16[2]; | ||
131 | }; | ||
132 | __u8 proto; | ||
133 | __u8 flags; | ||
134 | }; | ||
135 | |||
136 | struct ctl_value { | ||
137 | union { | ||
138 | __u64 value; | ||
139 | __u32 ifindex; | ||
140 | __u8 mac[6]; | ||
141 | }; | ||
142 | }; | ||
143 | |||
144 | struct vip_meta { | ||
145 | __u32 flags; | ||
146 | __u32 vip_num; | ||
147 | }; | ||
148 | |||
149 | struct real_definition { | ||
150 | union { | ||
151 | __be32 dst; | ||
152 | __be32 dstv6[4]; | ||
153 | }; | ||
154 | __u8 flags; | ||
155 | }; | ||
156 | |||
157 | struct vip_stats { | ||
158 | __u64 bytes; | ||
159 | __u64 pkts; | ||
160 | }; | ||
161 | |||
162 | struct eth_hdr { | ||
163 | unsigned char eth_dest[ETH_ALEN]; | ||
164 | unsigned char eth_source[ETH_ALEN]; | ||
165 | unsigned short eth_proto; | ||
166 | }; | ||
167 | |||
168 | struct bpf_map_def SEC("maps") vip_map = { | ||
169 | .type = BPF_MAP_TYPE_HASH, | ||
170 | .key_size = sizeof(struct vip), | ||
171 | .value_size = sizeof(struct vip_meta), | ||
172 | .max_entries = MAX_VIPS, | ||
173 | }; | ||
174 | |||
175 | struct bpf_map_def SEC("maps") ch_rings = { | ||
176 | .type = BPF_MAP_TYPE_ARRAY, | ||
177 | .key_size = sizeof(__u32), | ||
178 | .value_size = sizeof(__u32), | ||
179 | .max_entries = CH_RINGS_SIZE, | ||
180 | }; | ||
181 | |||
182 | struct bpf_map_def SEC("maps") reals = { | ||
183 | .type = BPF_MAP_TYPE_ARRAY, | ||
184 | .key_size = sizeof(__u32), | ||
185 | .value_size = sizeof(struct real_definition), | ||
186 | .max_entries = MAX_REALS, | ||
187 | }; | ||
188 | |||
189 | struct bpf_map_def SEC("maps") stats = { | ||
190 | .type = BPF_MAP_TYPE_PERCPU_ARRAY, | ||
191 | .key_size = sizeof(__u32), | ||
192 | .value_size = sizeof(struct vip_stats), | ||
193 | .max_entries = MAX_VIPS, | ||
194 | }; | ||
195 | |||
196 | struct bpf_map_def SEC("maps") ctl_array = { | ||
197 | .type = BPF_MAP_TYPE_ARRAY, | ||
198 | .key_size = sizeof(__u32), | ||
199 | .value_size = sizeof(struct ctl_value), | ||
200 | .max_entries = CTL_MAP_SIZE, | ||
201 | }; | ||
202 | |||
203 | static __u32 get_packet_hash(struct packet_description *pckt, | ||
204 | bool ipv6) | ||
205 | { | ||
206 | if (ipv6) | ||
207 | return jhash_2words(jhash(pckt->srcv6, 16, MAX_VIPS), | ||
208 | pckt->ports, CH_RINGS_SIZE); | ||
209 | else | ||
210 | return jhash_2words(pckt->src, pckt->ports, CH_RINGS_SIZE); | ||
211 | } | ||
212 | |||
213 | static bool get_packet_dst(struct real_definition **real, | ||
214 | struct packet_description *pckt, | ||
215 | struct vip_meta *vip_info, | ||
216 | bool is_ipv6) | ||
217 | { | ||
218 | __u32 hash = get_packet_hash(pckt, is_ipv6); | ||
219 | __u32 key = RING_SIZE * vip_info->vip_num + hash % RING_SIZE; | ||
220 | __u32 *real_pos; | ||
221 | |||
222 | if (hash != 0x358459b7 /* jhash of ipv4 packet */ && | ||
223 | hash != 0x2f4bc6bb /* jhash of ipv6 packet */) | ||
224 | return 0; | ||
225 | |||
226 | real_pos = bpf_map_lookup_elem(&ch_rings, &key); | ||
227 | if (!real_pos) | ||
228 | return false; | ||
229 | key = *real_pos; | ||
230 | *real = bpf_map_lookup_elem(&reals, &key); | ||
231 | if (!(*real)) | ||
232 | return false; | ||
233 | return true; | ||
234 | } | ||
235 | |||
236 | static int parse_icmpv6(void *data, void *data_end, __u64 off, | ||
237 | struct packet_description *pckt) | ||
238 | { | ||
239 | struct icmp6hdr *icmp_hdr; | ||
240 | struct ipv6hdr *ip6h; | ||
241 | |||
242 | icmp_hdr = data + off; | ||
243 | if (icmp_hdr + 1 > data_end) | ||
244 | return TC_ACT_SHOT; | ||
245 | if (icmp_hdr->icmp6_type != ICMPV6_PKT_TOOBIG) | ||
246 | return TC_ACT_OK; | ||
247 | off += sizeof(struct icmp6hdr); | ||
248 | ip6h = data + off; | ||
249 | if (ip6h + 1 > data_end) | ||
250 | return TC_ACT_SHOT; | ||
251 | pckt->proto = ip6h->nexthdr; | ||
252 | pckt->flags |= F_ICMP; | ||
253 | memcpy(pckt->srcv6, ip6h->daddr.s6_addr32, 16); | ||
254 | memcpy(pckt->dstv6, ip6h->saddr.s6_addr32, 16); | ||
255 | return TC_ACT_UNSPEC; | ||
256 | } | ||
257 | |||
258 | static int parse_icmp(void *data, void *data_end, __u64 off, | ||
259 | struct packet_description *pckt) | ||
260 | { | ||
261 | struct icmphdr *icmp_hdr; | ||
262 | struct iphdr *iph; | ||
263 | |||
264 | icmp_hdr = data + off; | ||
265 | if (icmp_hdr + 1 > data_end) | ||
266 | return TC_ACT_SHOT; | ||
267 | if (icmp_hdr->type != ICMP_DEST_UNREACH || | ||
268 | icmp_hdr->code != ICMP_FRAG_NEEDED) | ||
269 | return TC_ACT_OK; | ||
270 | off += sizeof(struct icmphdr); | ||
271 | iph = data + off; | ||
272 | if (iph + 1 > data_end) | ||
273 | return TC_ACT_SHOT; | ||
274 | if (iph->ihl != 5) | ||
275 | return TC_ACT_SHOT; | ||
276 | pckt->proto = iph->protocol; | ||
277 | pckt->flags |= F_ICMP; | ||
278 | pckt->src = iph->daddr; | ||
279 | pckt->dst = iph->saddr; | ||
280 | return TC_ACT_UNSPEC; | ||
281 | } | ||
282 | |||
283 | static bool parse_udp(void *data, __u64 off, void *data_end, | ||
284 | struct packet_description *pckt) | ||
285 | { | ||
286 | struct udphdr *udp; | ||
287 | udp = data + off; | ||
288 | |||
289 | if (udp + 1 > data_end) | ||
290 | return false; | ||
291 | |||
292 | if (!(pckt->flags & F_ICMP)) { | ||
293 | pckt->port16[0] = udp->source; | ||
294 | pckt->port16[1] = udp->dest; | ||
295 | } else { | ||
296 | pckt->port16[0] = udp->dest; | ||
297 | pckt->port16[1] = udp->source; | ||
298 | } | ||
299 | return true; | ||
300 | } | ||
301 | |||
302 | static bool parse_tcp(void *data, __u64 off, void *data_end, | ||
303 | struct packet_description *pckt) | ||
304 | { | ||
305 | struct tcphdr *tcp; | ||
306 | |||
307 | tcp = data + off; | ||
308 | if (tcp + 1 > data_end) | ||
309 | return false; | ||
310 | |||
311 | if (tcp->syn) | ||
312 | pckt->flags |= F_SYN_SET; | ||
313 | |||
314 | if (!(pckt->flags & F_ICMP)) { | ||
315 | pckt->port16[0] = tcp->source; | ||
316 | pckt->port16[1] = tcp->dest; | ||
317 | } else { | ||
318 | pckt->port16[0] = tcp->dest; | ||
319 | pckt->port16[1] = tcp->source; | ||
320 | } | ||
321 | return true; | ||
322 | } | ||
323 | |||
324 | static int process_packet(void *data, __u64 off, void *data_end, | ||
325 | bool is_ipv6, struct __sk_buff *skb) | ||
326 | { | ||
327 | void *pkt_start = (void *)(long)skb->data; | ||
328 | struct packet_description pckt = {}; | ||
329 | struct eth_hdr *eth = pkt_start; | ||
330 | struct bpf_tunnel_key tkey = {}; | ||
331 | struct vip_stats *data_stats; | ||
332 | struct real_definition *dst; | ||
333 | struct vip_meta *vip_info; | ||
334 | struct ctl_value *cval; | ||
335 | __u32 v4_intf_pos = 1; | ||
336 | __u32 v6_intf_pos = 2; | ||
337 | struct ipv6hdr *ip6h; | ||
338 | struct vip vip = {}; | ||
339 | struct iphdr *iph; | ||
340 | int tun_flag = 0; | ||
341 | __u16 pkt_bytes; | ||
342 | __u64 iph_len; | ||
343 | __u32 ifindex; | ||
344 | __u8 protocol; | ||
345 | __u32 vip_num; | ||
346 | int action; | ||
347 | |||
348 | tkey.tunnel_ttl = 64; | ||
349 | if (is_ipv6) { | ||
350 | ip6h = data + off; | ||
351 | if (ip6h + 1 > data_end) | ||
352 | return TC_ACT_SHOT; | ||
353 | |||
354 | iph_len = sizeof(struct ipv6hdr); | ||
355 | protocol = ip6h->nexthdr; | ||
356 | pckt.proto = protocol; | ||
357 | pkt_bytes = bpf_ntohs(ip6h->payload_len); | ||
358 | off += iph_len; | ||
359 | if (protocol == IPPROTO_FRAGMENT) { | ||
360 | return TC_ACT_SHOT; | ||
361 | } else if (protocol == IPPROTO_ICMPV6) { | ||
362 | action = parse_icmpv6(data, data_end, off, &pckt); | ||
363 | if (action >= 0) | ||
364 | return action; | ||
365 | off += IPV6_PLUS_ICMP_HDR; | ||
366 | } else { | ||
367 | memcpy(pckt.srcv6, ip6h->saddr.s6_addr32, 16); | ||
368 | memcpy(pckt.dstv6, ip6h->daddr.s6_addr32, 16); | ||
369 | } | ||
370 | } else { | ||
371 | iph = data + off; | ||
372 | if (iph + 1 > data_end) | ||
373 | return TC_ACT_SHOT; | ||
374 | if (iph->ihl != 5) | ||
375 | return TC_ACT_SHOT; | ||
376 | |||
377 | protocol = iph->protocol; | ||
378 | pckt.proto = protocol; | ||
379 | pkt_bytes = bpf_ntohs(iph->tot_len); | ||
380 | off += IPV4_HDR_LEN_NO_OPT; | ||
381 | |||
382 | if (iph->frag_off & PCKT_FRAGMENTED) | ||
383 | return TC_ACT_SHOT; | ||
384 | if (protocol == IPPROTO_ICMP) { | ||
385 | action = parse_icmp(data, data_end, off, &pckt); | ||
386 | if (action >= 0) | ||
387 | return action; | ||
388 | off += IPV4_PLUS_ICMP_HDR; | ||
389 | } else { | ||
390 | pckt.src = iph->saddr; | ||
391 | pckt.dst = iph->daddr; | ||
392 | } | ||
393 | } | ||
394 | protocol = pckt.proto; | ||
395 | |||
396 | if (protocol == IPPROTO_TCP) { | ||
397 | if (!parse_tcp(data, off, data_end, &pckt)) | ||
398 | return TC_ACT_SHOT; | ||
399 | } else if (protocol == IPPROTO_UDP) { | ||
400 | if (!parse_udp(data, off, data_end, &pckt)) | ||
401 | return TC_ACT_SHOT; | ||
402 | } else { | ||
403 | return TC_ACT_SHOT; | ||
404 | } | ||
405 | |||
406 | if (is_ipv6) | ||
407 | memcpy(vip.daddr.v6, pckt.dstv6, 16); | ||
408 | else | ||
409 | vip.daddr.v4 = pckt.dst; | ||
410 | |||
411 | vip.dport = pckt.port16[1]; | ||
412 | vip.protocol = pckt.proto; | ||
413 | vip_info = bpf_map_lookup_elem(&vip_map, &vip); | ||
414 | if (!vip_info) { | ||
415 | vip.dport = 0; | ||
416 | vip_info = bpf_map_lookup_elem(&vip_map, &vip); | ||
417 | if (!vip_info) | ||
418 | return TC_ACT_SHOT; | ||
419 | pckt.port16[1] = 0; | ||
420 | } | ||
421 | |||
422 | if (vip_info->flags & F_HASH_NO_SRC_PORT) | ||
423 | pckt.port16[0] = 0; | ||
424 | |||
425 | if (!get_packet_dst(&dst, &pckt, vip_info, is_ipv6)) | ||
426 | return TC_ACT_SHOT; | ||
427 | |||
428 | if (dst->flags & F_IPV6) { | ||
429 | cval = bpf_map_lookup_elem(&ctl_array, &v6_intf_pos); | ||
430 | if (!cval) | ||
431 | return TC_ACT_SHOT; | ||
432 | ifindex = cval->ifindex; | ||
433 | memcpy(tkey.remote_ipv6, dst->dstv6, 16); | ||
434 | tun_flag = BPF_F_TUNINFO_IPV6; | ||
435 | } else { | ||
436 | cval = bpf_map_lookup_elem(&ctl_array, &v4_intf_pos); | ||
437 | if (!cval) | ||
438 | return TC_ACT_SHOT; | ||
439 | ifindex = cval->ifindex; | ||
440 | tkey.remote_ipv4 = dst->dst; | ||
441 | } | ||
442 | vip_num = vip_info->vip_num; | ||
443 | data_stats = bpf_map_lookup_elem(&stats, &vip_num); | ||
444 | if (!data_stats) | ||
445 | return TC_ACT_SHOT; | ||
446 | data_stats->pkts++; | ||
447 | data_stats->bytes += pkt_bytes; | ||
448 | bpf_skb_set_tunnel_key(skb, &tkey, sizeof(tkey), tun_flag); | ||
449 | *(u32 *)eth->eth_dest = tkey.remote_ipv4; | ||
450 | return bpf_redirect(ifindex, 0); | ||
451 | } | ||
452 | |||
453 | SEC("l4lb-demo") | ||
454 | int balancer_ingress(struct __sk_buff *ctx) | ||
455 | { | ||
456 | void *data_end = (void *)(long)ctx->data_end; | ||
457 | void *data = (void *)(long)ctx->data; | ||
458 | struct eth_hdr *eth = data; | ||
459 | __u32 eth_proto; | ||
460 | __u32 nh_off; | ||
461 | |||
462 | nh_off = sizeof(struct eth_hdr); | ||
463 | if (data + nh_off > data_end) | ||
464 | return TC_ACT_SHOT; | ||
465 | eth_proto = eth->eth_proto; | ||
466 | if (eth_proto == bpf_htons(ETH_P_IP)) | ||
467 | return process_packet(data, nh_off, data_end, false, ctx); | ||
468 | else if (eth_proto == bpf_htons(ETH_P_IPV6)) | ||
469 | return process_packet(data, nh_off, data_end, true, ctx); | ||
470 | else | ||
471 | return TC_ACT_SHOT; | ||
472 | } | ||
473 | char _license[] SEC("license") = "GPL"; | ||
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 1d7d2149163a..6472ca98690e 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -169,10 +169,9 @@ out: | |||
169 | #define NUM_ITER 100000 | 169 | #define NUM_ITER 100000 |
170 | #define VIP_NUM 5 | 170 | #define VIP_NUM 5 |
171 | 171 | ||
172 | static void test_l4lb(void) | 172 | static void test_l4lb(const char *file) |
173 | { | 173 | { |
174 | unsigned int nr_cpus = bpf_num_possible_cpus(); | 174 | unsigned int nr_cpus = bpf_num_possible_cpus(); |
175 | const char *file = "./test_l4lb.o"; | ||
176 | struct vip key = {.protocol = 6}; | 175 | struct vip key = {.protocol = 6}; |
177 | struct vip_meta { | 176 | struct vip_meta { |
178 | __u32 flags; | 177 | __u32 flags; |
@@ -249,6 +248,95 @@ out: | |||
249 | bpf_object__close(obj); | 248 | bpf_object__close(obj); |
250 | } | 249 | } |
251 | 250 | ||
251 | static void test_l4lb_all(void) | ||
252 | { | ||
253 | const char *file1 = "./test_l4lb.o"; | ||
254 | const char *file2 = "./test_l4lb_noinline.o"; | ||
255 | |||
256 | test_l4lb(file1); | ||
257 | test_l4lb(file2); | ||
258 | } | ||
259 | |||
260 | static void test_xdp_noinline(void) | ||
261 | { | ||
262 | const char *file = "./test_xdp_noinline.o"; | ||
263 | unsigned int nr_cpus = bpf_num_possible_cpus(); | ||
264 | struct vip key = {.protocol = 6}; | ||
265 | struct vip_meta { | ||
266 | __u32 flags; | ||
267 | __u32 vip_num; | ||
268 | } value = {.vip_num = VIP_NUM}; | ||
269 | __u32 stats_key = VIP_NUM; | ||
270 | struct vip_stats { | ||
271 | __u64 bytes; | ||
272 | __u64 pkts; | ||
273 | } stats[nr_cpus]; | ||
274 | struct real_definition { | ||
275 | union { | ||
276 | __be32 dst; | ||
277 | __be32 dstv6[4]; | ||
278 | }; | ||
279 | __u8 flags; | ||
280 | } real_def = {.dst = MAGIC_VAL}; | ||
281 | __u32 ch_key = 11, real_num = 3; | ||
282 | __u32 duration, retval, size; | ||
283 | int err, i, prog_fd, map_fd; | ||
284 | __u64 bytes = 0, pkts = 0; | ||
285 | struct bpf_object *obj; | ||
286 | char buf[128]; | ||
287 | u32 *magic = (u32 *)buf; | ||
288 | |||
289 | err = bpf_prog_load(file, BPF_PROG_TYPE_XDP, &obj, &prog_fd); | ||
290 | if (err) { | ||
291 | error_cnt++; | ||
292 | return; | ||
293 | } | ||
294 | |||
295 | map_fd = bpf_find_map(__func__, obj, "vip_map"); | ||
296 | if (map_fd < 0) | ||
297 | goto out; | ||
298 | bpf_map_update_elem(map_fd, &key, &value, 0); | ||
299 | |||
300 | map_fd = bpf_find_map(__func__, obj, "ch_rings"); | ||
301 | if (map_fd < 0) | ||
302 | goto out; | ||
303 | bpf_map_update_elem(map_fd, &ch_key, &real_num, 0); | ||
304 | |||
305 | map_fd = bpf_find_map(__func__, obj, "reals"); | ||
306 | if (map_fd < 0) | ||
307 | goto out; | ||
308 | bpf_map_update_elem(map_fd, &real_num, &real_def, 0); | ||
309 | |||
310 | err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v4, sizeof(pkt_v4), | ||
311 | buf, &size, &retval, &duration); | ||
312 | CHECK(err || errno || retval != 1 || size != 54 || | ||
313 | *magic != MAGIC_VAL, "ipv4", | ||
314 | "err %d errno %d retval %d size %d magic %x\n", | ||
315 | err, errno, retval, size, *magic); | ||
316 | |||
317 | err = bpf_prog_test_run(prog_fd, NUM_ITER, &pkt_v6, sizeof(pkt_v6), | ||
318 | buf, &size, &retval, &duration); | ||
319 | CHECK(err || errno || retval != 1 || size != 74 || | ||
320 | *magic != MAGIC_VAL, "ipv6", | ||
321 | "err %d errno %d retval %d size %d magic %x\n", | ||
322 | err, errno, retval, size, *magic); | ||
323 | |||
324 | map_fd = bpf_find_map(__func__, obj, "stats"); | ||
325 | if (map_fd < 0) | ||
326 | goto out; | ||
327 | bpf_map_lookup_elem(map_fd, &stats_key, stats); | ||
328 | for (i = 0; i < nr_cpus; i++) { | ||
329 | bytes += stats[i].bytes; | ||
330 | pkts += stats[i].pkts; | ||
331 | } | ||
332 | if (bytes != MAGIC_BYTES * NUM_ITER * 2 || pkts != NUM_ITER * 2) { | ||
333 | error_cnt++; | ||
334 | printf("test_xdp_noinline:FAIL:stats %lld %lld\n", bytes, pkts); | ||
335 | } | ||
336 | out: | ||
337 | bpf_object__close(obj); | ||
338 | } | ||
339 | |||
252 | static void test_tcp_estats(void) | 340 | static void test_tcp_estats(void) |
253 | { | 341 | { |
254 | const char *file = "./test_tcp_estats.o"; | 342 | const char *file = "./test_tcp_estats.o"; |
@@ -757,7 +845,8 @@ int main(void) | |||
757 | 845 | ||
758 | test_pkt_access(); | 846 | test_pkt_access(); |
759 | test_xdp(); | 847 | test_xdp(); |
760 | test_l4lb(); | 848 | test_l4lb_all(); |
849 | test_xdp_noinline(); | ||
761 | test_tcp_estats(); | 850 | test_tcp_estats(); |
762 | test_bpf_obj_id(); | 851 | test_bpf_obj_id(); |
763 | test_pkt_md_access(); | 852 | test_pkt_md_access(); |
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 3c64f30cf63c..3bacff0d6f91 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -2,6 +2,7 @@ | |||
2 | * Testsuite for eBPF verifier | 2 | * Testsuite for eBPF verifier |
3 | * | 3 | * |
4 | * Copyright (c) 2014 PLUMgrid, http://plumgrid.com | 4 | * Copyright (c) 2014 PLUMgrid, http://plumgrid.com |
5 | * Copyright (c) 2017 Facebook | ||
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
7 | * modify it under the terms of version 2 of the GNU General Public | 8 | * modify it under the terms of version 2 of the GNU General Public |
@@ -277,7 +278,7 @@ static struct bpf_test tests[] = { | |||
277 | .insns = { | 278 | .insns = { |
278 | BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2), | 279 | BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2), |
279 | }, | 280 | }, |
280 | .errstr = "jump out of range", | 281 | .errstr = "not an exit", |
281 | .result = REJECT, | 282 | .result = REJECT, |
282 | }, | 283 | }, |
283 | { | 284 | { |
@@ -5648,7 +5649,7 @@ static struct bpf_test tests[] = { | |||
5648 | "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)", | 5649 | "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)", |
5649 | .insns = { | 5650 | .insns = { |
5650 | BPF_MOV64_IMM(BPF_REG_1, 0), | 5651 | BPF_MOV64_IMM(BPF_REG_1, 0), |
5651 | BPF_MOV64_IMM(BPF_REG_2, 0), | 5652 | BPF_MOV64_IMM(BPF_REG_2, 1), |
5652 | BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), | 5653 | BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), |
5653 | BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), | 5654 | BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), |
5654 | BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), | 5655 | BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), |
@@ -5883,7 +5884,7 @@ static struct bpf_test tests[] = { | |||
5883 | BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), | 5884 | BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24), |
5884 | BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), | 5885 | BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16), |
5885 | BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), | 5886 | BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), |
5886 | BPF_MOV64_IMM(BPF_REG_2, 0), | 5887 | BPF_MOV64_IMM(BPF_REG_2, 1), |
5887 | BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), | 5888 | BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), |
5888 | BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), | 5889 | BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), |
5889 | BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63), | 5890 | BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63), |
@@ -8097,6 +8098,1623 @@ static struct bpf_test tests[] = { | |||
8097 | .result = REJECT, | 8098 | .result = REJECT, |
8098 | .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, | 8099 | .prog_type = BPF_PROG_TYPE_CGROUP_SOCK, |
8099 | }, | 8100 | }, |
8101 | { | ||
8102 | "calls: basic sanity", | ||
8103 | .insns = { | ||
8104 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), | ||
8105 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
8106 | BPF_EXIT_INSN(), | ||
8107 | BPF_MOV64_IMM(BPF_REG_0, 2), | ||
8108 | BPF_EXIT_INSN(), | ||
8109 | }, | ||
8110 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8111 | .result = ACCEPT, | ||
8112 | }, | ||
8113 | { | ||
8114 | "calls: not on unpriviledged", | ||
8115 | .insns = { | ||
8116 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), | ||
8117 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
8118 | BPF_EXIT_INSN(), | ||
8119 | BPF_MOV64_IMM(BPF_REG_0, 2), | ||
8120 | BPF_EXIT_INSN(), | ||
8121 | }, | ||
8122 | .errstr_unpriv = "function calls to other bpf functions are allowed for root only", | ||
8123 | .result_unpriv = REJECT, | ||
8124 | .result = ACCEPT, | ||
8125 | }, | ||
8126 | { | ||
8127 | "calls: overlapping caller/callee", | ||
8128 | .insns = { | ||
8129 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0), | ||
8130 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
8131 | BPF_EXIT_INSN(), | ||
8132 | }, | ||
8133 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8134 | .errstr = "last insn is not an exit or jmp", | ||
8135 | .result = REJECT, | ||
8136 | }, | ||
8137 | { | ||
8138 | "calls: wrong recursive calls", | ||
8139 | .insns = { | ||
8140 | BPF_JMP_IMM(BPF_JA, 0, 0, 4), | ||
8141 | BPF_JMP_IMM(BPF_JA, 0, 0, 4), | ||
8142 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2), | ||
8143 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2), | ||
8144 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -2), | ||
8145 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
8146 | BPF_EXIT_INSN(), | ||
8147 | }, | ||
8148 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8149 | .errstr = "jump out of range", | ||
8150 | .result = REJECT, | ||
8151 | }, | ||
8152 | { | ||
8153 | "calls: wrong src reg", | ||
8154 | .insns = { | ||
8155 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 2, 0, 0), | ||
8156 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
8157 | BPF_EXIT_INSN(), | ||
8158 | }, | ||
8159 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8160 | .errstr = "BPF_CALL uses reserved fields", | ||
8161 | .result = REJECT, | ||
8162 | }, | ||
8163 | { | ||
8164 | "calls: wrong off value", | ||
8165 | .insns = { | ||
8166 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, -1, 2), | ||
8167 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
8168 | BPF_EXIT_INSN(), | ||
8169 | BPF_MOV64_IMM(BPF_REG_0, 2), | ||
8170 | BPF_EXIT_INSN(), | ||
8171 | }, | ||
8172 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8173 | .errstr = "BPF_CALL uses reserved fields", | ||
8174 | .result = REJECT, | ||
8175 | }, | ||
8176 | { | ||
8177 | "calls: jump back loop", | ||
8178 | .insns = { | ||
8179 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1), | ||
8180 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
8181 | BPF_EXIT_INSN(), | ||
8182 | }, | ||
8183 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8184 | .errstr = "back-edge from insn 0 to 0", | ||
8185 | .result = REJECT, | ||
8186 | }, | ||
8187 | { | ||
8188 | "calls: conditional call", | ||
8189 | .insns = { | ||
8190 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, | ||
8191 | offsetof(struct __sk_buff, mark)), | ||
8192 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), | ||
8193 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), | ||
8194 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
8195 | BPF_EXIT_INSN(), | ||
8196 | BPF_MOV64_IMM(BPF_REG_0, 2), | ||
8197 | BPF_EXIT_INSN(), | ||
8198 | }, | ||
8199 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8200 | .errstr = "jump out of range", | ||
8201 | .result = REJECT, | ||
8202 | }, | ||
8203 | { | ||
8204 | "calls: conditional call 2", | ||
8205 | .insns = { | ||
8206 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, | ||
8207 | offsetof(struct __sk_buff, mark)), | ||
8208 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), | ||
8209 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), | ||
8210 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
8211 | BPF_EXIT_INSN(), | ||
8212 | BPF_MOV64_IMM(BPF_REG_0, 2), | ||
8213 | BPF_EXIT_INSN(), | ||
8214 | BPF_MOV64_IMM(BPF_REG_0, 3), | ||
8215 | BPF_EXIT_INSN(), | ||
8216 | }, | ||
8217 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8218 | .result = ACCEPT, | ||
8219 | }, | ||
8220 | { | ||
8221 | "calls: conditional call 3", | ||
8222 | .insns = { | ||
8223 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, | ||
8224 | offsetof(struct __sk_buff, mark)), | ||
8225 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), | ||
8226 | BPF_JMP_IMM(BPF_JA, 0, 0, 4), | ||
8227 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
8228 | BPF_EXIT_INSN(), | ||
8229 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
8230 | BPF_JMP_IMM(BPF_JA, 0, 0, -6), | ||
8231 | BPF_MOV64_IMM(BPF_REG_0, 3), | ||
8232 | BPF_JMP_IMM(BPF_JA, 0, 0, -6), | ||
8233 | }, | ||
8234 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8235 | .errstr = "back-edge from insn", | ||
8236 | .result = REJECT, | ||
8237 | }, | ||
8238 | { | ||
8239 | "calls: conditional call 4", | ||
8240 | .insns = { | ||
8241 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, | ||
8242 | offsetof(struct __sk_buff, mark)), | ||
8243 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), | ||
8244 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), | ||
8245 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
8246 | BPF_EXIT_INSN(), | ||
8247 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
8248 | BPF_JMP_IMM(BPF_JA, 0, 0, -5), | ||
8249 | BPF_MOV64_IMM(BPF_REG_0, 3), | ||
8250 | BPF_EXIT_INSN(), | ||
8251 | }, | ||
8252 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8253 | .result = ACCEPT, | ||
8254 | }, | ||
8255 | { | ||
8256 | "calls: conditional call 5", | ||
8257 | .insns = { | ||
8258 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, | ||
8259 | offsetof(struct __sk_buff, mark)), | ||
8260 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3), | ||
8261 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), | ||
8262 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
8263 | BPF_EXIT_INSN(), | ||
8264 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
8265 | BPF_JMP_IMM(BPF_JA, 0, 0, -6), | ||
8266 | BPF_MOV64_IMM(BPF_REG_0, 3), | ||
8267 | BPF_EXIT_INSN(), | ||
8268 | }, | ||
8269 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8270 | .errstr = "back-edge from insn", | ||
8271 | .result = REJECT, | ||
8272 | }, | ||
8273 | { | ||
8274 | "calls: conditional call 6", | ||
8275 | .insns = { | ||
8276 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), | ||
8277 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -2), | ||
8278 | BPF_EXIT_INSN(), | ||
8279 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, | ||
8280 | offsetof(struct __sk_buff, mark)), | ||
8281 | BPF_EXIT_INSN(), | ||
8282 | }, | ||
8283 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8284 | .errstr = "back-edge from insn", | ||
8285 | .result = REJECT, | ||
8286 | }, | ||
8287 | { | ||
8288 | "calls: using r0 returned by callee", | ||
8289 | .insns = { | ||
8290 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
8291 | BPF_EXIT_INSN(), | ||
8292 | BPF_MOV64_IMM(BPF_REG_0, 2), | ||
8293 | BPF_EXIT_INSN(), | ||
8294 | }, | ||
8295 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8296 | .result = ACCEPT, | ||
8297 | }, | ||
8298 | { | ||
8299 | "calls: using uninit r0 from callee", | ||
8300 | .insns = { | ||
8301 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
8302 | BPF_EXIT_INSN(), | ||
8303 | BPF_EXIT_INSN(), | ||
8304 | }, | ||
8305 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8306 | .errstr = "!read_ok", | ||
8307 | .result = REJECT, | ||
8308 | }, | ||
8309 | { | ||
8310 | "calls: callee is using r1", | ||
8311 | .insns = { | ||
8312 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
8313 | BPF_EXIT_INSN(), | ||
8314 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, | ||
8315 | offsetof(struct __sk_buff, len)), | ||
8316 | BPF_EXIT_INSN(), | ||
8317 | }, | ||
8318 | .prog_type = BPF_PROG_TYPE_SCHED_ACT, | ||
8319 | .result = ACCEPT, | ||
8320 | }, | ||
8321 | { | ||
8322 | "calls: callee using args1", | ||
8323 | .insns = { | ||
8324 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
8325 | BPF_EXIT_INSN(), | ||
8326 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), | ||
8327 | BPF_EXIT_INSN(), | ||
8328 | }, | ||
8329 | .errstr_unpriv = "allowed for root only", | ||
8330 | .result_unpriv = REJECT, | ||
8331 | .result = ACCEPT, | ||
8332 | }, | ||
8333 | { | ||
8334 | "calls: callee using wrong args2", | ||
8335 | .insns = { | ||
8336 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
8337 | BPF_EXIT_INSN(), | ||
8338 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), | ||
8339 | BPF_EXIT_INSN(), | ||
8340 | }, | ||
8341 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8342 | .errstr = "R2 !read_ok", | ||
8343 | .result = REJECT, | ||
8344 | }, | ||
8345 | { | ||
8346 | "calls: callee using two args", | ||
8347 | .insns = { | ||
8348 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
8349 | BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_6, | ||
8350 | offsetof(struct __sk_buff, len)), | ||
8351 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_6, | ||
8352 | offsetof(struct __sk_buff, len)), | ||
8353 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
8354 | BPF_EXIT_INSN(), | ||
8355 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_1), | ||
8356 | BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2), | ||
8357 | BPF_EXIT_INSN(), | ||
8358 | }, | ||
8359 | .errstr_unpriv = "allowed for root only", | ||
8360 | .result_unpriv = REJECT, | ||
8361 | .result = ACCEPT, | ||
8362 | }, | ||
8363 | { | ||
8364 | "calls: callee changing pkt pointers", | ||
8365 | .insns = { | ||
8366 | BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1, | ||
8367 | offsetof(struct xdp_md, data)), | ||
8368 | BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1, | ||
8369 | offsetof(struct xdp_md, data_end)), | ||
8370 | BPF_MOV64_REG(BPF_REG_8, BPF_REG_6), | ||
8371 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_8, 8), | ||
8372 | BPF_JMP_REG(BPF_JGT, BPF_REG_8, BPF_REG_7, 2), | ||
8373 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), | ||
8374 | /* clear_all_pkt_pointers() has to walk all frames | ||
8375 | * to make sure that pkt pointers in the caller | ||
8376 | * are cleared when callee is calling a helper that | ||
8377 | * adjusts packet size | ||
8378 | */ | ||
8379 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), | ||
8380 | BPF_MOV32_IMM(BPF_REG_0, 0), | ||
8381 | BPF_EXIT_INSN(), | ||
8382 | BPF_MOV64_IMM(BPF_REG_2, 0), | ||
8383 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, | ||
8384 | BPF_FUNC_xdp_adjust_head), | ||
8385 | BPF_EXIT_INSN(), | ||
8386 | }, | ||
8387 | .result = REJECT, | ||
8388 | .errstr = "R6 invalid mem access 'inv'", | ||
8389 | .prog_type = BPF_PROG_TYPE_XDP, | ||
8390 | }, | ||
8391 | { | ||
8392 | "calls: two calls with args", | ||
8393 | .insns = { | ||
8394 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
8395 | BPF_EXIT_INSN(), | ||
8396 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
8397 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6), | ||
8398 | BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), | ||
8399 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), | ||
8400 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), | ||
8401 | BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), | ||
8402 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), | ||
8403 | BPF_EXIT_INSN(), | ||
8404 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, | ||
8405 | offsetof(struct __sk_buff, len)), | ||
8406 | BPF_EXIT_INSN(), | ||
8407 | }, | ||
8408 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
8409 | .result = ACCEPT, | ||
8410 | }, | ||
8411 | { | ||
8412 | "calls: calls with stack arith", | ||
8413 | .insns = { | ||
8414 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
8415 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64), | ||
8416 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
8417 | BPF_EXIT_INSN(), | ||
8418 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64), | ||
8419 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
8420 | BPF_EXIT_INSN(), | ||
8421 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -64), | ||
8422 | BPF_MOV64_IMM(BPF_REG_0, 42), | ||
8423 | BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), | ||
8424 | BPF_EXIT_INSN(), | ||
8425 | }, | ||
8426 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
8427 | .result = ACCEPT, | ||
8428 | }, | ||
8429 | { | ||
8430 | "calls: calls with misaligned stack access", | ||
8431 | .insns = { | ||
8432 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
8433 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63), | ||
8434 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
8435 | BPF_EXIT_INSN(), | ||
8436 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -61), | ||
8437 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
8438 | BPF_EXIT_INSN(), | ||
8439 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -63), | ||
8440 | BPF_MOV64_IMM(BPF_REG_0, 42), | ||
8441 | BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_0, 0), | ||
8442 | BPF_EXIT_INSN(), | ||
8443 | }, | ||
8444 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
8445 | .flags = F_LOAD_WITH_STRICT_ALIGNMENT, | ||
8446 | .errstr = "misaligned stack access", | ||
8447 | .result = REJECT, | ||
8448 | }, | ||
8449 | { | ||
8450 | "calls: calls control flow, jump test", | ||
8451 | .insns = { | ||
8452 | BPF_MOV64_IMM(BPF_REG_0, 42), | ||
8453 | BPF_JMP_IMM(BPF_JA, 0, 0, 2), | ||
8454 | BPF_MOV64_IMM(BPF_REG_0, 43), | ||
8455 | BPF_JMP_IMM(BPF_JA, 0, 0, 1), | ||
8456 | BPF_JMP_IMM(BPF_JA, 0, 0, -3), | ||
8457 | BPF_EXIT_INSN(), | ||
8458 | }, | ||
8459 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
8460 | .result = ACCEPT, | ||
8461 | }, | ||
8462 | { | ||
8463 | "calls: calls control flow, jump test 2", | ||
8464 | .insns = { | ||
8465 | BPF_MOV64_IMM(BPF_REG_0, 42), | ||
8466 | BPF_JMP_IMM(BPF_JA, 0, 0, 2), | ||
8467 | BPF_MOV64_IMM(BPF_REG_0, 43), | ||
8468 | BPF_JMP_IMM(BPF_JA, 0, 0, 1), | ||
8469 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3), | ||
8470 | BPF_EXIT_INSN(), | ||
8471 | }, | ||
8472 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
8473 | .errstr = "jump out of range from insn 1 to 4", | ||
8474 | .result = REJECT, | ||
8475 | }, | ||
8476 | { | ||
8477 | "calls: two calls with bad jump", | ||
8478 | .insns = { | ||
8479 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
8480 | BPF_EXIT_INSN(), | ||
8481 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
8482 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6), | ||
8483 | BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), | ||
8484 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), | ||
8485 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), | ||
8486 | BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), | ||
8487 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), | ||
8488 | BPF_EXIT_INSN(), | ||
8489 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, | ||
8490 | offsetof(struct __sk_buff, len)), | ||
8491 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, -3), | ||
8492 | BPF_EXIT_INSN(), | ||
8493 | }, | ||
8494 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8495 | .errstr = "jump out of range from insn 11 to 9", | ||
8496 | .result = REJECT, | ||
8497 | }, | ||
8498 | { | ||
8499 | "calls: recursive call. test1", | ||
8500 | .insns = { | ||
8501 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
8502 | BPF_EXIT_INSN(), | ||
8503 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -1), | ||
8504 | BPF_EXIT_INSN(), | ||
8505 | }, | ||
8506 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8507 | .errstr = "back-edge", | ||
8508 | .result = REJECT, | ||
8509 | }, | ||
8510 | { | ||
8511 | "calls: recursive call. test2", | ||
8512 | .insns = { | ||
8513 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
8514 | BPF_EXIT_INSN(), | ||
8515 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -3), | ||
8516 | BPF_EXIT_INSN(), | ||
8517 | }, | ||
8518 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8519 | .errstr = "back-edge", | ||
8520 | .result = REJECT, | ||
8521 | }, | ||
8522 | { | ||
8523 | "calls: unreachable code", | ||
8524 | .insns = { | ||
8525 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
8526 | BPF_EXIT_INSN(), | ||
8527 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
8528 | BPF_EXIT_INSN(), | ||
8529 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
8530 | BPF_EXIT_INSN(), | ||
8531 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
8532 | BPF_EXIT_INSN(), | ||
8533 | }, | ||
8534 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8535 | .errstr = "unreachable insn 6", | ||
8536 | .result = REJECT, | ||
8537 | }, | ||
8538 | { | ||
8539 | "calls: invalid call", | ||
8540 | .insns = { | ||
8541 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
8542 | BPF_EXIT_INSN(), | ||
8543 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -4), | ||
8544 | BPF_EXIT_INSN(), | ||
8545 | }, | ||
8546 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8547 | .errstr = "invalid destination", | ||
8548 | .result = REJECT, | ||
8549 | }, | ||
8550 | { | ||
8551 | "calls: invalid call 2", | ||
8552 | .insns = { | ||
8553 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
8554 | BPF_EXIT_INSN(), | ||
8555 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 0x7fffffff), | ||
8556 | BPF_EXIT_INSN(), | ||
8557 | }, | ||
8558 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8559 | .errstr = "invalid destination", | ||
8560 | .result = REJECT, | ||
8561 | }, | ||
8562 | { | ||
8563 | "calls: jumping across function bodies. test1", | ||
8564 | .insns = { | ||
8565 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), | ||
8566 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
8567 | BPF_EXIT_INSN(), | ||
8568 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3), | ||
8569 | BPF_EXIT_INSN(), | ||
8570 | }, | ||
8571 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8572 | .errstr = "jump out of range", | ||
8573 | .result = REJECT, | ||
8574 | }, | ||
8575 | { | ||
8576 | "calls: jumping across function bodies. test2", | ||
8577 | .insns = { | ||
8578 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 3), | ||
8579 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), | ||
8580 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
8581 | BPF_EXIT_INSN(), | ||
8582 | BPF_EXIT_INSN(), | ||
8583 | }, | ||
8584 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8585 | .errstr = "jump out of range", | ||
8586 | .result = REJECT, | ||
8587 | }, | ||
8588 | { | ||
8589 | "calls: call without exit", | ||
8590 | .insns = { | ||
8591 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
8592 | BPF_EXIT_INSN(), | ||
8593 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
8594 | BPF_EXIT_INSN(), | ||
8595 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
8596 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -2), | ||
8597 | }, | ||
8598 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8599 | .errstr = "not an exit", | ||
8600 | .result = REJECT, | ||
8601 | }, | ||
8602 | { | ||
8603 | "calls: call into middle of ld_imm64", | ||
8604 | .insns = { | ||
8605 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), | ||
8606 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), | ||
8607 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
8608 | BPF_EXIT_INSN(), | ||
8609 | BPF_LD_IMM64(BPF_REG_0, 0), | ||
8610 | BPF_EXIT_INSN(), | ||
8611 | }, | ||
8612 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8613 | .errstr = "last insn", | ||
8614 | .result = REJECT, | ||
8615 | }, | ||
8616 | { | ||
8617 | "calls: call into middle of other call", | ||
8618 | .insns = { | ||
8619 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), | ||
8620 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), | ||
8621 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
8622 | BPF_EXIT_INSN(), | ||
8623 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
8624 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
8625 | BPF_EXIT_INSN(), | ||
8626 | }, | ||
8627 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8628 | .errstr = "last insn", | ||
8629 | .result = REJECT, | ||
8630 | }, | ||
8631 | { | ||
8632 | "calls: ld_abs with changing ctx data in callee", | ||
8633 | .insns = { | ||
8634 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
8635 | BPF_LD_ABS(BPF_B, 0), | ||
8636 | BPF_LD_ABS(BPF_H, 0), | ||
8637 | BPF_LD_ABS(BPF_W, 0), | ||
8638 | BPF_MOV64_REG(BPF_REG_7, BPF_REG_6), | ||
8639 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5), | ||
8640 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_7), | ||
8641 | BPF_LD_ABS(BPF_B, 0), | ||
8642 | BPF_LD_ABS(BPF_H, 0), | ||
8643 | BPF_LD_ABS(BPF_W, 0), | ||
8644 | BPF_EXIT_INSN(), | ||
8645 | BPF_MOV64_IMM(BPF_REG_2, 1), | ||
8646 | BPF_MOV64_IMM(BPF_REG_3, 2), | ||
8647 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, | ||
8648 | BPF_FUNC_skb_vlan_push), | ||
8649 | BPF_EXIT_INSN(), | ||
8650 | }, | ||
8651 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
8652 | .errstr = "BPF_LD_[ABS|IND] instructions cannot be mixed", | ||
8653 | .result = REJECT, | ||
8654 | }, | ||
8655 | { | ||
8656 | "calls: two calls with bad fallthrough", | ||
8657 | .insns = { | ||
8658 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
8659 | BPF_EXIT_INSN(), | ||
8660 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
8661 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6), | ||
8662 | BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), | ||
8663 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), | ||
8664 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), | ||
8665 | BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), | ||
8666 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), | ||
8667 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_0), | ||
8668 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, | ||
8669 | offsetof(struct __sk_buff, len)), | ||
8670 | BPF_EXIT_INSN(), | ||
8671 | }, | ||
8672 | .prog_type = BPF_PROG_TYPE_TRACEPOINT, | ||
8673 | .errstr = "not an exit", | ||
8674 | .result = REJECT, | ||
8675 | }, | ||
8676 | { | ||
8677 | "calls: two calls with stack read", | ||
8678 | .insns = { | ||
8679 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | ||
8680 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), | ||
8681 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), | ||
8682 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
8683 | BPF_EXIT_INSN(), | ||
8684 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
8685 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 6), | ||
8686 | BPF_MOV64_REG(BPF_REG_7, BPF_REG_0), | ||
8687 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), | ||
8688 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), | ||
8689 | BPF_ALU64_REG(BPF_ADD, BPF_REG_7, BPF_REG_0), | ||
8690 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_7), | ||
8691 | BPF_EXIT_INSN(), | ||
8692 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), | ||
8693 | BPF_EXIT_INSN(), | ||
8694 | }, | ||
8695 | .prog_type = BPF_PROG_TYPE_XDP, | ||
8696 | .result = ACCEPT, | ||
8697 | }, | ||
8698 | { | ||
8699 | "calls: two calls with stack write", | ||
8700 | .insns = { | ||
8701 | /* main prog */ | ||
8702 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | ||
8703 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), | ||
8704 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), | ||
8705 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
8706 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), | ||
8707 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), | ||
8708 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16), | ||
8709 | BPF_EXIT_INSN(), | ||
8710 | |||
8711 | /* subprog 1 */ | ||
8712 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
8713 | BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), | ||
8714 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 7), | ||
8715 | BPF_MOV64_REG(BPF_REG_8, BPF_REG_0), | ||
8716 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), | ||
8717 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), | ||
8718 | BPF_ALU64_REG(BPF_ADD, BPF_REG_8, BPF_REG_0), | ||
8719 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_8), | ||
8720 | /* write into stack frame of main prog */ | ||
8721 | BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), | ||
8722 | BPF_EXIT_INSN(), | ||
8723 | |||
8724 | /* subprog 2 */ | ||
8725 | /* read from stack frame of main prog */ | ||
8726 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0), | ||
8727 | BPF_EXIT_INSN(), | ||
8728 | }, | ||
8729 | .prog_type = BPF_PROG_TYPE_XDP, | ||
8730 | .result = ACCEPT, | ||
8731 | }, | ||
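In C terms, the stack-read and stack-write tests above correspond roughly to passing the address of a caller-frame variable into noinline callees. The sketch below is hypothetical (it assumes the selftests' bpf_helpers.h for SEC() and a bpf-call capable LLVM), but it is the shape of code these accepted instruction sequences stand for.

#include <linux/bpf.h>
#include "bpf_helpers.h"

static __attribute__((noinline))
void fill(__u64 *out)
{
	*out = 42;		/* write into the stack frame of the main prog */
}

static __attribute__((noinline))
__u64 peek(const __u64 *in)
{
	return *in;		/* read from the stack frame of the main prog */
}

SEC("xdp")
int caller(struct xdp_md *ctx)
{
	__u64 v = 0;		/* lives in the caller's frame */

	fill(&v);
	return peek(&v) == 42 ? XDP_PASS : XDP_DROP;
}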
8732 | { | ||
8733 | "calls: spill into caller stack frame", | ||
8734 | .insns = { | ||
8735 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | ||
8736 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), | ||
8737 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), | ||
8738 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
8739 | BPF_EXIT_INSN(), | ||
8740 | BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_1, 0), | ||
8741 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
8742 | BPF_EXIT_INSN(), | ||
8743 | }, | ||
8744 | .prog_type = BPF_PROG_TYPE_XDP, | ||
8745 | .errstr = "cannot spill", | ||
8746 | .result = REJECT, | ||
8747 | }, | ||
8748 | { | ||
8749 | "calls: write into caller stack frame", | ||
8750 | .insns = { | ||
8751 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), | ||
8752 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), | ||
8753 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
8754 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), | ||
8755 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), | ||
8756 | BPF_EXIT_INSN(), | ||
8757 | BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42), | ||
8758 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
8759 | BPF_EXIT_INSN(), | ||
8760 | }, | ||
8761 | .prog_type = BPF_PROG_TYPE_XDP, | ||
8762 | .result = ACCEPT, | ||
8763 | }, | ||
8764 | { | ||
8765 | "calls: write into callee stack frame", | ||
8766 | .insns = { | ||
8767 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), | ||
8768 | BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 42), | ||
8769 | BPF_EXIT_INSN(), | ||
8770 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_10), | ||
8771 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, -8), | ||
8772 | BPF_EXIT_INSN(), | ||
8773 | }, | ||
8774 | .prog_type = BPF_PROG_TYPE_XDP, | ||
8775 | .errstr = "cannot return stack pointer", | ||
8776 | .result = REJECT, | ||
8777 | }, | ||
8778 | { | ||
8779 | "calls: two calls with stack write and void return", | ||
8780 | .insns = { | ||
8781 | /* main prog */ | ||
8782 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | ||
8783 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), | ||
8784 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), | ||
8785 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
8786 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), | ||
8787 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), | ||
8788 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16), | ||
8789 | BPF_EXIT_INSN(), | ||
8790 | |||
8791 | /* subprog 1 */ | ||
8792 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
8793 | BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), | ||
8794 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), | ||
8795 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), | ||
8796 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
8797 | BPF_EXIT_INSN(), | ||
8798 | |||
8799 | /* subprog 2 */ | ||
8800 | /* write into stack frame of main prog */ | ||
8801 | BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 0), | ||
8802 | BPF_EXIT_INSN(), /* void return */ | ||
8803 | }, | ||
8804 | .prog_type = BPF_PROG_TYPE_XDP, | ||
8805 | .result = ACCEPT, | ||
8806 | }, | ||
8807 | { | ||
8808 | "calls: ambiguous return value", | ||
8809 | .insns = { | ||
8810 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
8811 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 5), | ||
8812 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), | ||
8813 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), | ||
8814 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), | ||
8815 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_0), | ||
8816 | BPF_EXIT_INSN(), | ||
8817 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 1), | ||
8818 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
8819 | BPF_EXIT_INSN(), | ||
8820 | }, | ||
8821 | .errstr_unpriv = "allowed for root only", | ||
8822 | .result_unpriv = REJECT, | ||
8823 | .errstr = "R0 !read_ok", | ||
8824 | .result = REJECT, | ||
8825 | }, | ||
8826 | { | ||
8827 | "calls: two calls that return map_value", | ||
8828 | .insns = { | ||
8829 | /* main prog */ | ||
8830 | /* pass fp-16, fp-8 into a function */ | ||
8831 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), | ||
8832 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), | ||
8833 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
8834 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), | ||
8835 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 8), | ||
8836 | |||
8837 | /* fetch map_value_ptr from the stack of this function */ | ||
8838 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), | ||
8839 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), | ||
8840 | /* write into map value */ | ||
8841 | BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), | ||
8842 | /* fetch second map_value_ptr from the stack */ | ||
8843 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -16), | ||
8844 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), | ||
8845 | /* write into map value */ | ||
8846 | BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), | ||
8847 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
8848 | BPF_EXIT_INSN(), | ||
8849 | |||
8850 | /* subprog 1 */ | ||
8851 | /* call 3rd function twice */ | ||
8852 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
8853 | BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), | ||
8854 | /* first time with fp-8 */ | ||
8855 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), | ||
8856 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), | ||
8857 | /* second time with fp-16 */ | ||
8858 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
8859 | BPF_EXIT_INSN(), | ||
8860 | |||
8861 | /* subprog 2 */ | ||
8862 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
8863 | /* lookup from map */ | ||
8864 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | ||
8865 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
8866 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
8867 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
8868 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, | ||
8869 | BPF_FUNC_map_lookup_elem), | ||
8870 | /* write map_value_ptr into stack frame of main prog */ | ||
8871 | BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), | ||
8872 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
8873 | BPF_EXIT_INSN(), /* return 0 */ | ||
8874 | }, | ||
8875 | .prog_type = BPF_PROG_TYPE_XDP, | ||
8876 | .fixup_map1 = { 23 }, | ||
8877 | .result = ACCEPT, | ||
8878 | }, | ||
8879 | { | ||
8880 | "calls: two calls that return map_value with bool condition", | ||
8881 | .insns = { | ||
8882 | /* main prog */ | ||
8883 | /* pass fp-16, fp-8 into a function */ | ||
8884 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), | ||
8885 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), | ||
8886 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
8887 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), | ||
8888 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), | ||
8889 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
8890 | BPF_EXIT_INSN(), | ||
8891 | |||
8892 | /* subprog 1 */ | ||
8893 | /* call 3rd function twice */ | ||
8894 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
8895 | BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), | ||
8896 | /* first time with fp-8 */ | ||
8897 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9), | ||
8898 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), | ||
8899 | /* fetch map_value_ptr from the stack of this function */ | ||
8900 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), | ||
8901 | /* write into map value */ | ||
8902 | BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), | ||
8903 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), | ||
8904 | /* second time with fp-16 */ | ||
8905 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), | ||
8906 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), | ||
8907 | /* fetch second map_value_ptr from the stack */ | ||
8908 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), | ||
8909 | /* write into map value */ | ||
8910 | BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), | ||
8911 | BPF_EXIT_INSN(), | ||
8912 | |||
8913 | /* subprog 2 */ | ||
8914 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
8915 | /* lookup from map */ | ||
8916 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | ||
8917 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
8918 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
8919 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
8920 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, | ||
8921 | BPF_FUNC_map_lookup_elem), | ||
8922 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), | ||
8923 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
8924 | BPF_EXIT_INSN(), /* return 0 */ | ||
8925 | /* write map_value_ptr into stack frame of main prog */ | ||
8926 | BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), | ||
8927 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
8928 | BPF_EXIT_INSN(), /* return 1 */ | ||
8929 | }, | ||
8930 | .prog_type = BPF_PROG_TYPE_XDP, | ||
8931 | .fixup_map1 = { 23 }, | ||
8932 | .result = ACCEPT, | ||
8933 | }, | ||
8934 | { | ||
8935 | "calls: two calls that return map_value with incorrect bool check", | ||
8936 | .insns = { | ||
8937 | /* main prog */ | ||
8938 | /* pass fp-16, fp-8 into a function */ | ||
8939 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), | ||
8940 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), | ||
8941 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
8942 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), | ||
8943 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), | ||
8944 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
8945 | BPF_EXIT_INSN(), | ||
8946 | |||
8947 | /* subprog 1 */ | ||
8948 | /* call 3rd function twice */ | ||
8949 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
8950 | BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), | ||
8951 | /* first time with fp-8 */ | ||
8952 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 9), | ||
8953 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 1, 2), | ||
8954 | /* fetch map_value_ptr from the stack of this function */ | ||
8955 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_6, 0), | ||
8956 | /* write into map value */ | ||
8957 | BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), | ||
8958 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_7), | ||
8959 | /* second time with fp-16 */ | ||
8960 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), | ||
8961 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), | ||
8962 | /* fetch second map_value_ptr from the stack */ | ||
8963 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_7, 0), | ||
8964 | /* write into map value */ | ||
8965 | BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), | ||
8966 | BPF_EXIT_INSN(), | ||
8967 | |||
8968 | /* subprog 2 */ | ||
8969 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
8970 | /* lookup from map */ | ||
8971 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | ||
8972 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
8973 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
8974 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
8975 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, | ||
8976 | BPF_FUNC_map_lookup_elem), | ||
8977 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), | ||
8978 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
8979 | BPF_EXIT_INSN(), /* return 0 */ | ||
8980 | /* write map_value_ptr into stack frame of main prog */ | ||
8981 | BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), | ||
8982 | BPF_MOV64_IMM(BPF_REG_0, 1), | ||
8983 | BPF_EXIT_INSN(), /* return 1 */ | ||
8984 | }, | ||
8985 | .prog_type = BPF_PROG_TYPE_XDP, | ||
8986 | .fixup_map1 = { 23 }, | ||
8987 | .result = REJECT, | ||
8988 | .errstr = "invalid read from stack off -16+0 size 8", | ||
8989 | }, | ||
8990 | { | ||
8991 | "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test1", | ||
8992 | .insns = { | ||
8993 | /* main prog */ | ||
8994 | /* pass fp-16, fp-8 into a function */ | ||
8995 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), | ||
8996 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), | ||
8997 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
8998 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), | ||
8999 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), | ||
9000 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
9001 | BPF_EXIT_INSN(), | ||
9002 | |||
9003 | /* subprog 1 */ | ||
9004 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
9005 | BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), | ||
9006 | /* 1st lookup from map */ | ||
9007 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | ||
9008 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
9009 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
9010 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
9011 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, | ||
9012 | BPF_FUNC_map_lookup_elem), | ||
9013 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), | ||
9014 | BPF_MOV64_IMM(BPF_REG_8, 0), | ||
9015 | BPF_JMP_IMM(BPF_JA, 0, 0, 2), | ||
9016 | /* write map_value_ptr into stack frame of main prog at fp-8 */ | ||
9017 | BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), | ||
9018 | BPF_MOV64_IMM(BPF_REG_8, 1), | ||
9019 | |||
9020 | /* 2nd lookup from map */ | ||
9021 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */ | ||
9022 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
9023 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
9024 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */ | ||
9025 | BPF_FUNC_map_lookup_elem), | ||
9026 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), | ||
9027 | BPF_MOV64_IMM(BPF_REG_9, 0), | ||
9028 | BPF_JMP_IMM(BPF_JA, 0, 0, 2), | ||
9029 | /* write map_value_ptr into stack frame of main prog at fp-16 */ | ||
9030 | BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), | ||
9031 | BPF_MOV64_IMM(BPF_REG_9, 1), | ||
9032 | |||
9033 | /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */ | ||
9034 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */ | ||
9035 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_8), | ||
9036 | BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), | ||
9037 | BPF_MOV64_REG(BPF_REG_4, BPF_REG_9), | ||
9038 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */ | ||
9039 | BPF_EXIT_INSN(), | ||
9040 | |||
9041 | /* subprog 2 */ | ||
9042 | /* if arg2 == 1 do *arg1 = 0 */ | ||
9043 | BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2), | ||
9044 | /* fetch map_value_ptr from the stack of this function */ | ||
9045 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), | ||
9046 | /* write into map value */ | ||
9047 | BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), | ||
9048 | |||
9049 | /* if arg4 == 1 do *arg3 = 0 */ | ||
9050 | BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2), | ||
9051 | /* fetch map_value_ptr from the stack of this function */ | ||
9052 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), | ||
9053 | /* write into map value */ | ||
9054 | BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0), | ||
9055 | BPF_EXIT_INSN(), | ||
9056 | }, | ||
9057 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
9058 | .fixup_map1 = { 12, 22 }, | ||
9059 | .result = REJECT, | ||
9060 | .errstr = "invalid access to map value, value_size=8 off=2 size=8", | ||
9061 | }, | ||
9062 | { | ||
9063 | "calls: two calls that receive map_value via arg=ptr_stack_of_caller. test2", | ||
9064 | .insns = { | ||
9065 | /* main prog */ | ||
9066 | /* pass fp-16, fp-8 into a function */ | ||
9067 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), | ||
9068 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), | ||
9069 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
9070 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), | ||
9071 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), | ||
9072 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
9073 | BPF_EXIT_INSN(), | ||
9074 | |||
9075 | /* subprog 1 */ | ||
9076 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
9077 | BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), | ||
9078 | /* 1st lookup from map */ | ||
9079 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | ||
9080 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
9081 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
9082 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
9083 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, | ||
9084 | BPF_FUNC_map_lookup_elem), | ||
9085 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), | ||
9086 | BPF_MOV64_IMM(BPF_REG_8, 0), | ||
9087 | BPF_JMP_IMM(BPF_JA, 0, 0, 2), | ||
9088 | /* write map_value_ptr into stack frame of main prog at fp-8 */ | ||
9089 | BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), | ||
9090 | BPF_MOV64_IMM(BPF_REG_8, 1), | ||
9091 | |||
9092 | /* 2nd lookup from map */ | ||
9093 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), /* 20 */ | ||
9094 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
9095 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
9096 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, /* 24 */ | ||
9097 | BPF_FUNC_map_lookup_elem), | ||
9098 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), | ||
9099 | BPF_MOV64_IMM(BPF_REG_9, 0), | ||
9100 | BPF_JMP_IMM(BPF_JA, 0, 0, 2), | ||
9101 | /* write map_value_ptr into stack frame of main prog at fp-16 */ | ||
9102 | BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), | ||
9103 | BPF_MOV64_IMM(BPF_REG_9, 1), | ||
9104 | |||
9105 | /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */ | ||
9106 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), /* 30 */ | ||
9107 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_8), | ||
9108 | BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), | ||
9109 | BPF_MOV64_REG(BPF_REG_4, BPF_REG_9), | ||
9110 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), /* 34 */ | ||
9111 | BPF_EXIT_INSN(), | ||
9112 | |||
9113 | /* subprog 2 */ | ||
9114 | /* if arg2 == 1 do *arg1 = 0 */ | ||
9115 | BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2), | ||
9116 | /* fetch map_value_ptr from the stack of this function */ | ||
9117 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), | ||
9118 | /* write into map value */ | ||
9119 | BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), | ||
9120 | |||
9121 | /* if arg4 == 1 do *arg3 = 0 */ | ||
9122 | BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2), | ||
9123 | /* fetch map_value_ptr from the stack of this function */ | ||
9124 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), | ||
9125 | /* write into map value */ | ||
9126 | BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), | ||
9127 | BPF_EXIT_INSN(), | ||
9128 | }, | ||
9129 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
9130 | .fixup_map1 = { 12, 22 }, | ||
9131 | .result = ACCEPT, | ||
9132 | }, | ||
9133 | { | ||
9134 | "calls: two jumps that receive map_value via arg=ptr_stack_of_jumper. test3", | ||
9135 | .insns = { | ||
9136 | /* main prog */ | ||
9137 | /* pass fp-16, fp-8 into a function */ | ||
9138 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), | ||
9139 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), | ||
9140 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
9141 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), | ||
9142 | BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 2), | ||
9143 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
9144 | BPF_EXIT_INSN(), | ||
9145 | |||
9146 | /* subprog 1 */ | ||
9147 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
9148 | BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), | ||
9149 | /* 1st lookup from map */ | ||
9150 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -24, 0), | ||
9151 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
9152 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24), | ||
9153 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
9154 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, | ||
9155 | BPF_FUNC_map_lookup_elem), | ||
9156 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), | ||
9157 | BPF_MOV64_IMM(BPF_REG_8, 0), | ||
9158 | BPF_JMP_IMM(BPF_JA, 0, 0, 2), | ||
9159 | /* write map_value_ptr into stack frame of main prog at fp-8 */ | ||
9160 | BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), | ||
9161 | BPF_MOV64_IMM(BPF_REG_8, 1), | ||
9162 | |||
9163 | /* 2nd lookup from map */ | ||
9164 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
9165 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -24), | ||
9166 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
9167 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, | ||
9168 | BPF_FUNC_map_lookup_elem), | ||
9169 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), | ||
9170 | BPF_MOV64_IMM(BPF_REG_9, 0), // 26 | ||
9171 | BPF_JMP_IMM(BPF_JA, 0, 0, 2), | ||
9172 | /* write map_value_ptr into stack frame of main prog at fp-16 */ | ||
9173 | BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), | ||
9174 | BPF_MOV64_IMM(BPF_REG_9, 1), | ||
9175 | |||
9176 | /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */ | ||
9177 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), // 30 | ||
9178 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_8), | ||
9179 | BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), | ||
9180 | BPF_MOV64_REG(BPF_REG_4, BPF_REG_9), | ||
9181 | BPF_JMP_IMM(BPF_JNE, BPF_REG_1, 0, 1), // 34 | ||
9182 | BPF_JMP_IMM(BPF_JA, 0, 0, -30), | ||
9183 | |||
9184 | /* subprog 2 */ | ||
9185 | /* if arg2 == 1 do *arg1 = 0 */ | ||
9186 | BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2), | ||
9187 | /* fetch map_value_ptr from the stack of this function */ | ||
9188 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), | ||
9189 | /* write into map value */ | ||
9190 | BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), | ||
9191 | |||
9192 | /* if arg4 == 1 do *arg3 = 0 */ | ||
9193 | BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2), | ||
9194 | /* fetch map_value_ptr from the stack of this function */ | ||
9195 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), | ||
9196 | /* write into map value */ | ||
9197 | BPF_ST_MEM(BPF_DW, BPF_REG_0, 2, 0), | ||
9198 | BPF_JMP_IMM(BPF_JA, 0, 0, -8), | ||
9199 | }, | ||
9200 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
9201 | .fixup_map1 = { 12, 22 }, | ||
9202 | .result = REJECT, | ||
9203 | .errstr = "invalid access to map value, value_size=8 off=2 size=8", | ||
9204 | }, | ||
9205 | { | ||
9206 | "calls: two calls that receive map_value_ptr_or_null via arg. test1", | ||
9207 | .insns = { | ||
9208 | /* main prog */ | ||
9209 | /* pass fp-16, fp-8 into a function */ | ||
9210 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), | ||
9211 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), | ||
9212 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
9213 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), | ||
9214 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), | ||
9215 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
9216 | BPF_EXIT_INSN(), | ||
9217 | |||
9218 | /* subprog 1 */ | ||
9219 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
9220 | BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), | ||
9221 | /* 1st lookup from map */ | ||
9222 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | ||
9223 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
9224 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
9225 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
9226 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, | ||
9227 | BPF_FUNC_map_lookup_elem), | ||
9228 | /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */ | ||
9229 | BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), | ||
9230 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), | ||
9231 | BPF_MOV64_IMM(BPF_REG_8, 0), | ||
9232 | BPF_JMP_IMM(BPF_JA, 0, 0, 1), | ||
9233 | BPF_MOV64_IMM(BPF_REG_8, 1), | ||
9234 | |||
9235 | /* 2nd lookup from map */ | ||
9236 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
9237 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
9238 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
9239 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, | ||
9240 | BPF_FUNC_map_lookup_elem), | ||
9241 | /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */ | ||
9242 | BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), | ||
9243 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), | ||
9244 | BPF_MOV64_IMM(BPF_REG_9, 0), | ||
9245 | BPF_JMP_IMM(BPF_JA, 0, 0, 1), | ||
9246 | BPF_MOV64_IMM(BPF_REG_9, 1), | ||
9247 | |||
9248 | /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */ | ||
9249 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), | ||
9250 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_8), | ||
9251 | BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), | ||
9252 | BPF_MOV64_REG(BPF_REG_4, BPF_REG_9), | ||
9253 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
9254 | BPF_EXIT_INSN(), | ||
9255 | |||
9256 | /* subprog 2 */ | ||
9257 | /* if arg2 == 1 do *arg1 = 0 */ | ||
9258 | BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2), | ||
9259 | /* fetch map_value_ptr from the stack of this function */ | ||
9260 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), | ||
9261 | /* write into map value */ | ||
9262 | BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), | ||
9263 | |||
9264 | /* if arg4 == 1 do *arg3 = 0 */ | ||
9265 | BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 1, 2), | ||
9266 | /* fetch map_value_ptr from the stack of this function */ | ||
9267 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), | ||
9268 | /* write into map value */ | ||
9269 | BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), | ||
9270 | BPF_EXIT_INSN(), | ||
9271 | }, | ||
9272 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
9273 | .fixup_map1 = { 12, 22 }, | ||
9274 | .result = ACCEPT, | ||
9275 | }, | ||
9276 | { | ||
9277 | "calls: two calls that receive map_value_ptr_or_null via arg. test2", | ||
9278 | .insns = { | ||
9279 | /* main prog */ | ||
9280 | /* pass fp-16, fp-8 into a function */ | ||
9281 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), | ||
9282 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), | ||
9283 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
9284 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -16), | ||
9285 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 2), | ||
9286 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
9287 | BPF_EXIT_INSN(), | ||
9288 | |||
9289 | /* subprog 1 */ | ||
9290 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_1), | ||
9291 | BPF_MOV64_REG(BPF_REG_7, BPF_REG_2), | ||
9292 | /* 1st lookup from map */ | ||
9293 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | ||
9294 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
9295 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
9296 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
9297 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, | ||
9298 | BPF_FUNC_map_lookup_elem), | ||
9299 | /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */ | ||
9300 | BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), | ||
9301 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), | ||
9302 | BPF_MOV64_IMM(BPF_REG_8, 0), | ||
9303 | BPF_JMP_IMM(BPF_JA, 0, 0, 1), | ||
9304 | BPF_MOV64_IMM(BPF_REG_8, 1), | ||
9305 | |||
9306 | /* 2nd lookup from map */ | ||
9307 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
9308 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
9309 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
9310 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, | ||
9311 | BPF_FUNC_map_lookup_elem), | ||
9312 | /* write map_value_ptr_or_null into stack frame of main prog at fp-16 */ | ||
9313 | BPF_STX_MEM(BPF_DW, BPF_REG_7, BPF_REG_0, 0), | ||
9314 | BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2), | ||
9315 | BPF_MOV64_IMM(BPF_REG_9, 0), | ||
9316 | BPF_JMP_IMM(BPF_JA, 0, 0, 1), | ||
9317 | BPF_MOV64_IMM(BPF_REG_9, 1), | ||
9318 | |||
9319 | /* call 3rd func with fp-8, 0|1, fp-16, 0|1 */ | ||
9320 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_6), | ||
9321 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_8), | ||
9322 | BPF_MOV64_REG(BPF_REG_3, BPF_REG_7), | ||
9323 | BPF_MOV64_REG(BPF_REG_4, BPF_REG_9), | ||
9324 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
9325 | BPF_EXIT_INSN(), | ||
9326 | |||
9327 | /* subprog 2 */ | ||
9328 | /* if arg2 == 1 do *arg1 = 0 */ | ||
9329 | BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 1, 2), | ||
9330 | /* fetch map_value_ptr from the stack of this function */ | ||
9331 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, 0), | ||
9332 | /* write into map value */ | ||
9333 | BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), | ||
9334 | |||
9335 | /* if arg4 == 0 do *arg3 = 0 */ | ||
9336 | BPF_JMP_IMM(BPF_JNE, BPF_REG_4, 0, 2), | ||
9337 | /* fetch map_value_ptr from the stack of this function */ | ||
9338 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_3, 0), | ||
9339 | /* write into map value */ | ||
9340 | BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, 0), | ||
9341 | BPF_EXIT_INSN(), | ||
9342 | }, | ||
9343 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
9344 | .fixup_map1 = { 12, 22 }, | ||
9345 | .result = REJECT, | ||
9346 | .errstr = "R0 invalid mem access 'inv'", | ||
9347 | }, | ||
9348 | { | ||
9349 | "calls: pkt_ptr spill into caller stack", | ||
9350 | .insns = { | ||
9351 | BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), | ||
9352 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), | ||
9353 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1), | ||
9354 | BPF_EXIT_INSN(), | ||
9355 | |||
9356 | /* subprog 1 */ | ||
9357 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
9358 | offsetof(struct __sk_buff, data)), | ||
9359 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
9360 | offsetof(struct __sk_buff, data_end)), | ||
9361 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), | ||
9362 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), | ||
9363 | /* spill unchecked pkt_ptr into stack of caller */ | ||
9364 | BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), | ||
9365 | BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), | ||
9366 | /* now the pkt range is verified, read pkt_ptr from stack */ | ||
9367 | BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0), | ||
9368 | /* write 4 bytes into packet */ | ||
9369 | BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), | ||
9370 | BPF_EXIT_INSN(), | ||
9371 | }, | ||
9372 | .result = ACCEPT, | ||
9373 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
9374 | }, | ||
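The accepted pattern above, spilling a not-yet-checked packet pointer into the caller's frame and only dereferencing it after the range check, might look roughly like the following in C. This is a hypothetical sketch, assuming the selftests' bpf_helpers.h and a "classifier" section for the SCHED_CLS program type; whether the compiler emits exactly this spill pattern depends on register pressure.

#include <linux/bpf.h>
#include "bpf_helpers.h"

static __attribute__((noinline))
int spill_and_write(struct __sk_buff *skb, void **slot)
{
	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;

	*slot = data;			/* spill unchecked pkt_ptr into the caller's stack */
	if (data + 8 > data_end)
		return 0;
	*(__u32 *)*slot = 0;		/* range now verified; the spilled copy inherits it */
	return 1;
}

SEC("classifier")
int caller(struct __sk_buff *skb)
{
	void *scratch = NULL;		/* the caller-frame slot used by the callee */

	return spill_and_write(skb, &scratch);
}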
9375 | { | ||
9376 | "calls: pkt_ptr spill into caller stack 2", | ||
9377 | .insns = { | ||
9378 | BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), | ||
9379 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), | ||
9380 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), | ||
9381 | /* Marking is still kept, but not in all cases safe. */ | ||
9382 | BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), | ||
9383 | BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0), | ||
9384 | BPF_EXIT_INSN(), | ||
9385 | |||
9386 | /* subprog 1 */ | ||
9387 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
9388 | offsetof(struct __sk_buff, data)), | ||
9389 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
9390 | offsetof(struct __sk_buff, data_end)), | ||
9391 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), | ||
9392 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), | ||
9393 | /* spill unchecked pkt_ptr into stack of caller */ | ||
9394 | BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), | ||
9395 | BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), | ||
9396 | /* now the pkt range is verified, read pkt_ptr from stack */ | ||
9397 | BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0), | ||
9398 | /* write 4 bytes into packet */ | ||
9399 | BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), | ||
9400 | BPF_EXIT_INSN(), | ||
9401 | }, | ||
9402 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
9403 | .errstr = "invalid access to packet", | ||
9404 | .result = REJECT, | ||
9405 | }, | ||
9406 | { | ||
9407 | "calls: pkt_ptr spill into caller stack 3", | ||
9408 | .insns = { | ||
9409 | BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), | ||
9410 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), | ||
9411 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), | ||
9412 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), | ||
9413 | /* Marking is still kept and safe here. */ | ||
9414 | BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), | ||
9415 | BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0), | ||
9416 | BPF_EXIT_INSN(), | ||
9417 | |||
9418 | /* subprog 1 */ | ||
9419 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
9420 | offsetof(struct __sk_buff, data)), | ||
9421 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
9422 | offsetof(struct __sk_buff, data_end)), | ||
9423 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), | ||
9424 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), | ||
9425 | /* spill unchecked pkt_ptr into stack of caller */ | ||
9426 | BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), | ||
9427 | BPF_MOV64_IMM(BPF_REG_5, 0), | ||
9428 | BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), | ||
9429 | BPF_MOV64_IMM(BPF_REG_5, 1), | ||
9430 | /* now the pkt range is verified, read pkt_ptr from stack */ | ||
9431 | BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_4, 0), | ||
9432 | /* write 4 bytes into packet */ | ||
9433 | BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), | ||
9434 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), | ||
9435 | BPF_EXIT_INSN(), | ||
9436 | }, | ||
9437 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
9438 | .result = ACCEPT, | ||
9439 | }, | ||
9440 | { | ||
9441 | "calls: pkt_ptr spill into caller stack 4", | ||
9442 | .insns = { | ||
9443 | BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), | ||
9444 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), | ||
9445 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), | ||
9446 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2), | ||
9447 | /* Check marking propagated. */ | ||
9448 | BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), | ||
9449 | BPF_ST_MEM(BPF_W, BPF_REG_4, 0, 0), | ||
9450 | BPF_EXIT_INSN(), | ||
9451 | |||
9452 | /* subprog 1 */ | ||
9453 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
9454 | offsetof(struct __sk_buff, data)), | ||
9455 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
9456 | offsetof(struct __sk_buff, data_end)), | ||
9457 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), | ||
9458 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), | ||
9459 | /* spill unchecked pkt_ptr into stack of caller */ | ||
9460 | BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), | ||
9461 | BPF_MOV64_IMM(BPF_REG_5, 0), | ||
9462 | BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), | ||
9463 | BPF_MOV64_IMM(BPF_REG_5, 1), | ||
9464 | /* don't read back pkt_ptr from stack here */ | ||
9465 | /* write 4 bytes into packet */ | ||
9466 | BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), | ||
9467 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), | ||
9468 | BPF_EXIT_INSN(), | ||
9469 | }, | ||
9470 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
9471 | .result = ACCEPT, | ||
9472 | }, | ||
9473 | { | ||
9474 | "calls: pkt_ptr spill into caller stack 5", | ||
9475 | .insns = { | ||
9476 | BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), | ||
9477 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), | ||
9478 | BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_1, 0), | ||
9479 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), | ||
9480 | BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), | ||
9481 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0), | ||
9482 | BPF_EXIT_INSN(), | ||
9483 | |||
9484 | /* subprog 1 */ | ||
9485 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
9486 | offsetof(struct __sk_buff, data)), | ||
9487 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
9488 | offsetof(struct __sk_buff, data_end)), | ||
9489 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), | ||
9490 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), | ||
9491 | BPF_MOV64_IMM(BPF_REG_5, 0), | ||
9492 | BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), | ||
9493 | /* spill checked pkt_ptr into stack of caller */ | ||
9494 | BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), | ||
9495 | BPF_MOV64_IMM(BPF_REG_5, 1), | ||
9496 | /* don't read back pkt_ptr from stack here */ | ||
9497 | /* write 4 bytes into packet */ | ||
9498 | BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), | ||
9499 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), | ||
9500 | BPF_EXIT_INSN(), | ||
9501 | }, | ||
9502 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
9503 | .errstr = "same insn cannot be used with different", | ||
9504 | .result = REJECT, | ||
9505 | }, | ||
9506 | { | ||
9507 | "calls: pkt_ptr spill into caller stack 6", | ||
9508 | .insns = { | ||
9509 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
9510 | offsetof(struct __sk_buff, data_end)), | ||
9511 | BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), | ||
9512 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), | ||
9513 | BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), | ||
9514 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), | ||
9515 | BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), | ||
9516 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0), | ||
9517 | BPF_EXIT_INSN(), | ||
9518 | |||
9519 | /* subprog 1 */ | ||
9520 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
9521 | offsetof(struct __sk_buff, data)), | ||
9522 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
9523 | offsetof(struct __sk_buff, data_end)), | ||
9524 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), | ||
9525 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), | ||
9526 | BPF_MOV64_IMM(BPF_REG_5, 0), | ||
9527 | BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), | ||
9528 | /* spill checked pkt_ptr into stack of caller */ | ||
9529 | BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), | ||
9530 | BPF_MOV64_IMM(BPF_REG_5, 1), | ||
9531 | /* don't read back pkt_ptr from stack here */ | ||
9532 | /* write 4 bytes into packet */ | ||
9533 | BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), | ||
9534 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), | ||
9535 | BPF_EXIT_INSN(), | ||
9536 | }, | ||
9537 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
9538 | .errstr = "R4 invalid mem access", | ||
9539 | .result = REJECT, | ||
9540 | }, | ||
9541 | { | ||
9542 | "calls: pkt_ptr spill into caller stack 7", | ||
9543 | .insns = { | ||
9544 | BPF_MOV64_IMM(BPF_REG_2, 0), | ||
9545 | BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), | ||
9546 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), | ||
9547 | BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), | ||
9548 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), | ||
9549 | BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), | ||
9550 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0), | ||
9551 | BPF_EXIT_INSN(), | ||
9552 | |||
9553 | /* subprog 1 */ | ||
9554 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
9555 | offsetof(struct __sk_buff, data)), | ||
9556 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
9557 | offsetof(struct __sk_buff, data_end)), | ||
9558 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), | ||
9559 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), | ||
9560 | BPF_MOV64_IMM(BPF_REG_5, 0), | ||
9561 | BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), | ||
9562 | /* spill checked pkt_ptr into stack of caller */ | ||
9563 | BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), | ||
9564 | BPF_MOV64_IMM(BPF_REG_5, 1), | ||
9565 | /* don't read back pkt_ptr from stack here */ | ||
9566 | /* write 4 bytes into packet */ | ||
9567 | BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), | ||
9568 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), | ||
9569 | BPF_EXIT_INSN(), | ||
9570 | }, | ||
9571 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
9572 | .errstr = "R4 invalid mem access", | ||
9573 | .result = REJECT, | ||
9574 | }, | ||
9575 | { | ||
9576 | "calls: pkt_ptr spill into caller stack 8", | ||
9577 | .insns = { | ||
9578 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
9579 | offsetof(struct __sk_buff, data)), | ||
9580 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
9581 | offsetof(struct __sk_buff, data_end)), | ||
9582 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), | ||
9583 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), | ||
9584 | BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1), | ||
9585 | BPF_EXIT_INSN(), | ||
9586 | BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), | ||
9587 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), | ||
9588 | BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), | ||
9589 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), | ||
9590 | BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), | ||
9591 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0), | ||
9592 | BPF_EXIT_INSN(), | ||
9593 | |||
9594 | /* subprog 1 */ | ||
9595 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
9596 | offsetof(struct __sk_buff, data)), | ||
9597 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
9598 | offsetof(struct __sk_buff, data_end)), | ||
9599 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), | ||
9600 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), | ||
9601 | BPF_MOV64_IMM(BPF_REG_5, 0), | ||
9602 | BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 3), | ||
9603 | /* spill checked pkt_ptr into stack of caller */ | ||
9604 | BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), | ||
9605 | BPF_MOV64_IMM(BPF_REG_5, 1), | ||
9606 | /* don't read back pkt_ptr from stack here */ | ||
9607 | /* write 4 bytes into packet */ | ||
9608 | BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), | ||
9609 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), | ||
9610 | BPF_EXIT_INSN(), | ||
9611 | }, | ||
9612 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
9613 | .result = ACCEPT, | ||
9614 | }, | ||
9615 | { | ||
9616 | "calls: pkt_ptr spill into caller stack 9", | ||
9617 | .insns = { | ||
9618 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
9619 | offsetof(struct __sk_buff, data)), | ||
9620 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
9621 | offsetof(struct __sk_buff, data_end)), | ||
9622 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), | ||
9623 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), | ||
9624 | BPF_JMP_REG(BPF_JLE, BPF_REG_0, BPF_REG_3, 1), | ||
9625 | BPF_EXIT_INSN(), | ||
9626 | BPF_MOV64_REG(BPF_REG_4, BPF_REG_10), | ||
9627 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, -8), | ||
9628 | BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), | ||
9629 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3), | ||
9630 | BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8), | ||
9631 | BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_4, 0), | ||
9632 | BPF_EXIT_INSN(), | ||
9633 | |||
9634 | /* subprog 1 */ | ||
9635 | BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, | ||
9636 | offsetof(struct __sk_buff, data)), | ||
9637 | BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, | ||
9638 | offsetof(struct __sk_buff, data_end)), | ||
9639 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), | ||
9640 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), | ||
9641 | BPF_MOV64_IMM(BPF_REG_5, 0), | ||
9642 | /* spill unchecked pkt_ptr into stack of caller */ | ||
9643 | BPF_STX_MEM(BPF_DW, BPF_REG_4, BPF_REG_2, 0), | ||
9644 | BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 2), | ||
9645 | BPF_MOV64_IMM(BPF_REG_5, 1), | ||
9646 | /* don't read back pkt_ptr from stack here */ | ||
9647 | /* write 4 bytes into packet */ | ||
9648 | BPF_ST_MEM(BPF_W, BPF_REG_2, 0, 0), | ||
9649 | BPF_MOV64_REG(BPF_REG_0, BPF_REG_5), | ||
9650 | BPF_EXIT_INSN(), | ||
9651 | }, | ||
9652 | .prog_type = BPF_PROG_TYPE_SCHED_CLS, | ||
9653 | .errstr = "invalid access to packet", | ||
9654 | .result = REJECT, | ||
9655 | }, | ||
9656 | { | ||
9657 | "calls: caller stack init to zero or map_value_or_null", | ||
9658 | .insns = { | ||
9659 | BPF_MOV64_IMM(BPF_REG_0, 0), | ||
9660 | BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8), | ||
9661 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
9662 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
9663 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4), | ||
9664 | /* fetch map_value_or_null or const_zero from stack */ | ||
9665 | BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8), | ||
9666 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1), | ||
9667 | /* store into map_value */ | ||
9668 | BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0), | ||
9669 | BPF_EXIT_INSN(), | ||
9670 | |||
9671 | /* subprog 1 */ | ||
9672 | /* if (ctx == 0) return; */ | ||
9673 | BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8), | ||
9674 | /* else bpf_map_lookup() and *(fp - 8) = r0 */ | ||
9675 | BPF_MOV64_REG(BPF_REG_6, BPF_REG_2), | ||
9676 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
9677 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
9678 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
9679 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | ||
9680 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, | ||
9681 | BPF_FUNC_map_lookup_elem), | ||
9682 | /* write map_value_ptr_or_null into stack frame of main prog at fp-8 */ | ||
9683 | BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0), | ||
9684 | BPF_EXIT_INSN(), | ||
9685 | }, | ||
9686 | .fixup_map1 = { 13 }, | ||
9687 | .result = ACCEPT, | ||
9688 | .prog_type = BPF_PROG_TYPE_XDP, | ||
9689 | }, | ||
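A rough C equivalent of the zero-or-map_value_or_null test above, as a hypothetical sketch assuming the selftests' bpf_helpers.h map definition and lookup wrappers: the caller zero-initializes a stack slot, the callee may overwrite it with a lookup result, and the caller checks for NULL before writing through it.

#include <linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") scratch_map = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(__u32),
	.value_size = sizeof(__u64),
	.max_entries = 1,
};

static __attribute__((noinline))
void maybe_lookup(struct xdp_md *ctx, __u64 **slot)
{
	__u32 key = 0;

	if (!ctx)
		return;			/* leave the caller's zero-initialized slot alone */
	*slot = bpf_map_lookup_elem(&scratch_map, &key);
}

SEC("xdp")
int caller(struct xdp_md *ctx)
{
	__u64 *val = NULL;		/* caller stack slot starts as const zero */

	maybe_lookup(ctx, &val);
	if (val)			/* now either zero or map_value_or_null */
		*val = 0;
	return XDP_PASS;
}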
9690 | { | ||
9691 | "calls: stack init to zero and pruning", | ||
9692 | .insns = { | ||
9693 | /* first make allocated_stack 16 byte */ | ||
9694 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0), | ||
9695 | /* now fork the execution such that the false branch | ||
9696 | * of JGT insn will be verified second and it skips zero | ||
9697 | * init of fp-8 stack slot. If stack liveness marking | ||
9698 | * is missing live_read marks from call map_lookup | ||
9699 | * processing then pruning will incorrectly assume | ||
9700 | * that fp-8 stack slot was unused in the fall-through | ||
9701 | * branch and will accept the program incorrectly | ||
9702 | */ | ||
9703 | BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2), | ||
9704 | BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0), | ||
9705 | BPF_JMP_IMM(BPF_JA, 0, 0, 0), | ||
9706 | BPF_MOV64_REG(BPF_REG_2, BPF_REG_10), | ||
9707 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8), | ||
9708 | BPF_LD_MAP_FD(BPF_REG_1, 0), | ||
9709 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, | ||
9710 | BPF_FUNC_map_lookup_elem), | ||
9711 | BPF_EXIT_INSN(), | ||
9712 | }, | ||
9713 | .fixup_map2 = { 6 }, | ||
9714 | .errstr = "invalid indirect read from stack off -8+0 size 8", | ||
9715 | .result = REJECT, | ||
9716 | .prog_type = BPF_PROG_TYPE_XDP, | ||
9717 | }, | ||
8100 | }; | 9718 | }; |
8101 | 9719 | ||
8102 | static int probe_filter_length(const struct bpf_insn *fp) | 9720 | static int probe_filter_length(const struct bpf_insn *fp) |
diff --git a/tools/testing/selftests/bpf/test_xdp_noinline.c b/tools/testing/selftests/bpf/test_xdp_noinline.c new file mode 100644 index 000000000000..5e4aac74f9d0 --- /dev/null +++ b/tools/testing/selftests/bpf/test_xdp_noinline.c | |||
@@ -0,0 +1,833 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | // Copyright (c) 2017 Facebook | ||
3 | #include <stddef.h> | ||
4 | #include <stdbool.h> | ||
5 | #include <string.h> | ||
6 | #include <linux/pkt_cls.h> | ||
7 | #include <linux/bpf.h> | ||
8 | #include <linux/in.h> | ||
9 | #include <linux/if_ether.h> | ||
10 | #include <linux/ip.h> | ||
11 | #include <linux/ipv6.h> | ||
12 | #include <linux/icmp.h> | ||
13 | #include <linux/icmpv6.h> | ||
14 | #include <linux/tcp.h> | ||
15 | #include <linux/udp.h> | ||
16 | #include "bpf_helpers.h" | ||
17 | |||
18 | #define bpf_printk(fmt, ...) \ | ||
19 | ({ \ | ||
20 | char ____fmt[] = fmt; \ | ||
21 | bpf_trace_printk(____fmt, sizeof(____fmt), \ | ||
22 | ##__VA_ARGS__); \ | ||
23 | }) | ||
24 | |||
25 | static __u32 rol32(__u32 word, unsigned int shift) | ||
26 | { | ||
27 | return (word << shift) | (word >> ((-shift) & 31)); | ||
28 | } | ||
29 | |||
30 | /* copy paste of jhash from kernel sources to make sure llvm | ||
31 | * can compile it into valid sequence of bpf instructions | ||
32 | */ | ||
33 | #define __jhash_mix(a, b, c) \ | ||
34 | { \ | ||
35 | a -= c; a ^= rol32(c, 4); c += b; \ | ||
36 | b -= a; b ^= rol32(a, 6); a += c; \ | ||
37 | c -= b; c ^= rol32(b, 8); b += a; \ | ||
38 | a -= c; a ^= rol32(c, 16); c += b; \ | ||
39 | b -= a; b ^= rol32(a, 19); a += c; \ | ||
40 | c -= b; c ^= rol32(b, 4); b += a; \ | ||
41 | } | ||
42 | |||
43 | #define __jhash_final(a, b, c) \ | ||
44 | { \ | ||
45 | c ^= b; c -= rol32(b, 14); \ | ||
46 | a ^= c; a -= rol32(c, 11); \ | ||
47 | b ^= a; b -= rol32(a, 25); \ | ||
48 | c ^= b; c -= rol32(b, 16); \ | ||
49 | a ^= c; a -= rol32(c, 4); \ | ||
50 | b ^= a; b -= rol32(a, 14); \ | ||
51 | c ^= b; c -= rol32(b, 24); \ | ||
52 | } | ||
53 | |||
54 | #define JHASH_INITVAL 0xdeadbeef | ||
55 | |||
56 | typedef unsigned int u32; | ||
57 | |||
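| /* note: the helpers below are deliberately marked noinline so that, with | ||
| * an llvm that supports bpf-to-bpf calls, each one is kept as a separate | ||
| * function and reached via call insns instead of being inlined | ||
| */ | ||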
58 | static __attribute__ ((noinline)) | ||
59 | u32 jhash(const void *key, u32 length, u32 initval) | ||
60 | { | ||
61 | u32 a, b, c; | ||
62 | const unsigned char *k = key; | ||
63 | |||
64 | a = b = c = JHASH_INITVAL + length + initval; | ||
65 | |||
66 | while (length > 12) { | ||
67 | a += *(u32 *)(k); | ||
68 | b += *(u32 *)(k + 4); | ||
69 | c += *(u32 *)(k + 8); | ||
70 | __jhash_mix(a, b, c); | ||
71 | length -= 12; | ||
72 | k += 12; | ||
73 | } | ||
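| /* handle the tail bytes; every case below intentionally falls through, | ||
| * mirroring the kernel's jhash implementation | ||
| */ | ||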
74 | switch (length) { | ||
75 | case 12: c += (u32)k[11]<<24; | ||
76 | case 11: c += (u32)k[10]<<16; | ||
77 | case 10: c += (u32)k[9]<<8; | ||
78 | case 9: c += k[8]; | ||
79 | case 8: b += (u32)k[7]<<24; | ||
80 | case 7: b += (u32)k[6]<<16; | ||
81 | case 6: b += (u32)k[5]<<8; | ||
82 | case 5: b += k[4]; | ||
83 | case 4: a += (u32)k[3]<<24; | ||
84 | case 3: a += (u32)k[2]<<16; | ||
85 | case 2: a += (u32)k[1]<<8; | ||
86 | case 1: a += k[0]; | ||
87 | __jhash_final(a, b, c); | ||
88 | case 0: /* Nothing left to add */ | ||
89 | break; | ||
90 | } | ||
91 | |||
92 | return c; | ||
93 | } | ||
94 | |||
95 | static __attribute__ ((noinline)) | ||
96 | u32 __jhash_nwords(u32 a, u32 b, u32 c, u32 initval) | ||
97 | { | ||
98 | a += initval; | ||
99 | b += initval; | ||
100 | c += initval; | ||
101 | __jhash_final(a, b, c); | ||
102 | return c; | ||
103 | } | ||
104 | |||
105 | static __attribute__ ((noinline)) | ||
106 | u32 jhash_2words(u32 a, u32 b, u32 initval) | ||
107 | { | ||
108 | return __jhash_nwords(a, b, 0, initval + JHASH_INITVAL + (2 << 2)); | ||
109 | } | ||
110 | |||
111 | struct flow_key { | ||
112 | union { | ||
113 | __be32 src; | ||
114 | __be32 srcv6[4]; | ||
115 | }; | ||
116 | union { | ||
117 | __be32 dst; | ||
118 | __be32 dstv6[4]; | ||
119 | }; | ||
120 | union { | ||
121 | __u32 ports; | ||
122 | __u16 port16[2]; | ||
123 | }; | ||
124 | __u8 proto; | ||
125 | }; | ||
126 | |||
127 | struct packet_description { | ||
128 | struct flow_key flow; | ||
129 | __u8 flags; | ||
130 | }; | ||
131 | |||
132 | struct ctl_value { | ||
133 | union { | ||
134 | __u64 value; | ||
135 | __u32 ifindex; | ||
136 | __u8 mac[6]; | ||
137 | }; | ||
138 | }; | ||
139 | |||
140 | struct vip_definition { | ||
141 | union { | ||
142 | __be32 vip; | ||
143 | __be32 vipv6[4]; | ||
144 | }; | ||
145 | __u16 port; | ||
146 | __u16 family; | ||
147 | __u8 proto; | ||
148 | }; | ||
149 | |||
150 | struct vip_meta { | ||
151 | __u32 flags; | ||
152 | __u32 vip_num; | ||
153 | }; | ||
154 | |||
155 | struct real_pos_lru { | ||
156 | __u32 pos; | ||
157 | __u64 atime; | ||
158 | }; | ||
159 | |||
160 | struct real_definition { | ||
161 | union { | ||
162 | __be32 dst; | ||
163 | __be32 dstv6[4]; | ||
164 | }; | ||
165 | __u8 flags; | ||
166 | }; | ||
167 | |||
168 | struct lb_stats { | ||
169 | __u64 v2; | ||
170 | __u64 v1; | ||
171 | }; | ||
172 | |||
173 | struct bpf_map_def __attribute__ ((section("maps"), used)) vip_map = { | ||
174 | .type = BPF_MAP_TYPE_HASH, | ||
175 | .key_size = sizeof(struct vip_definition), | ||
176 | .value_size = sizeof(struct vip_meta), | ||
177 | .max_entries = 512, | ||
178 | .map_flags = 0, | ||
179 | }; | ||
180 | |||
181 | struct bpf_map_def __attribute__ ((section("maps"), used)) lru_cache = { | ||
182 | .type = BPF_MAP_TYPE_LRU_HASH, | ||
183 | .key_size = sizeof(struct flow_key), | ||
184 | .value_size = sizeof(struct real_pos_lru), | ||
185 | .max_entries = 300, | ||
186 | .map_flags = 1U << 1, | ||
187 | }; | ||
188 | |||
189 | struct bpf_map_def __attribute__ ((section("maps"), used)) ch_rings = { | ||
190 | .type = BPF_MAP_TYPE_ARRAY, | ||
191 | .key_size = sizeof(__u32), | ||
192 | .value_size = sizeof(__u32), | ||
193 | .max_entries = 12 * 655, | ||
194 | .map_flags = 0, | ||
195 | }; | ||
196 | |||
197 | struct bpf_map_def __attribute__ ((section("maps"), used)) reals = { | ||
198 | .type = BPF_MAP_TYPE_ARRAY, | ||
199 | .key_size = sizeof(__u32), | ||
200 | .value_size = sizeof(struct real_definition), | ||
201 | .max_entries = 40, | ||
202 | .map_flags = 0, | ||
203 | }; | ||
204 | |||
205 | struct bpf_map_def __attribute__ ((section("maps"), used)) stats = { | ||
206 | .type = BPF_MAP_TYPE_PERCPU_ARRAY, | ||
207 | .key_size = sizeof(__u32), | ||
208 | .value_size = sizeof(struct lb_stats), | ||
209 | .max_entries = 515, | ||
210 | .map_flags = 0, | ||
211 | }; | ||
212 | |||
213 | struct bpf_map_def __attribute__ ((section("maps"), used)) ctl_array = { | ||
214 | .type = BPF_MAP_TYPE_ARRAY, | ||
215 | .key_size = sizeof(__u32), | ||
216 | .value_size = sizeof(struct ctl_value), | ||
217 | .max_entries = 16, | ||
218 | .map_flags = 0, | ||
219 | }; | ||
220 | |||
221 | struct eth_hdr { | ||
222 | unsigned char eth_dest[6]; | ||
223 | unsigned char eth_source[6]; | ||
224 | unsigned short eth_proto; | ||
225 | }; | ||
226 | |||
227 | static inline __u64 calc_offset(bool is_ipv6, bool is_icmp) | ||
228 | { | ||
229 | __u64 off = sizeof(struct eth_hdr); | ||
230 | if (is_ipv6) { | ||
231 | off += sizeof(struct ipv6hdr); | ||
232 | if (is_icmp) | ||
233 | off += sizeof(struct icmp6hdr) + sizeof(struct ipv6hdr); | ||
234 | } else { | ||
235 | off += sizeof(struct iphdr); | ||
236 | if (is_icmp) | ||
237 | off += sizeof(struct icmphdr) + sizeof(struct iphdr); | ||
238 | } | ||
239 | return off; | ||
240 | } | ||
241 | |||
242 | static __attribute__ ((noinline)) | ||
243 | bool parse_udp(void *data, void *data_end, | ||
244 | bool is_ipv6, struct packet_description *pckt) | ||
245 | { | ||
246 | |||
247 | bool is_icmp = !((pckt->flags & (1 << 0)) == 0); | ||
248 | __u64 off = calc_offset(is_ipv6, is_icmp); | ||
249 | struct udphdr *udp; | ||
250 | udp = data + off; | ||
251 | |||
252 | if (udp + 1 > data_end) | ||
253 | return 0; | ||
254 | if (!is_icmp) { | ||
255 | pckt->flow.port16[0] = udp->source; | ||
256 | pckt->flow.port16[1] = udp->dest; | ||
257 | } else { | ||
258 | pckt->flow.port16[0] = udp->dest; | ||
259 | pckt->flow.port16[1] = udp->source; | ||
260 | } | ||
261 | return 1; | ||
262 | } | ||
263 | |||
264 | static __attribute__ ((noinline)) | ||
265 | bool parse_tcp(void *data, void *data_end, | ||
266 | bool is_ipv6, struct packet_description *pckt) | ||
267 | { | ||
268 | |||
269 | bool is_icmp = !((pckt->flags & (1 << 0)) == 0); | ||
270 | __u64 off = calc_offset(is_ipv6, is_icmp); | ||
271 | struct tcphdr *tcp; | ||
272 | |||
273 | tcp = data + off; | ||
274 | if (tcp + 1 > data_end) | ||
275 | return 0; | ||
276 | if (tcp->syn) | ||
277 | pckt->flags |= (1 << 1); | ||
278 | if (!is_icmp) { | ||
279 | pckt->flow.port16[0] = tcp->source; | ||
280 | pckt->flow.port16[1] = tcp->dest; | ||
281 | } else { | ||
282 | pckt->flow.port16[0] = tcp->dest; | ||
283 | pckt->flow.port16[1] = tcp->source; | ||
284 | } | ||
285 | return 1; | ||
286 | } | ||
287 | |||
288 | static __attribute__ ((noinline)) | ||
289 | bool encap_v6(struct xdp_md *xdp, struct ctl_value *cval, | ||
290 | struct packet_description *pckt, | ||
291 | struct real_definition *dst, __u32 pkt_bytes) | ||
292 | { | ||
293 | struct eth_hdr *new_eth; | ||
294 | struct eth_hdr *old_eth; | ||
295 | struct ipv6hdr *ip6h; | ||
296 | __u32 ip_suffix; | ||
297 | void *data_end; | ||
298 | void *data; | ||
299 | |||
300 | if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct ipv6hdr))) | ||
301 | return 0; | ||
302 | data = (void *)(long)xdp->data; | ||
303 | data_end = (void *)(long)xdp->data_end; | ||
304 | new_eth = data; | ||
305 | ip6h = data + sizeof(struct eth_hdr); | ||
306 | old_eth = data + sizeof(struct ipv6hdr); | ||
307 | if (new_eth + 1 > data_end || | ||
308 | old_eth + 1 > data_end || ip6h + 1 > data_end) | ||
309 | return 0; | ||
310 | memcpy(new_eth->eth_dest, cval->mac, 6); | ||
311 | memcpy(new_eth->eth_source, old_eth->eth_dest, 6); | ||
312 | new_eth->eth_proto = 56710; /* 0xdd86 == bswap16(ETH_P_IPV6) */ | ||
313 | ip6h->version = 6; | ||
314 | ip6h->priority = 0; | ||
315 | memset(ip6h->flow_lbl, 0, sizeof(ip6h->flow_lbl)); | ||
316 | |||
317 | ip6h->nexthdr = IPPROTO_IPV6; | ||
318 | ip_suffix = pckt->flow.srcv6[3] ^ pckt->flow.port16[0]; | ||
319 | ip6h->payload_len = | ||
320 | __builtin_bswap16(pkt_bytes + sizeof(struct ipv6hdr)); | ||
321 | ip6h->hop_limit = 4; | ||
322 | |||
323 | ip6h->saddr.in6_u.u6_addr32[0] = 1; | ||
324 | ip6h->saddr.in6_u.u6_addr32[1] = 2; | ||
325 | ip6h->saddr.in6_u.u6_addr32[2] = 3; | ||
326 | ip6h->saddr.in6_u.u6_addr32[3] = ip_suffix; | ||
327 | memcpy(ip6h->daddr.in6_u.u6_addr32, dst->dstv6, 16); | ||
328 | return 1; | ||
329 | } | ||
330 | |||
331 | static __attribute__ ((noinline)) | ||
332 | bool encap_v4(struct xdp_md *xdp, struct ctl_value *cval, | ||
333 | struct packet_description *pckt, | ||
334 | struct real_definition *dst, __u32 pkt_bytes) | ||
335 | { | ||
336 | |||
337 | __u32 ip_suffix = __builtin_bswap16(pckt->flow.port16[0]); | ||
338 | struct eth_hdr *new_eth; | ||
339 | struct eth_hdr *old_eth; | ||
340 | __u16 *next_iph_u16; | ||
341 | struct iphdr *iph; | ||
342 | __u32 csum = 0; | ||
343 | void *data_end; | ||
344 | void *data; | ||
345 | |||
346 | ip_suffix <<= 15; | ||
347 | ip_suffix ^= pckt->flow.src; | ||
348 | if (bpf_xdp_adjust_head(xdp, 0 - (int)sizeof(struct iphdr))) | ||
349 | return 0; | ||
350 | data = (void *)(long)xdp->data; | ||
351 | data_end = (void *)(long)xdp->data_end; | ||
352 | new_eth = data; | ||
353 | iph = data + sizeof(struct eth_hdr); | ||
354 | old_eth = data + sizeof(struct iphdr); | ||
355 | if (new_eth + 1 > data_end || | ||
356 | old_eth + 1 > data_end || iph + 1 > data_end) | ||
357 | return 0; | ||
358 | memcpy(new_eth->eth_dest, cval->mac, 6); | ||
359 | memcpy(new_eth->eth_source, old_eth->eth_dest, 6); | ||
360 | new_eth->eth_proto = 8; /* 0x0008 == bswap16(ETH_P_IP) */ | ||
361 | iph->version = 4; | ||
362 | iph->ihl = 5; | ||
363 | iph->frag_off = 0; | ||
364 | iph->protocol = IPPROTO_IPIP; | ||
365 | iph->check = 0; | ||
366 | iph->tos = 1; | ||
367 | iph->tot_len = __builtin_bswap16(pkt_bytes + sizeof(struct iphdr)); | ||
368 | /* don't update iph->daddr, since it will overwrite old eth_proto | ||
369 | * and multiple iterations of bpf_prog_run() will fail | ||
370 | */ | ||
371 | |||
372 | iph->saddr = ((0xFFFF0000 & ip_suffix) | 4268) ^ dst->dst; | ||
373 | iph->ttl = 4; | ||
374 | |||
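| /* recompute the IPv4 header checksum: sum the header as 16-bit words, | ||
| * then fold the carry into the low 16 bits and invert | ||
| */ | ||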
375 | next_iph_u16 = (__u16 *) iph; | ||
376 | #pragma clang loop unroll(full) | ||
377 | for (int i = 0; i < sizeof(struct iphdr) >> 1; i++) | ||
378 | csum += *next_iph_u16++; | ||
379 | iph->check = ~((csum & 0xffff) + (csum >> 16)); | ||
380 | if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr))) | ||
381 | return 0; | ||
382 | return 1; | ||
383 | } | ||
384 | |||
385 | static __attribute__ ((noinline)) | ||
386 | bool decap_v6(struct xdp_md *xdp, void **data, void **data_end, bool inner_v4) | ||
387 | { | ||
388 | struct eth_hdr *new_eth; | ||
389 | struct eth_hdr *old_eth; | ||
390 | |||
391 | old_eth = *data; | ||
392 | new_eth = *data + sizeof(struct ipv6hdr); | ||
393 | memcpy(new_eth->eth_source, old_eth->eth_source, 6); | ||
394 | memcpy(new_eth->eth_dest, old_eth->eth_dest, 6); | ||
395 | if (inner_v4) | ||
396 | new_eth->eth_proto = 8; | ||
397 | else | ||
398 | new_eth->eth_proto = 56710; | ||
399 | if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct ipv6hdr))) | ||
400 | return 0; | ||
401 | *data = (void *)(long)xdp->data; | ||
402 | *data_end = (void *)(long)xdp->data_end; | ||
403 | return 1; | ||
404 | } | ||
405 | |||
406 | static __attribute__ ((noinline)) | ||
407 | bool decap_v4(struct xdp_md *xdp, void **data, void **data_end) | ||
408 | { | ||
409 | struct eth_hdr *new_eth; | ||
410 | struct eth_hdr *old_eth; | ||
411 | |||
412 | old_eth = *data; | ||
413 | new_eth = *data + sizeof(struct iphdr); | ||
414 | memcpy(new_eth->eth_source, old_eth->eth_source, 6); | ||
415 | memcpy(new_eth->eth_dest, old_eth->eth_dest, 6); | ||
416 | new_eth->eth_proto = 8; | ||
417 | if (bpf_xdp_adjust_head(xdp, (int)sizeof(struct iphdr))) | ||
418 | return 0; | ||
419 | *data = (void *)(long)xdp->data; | ||
420 | *data_end = (void *)(long)xdp->data_end; | ||
421 | return 1; | ||
422 | } | ||
423 | |||
424 | static __attribute__ ((noinline)) | ||
425 | int swap_mac_and_send(void *data, void *data_end) | ||
426 | { | ||
427 | unsigned char tmp_mac[6]; | ||
428 | struct eth_hdr *eth; | ||
429 | |||
430 | eth = data; | ||
431 | memcpy(tmp_mac, eth->eth_source, 6); | ||
432 | memcpy(eth->eth_source, eth->eth_dest, 6); | ||
433 | memcpy(eth->eth_dest, tmp_mac, 6); | ||
434 | return XDP_TX; | ||
435 | } | ||
436 | |||
437 | static __attribute__ ((noinline)) | ||
438 | int send_icmp_reply(void *data, void *data_end) | ||
439 | { | ||
440 | struct icmphdr *icmp_hdr; | ||
441 | __u16 *next_iph_u16; | ||
442 | __u32 tmp_addr = 0; | ||
443 | struct iphdr *iph; | ||
444 | __u32 csum1 = 0; | ||
445 | __u32 csum = 0; | ||
446 | __u64 off = 0; | ||
447 | |||
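| /* rewrite an ICMP echo request into an echo reply in place (swap the IP | ||
| * addresses and MACs, patch the type and checksum) and bounce it back | ||
| * out the same interface via XDP_TX | ||
| */ | ||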
448 | if (data + sizeof(struct eth_hdr) | ||
449 | + sizeof(struct iphdr) + sizeof(struct icmphdr) > data_end) | ||
450 | return XDP_DROP; | ||
451 | off += sizeof(struct eth_hdr); | ||
452 | iph = data + off; | ||
453 | off += sizeof(struct iphdr); | ||
454 | icmp_hdr = data + off; | ||
455 | icmp_hdr->type = 0; /* ICMP_ECHOREPLY */ | ||
456 | icmp_hdr->checksum += 0x0007; | ||
457 | iph->ttl = 4; | ||
458 | tmp_addr = iph->daddr; | ||
459 | iph->daddr = iph->saddr; | ||
460 | iph->saddr = tmp_addr; | ||
461 | iph->check = 0; | ||
462 | next_iph_u16 = (__u16 *) iph; | ||
463 | #pragma clang loop unroll(full) | ||
464 | for (int i = 0; i < sizeof(struct iphdr) >> 1; i++) | ||
465 | csum += *next_iph_u16++; | ||
466 | iph->check = ~((csum & 0xffff) + (csum >> 16)); | ||
467 | return swap_mac_and_send(data, data_end); | ||
468 | } | ||
469 | |||
470 | static __attribute__ ((noinline)) | ||
471 | int send_icmp6_reply(void *data, void *data_end) | ||
472 | { | ||
473 | struct icmp6hdr *icmp_hdr; | ||
474 | struct ipv6hdr *ip6h; | ||
475 | __be32 tmp_addr[4]; | ||
476 | __u64 off = 0; | ||
477 | |||
478 | if (data + sizeof(struct eth_hdr) | ||
479 | + sizeof(struct ipv6hdr) + sizeof(struct icmp6hdr) > data_end) | ||
480 | return XDP_DROP; | ||
481 | off += sizeof(struct eth_hdr); | ||
482 | ip6h = data + off; | ||
483 | off += sizeof(struct ipv6hdr); | ||
484 | icmp_hdr = data + off; | ||
485 | icmp_hdr->icmp6_type = 129; /* ICMPV6_ECHO_REPLY */ | ||
486 | icmp_hdr->icmp6_cksum -= 0x0001; | ||
487 | ip6h->hop_limit = 4; | ||
488 | memcpy(tmp_addr, ip6h->saddr.in6_u.u6_addr32, 16); | ||
489 | memcpy(ip6h->saddr.in6_u.u6_addr32, ip6h->daddr.in6_u.u6_addr32, 16); | ||
490 | memcpy(ip6h->daddr.in6_u.u6_addr32, tmp_addr, 16); | ||
491 | return swap_mac_and_send(data, data_end); | ||
492 | } | ||
493 | |||
494 | static __attribute__ ((noinline)) | ||
495 | int parse_icmpv6(void *data, void *data_end, __u64 off, | ||
496 | struct packet_description *pckt) | ||
497 | { | ||
498 | struct icmp6hdr *icmp_hdr; | ||
499 | struct ipv6hdr *ip6h; | ||
500 | |||
501 | icmp_hdr = data + off; | ||
502 | if (icmp_hdr + 1 > data_end) | ||
503 | return XDP_DROP; | ||
504 | if (icmp_hdr->icmp6_type == 128) /* ICMPV6_ECHO_REQUEST */ | ||
505 | return send_icmp6_reply(data, data_end); | ||
506 | if (icmp_hdr->icmp6_type != 3) | ||
507 | return XDP_PASS; | ||
508 | off += sizeof(struct icmp6hdr); | ||
509 | ip6h = data + off; | ||
510 | if (ip6h + 1 > data_end) | ||
511 | return XDP_DROP; | ||
512 | pckt->flow.proto = ip6h->nexthdr; | ||
513 | pckt->flags |= (1 << 0); | ||
514 | memcpy(pckt->flow.srcv6, ip6h->daddr.in6_u.u6_addr32, 16); | ||
515 | memcpy(pckt->flow.dstv6, ip6h->saddr.in6_u.u6_addr32, 16); | ||
516 | return -1; | ||
517 | } | ||
518 | |||
519 | static __attribute__ ((noinline)) | ||
520 | int parse_icmp(void *data, void *data_end, __u64 off, | ||
521 | struct packet_description *pckt) | ||
522 | { | ||
523 | struct icmphdr *icmp_hdr; | ||
524 | struct iphdr *iph; | ||
525 | |||
526 | icmp_hdr = data + off; | ||
527 | if (icmp_hdr + 1 > data_end) | ||
528 | return XDP_DROP; | ||
529 | if (icmp_hdr->type == 8) /* ICMP_ECHO */ | ||
530 | return send_icmp_reply(data, data_end); | ||
531 | if ((icmp_hdr->type != 3) || (icmp_hdr->code != 4)) /* only frag-needed dest-unreach */ | ||
532 | return XDP_PASS; | ||
533 | off += sizeof(struct icmphdr); | ||
534 | iph = data + off; | ||
535 | if (iph + 1 > data_end) | ||
536 | return XDP_DROP; | ||
537 | if (iph->ihl != 5) | ||
538 | return XDP_DROP; | ||
539 | pckt->flow.proto = iph->protocol; | ||
540 | pckt->flags |= (1 << 0); | ||
541 | pckt->flow.src = iph->daddr; | ||
542 | pckt->flow.dst = iph->saddr; | ||
543 | return -1; | ||
544 | } | ||
545 | |||
546 | static __attribute__ ((noinline)) | ||
547 | __u32 get_packet_hash(struct packet_description *pckt, | ||
548 | bool hash_16bytes) | ||
549 | { | ||
550 | if (hash_16bytes) | ||
551 | return jhash_2words(jhash(pckt->flow.srcv6, 16, 12), | ||
552 | pckt->flow.ports, 24); | ||
553 | else | ||
554 | return jhash_2words(pckt->flow.src, pckt->flow.ports, | ||
555 | 24); | ||
556 | } | ||
557 | |||
558 | __attribute__ ((noinline)) | ||
559 | static bool get_packet_dst(struct real_definition **real, | ||
560 | struct packet_description *pckt, | ||
561 | struct vip_meta *vip_info, | ||
562 | bool is_ipv6, void *lru_map) | ||
563 | { | ||
564 | struct real_pos_lru new_dst_lru = { }; | ||
565 | bool hash_16bytes = is_ipv6; | ||
566 | __u32 *real_pos, hash, key; | ||
567 | __u64 cur_time; | ||
568 | |||
569 | if (vip_info->flags & (1 << 2)) | ||
570 | hash_16bytes = 1; | ||
571 | if (vip_info->flags & (1 << 3)) { | ||
572 | pckt->flow.port16[0] = pckt->flow.port16[1]; | ||
573 | memset(pckt->flow.srcv6, 0, 16); | ||
574 | } | ||
575 | hash = get_packet_hash(pckt, hash_16bytes); | ||
576 | if (hash != 0x358459b7 /* jhash of ipv4 packet */ && | ||
577 | hash != 0x2f4bc6bb /* jhash of ipv6 packet */) | ||
578 | return 0; | ||
579 | key = 2 * vip_info->vip_num + hash % 2; | ||
580 | real_pos = bpf_map_lookup_elem(&ch_rings, &key); | ||
581 | if (!real_pos) | ||
582 | return 0; | ||
583 | key = *real_pos; | ||
584 | *real = bpf_map_lookup_elem(&reals, &key); | ||
585 | if (!(*real)) | ||
586 | return 0; | ||
587 | if (!(vip_info->flags & (1 << 1))) { | ||
588 | __u32 conn_rate_key = 512 + 2; | ||
589 | struct lb_stats *conn_rate_stats = | ||
590 | bpf_map_lookup_elem(&stats, &conn_rate_key); | ||
591 | |||
592 | if (!conn_rate_stats) | ||
593 | return 1; | ||
594 | cur_time = bpf_ktime_get_ns(); | ||
595 | if ((cur_time - conn_rate_stats->v2) >> 32 > 0xffFFFF) { | ||
596 | conn_rate_stats->v1 = 1; | ||
597 | conn_rate_stats->v2 = cur_time; | ||
598 | } else { | ||
599 | conn_rate_stats->v1 += 1; | ||
600 | if (conn_rate_stats->v1 >= 1) | ||
601 | return 1; | ||
602 | } | ||
603 | if (pckt->flow.proto == IPPROTO_UDP) | ||
604 | new_dst_lru.atime = cur_time; | ||
605 | new_dst_lru.pos = key; | ||
606 | bpf_map_update_elem(lru_map, &pckt->flow, &new_dst_lru, 0); | ||
607 | } | ||
608 | return 1; | ||
609 | } | ||
610 | |||
611 | __attribute__ ((noinline)) | ||
612 | static void connection_table_lookup(struct real_definition **real, | ||
613 | struct packet_description *pckt, | ||
614 | void *lru_map) | ||
615 | { | ||
616 | |||
617 | struct real_pos_lru *dst_lru; | ||
618 | __u64 cur_time; | ||
619 | __u32 key; | ||
620 | |||
621 | dst_lru = bpf_map_lookup_elem(lru_map, &pckt->flow); | ||
622 | if (!dst_lru) | ||
623 | return; | ||
624 | if (pckt->flow.proto == IPPROTO_UDP) { | ||
625 | cur_time = bpf_ktime_get_ns(); | ||
626 | if (cur_time - dst_lru->atime > 300000) | ||
627 | return; | ||
628 | dst_lru->atime = cur_time; | ||
629 | } | ||
630 | key = dst_lru->pos; | ||
631 | *real = bpf_map_lookup_elem(&reals, &key); | ||
632 | } | ||
633 | |||
634 | /* don't believe your eyes! | ||
635 | * the function below has 6 arguments, whereas bpf and llvm allow a maximum of 5, | ||
636 | * but since it is _static_, llvm can optimize one argument away | ||
637 | */ | ||
638 | __attribute__ ((noinline)) | ||
639 | static int process_l3_headers_v6(struct packet_description *pckt, | ||
640 | __u8 *protocol, __u64 off, | ||
641 | __u16 *pkt_bytes, void *data, | ||
642 | void *data_end) | ||
643 | { | ||
644 | struct ipv6hdr *ip6h; | ||
645 | __u64 iph_len; | ||
646 | int action; | ||
647 | |||
648 | ip6h = data + off; | ||
649 | if (ip6h + 1 > data_end) | ||
650 | return XDP_DROP; | ||
651 | iph_len = sizeof(struct ipv6hdr); | ||
652 | *protocol = ip6h->nexthdr; | ||
653 | pckt->flow.proto = *protocol; | ||
654 | *pkt_bytes = __builtin_bswap16(ip6h->payload_len); | ||
655 | off += iph_len; | ||
656 | if (*protocol == 45) { | ||
657 | return XDP_DROP; | ||
658 | } else if (*protocol == 59) { | ||
659 | action = parse_icmpv6(data, data_end, off, pckt); | ||
660 | if (action >= 0) | ||
661 | return action; | ||
662 | } else { | ||
663 | memcpy(pckt->flow.srcv6, ip6h->saddr.in6_u.u6_addr32, 16); | ||
664 | memcpy(pckt->flow.dstv6, ip6h->daddr.in6_u.u6_addr32, 16); | ||
665 | } | ||
666 | return -1; | ||
667 | } | ||
668 | |||
669 | __attribute__ ((noinline)) | ||
670 | static int process_l3_headers_v4(struct packet_description *pckt, | ||
671 | __u8 *protocol, __u64 off, | ||
672 | __u16 *pkt_bytes, void *data, | ||
673 | void *data_end) | ||
674 | { | ||
675 | struct iphdr *iph; | ||
676 | __u64 iph_len; | ||
677 | int action; | ||
678 | |||
679 | iph = data + off; | ||
680 | if (iph + 1 > data_end) | ||
681 | return XDP_DROP; | ||
682 | if (iph->ihl != 5) | ||
683 | return XDP_DROP; | ||
684 | *protocol = iph->protocol; | ||
685 | pckt->flow.proto = *protocol; | ||
686 | *pkt_bytes = __builtin_bswap16(iph->tot_len); | ||
687 | off += 20; | ||
688 | if (iph->frag_off & 65343) /* 0xff3f == htons(0x3fff): offset or MF set, drop fragments */ | ||
689 | return XDP_DROP; | ||
690 | if (*protocol == IPPROTO_ICMP) { | ||
691 | action = parse_icmp(data, data_end, off, pckt); | ||
692 | if (action >= 0) | ||
693 | return action; | ||
694 | } else { | ||
695 | pckt->flow.src = iph->saddr; | ||
696 | pckt->flow.dst = iph->daddr; | ||
697 | } | ||
698 | return -1; | ||
699 | } | ||
700 | |||
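| /* main datapath: parse the L3/L4 headers, look up the VIP, pick a real | ||
| * via the LRU connection table or the consistent-hashing ring, then | ||
| * encapsulate towards the chosen real and update the stats maps | ||
| */ | ||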
701 | __attribute__ ((noinline)) | ||
702 | static int process_packet(void *data, __u64 off, void *data_end, | ||
703 | bool is_ipv6, struct xdp_md *xdp) | ||
704 | { | ||
705 | |||
706 | struct real_definition *dst = NULL; | ||
707 | struct packet_description pckt = { }; | ||
708 | struct vip_definition vip = { }; | ||
709 | struct lb_stats *data_stats; | ||
710 | struct eth_hdr *eth = data; | ||
711 | void *lru_map = &lru_cache; | ||
712 | struct vip_meta *vip_info; | ||
713 | __u32 lru_stats_key = 513; | ||
714 | __u32 mac_addr_pos = 0; | ||
715 | __u32 stats_key = 512; | ||
716 | struct ctl_value *cval; | ||
717 | __u16 pkt_bytes; | ||
718 | __u64 iph_len; | ||
719 | __u8 protocol; | ||
720 | __u32 vip_num; | ||
721 | int action; | ||
722 | |||
723 | if (is_ipv6) | ||
724 | action = process_l3_headers_v6(&pckt, &protocol, off, | ||
725 | &pkt_bytes, data, data_end); | ||
726 | else | ||
727 | action = process_l3_headers_v4(&pckt, &protocol, off, | ||
728 | &pkt_bytes, data, data_end); | ||
729 | if (action >= 0) | ||
730 | return action; | ||
731 | protocol = pckt.flow.proto; | ||
732 | if (protocol == IPPROTO_TCP) { | ||
733 | if (!parse_tcp(data, data_end, is_ipv6, &pckt)) | ||
734 | return XDP_DROP; | ||
735 | } else if (protocol == IPPROTO_UDP) { | ||
736 | if (!parse_udp(data, data_end, is_ipv6, &pckt)) | ||
737 | return XDP_DROP; | ||
738 | } else { | ||
739 | return XDP_TX; | ||
740 | } | ||
741 | |||
742 | if (is_ipv6) | ||
743 | memcpy(vip.vipv6, pckt.flow.dstv6, 16); | ||
744 | else | ||
745 | vip.vip = pckt.flow.dst; | ||
746 | vip.port = pckt.flow.port16[1]; | ||
747 | vip.proto = pckt.flow.proto; | ||
748 | vip_info = bpf_map_lookup_elem(&vip_map, &vip); | ||
749 | if (!vip_info) { | ||
750 | vip.port = 0; | ||
751 | vip_info = bpf_map_lookup_elem(&vip_map, &vip); | ||
752 | if (!vip_info) | ||
753 | return XDP_PASS; | ||
754 | if (!(vip_info->flags & (1 << 4))) | ||
755 | pckt.flow.port16[1] = 0; | ||
756 | } | ||
757 | if (data_end - data > 1400) | ||
758 | return XDP_DROP; | ||
759 | data_stats = bpf_map_lookup_elem(&stats, &stats_key); | ||
760 | if (!data_stats) | ||
761 | return XDP_DROP; | ||
762 | data_stats->v1 += 1; | ||
763 | if (!dst) { | ||
764 | if (vip_info->flags & (1 << 0)) | ||
765 | pckt.flow.port16[0] = 0; | ||
766 | if (!(pckt.flags & (1 << 1)) && !(vip_info->flags & (1 << 1))) | ||
767 | connection_table_lookup(&dst, &pckt, lru_map); | ||
768 | if (dst) | ||
769 | goto out; | ||
770 | if (pckt.flow.proto == IPPROTO_TCP) { | ||
771 | struct lb_stats *lru_stats = | ||
772 | bpf_map_lookup_elem(&stats, &lru_stats_key); | ||
773 | |||
774 | if (!lru_stats) | ||
775 | return XDP_DROP; | ||
776 | if (pckt.flags & (1 << 1)) | ||
777 | lru_stats->v1 += 1; | ||
778 | else | ||
779 | lru_stats->v2 += 1; | ||
780 | } | ||
781 | if (!get_packet_dst(&dst, &pckt, vip_info, is_ipv6, lru_map)) | ||
782 | return XDP_DROP; | ||
783 | data_stats->v2 += 1; | ||
784 | } | ||
785 | out: | ||
786 | cval = bpf_map_lookup_elem(&ctl_array, &mac_addr_pos); | ||
787 | if (!cval) | ||
788 | return XDP_DROP; | ||
789 | if (dst->flags & (1 << 0)) { | ||
790 | if (!encap_v6(xdp, cval, &pckt, dst, pkt_bytes)) | ||
791 | return XDP_DROP; | ||
792 | } else { | ||
793 | if (!encap_v4(xdp, cval, &pckt, dst, pkt_bytes)) | ||
794 | return XDP_DROP; | ||
795 | } | ||
796 | vip_num = vip_info->vip_num; | ||
797 | data_stats = bpf_map_lookup_elem(&stats, &vip_num); | ||
798 | if (!data_stats) | ||
799 | return XDP_DROP; | ||
800 | data_stats->v1 += 1; | ||
801 | data_stats->v2 += pkt_bytes; | ||
802 | |||
803 | data = (void *)(long)xdp->data; | ||
804 | data_end = (void *)(long)xdp->data_end; | ||
805 | if (data + 4 > data_end) | ||
806 | return XDP_DROP; | ||
807 | *(u32 *)data = dst->dst; | ||
808 | return XDP_DROP; | ||
809 | } | ||
810 | |||
811 | __attribute__ ((section("xdp-test"), used)) | ||
812 | int balancer_ingress(struct xdp_md *ctx) | ||
813 | { | ||
814 | void *data = (void *)(long)ctx->data; | ||
815 | void *data_end = (void *)(long)ctx->data_end; | ||
816 | struct eth_hdr *eth = data; | ||
817 | __u32 eth_proto; | ||
818 | __u32 nh_off; | ||
819 | |||
820 | nh_off = sizeof(struct eth_hdr); | ||
821 | if (data + nh_off > data_end) | ||
822 | return XDP_DROP; | ||
823 | eth_proto = eth->eth_proto; | ||
824 | if (eth_proto == 8) /* bswap16(ETH_P_IP) */ | ||
825 | return process_packet(data, nh_off, data_end, 0, ctx); | ||
826 | else if (eth_proto == 56710) /* bswap16(ETH_P_IPV6) */ | ||
827 | return process_packet(data, nh_off, data_end, 1, ctx); | ||
828 | else | ||
829 | return XDP_DROP; | ||
830 | } | ||
831 | |||
832 | char _license[] __attribute__ ((section("license"), used)) = "GPL"; | ||
833 | int _version __attribute__ ((section("version"), used)) = 1; | ||