author    Alexei Starovoitov <ast@fb.com>    2017-12-14 20:55:09 -0500
committer Daniel Borkmann <daniel@iogearbox.net>    2017-12-17 14:34:35 -0500
commit    d98588cef04529aa326c6cbc0cfa01a3a3e00ef5 (patch)
tree      b11bab5e6fd494ec0c3a9e86def8a43ed9140273 /tools
parent    cc2b14d51053eb055c06f45e1a5cdbfcf2b79e94 (diff)
selftests/bpf: add tests for stack_zero tracking
Adjust two tests, since the verifier got smarter, and add a new one to test the stack_zero logic.

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
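The stack_zero tracking referenced above means that a known zero spilled to the stack is still known to be zero when it is loaded back, which is presumably why the two adjusted tests switch the spilled immediate from 0 to 1 (a reloaded constant zero would no longer exercise the size > 0 path those tests target). Below is a minimal, self-contained sketch, not part of the patch, that loads a trivial program through the raw bpf(2) syscall with a verbose verifier log so the handling of the zeroed slot can be inspected on a given kernel; the program layout, buffer size and log level are illustrative assumptions, and running it typically requires root:

/* Sketch only: store 0 to fp-8, load it back into r0, exit, and dump the
 * verifier log.  Uses only the bpf(2) UAPI from <linux/bpf.h>.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static char log_buf[65536];

int main(void)
{
	/* *(u64 *)(r10 - 8) = 0;  r0 = *(u64 *)(r10 - 8);  exit; */
	struct bpf_insn prog[] = {
		{ .code = BPF_ST | BPF_MEM | BPF_DW,
		  .dst_reg = BPF_REG_10, .off = -8, .imm = 0 },
		{ .code = BPF_LDX | BPF_MEM | BPF_DW,
		  .dst_reg = BPF_REG_0, .src_reg = BPF_REG_10, .off = -8 },
		{ .code = BPF_JMP | BPF_EXIT },
	};
	union bpf_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns     = (__u64)(unsigned long)prog;
	attr.insn_cnt  = sizeof(prog) / sizeof(prog[0]);
	attr.license   = (__u64)(unsigned long)"GPL";
	attr.log_buf   = (__u64)(unsigned long)log_buf;
	attr.log_size  = sizeof(log_buf);
	attr.log_level = 2;	/* verbose: per-insn verifier state */

	fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
	printf("prog fd: %d\n%s", fd, log_buf);
	if (fd >= 0)
		close(fd);
	return 0;
}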
Diffstat (limited to 'tools')
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c  66
1 file changed, 64 insertions(+), 2 deletions(-)
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 88f389c6ec48..eaf294822a8f 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -5649,7 +5649,7 @@ static struct bpf_test tests[] = {
5649 "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)", 5649 "helper access to variable memory: size > 0 not allowed on NULL (ARG_PTR_TO_MEM_OR_NULL)",
5650 .insns = { 5650 .insns = {
5651 BPF_MOV64_IMM(BPF_REG_1, 0), 5651 BPF_MOV64_IMM(BPF_REG_1, 0),
5652 BPF_MOV64_IMM(BPF_REG_2, 0), 5652 BPF_MOV64_IMM(BPF_REG_2, 1),
5653 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128), 5653 BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
5654 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128), 5654 BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
5655 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64), 5655 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
@@ -5884,7 +5884,7 @@ static struct bpf_test tests[] = {
 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -24),
 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
-			BPF_MOV64_IMM(BPF_REG_2, 0),
+			BPF_MOV64_IMM(BPF_REG_2, 1),
 			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
 			BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
 			BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
@@ -9056,6 +9056,68 @@ static struct bpf_test tests[] = {
 		.result = ACCEPT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 	},
+	{
+		"calls: caller stack init to zero or map_value_or_null",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 4),
+			/* fetch map_value_or_null or const_zero from stack */
+			BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
+			/* store into map_value */
+			BPF_ST_MEM(BPF_W, BPF_REG_0, 0, 0),
+			BPF_EXIT_INSN(),
+
+			/* subprog 1 */
+			/* if (ctx == 0) return; */
+			BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
+			/* else bpf_map_lookup() and *(fp - 8) = r0 */
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			/* write map_value_ptr_or_null into stack frame of main prog at fp-8 */
+			BPF_STX_MEM(BPF_DW, BPF_REG_6, BPF_REG_0, 0),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map1 = { 13 },
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_XDP,
+	},
+	{
+		"calls: stack init to zero and pruning",
+		.insns = {
+			/* first make allocated_stack 16 bytes */
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -16, 0),
+			/* now fork the execution such that the false branch
+			 * of JGT insn will be verified second and it skips zero
+			 * init of fp-8 stack slot. If stack liveness marking
+			 * is missing live_read marks from call map_lookup
+			 * processing then pruning will incorrectly assume
+			 * that fp-8 stack slot was unused in the fall-through
+			 * branch and will accept the program incorrectly
+			 */
+			BPF_JMP_IMM(BPF_JGT, BPF_REG_1, 2, 2),
+			BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
+			BPF_JMP_IMM(BPF_JA, 0, 0, 0),
+			BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
+			BPF_LD_MAP_FD(BPF_REG_1, 0),
+			BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
+				     BPF_FUNC_map_lookup_elem),
+			BPF_EXIT_INSN(),
+		},
+		.fixup_map2 = { 6 },
+		.errstr = "invalid indirect read from stack off -8+0 size 8",
+		.result = REJECT,
+		.prog_type = BPF_PROG_TYPE_XDP,
+	},
 };
 
 static int probe_filter_length(const struct bpf_insn *fp)
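For context on the .fixup_map1 = { 13 } and .fixup_map2 = { 6 } entries in the new tests: before loading such a test, test_verifier.c creates a map and patches its fd into the BPF_LD_MAP_FD placeholder at the listed instruction index. Below is a hedged sketch of that mechanism using only the bpf(2) UAPI; the helper names (create_hash_map, patch_map_fd), the map sizes, and the demo program are illustrative, not taken from the selftest:

/* Sketch only: create a hash map and patch its fd into an ldimm64 pair,
 * the way the selftest's .fixup_map* handling does conceptually.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int create_hash_map(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_HASH;
	attr.key_size    = sizeof(long long);	/* illustrative sizes */
	attr.value_size  = sizeof(long long);
	attr.max_entries = 1024;

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}

/* Patch a map fd into the two-instruction ldimm64 pair starting at
 * insns[idx]; src_reg = BPF_PSEUDO_MAP_FD tells the verifier that imm
 * carries a map fd rather than a plain 64-bit immediate.
 */
static void patch_map_fd(struct bpf_insn *insns, int idx, int map_fd)
{
	insns[idx].src_reg = BPF_PSEUDO_MAP_FD;
	insns[idx].imm     = map_fd;
	insns[idx + 1].imm = 0;
}

int main(void)
{
	/* ldimm64 r1, <map placeholder>;  r0 = 0;  exit */
	struct bpf_insn insns[] = {
		{ .code = BPF_LD | BPF_IMM | BPF_DW, .dst_reg = BPF_REG_1 },
		{ .imm = 0 },	/* second half of the ldimm64 pair */
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
		  .dst_reg = BPF_REG_0, .imm = 0 },
		{ .code = BPF_JMP | BPF_EXIT },
	};
	int map_fd = create_hash_map();

	if (map_fd >= 0)
		patch_map_fd(insns, 0, map_fd);
	printf("map fd: %d, ldimm64 imm now %d\n", map_fd, insns[0].imm);
	if (map_fd >= 0)
		close(map_fd);
	return 0;
}

Note that the ldimm64 placeholder occupies two instructions, which is one reason the fixup indices differ between tests that otherwise look similar.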