diff options
author | Thomas Graf <tgraf@suug.ch> | 2016-10-18 13:51:19 -0400 |
---|---|---|
committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2017-03-22 07:43:35 -0400 |
commit | 1411707acb85c514c603f692327c98db48127900 (patch) | |
tree | 0d7b6ce4037da53feb43d6815fff1e0e8a8ae83b | |
parent | 9e38375a4b1748946ab46a317def3046bb425f1e (diff) |
bpf: Detect identical PTR_TO_MAP_VALUE_OR_NULL registers
[ Upstream commit 57a09bf0a416700676e77102c28f9cfcb48267e0 ]
A BPF program is required to check the return register of a
map_elem_lookup() call before accessing memory. The verifier keeps
track of this by converting the type of the result register from
PTR_TO_MAP_VALUE_OR_NULL to PTR_TO_MAP_VALUE after a conditional
jump ensures safety. This check is currently exclusively performed
for the result register 0.
In the event the compiler reorders instructions, BPF_MOV64_REG
instructions may be moved before the conditional jump which causes
them to keep their type PTR_TO_MAP_VALUE_OR_NULL to which the
verifier objects when the register is accessed:
0: (b7) r1 = 10
1: (7b) *(u64 *)(r10 -8) = r1
2: (bf) r2 = r10
3: (07) r2 += -8
4: (18) r1 = 0x59c00000
6: (85) call 1
7: (bf) r4 = r0
8: (15) if r0 == 0x0 goto pc+1
R0=map_value(ks=8,vs=8) R4=map_value_or_null(ks=8,vs=8) R10=fp
9: (7a) *(u64 *)(r4 +0) = 0
R4 invalid mem access 'map_value_or_null'
This commit extends the verifier to keep track of all identical
PTR_TO_MAP_VALUE_OR_NULL registers after a map_elem_lookup() by
assigning them an ID and then marking them all when the conditional
jump is observed.
Signed-off-by: Thomas Graf <tgraf@suug.ch>
Reviewed-by: Josef Bacik <jbacik@fb.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r-- | include/linux/bpf_verifier.h | 2 | ||||
-rw-r--r-- | kernel/bpf/verifier.c | 61 |
2 files changed, 46 insertions, 17 deletions
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 6aaf425cebc3..7453c1281531 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h | |||
@@ -24,13 +24,13 @@ struct bpf_reg_state { | |||
24 | */ | 24 | */ |
25 | s64 min_value; | 25 | s64 min_value; |
26 | u64 max_value; | 26 | u64 max_value; |
27 | u32 id; | ||
27 | union { | 28 | union { |
28 | /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */ | 29 | /* valid when type == CONST_IMM | PTR_TO_STACK | UNKNOWN_VALUE */ |
29 | s64 imm; | 30 | s64 imm; |
30 | 31 | ||
31 | /* valid when type == PTR_TO_PACKET* */ | 32 | /* valid when type == PTR_TO_PACKET* */ |
32 | struct { | 33 | struct { |
33 | u32 id; | ||
34 | u16 off; | 34 | u16 off; |
35 | u16 range; | 35 | u16 range; |
36 | }; | 36 | }; |
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 8199821f54cf..c428c9f85186 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c | |||
@@ -212,9 +212,10 @@ static void print_verifier_state(struct bpf_verifier_state *state) | |||
212 | else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || | 212 | else if (t == CONST_PTR_TO_MAP || t == PTR_TO_MAP_VALUE || |
213 | t == PTR_TO_MAP_VALUE_OR_NULL || | 213 | t == PTR_TO_MAP_VALUE_OR_NULL || |
214 | t == PTR_TO_MAP_VALUE_ADJ) | 214 | t == PTR_TO_MAP_VALUE_ADJ) |
215 | verbose("(ks=%d,vs=%d)", | 215 | verbose("(ks=%d,vs=%d,id=%u)", |
216 | reg->map_ptr->key_size, | 216 | reg->map_ptr->key_size, |
217 | reg->map_ptr->value_size); | 217 | reg->map_ptr->value_size, |
218 | reg->id); | ||
218 | if (reg->min_value != BPF_REGISTER_MIN_RANGE) | 219 | if (reg->min_value != BPF_REGISTER_MIN_RANGE) |
219 | verbose(",min_value=%lld", | 220 | verbose(",min_value=%lld", |
220 | (long long)reg->min_value); | 221 | (long long)reg->min_value); |
@@ -447,6 +448,7 @@ static void mark_reg_unknown_value(struct bpf_reg_state *regs, u32 regno) | |||
447 | { | 448 | { |
448 | BUG_ON(regno >= MAX_BPF_REG); | 449 | BUG_ON(regno >= MAX_BPF_REG); |
449 | regs[regno].type = UNKNOWN_VALUE; | 450 | regs[regno].type = UNKNOWN_VALUE; |
451 | regs[regno].id = 0; | ||
450 | regs[regno].imm = 0; | 452 | regs[regno].imm = 0; |
451 | } | 453 | } |
452 | 454 | ||
@@ -1252,6 +1254,7 @@ static int check_call(struct bpf_verifier_env *env, int func_id) | |||
1252 | return -EINVAL; | 1254 | return -EINVAL; |
1253 | } | 1255 | } |
1254 | regs[BPF_REG_0].map_ptr = meta.map_ptr; | 1256 | regs[BPF_REG_0].map_ptr = meta.map_ptr; |
1257 | regs[BPF_REG_0].id = ++env->id_gen; | ||
1255 | } else { | 1258 | } else { |
1256 | verbose("unknown return type %d of func %d\n", | 1259 | verbose("unknown return type %d of func %d\n", |
1257 | fn->ret_type, func_id); | 1260 | fn->ret_type, func_id); |
@@ -1668,8 +1671,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn) | |||
1668 | insn->src_reg); | 1671 | insn->src_reg); |
1669 | return -EACCES; | 1672 | return -EACCES; |
1670 | } | 1673 | } |
1671 | regs[insn->dst_reg].type = UNKNOWN_VALUE; | 1674 | mark_reg_unknown_value(regs, insn->dst_reg); |
1672 | regs[insn->dst_reg].map_ptr = NULL; | ||
1673 | } | 1675 | } |
1674 | } else { | 1676 | } else { |
1675 | /* case: R = imm | 1677 | /* case: R = imm |
@@ -1931,6 +1933,38 @@ static void reg_set_min_max_inv(struct bpf_reg_state *true_reg, | |||
1931 | check_reg_overflow(true_reg); | 1933 | check_reg_overflow(true_reg); |
1932 | } | 1934 | } |
1933 | 1935 | ||
1936 | static void mark_map_reg(struct bpf_reg_state *regs, u32 regno, u32 id, | ||
1937 | enum bpf_reg_type type) | ||
1938 | { | ||
1939 | struct bpf_reg_state *reg = ®s[regno]; | ||
1940 | |||
1941 | if (reg->type == PTR_TO_MAP_VALUE_OR_NULL && reg->id == id) { | ||
1942 | reg->type = type; | ||
1943 | if (type == UNKNOWN_VALUE) | ||
1944 | mark_reg_unknown_value(regs, regno); | ||
1945 | } | ||
1946 | } | ||
1947 | |||
1948 | /* The logic is similar to find_good_pkt_pointers(), both could eventually | ||
1949 | * be folded together at some point. | ||
1950 | */ | ||
1951 | static void mark_map_regs(struct bpf_verifier_state *state, u32 regno, | ||
1952 | enum bpf_reg_type type) | ||
1953 | { | ||
1954 | struct bpf_reg_state *regs = state->regs; | ||
1955 | int i; | ||
1956 | |||
1957 | for (i = 0; i < MAX_BPF_REG; i++) | ||
1958 | mark_map_reg(regs, i, regs[regno].id, type); | ||
1959 | |||
1960 | for (i = 0; i < MAX_BPF_STACK; i += BPF_REG_SIZE) { | ||
1961 | if (state->stack_slot_type[i] != STACK_SPILL) | ||
1962 | continue; | ||
1963 | mark_map_reg(state->spilled_regs, i / BPF_REG_SIZE, | ||
1964 | regs[regno].id, type); | ||
1965 | } | ||
1966 | } | ||
1967 | |||
1934 | static int check_cond_jmp_op(struct bpf_verifier_env *env, | 1968 | static int check_cond_jmp_op(struct bpf_verifier_env *env, |
1935 | struct bpf_insn *insn, int *insn_idx) | 1969 | struct bpf_insn *insn, int *insn_idx) |
1936 | { | 1970 | { |
@@ -2018,18 +2052,13 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env, | |||
2018 | if (BPF_SRC(insn->code) == BPF_K && | 2052 | if (BPF_SRC(insn->code) == BPF_K && |
2019 | insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && | 2053 | insn->imm == 0 && (opcode == BPF_JEQ || opcode == BPF_JNE) && |
2020 | dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { | 2054 | dst_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { |
2021 | if (opcode == BPF_JEQ) { | 2055 | /* Mark all identical map registers in each branch as either |
2022 | /* next fallthrough insn can access memory via | 2056 | * safe or unknown depending R == 0 or R != 0 conditional. |
2023 | * this register | 2057 | */ |
2024 | */ | 2058 | mark_map_regs(this_branch, insn->dst_reg, |
2025 | regs[insn->dst_reg].type = PTR_TO_MAP_VALUE; | 2059 | opcode == BPF_JEQ ? PTR_TO_MAP_VALUE : UNKNOWN_VALUE); |
2026 | /* branch target cannot access it, since reg == 0 */ | ||
2027 | mark_reg_unknown_value(other_branch->regs, | 2061 | opcode == BPF_JEQ ? UNKNOWN_VALUE : PTR_TO_MAP_VALUE); |
2028 | insn->dst_reg); | ||
2029 | } else { | ||
2030 | other_branch->regs[insn->dst_reg].type = PTR_TO_MAP_VALUE; | ||
2031 | mark_reg_unknown_value(regs, insn->dst_reg); | ||
2032 | } | ||
2033 | } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT && | 2062 | } else if (BPF_SRC(insn->code) == BPF_X && opcode == BPF_JGT && |
2034 | dst_reg->type == PTR_TO_PACKET && | 2063 | dst_reg->type == PTR_TO_PACKET && |
2035 | regs[insn->src_reg].type == PTR_TO_PACKET_END) { | 2064 | regs[insn->src_reg].type == PTR_TO_PACKET_END) { |