author		Alexei Starovoitov <ast@fb.com>	2017-11-22 19:42:05 -0500
committer	Daniel Borkmann <daniel@iogearbox.net>	2017-11-23 04:56:35 -0500
commit		c131187db2d3fa2f8bf32fdf4e9a4ef805168467 (patch)
tree		27ca3840001ee21e75f21cd8ecfe3de4568a359f
parent		107af8ec117b0af6e90ddfe42e568b2efd8f6ff7 (diff)
bpf: fix branch pruning logic
When the verifier detects that a register contains a runtime constant and it is compared with another constant, it prunes exploration of the branch that is guaranteed not to be taken at run time. This is all correct, but a malicious program may be constructed in such a way that it always contains a constant comparison and the other branch is never taken under any conditions. In that case such a path through the program will not be explored by the verifier. It will not be taken at run time either, but since all instructions are JITed, the malicious program may cause JITs to complain about using reserved fields, etc.

To fix the issue, track the instructions explored by the verifier and sanitize the instructions that are dead at run time with NOPs. We cannot reject such dead code, since llvm generates it for valid C code: it does not do as much data flow analysis as the verifier does.

Fixes: 17a5267067f3 ("bpf: verifier (add verifier core)")
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
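To make the failure mode concrete, here is a minimal sketch of such a program (not taken from the patch; the array name and the 0xff opcode are invented for illustration), built with the instruction macros from include/linux/filter.h:

	#include <linux/filter.h>

	/* Hypothetical example: insn 2 is unreachable at run time because r0
	 * is known to be 0, but before this fix the JIT still saw the bogus
	 * opcode, since the verifier never explored that instruction.
	 */
	static const struct bpf_insn dead_branch_prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),		/* r0 = 0                     */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),	/* always taken: skip 1 insn  */
		BPF_RAW_INSN(0xff, 0, 0, 0, 0),		/* dead; reserved opcode a JIT would reject */
		BPF_EXIT_INSN(),			/* return r0                  */
	};

The verifier walks insns 0, 1 and 3 and marks them as seen; with this patch, insn 2 is rewritten to the nop r0 = r0 by sanitize_dead_code() before the program is handed to the JIT, so the JIT never sees the reserved opcode.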
-rw-r--r--	include/linux/bpf_verifier.h	 2
-rw-r--r--	kernel/bpf/verifier.c		27
2 files changed, 28 insertions, 1 deletion
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index b61482d354a2..c561b986bab0 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -115,7 +115,7 @@ struct bpf_insn_aux_data {
 		struct bpf_map *map_ptr;	/* pointer for call insn into lookup_elem */
 	};
 	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
-	int converted_op_size; /* the valid value width after perceived conversion */
+	bool seen; /* this insn was processed by the verifier */
 };
 
 #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 308b0638ec5d..d4593571c404 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3827,6 +3827,7 @@ static int do_check(struct bpf_verifier_env *env)
 			return err;
 
 		regs = cur_regs(env);
+		env->insn_aux_data[insn_idx].seen = true;
 		if (class == BPF_ALU || class == BPF_ALU64) {
 			err = check_alu_op(env, insn);
 			if (err)
@@ -4022,6 +4023,7 @@ process_bpf_exit:
 					return err;
 
 				insn_idx++;
+				env->insn_aux_data[insn_idx].seen = true;
 			} else {
 				verbose(env, "invalid BPF_LD mode\n");
 				return -EINVAL;
@@ -4204,6 +4206,7 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
 				u32 off, u32 cnt)
 {
 	struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
+	int i;
 
 	if (cnt == 1)
 		return 0;
@@ -4213,6 +4216,8 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env, u32 prog_len,
 	memcpy(new_data, old_data, sizeof(struct bpf_insn_aux_data) * off);
 	memcpy(new_data + off + cnt - 1, old_data + off,
 	       sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
+	for (i = off; i < off + cnt - 1; i++)
+		new_data[i].seen = true;
 	env->insn_aux_data = new_data;
 	vfree(old_data);
 	return 0;
@@ -4231,6 +4236,25 @@ static struct bpf_prog *bpf_patch_insn_data(struct bpf_verifier_env *env, u32 of
 	return new_prog;
 }
 
+/* The verifier does more data flow analysis than llvm and will not explore
+ * branches that are dead at run time. Malicious programs can have dead code
+ * too. Therefore replace all dead at-run-time code with nops.
+ */
+static void sanitize_dead_code(struct bpf_verifier_env *env)
+{
+	struct bpf_insn_aux_data *aux_data = env->insn_aux_data;
+	struct bpf_insn nop = BPF_MOV64_REG(BPF_REG_0, BPF_REG_0);
+	struct bpf_insn *insn = env->prog->insnsi;
+	const int insn_cnt = env->prog->len;
+	int i;
+
+	for (i = 0; i < insn_cnt; i++) {
+		if (aux_data[i].seen)
+			continue;
+		memcpy(insn + i, &nop, sizeof(nop));
+	}
+}
+
 /* convert load instructions that access fields of 'struct __sk_buff'
  * into sequence of instructions that access fields of 'struct sk_buff'
  */
@@ -4558,6 +4582,9 @@ skip_full_check:
 	free_states(env);
 
 	if (ret == 0)
+		sanitize_dead_code(env);
+
+	if (ret == 0)
 		/* program is valid, convert *(u32*)(ctx + off) accesses */
 		ret = convert_ctx_accesses(env);
 
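A note on the nop chosen by sanitize_dead_code(): BPF_MOV64_REG(BPF_REG_0, BPF_REG_0) expands, per its definition in include/linux/filter.h (shown here for reference, not taken from the patch), to a plain 64-bit register-to-register move. Every interpreter and JIT already supports it and it changes no state, so a dead slot stays a valid instruction:

	/* What BPF_MOV64_REG(BPF_REG_0, BPF_REG_0) expands to: */
	struct bpf_insn nop = {
		.code    = BPF_ALU64 | BPF_MOV | BPF_X,	/* 64-bit reg-to-reg move */
		.dst_reg = BPF_REG_0,			/* r0 = r0: no visible effect */
		.src_reg = BPF_REG_0,
		.off     = 0,
		.imm     = 0,
	};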