about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorJann Horn <jannh@google.com>2017-12-18 23:11:55 -0500
committerDaniel Borkmann <daniel@iogearbox.net>2017-12-20 20:15:41 -0500
commit0c17d1d2c61936401f4702e1846e2c19b200f958 (patch)
tree7b91089e5a04b06fb3a6cd3d3939d17f0d5fc0c8
parent95a762e2c8c942780948091f8f2a4f32fce1ac6f (diff)
bpf: fix incorrect tracking of register size truncation
Properly handle register truncation to a smaller size. The old code first mirrors the clearing of the high 32 bits in the bitwise tristate representation, which is correct. But then, it computes the new arithmetic bounds as the intersection between the old arithmetic bounds and the bounds resulting from the bitwise tristate representation. Therefore, when coerce_reg_to_32() is called on a number with bounds [0xffff'fff8, 0x1'0000'0007], the verifier computes [0xffff'fff8, 0xffff'ffff] as bounds of the truncated number. This is incorrect: The truncated number could also be in the range [0, 7], and no meaningful arithmetic bounds can be computed in that case apart from the obvious [0, 0xffff'ffff]. Starting with v4.14, this is exploitable by unprivileged users as long as the unprivileged_bpf_disabled sysctl isn't set. Debian assigned CVE-2017-16996 for this issue. v2: - flip the mask during arithmetic bounds calculation (Ben Hutchings) v3: - add CVE number (Ben Hutchings) Fixes: b03c9f9fdc37 ("bpf/verifier: track signed and unsigned min/max values") Signed-off-by: Jann Horn <jannh@google.com> Acked-by: Edward Cree <ecree@solarflare.com> Signed-off-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
-rw-r--r--kernel/bpf/verifier.c44
1 files changed, 27 insertions, 17 deletions
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index c086010ae51e..f716bdf29dd0 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1067,6 +1067,29 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
1067 strict); 1067 strict);
1068} 1068}
1069 1069
1070/* truncate register to smaller size (in bytes)
1071 * must be called with size < BPF_REG_SIZE
1072 */
1073static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
1074{
1075 u64 mask;
1076
1077 /* clear high bits in bit representation */
1078 reg->var_off = tnum_cast(reg->var_off, size);
1079
1080 /* fix arithmetic bounds */
1081 mask = ((u64)1 << (size * 8)) - 1;
1082 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
1083 reg->umin_value &= mask;
1084 reg->umax_value &= mask;
1085 } else {
1086 reg->umin_value = 0;
1087 reg->umax_value = mask;
1088 }
1089 reg->smin_value = reg->umin_value;
1090 reg->smax_value = reg->umax_value;
1091}
1092
1070/* check whether memory at (regno + off) is accessible for t = (read | write) 1093/* check whether memory at (regno + off) is accessible for t = (read | write)
1071 * if t==write, value_regno is a register which value is stored into memory 1094 * if t==write, value_regno is a register which value is stored into memory
1072 * if t==read, value_regno is a register which will receive the value from memory 1095 * if t==read, value_regno is a register which will receive the value from memory
@@ -1200,9 +1223,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
1200 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && 1223 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
1201 regs[value_regno].type == SCALAR_VALUE) { 1224 regs[value_regno].type == SCALAR_VALUE) {
1202 /* b/h/w load zero-extends, mark upper bits as known 0 */ 1225 /* b/h/w load zero-extends, mark upper bits as known 0 */
1203 regs[value_regno].var_off = 1226 coerce_reg_to_size(&regs[value_regno], size);
1204 tnum_cast(regs[value_regno].var_off, size);
1205 __update_reg_bounds(&regs[value_regno]);
1206 } 1227 }
1207 return err; 1228 return err;
1208} 1229}
@@ -1772,14 +1793,6 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
1772 return 0; 1793 return 0;
1773} 1794}
1774 1795
1775static void coerce_reg_to_32(struct bpf_reg_state *reg)
1776{
1777 /* clear high 32 bits */
1778 reg->var_off = tnum_cast(reg->var_off, 4);
1779 /* Update bounds */
1780 __update_reg_bounds(reg);
1781}
1782
1783static bool signed_add_overflows(s64 a, s64 b) 1796static bool signed_add_overflows(s64 a, s64 b)
1784{ 1797{
1785 /* Do the add in u64, where overflow is well-defined */ 1798 /* Do the add in u64, where overflow is well-defined */
@@ -2017,8 +2030,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
2017 2030
2018 if (BPF_CLASS(insn->code) != BPF_ALU64) { 2031 if (BPF_CLASS(insn->code) != BPF_ALU64) {
2019 /* 32-bit ALU ops are (32,32)->64 */ 2032 /* 32-bit ALU ops are (32,32)->64 */
2020 coerce_reg_to_32(dst_reg); 2033 coerce_reg_to_size(dst_reg, 4);
2021 coerce_reg_to_32(&src_reg); 2034 coerce_reg_to_size(&src_reg, 4);
2022 } 2035 }
2023 smin_val = src_reg.smin_value; 2036 smin_val = src_reg.smin_value;
2024 smax_val = src_reg.smax_value; 2037 smax_val = src_reg.smax_value;
@@ -2398,10 +2411,7 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
2398 return -EACCES; 2411 return -EACCES;
2399 } 2412 }
2400 mark_reg_unknown(env, regs, insn->dst_reg); 2413 mark_reg_unknown(env, regs, insn->dst_reg);
2401 /* high 32 bits are known zero. */ 2414 coerce_reg_to_size(&regs[insn->dst_reg], 4);
2402 regs[insn->dst_reg].var_off = tnum_cast(
2403 regs[insn->dst_reg].var_off, 4);
2404 __update_reg_bounds(&regs[insn->dst_reg]);
2405 } 2415 }
2406 } else { 2416 } else {
2407 /* case: R = imm 2417 /* case: R = imm