about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author    Jann Horn <jannh@google.com>            2017-12-18 23:11:56 -0500
committer Daniel Borkmann <daniel@iogearbox.net>  2017-12-20 20:15:41 -0500
commit    468f6eafa6c44cb2c5d8aad35e12f06c240a812a (patch)
tree      99734f161dfc9788aa59500058cd5859ac33fab0
parent    0c17d1d2c61936401f4702e1846e2c19b200f958 (diff)
bpf: fix 32-bit ALU op verification
32-bit ALU ops operate on 32-bit values and have 32-bit outputs.
Adjust the verifier accordingly.

Fixes: f1174f77b50c ("bpf/verifier: rework value tracking")
Signed-off-by: Jann Horn <jannh@google.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
-rw-r--r--  kernel/bpf/verifier.c | 28 +++++++++++++++++++-----------
1 file changed, 17 insertions(+), 11 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index f716bdf29dd0..ecdc265244ca 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2017,6 +2017,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 	return 0;
 }
 
+/* WARNING: This function does calculations on 64-bit values, but the actual
+ * execution may occur on 32-bit values. Therefore, things like bitshifts
+ * need extra checks in the 32-bit case.
+ */
 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 				      struct bpf_insn *insn,
 				      struct bpf_reg_state *dst_reg,
@@ -2027,12 +2031,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 	bool src_known, dst_known;
 	s64 smin_val, smax_val;
 	u64 umin_val, umax_val;
+	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
 
-	if (BPF_CLASS(insn->code) != BPF_ALU64) {
-		/* 32-bit ALU ops are (32,32)->64 */
-		coerce_reg_to_size(dst_reg, 4);
-		coerce_reg_to_size(&src_reg, 4);
-	}
 	smin_val = src_reg.smin_value;
 	smax_val = src_reg.smax_value;
 	umin_val = src_reg.umin_value;
@@ -2168,9 +2168,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		__update_reg_bounds(dst_reg);
 		break;
 	case BPF_LSH:
-		if (umax_val > 63) {
-			/* Shifts greater than 63 are undefined. This includes
-			 * shifts by a negative number.
+		if (umax_val >= insn_bitness) {
+			/* Shifts greater than 31 or 63 are undefined.
+			 * This includes shifts by a negative number.
 			 */
 			mark_reg_unknown(env, regs, insn->dst_reg);
 			break;
@@ -2196,9 +2196,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		__update_reg_bounds(dst_reg);
 		break;
 	case BPF_RSH:
-		if (umax_val > 63) {
-			/* Shifts greater than 63 are undefined. This includes
-			 * shifts by a negative number.
+		if (umax_val >= insn_bitness) {
+			/* Shifts greater than 31 or 63 are undefined.
+			 * This includes shifts by a negative number.
 			 */
 			mark_reg_unknown(env, regs, insn->dst_reg);
 			break;
@@ -2234,6 +2234,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		break;
 	}
 
+	if (BPF_CLASS(insn->code) != BPF_ALU64) {
+		/* 32-bit ALU ops are (32,32)->32 */
+		coerce_reg_to_size(dst_reg, 4);
+		coerce_reg_to_size(&src_reg, 4);
+	}
+
 	__reg_deduce_bounds(dst_reg);
 	__reg_bound_offset(dst_reg);
 	return 0;