Diffstat (limited to 'kernel/bpf/verifier.c')
 kernel/bpf/verifier.c | 319 +++++++++++++++++++++++++++++++-----------------
 1 file changed, 202 insertions(+), 117 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d4593571c404..b414d6b2d470 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1059,6 +1059,11 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
 		break;
 	case PTR_TO_STACK:
 		pointer_desc = "stack ";
+		/* The stack spill tracking logic in check_stack_write()
+		 * and check_stack_read() relies on stack accesses being
+		 * aligned.
+		 */
+		strict = true;
 		break;
 	default:
 		break;
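Why strict alignment matters here: the spill tracker in check_stack_write()/check_stack_read() identifies a stack slot by its offset divided by BPF_REG_SIZE, so an unaligned 8-byte spill would straddle two slots and the tracked contents would no longer describe the bytes actually stored. A minimal userspace sketch of that slot arithmetic (constants and names are illustrative, not the kernel's):

#include <stdio.h>

#define REG_SIZE	8	/* mirrors BPF_REG_SIZE */
#define STACK_SIZE	512	/* mirrors MAX_BPF_STACK */

/* Slot index for a (negative) stack offset, as a spill tracker
 * would compute it.
 */
static int slot_of(int off)
{
	return (STACK_SIZE + off) / REG_SIZE;
}

int main(void)
{
	/* aligned 8-byte spill at -16: first and last byte share slot 62 */
	printf("aligned:   %d..%d\n", slot_of(-16), slot_of(-16 + REG_SIZE - 1));
	/* unaligned spill at -12: bytes straddle slots 62 and 63 */
	printf("unaligned: %d..%d\n", slot_of(-12), slot_of(-12 + REG_SIZE - 1));
	return 0;
}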
@@ -1067,6 +1072,29 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
 			       strict);
 }
 
+/* truncate register to smaller size (in bytes)
+ * must be called with size < BPF_REG_SIZE
+ */
+static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
+{
+	u64 mask;
+
+	/* clear high bits in bit representation */
+	reg->var_off = tnum_cast(reg->var_off, size);
+
+	/* fix arithmetic bounds */
+	mask = ((u64)1 << (size * 8)) - 1;
+	if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
+		reg->umin_value &= mask;
+		reg->umax_value &= mask;
+	} else {
+		reg->umin_value = 0;
+		reg->umax_value = mask;
+	}
+	reg->smin_value = reg->umin_value;
+	reg->smax_value = reg->umax_value;
+}
+
 /* check whether memory at (regno + off) is accessible for t = (read | write)
  * if t==write, value_regno is a register which value is stored into memory
  * if t==read, value_regno is a register which will receive the value from memory
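The bounds half of coerce_reg_to_size() above is easy to sanity-check in isolation: when umin_value and umax_value agree on every bit above the truncation width, masking both endpoints yields a still-valid range; when they disagree, truncation can wrap the value anywhere, so the range must collapse to [0, mask]. A standalone sketch with userspace types (illustrative only):

#include <stdint.h>
#include <stdio.h>

struct bounds { uint64_t umin, umax; };

/* same masking rule as the patch: keep the range only if the
 * high bits match on both endpoints
 */
static void coerce_bounds(struct bounds *b, int size)
{
	uint64_t mask = ((uint64_t)1 << (size * 8)) - 1;

	if ((b->umin & ~mask) == (b->umax & ~mask)) {
		b->umin &= mask;
		b->umax &= mask;
	} else {
		b->umin = 0;
		b->umax = mask;
	}
}

int main(void)
{
	struct bounds a = { 0x100000005ULL, 0x10000000aULL }; /* high bits equal */
	struct bounds b = { 0x0ffffffffULL, 0x100000001ULL }; /* straddles 2^32 */

	coerce_bounds(&a, 4);
	coerce_bounds(&b, 4);
	printf("a: [%llu, %llu]\n", (unsigned long long)a.umin,
	       (unsigned long long)a.umax);	/* prints [5, 10] */
	printf("b: [%llu, %llu]\n", (unsigned long long)b.umin,
	       (unsigned long long)b.umax);	/* prints [0, 4294967295] */
	return 0;
}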
@@ -1200,9 +1228,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
 	if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
 	    regs[value_regno].type == SCALAR_VALUE) {
 		/* b/h/w load zero-extends, mark upper bits as known 0 */
-		regs[value_regno].var_off =
-			tnum_cast(regs[value_regno].var_off, size);
-		__update_reg_bounds(&regs[value_regno]);
+		coerce_reg_to_size(&regs[value_regno], size);
 	}
 	return err;
 }
@@ -1282,6 +1308,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
 		tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
 		verbose(env, "invalid variable stack read R%d var_off=%s\n",
 			regno, tn_buf);
+		return -EACCES;
 	}
 	off = regs[regno].off + regs[regno].var_off.value;
 	if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
@@ -1674,7 +1701,13 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 		return -EINVAL;
 	}
 
+	/* With LD_ABS/IND some JITs save/restore skb from r1. */
 	changes_data = bpf_helper_changes_pkt_data(fn->func);
+	if (changes_data && fn->arg1_type != ARG_PTR_TO_CTX) {
+		verbose(env, "kernel subsystem misconfigured func %s#%d: r1 != ctx\n",
+			func_id_name(func_id), func_id);
+		return -EINVAL;
+	}
 
 	memset(&meta, 0, sizeof(meta));
 	meta.pkt_access = fn->pkt_access;
@@ -1696,6 +1729,13 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 	err = check_func_arg(env, BPF_REG_2, fn->arg2_type, &meta);
 	if (err)
 		return err;
+	if (func_id == BPF_FUNC_tail_call) {
+		if (meta.map_ptr == NULL) {
+			verbose(env, "verifier bug\n");
+			return -EINVAL;
+		}
+		env->insn_aux_data[insn_idx].map_ptr = meta.map_ptr;
+	}
 	err = check_func_arg(env, BPF_REG_3, fn->arg3_type, &meta);
 	if (err)
 		return err;
@@ -1766,14 +1806,6 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 	return 0;
 }
 
-static void coerce_reg_to_32(struct bpf_reg_state *reg)
-{
-	/* clear high 32 bits */
-	reg->var_off = tnum_cast(reg->var_off, 4);
-	/* Update bounds */
-	__update_reg_bounds(reg);
-}
-
 static bool signed_add_overflows(s64 a, s64 b)
 {
 	/* Do the add in u64, where overflow is well-defined */
@@ -1794,6 +1826,41 @@ static bool signed_sub_overflows(s64 a, s64 b)
 	return res > a;
 }
 
+static bool check_reg_sane_offset(struct bpf_verifier_env *env,
+				  const struct bpf_reg_state *reg,
+				  enum bpf_reg_type type)
+{
+	bool known = tnum_is_const(reg->var_off);
+	s64 val = reg->var_off.value;
+	s64 smin = reg->smin_value;
+
+	if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
+		verbose(env, "math between %s pointer and %lld is not allowed\n",
+			reg_type_str[type], val);
+		return false;
+	}
+
+	if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
+		verbose(env, "%s pointer offset %d is not allowed\n",
+			reg_type_str[type], reg->off);
+		return false;
+	}
+
+	if (smin == S64_MIN) {
+		verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
+			reg_type_str[type]);
+		return false;
+	}
+
+	if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
+		verbose(env, "value %lld makes %s pointer be out of bounds\n",
+			smin, reg_type_str[type]);
+		return false;
+	}
+
+	return true;
+}
+
 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
  * Caller should also handle BPF_MOV case separately.
  * If we return -EACCES, caller may want to try again treating pointer as a
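check_reg_sane_offset() keeps every component of a pointer expression (the fixed offset, a known constant, the signed minimum of a variable part) well inside the 64-bit range, so summing offset, variable part, and access size later can never overflow the verifier's s64 bounds arithmetic. A toy version of the test, assuming the kernel's BPF_MAX_VAR_OFF of 1 << 29 from this era:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_VAR_OFF (1LL << 29)	/* assumption: mirrors BPF_MAX_VAR_OFF */

/* a component is sane if it lies strictly inside (-2^29, 2^29);
 * a handful of such components can never wrap an s64
 */
static bool component_is_sane(int64_t val)
{
	return val > -MAX_VAR_OFF && val < MAX_VAR_OFF;
}

int main(void)
{
	printf("%d\n", component_is_sane(4096));	/* 1: allowed */
	printf("%d\n", component_is_sane(1LL << 30));	/* 0: rejected */
	return 0;
}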
@@ -1830,29 +1897,25 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 
 	if (BPF_CLASS(insn->code) != BPF_ALU64) {
 		/* 32-bit ALU ops on pointers produce (meaningless) scalars */
-		if (!env->allow_ptr_leaks)
-			verbose(env,
-				"R%d 32-bit pointer arithmetic prohibited\n",
-				dst);
+		verbose(env,
+			"R%d 32-bit pointer arithmetic prohibited\n",
+			dst);
 		return -EACCES;
 	}
 
 	if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
-		if (!env->allow_ptr_leaks)
-			verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
-				dst);
+		verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
+			dst);
 		return -EACCES;
 	}
 	if (ptr_reg->type == CONST_PTR_TO_MAP) {
-		if (!env->allow_ptr_leaks)
-			verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
-				dst);
+		verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
+			dst);
 		return -EACCES;
 	}
 	if (ptr_reg->type == PTR_TO_PACKET_END) {
-		if (!env->allow_ptr_leaks)
-			verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
-				dst);
+		verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
+			dst);
 		return -EACCES;
 	}
 
@@ -1862,6 +1925,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 	dst_reg->type = ptr_reg->type;
 	dst_reg->id = ptr_reg->id;
 
+	if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
+	    !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
+		return -EINVAL;
+
 	switch (opcode) {
 	case BPF_ADD:
 		/* We can take a fixed offset as long as it doesn't overflow
@@ -1915,9 +1982,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 	case BPF_SUB:
 		if (dst_reg == off_reg) {
 			/* scalar -= pointer. Creates an unknown scalar */
-			if (!env->allow_ptr_leaks)
-				verbose(env, "R%d tried to subtract pointer from scalar\n",
-					dst);
+			verbose(env, "R%d tried to subtract pointer from scalar\n",
+				dst);
 			return -EACCES;
 		}
 		/* We don't allow subtraction from FP, because (according to
@@ -1925,9 +1991,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 		 * be able to deal with it.
 		 */
 		if (ptr_reg->type == PTR_TO_STACK) {
-			if (!env->allow_ptr_leaks)
-				verbose(env, "R%d subtraction from stack pointer prohibited\n",
-					dst);
+			verbose(env, "R%d subtraction from stack pointer prohibited\n",
+				dst);
 			return -EACCES;
 		}
 		if (known && (ptr_reg->off - smin_val ==
@@ -1976,28 +2041,30 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
 	case BPF_AND:
 	case BPF_OR:
 	case BPF_XOR:
-		/* bitwise ops on pointers are troublesome, prohibit for now.
-		 * (However, in principle we could allow some cases, e.g.
-		 * ptr &= ~3 which would reduce min_value by 3.)
-		 */
-		if (!env->allow_ptr_leaks)
-			verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
-				dst, bpf_alu_string[opcode >> 4]);
+		/* bitwise ops on pointers are troublesome, prohibit. */
+		verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
+			dst, bpf_alu_string[opcode >> 4]);
 		return -EACCES;
 	default:
 		/* other operators (e.g. MUL,LSH) produce non-pointer results */
-		if (!env->allow_ptr_leaks)
-			verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
-				dst, bpf_alu_string[opcode >> 4]);
+		verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
+			dst, bpf_alu_string[opcode >> 4]);
 		return -EACCES;
 	}
 
+	if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
+		return -EINVAL;
+
 	__update_reg_bounds(dst_reg);
 	__reg_deduce_bounds(dst_reg);
 	__reg_bound_offset(dst_reg);
 	return 0;
 }
 
+/* WARNING: This function does calculations on 64-bit values, but the actual
+ * execution may occur on 32-bit values. Therefore, things like bitshifts
+ * need extra checks in the 32-bit case.
+ */
 static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 				      struct bpf_insn *insn,
 				      struct bpf_reg_state *dst_reg,
@@ -2008,12 +2075,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 	bool src_known, dst_known;
 	s64 smin_val, smax_val;
 	u64 umin_val, umax_val;
+	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
 
-	if (BPF_CLASS(insn->code) != BPF_ALU64) {
-		/* 32-bit ALU ops are (32,32)->64 */
-		coerce_reg_to_32(dst_reg);
-		coerce_reg_to_32(&src_reg);
-	}
 	smin_val = src_reg.smin_value;
 	smax_val = src_reg.smax_value;
 	umin_val = src_reg.umin_value;
@@ -2021,6 +2084,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 	src_known = tnum_is_const(src_reg.var_off);
 	dst_known = tnum_is_const(dst_reg->var_off);
 
+	if (!src_known &&
+	    opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
+		__mark_reg_unknown(dst_reg);
+		return 0;
+	}
+
 	switch (opcode) {
 	case BPF_ADD:
 		if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
@@ -2149,9 +2218,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		__update_reg_bounds(dst_reg);
 		break;
 	case BPF_LSH:
-		if (umax_val > 63) {
-			/* Shifts greater than 63 are undefined. This includes
-			 * shifts by a negative number.
+		if (umax_val >= insn_bitness) {
+			/* Shifts greater than 31 or 63 are undefined.
+			 * This includes shifts by a negative number.
 			 */
 			mark_reg_unknown(env, regs, insn->dst_reg);
 			break;
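Replacing the hard-coded 63 with insn_bitness matters because BPF_ALU32 shifts execute on 32-bit values: shifting by the operand width or more is undefined in C and varies across CPUs (x86 masks a 32-bit shift count to its low 5 bits, for example), so any count that can reach the width must make the result unknown. A small illustration of the corrected predicate:

#include <stdint.h>
#include <stdio.h>

/* a shift leaves trackable bounds only when every possible count
 * stays below the operand width
 */
static int shift_bounds_trackable(uint64_t umax_count, unsigned int bitness)
{
	return umax_count < bitness;
}

int main(void)
{
	printf("%d\n", shift_bounds_trackable(31, 32)); /* 1 */
	printf("%d\n", shift_bounds_trackable(32, 32)); /* 0: old code allowed this */
	printf("%d\n", shift_bounds_trackable(63, 64)); /* 1 */
	return 0;
}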
@@ -2177,27 +2246,29 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		__update_reg_bounds(dst_reg);
 		break;
 	case BPF_RSH:
-		if (umax_val > 63) {
-			/* Shifts greater than 63 are undefined. This includes
-			 * shifts by a negative number.
+		if (umax_val >= insn_bitness) {
+			/* Shifts greater than 31 or 63 are undefined.
+			 * This includes shifts by a negative number.
 			 */
 			mark_reg_unknown(env, regs, insn->dst_reg);
 			break;
 		}
-		/* BPF_RSH is an unsigned shift, so make the appropriate casts */
-		if (dst_reg->smin_value < 0) {
-			if (umin_val) {
-				/* Sign bit will be cleared */
-				dst_reg->smin_value = 0;
-			} else {
-				/* Lost sign bit information */
-				dst_reg->smin_value = S64_MIN;
-				dst_reg->smax_value = S64_MAX;
-			}
-		} else {
-			dst_reg->smin_value =
-				(u64)(dst_reg->smin_value) >> umax_val;
-		}
+		/* BPF_RSH is an unsigned shift. If the value in dst_reg might
+		 * be negative, then either:
+		 * 1) src_reg might be zero, so the sign bit of the result is
+		 *    unknown, so we lose our signed bounds
+		 * 2) it's known negative, thus the unsigned bounds capture the
+		 *    signed bounds
+		 * 3) the signed bounds cross zero, so they tell us nothing
+		 *    about the result
+		 * If the value in dst_reg is known nonnegative, then again the
+		 * unsigned bounds capture the signed bounds.
+		 * Thus, in all cases it suffices to blow away our signed bounds
+		 * and rely on inferring new ones from the unsigned bounds and
+		 * var_off of the result.
+		 */
+		dst_reg->smin_value = S64_MIN;
+		dst_reg->smax_value = S64_MAX;
 		if (src_known)
 			dst_reg->var_off = tnum_rshift(dst_reg->var_off,
 						       umin_val);
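A worked example of why the new BPF_RSH code discards the signed bounds outright: for a destination known to lie in [-2, 2] signed, the logical shift (u64)dst >> 1 produces either a tiny result or one close to 2^63, so no usable signed range survives; only the unsigned bounds and var_off remain meaningful, and the verifier re-derives signed bounds from those afterwards. Standalone sketch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* logical right shift of each value in the signed range [-2, 2] */
	for (int64_t v = -2; v <= 2; v++)
		printf("%3lld >> 1 = %llu\n", (long long)v,
		       (unsigned long long)((uint64_t)v >> 1));
	/* negative inputs land near 2^63, nonnegative ones near 0:
	 * the results span no useful contiguous signed range
	 */
	return 0;
}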
@@ -2213,6 +2284,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		break;
 	}
 
+	if (BPF_CLASS(insn->code) != BPF_ALU64) {
+		/* 32-bit ALU ops are (32,32)->32 */
+		coerce_reg_to_size(dst_reg, 4);
+		coerce_reg_to_size(&src_reg, 4);
+	}
+
 	__reg_deduce_bounds(dst_reg);
 	__reg_bound_offset(dst_reg);
 	return 0;
@@ -2227,7 +2304,6 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 	struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg;
 	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
 	u8 opcode = BPF_OP(insn->code);
-	int rc;
 
 	dst_reg = &regs[insn->dst_reg];
 	src_reg = NULL;
@@ -2238,43 +2314,29 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 		if (src_reg->type != SCALAR_VALUE) {
 			if (dst_reg->type != SCALAR_VALUE) {
 				/* Combining two pointers by any ALU op yields
-				 * an arbitrary scalar.
+				 * an arbitrary scalar. Disallow all math except
+				 * pointer subtraction
 				 */
-				if (!env->allow_ptr_leaks) {
-					verbose(env, "R%d pointer %s pointer prohibited\n",
-						insn->dst_reg,
-						bpf_alu_string[opcode >> 4]);
-					return -EACCES;
+				if (opcode == BPF_SUB) {
+					mark_reg_unknown(env, regs, insn->dst_reg);
+					return 0;
 				}
-				mark_reg_unknown(env, regs, insn->dst_reg);
-				return 0;
+				verbose(env, "R%d pointer %s pointer prohibited\n",
+					insn->dst_reg,
+					bpf_alu_string[opcode >> 4]);
+				return -EACCES;
 			} else {
 				/* scalar += pointer
 				 * This is legal, but we have to reverse our
 				 * src/dest handling in computing the range
 				 */
-				rc = adjust_ptr_min_max_vals(env, insn,
-							     src_reg, dst_reg);
-				if (rc == -EACCES && env->allow_ptr_leaks) {
-					/* scalar += unknown scalar */
-					__mark_reg_unknown(&off_reg);
-					return adjust_scalar_min_max_vals(
-							env, insn,
-							dst_reg, off_reg);
-				}
-				return rc;
+				return adjust_ptr_min_max_vals(env, insn,
+							       src_reg, dst_reg);
 			}
 		} else if (ptr_reg) {
 			/* pointer += scalar */
-			rc = adjust_ptr_min_max_vals(env, insn,
-						     dst_reg, src_reg);
-			if (rc == -EACCES && env->allow_ptr_leaks) {
-				/* unknown scalar += scalar */
-				__mark_reg_unknown(dst_reg);
-				return adjust_scalar_min_max_vals(
-						env, insn, dst_reg, *src_reg);
-			}
-			return rc;
+			return adjust_ptr_min_max_vals(env, insn,
+						       dst_reg, src_reg);
 		}
 	} else {
 		/* Pretend the src is a reg with a known value, since we only
@@ -2283,17 +2345,9 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
 		off_reg.type = SCALAR_VALUE;
 		__mark_reg_known(&off_reg, insn->imm);
 		src_reg = &off_reg;
-		if (ptr_reg) { /* pointer += K */
-			rc = adjust_ptr_min_max_vals(env, insn,
-						     ptr_reg, src_reg);
-			if (rc == -EACCES && env->allow_ptr_leaks) {
-				/* unknown scalar += K */
-				__mark_reg_unknown(dst_reg);
-				return adjust_scalar_min_max_vals(
-						env, insn, dst_reg, off_reg);
-			}
-			return rc;
-		}
+		if (ptr_reg) /* pointer += K */
+			return adjust_ptr_min_max_vals(env, insn,
+						       ptr_reg, src_reg);
 	}
 
 	/* Got here implies adding two SCALAR_VALUEs */
@@ -2390,17 +2444,20 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 				return -EACCES;
 			}
 			mark_reg_unknown(env, regs, insn->dst_reg);
-			/* high 32 bits are known zero. */
-			regs[insn->dst_reg].var_off = tnum_cast(
-				regs[insn->dst_reg].var_off, 4);
-			__update_reg_bounds(&regs[insn->dst_reg]);
+			coerce_reg_to_size(&regs[insn->dst_reg], 4);
 		}
 	} else {
 		/* case: R = imm
 		 * remember the value we stored into this reg
 		 */
 		regs[insn->dst_reg].type = SCALAR_VALUE;
-		__mark_reg_known(regs + insn->dst_reg, insn->imm);
+		if (BPF_CLASS(insn->code) == BPF_ALU64) {
+			__mark_reg_known(regs + insn->dst_reg,
+					 insn->imm);
+		} else {
+			__mark_reg_known(regs + insn->dst_reg,
+					 (u32)insn->imm);
+		}
 	}
 
 } else if (opcode > BPF_END) {
@@ -3431,15 +3488,14 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
 			return range_within(rold, rcur) &&
 			       tnum_in(rold->var_off, rcur->var_off);
 		} else {
-			/* if we knew anything about the old value, we're not
-			 * equal, because we can't know anything about the
-			 * scalar value of the pointer in the new value.
+			/* We're trying to use a pointer in place of a scalar.
+			 * Even if the scalar was unbounded, this could lead to
+			 * pointer leaks because scalars are allowed to leak
+			 * while pointers are not. We could make this safe in
+			 * special cases if root is calling us, but it's
+			 * probably not worth the hassle.
 			 */
-			return rold->umin_value == 0 &&
-			       rold->umax_value == U64_MAX &&
-			       rold->smin_value == S64_MIN &&
-			       rold->smax_value == S64_MAX &&
-			       tnum_is_unknown(rold->var_off);
+			return false;
 		}
 	case PTR_TO_MAP_VALUE:
 		/* If the new min/max/var_off satisfy the old ones and
@@ -4407,6 +4463,35 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 		 */
 		insn->imm = 0;
 		insn->code = BPF_JMP | BPF_TAIL_CALL;
+
+		/* instead of changing every JIT dealing with tail_call
+		 * emit two extra insns:
+		 * if (index >= max_entries) goto out;
+		 * index &= array->index_mask;
+		 * to avoid out-of-bounds cpu speculation
+		 */
+		map_ptr = env->insn_aux_data[i + delta].map_ptr;
+		if (map_ptr == BPF_MAP_PTR_POISON) {
+			verbose(env, "tail_call abusing map_ptr\n");
+			return -EINVAL;
+		}
+		if (!map_ptr->unpriv_array)
+			continue;
+		insn_buf[0] = BPF_JMP_IMM(BPF_JGE, BPF_REG_3,
+					  map_ptr->max_entries, 2);
+		insn_buf[1] = BPF_ALU32_IMM(BPF_AND, BPF_REG_3,
+					    container_of(map_ptr,
+							 struct bpf_array,
+							 map)->index_mask);
+		insn_buf[2] = *insn;
+		cnt = 3;
+		new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+		if (!new_prog)
+			return -ENOMEM;
+
+		delta += cnt - 1;
+		env->prog = prog = new_prog;
+		insn = new_prog->insnsi + i + delta;
 		continue;
 	}
 
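The two patched-in instructions implement the Spectre-style mitigation described in the comment: even if the JGE bounds check is bypassed speculatively, ANDing the index with index_mask keeps any speculative load inside the array's allocation. A userspace model of the masking idea (pick_index_mask is illustrative; the kernel derives index_mask from roundup_pow_of_two(max_entries) and sizes the unprivileged array's allocation to match):

#include <stdint.h>
#include <stdio.h>

/* all-ones mask up to the next power of two >= max_entries */
static uint32_t pick_index_mask(uint32_t max_entries)
{
	uint32_t n = 1;

	while (n < max_entries)
		n <<= 1;
	return n - 1;
}

int main(void)
{
	uint32_t mask = pick_index_mask(5);	/* 0x7 */

	for (uint32_t idx = 0; idx < 10; idx++)
		printf("idx %u -> %u\n", idx, idx & mask);
	/* every masked index is < 8, i.e. within the rounded-up
	 * allocation, even for indexes the bounds check would reject
	 */
	return 0;
}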