aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2017-12-20 23:10:29 -0500
committerDavid S. Miller <davem@davemloft.net>2017-12-20 23:10:29 -0500
commit8b6ca2bf5a9b25369d7ec821ca051246558fbee0 (patch)
treec7a9ca626644ea705d1ee28c227bef159b0003b7
parentb4681c2829e24943aadd1a7bb3a30d41d0a20050 (diff)
parent82abbf8d2fc46d79611ab58daa7c608df14bb3ee (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf
Daniel Borkmann says: ==================== pull-request: bpf 2017-12-21 The following pull-request contains BPF updates for your *net* tree. The main changes are: 1) Fix multiple security issues in the BPF verifier mostly related to the value and min/max bounds tracking rework in 4.14. Issues range from incorrect bounds calculation in some BPF_RSH cases, to improper sign extension and reg size handling on 32 bit ALU ops, missing strict alignment checks on stack pointers, and several others that got fixed, from Jann, Alexei and Edward. 2) Fix various build failures in BPF selftests on sparc64. More specifically, librt needed to be added to the libs to link against and few format string fixups for sizeof, from David. 3) Fix one last remaining issue from BPF selftest build that was still occurring on s390x from the asm/bpf_perf_event.h include which could not find the asm/ptrace.h copy, from Hendrik. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--include/linux/bpf_verifier.h4
-rw-r--r--kernel/bpf/verifier.c277
-rw-r--r--tools/arch/s390/include/uapi/asm/bpf_perf_event.h2
-rw-r--r--tools/testing/selftests/bpf/Makefile2
-rw-r--r--tools/testing/selftests/bpf/test_progs.c8
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c605
6 files changed, 730 insertions, 168 deletions
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index c561b986bab0..1632bb13ad8a 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -15,11 +15,11 @@
15 * In practice this is far bigger than any realistic pointer offset; this limit 15 * In practice this is far bigger than any realistic pointer offset; this limit
16 * ensures that umax_value + (int)off + (int)size cannot overflow a u64. 16 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
17 */ 17 */
18#define BPF_MAX_VAR_OFF (1ULL << 31) 18#define BPF_MAX_VAR_OFF (1 << 29)
19/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures 19/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
20 * that converting umax_value to int cannot overflow. 20 * that converting umax_value to int cannot overflow.
21 */ 21 */
22#define BPF_MAX_VAR_SIZ INT_MAX 22#define BPF_MAX_VAR_SIZ (1 << 29)
23 23
24/* Liveness marks, used for registers and spilled-regs (in stack slots). 24/* Liveness marks, used for registers and spilled-regs (in stack slots).
25 * Read marks propagate upwards until they find a write mark; they record that 25 * Read marks propagate upwards until they find a write mark; they record that
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index e39b01317b6f..04b24876cd23 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1059,6 +1059,11 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
1059 break; 1059 break;
1060 case PTR_TO_STACK: 1060 case PTR_TO_STACK:
1061 pointer_desc = "stack "; 1061 pointer_desc = "stack ";
1062 /* The stack spill tracking logic in check_stack_write()
1063 * and check_stack_read() relies on stack accesses being
1064 * aligned.
1065 */
1066 strict = true;
1062 break; 1067 break;
1063 default: 1068 default:
1064 break; 1069 break;
@@ -1067,6 +1072,29 @@ static int check_ptr_alignment(struct bpf_verifier_env *env,
1067 strict); 1072 strict);
1068} 1073}
1069 1074
1075/* truncate register to smaller size (in bytes)
1076 * must be called with size < BPF_REG_SIZE
1077 */
1078static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
1079{
1080 u64 mask;
1081
1082 /* clear high bits in bit representation */
1083 reg->var_off = tnum_cast(reg->var_off, size);
1084
1085 /* fix arithmetic bounds */
1086 mask = ((u64)1 << (size * 8)) - 1;
1087 if ((reg->umin_value & ~mask) == (reg->umax_value & ~mask)) {
1088 reg->umin_value &= mask;
1089 reg->umax_value &= mask;
1090 } else {
1091 reg->umin_value = 0;
1092 reg->umax_value = mask;
1093 }
1094 reg->smin_value = reg->umin_value;
1095 reg->smax_value = reg->umax_value;
1096}
1097
1070/* check whether memory at (regno + off) is accessible for t = (read | write) 1098/* check whether memory at (regno + off) is accessible for t = (read | write)
1071 * if t==write, value_regno is a register which value is stored into memory 1099 * if t==write, value_regno is a register which value is stored into memory
1072 * if t==read, value_regno is a register which will receive the value from memory 1100 * if t==read, value_regno is a register which will receive the value from memory
@@ -1200,9 +1228,7 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
1200 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ && 1228 if (!err && size < BPF_REG_SIZE && value_regno >= 0 && t == BPF_READ &&
1201 regs[value_regno].type == SCALAR_VALUE) { 1229 regs[value_regno].type == SCALAR_VALUE) {
1202 /* b/h/w load zero-extends, mark upper bits as known 0 */ 1230 /* b/h/w load zero-extends, mark upper bits as known 0 */
1203 regs[value_regno].var_off = 1231 coerce_reg_to_size(&regs[value_regno], size);
1204 tnum_cast(regs[value_regno].var_off, size);
1205 __update_reg_bounds(&regs[value_regno]);
1206 } 1232 }
1207 return err; 1233 return err;
1208} 1234}
@@ -1282,6 +1308,7 @@ static int check_stack_boundary(struct bpf_verifier_env *env, int regno,
1282 tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off); 1308 tnum_strn(tn_buf, sizeof(tn_buf), regs[regno].var_off);
1283 verbose(env, "invalid variable stack read R%d var_off=%s\n", 1309 verbose(env, "invalid variable stack read R%d var_off=%s\n",
1284 regno, tn_buf); 1310 regno, tn_buf);
1311 return -EACCES;
1285 } 1312 }
1286 off = regs[regno].off + regs[regno].var_off.value; 1313 off = regs[regno].off + regs[regno].var_off.value;
1287 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 || 1314 if (off >= 0 || off < -MAX_BPF_STACK || off + access_size > 0 ||
@@ -1772,14 +1799,6 @@ static int check_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
1772 return 0; 1799 return 0;
1773} 1800}
1774 1801
1775static void coerce_reg_to_32(struct bpf_reg_state *reg)
1776{
1777 /* clear high 32 bits */
1778 reg->var_off = tnum_cast(reg->var_off, 4);
1779 /* Update bounds */
1780 __update_reg_bounds(reg);
1781}
1782
1783static bool signed_add_overflows(s64 a, s64 b) 1802static bool signed_add_overflows(s64 a, s64 b)
1784{ 1803{
1785 /* Do the add in u64, where overflow is well-defined */ 1804 /* Do the add in u64, where overflow is well-defined */
@@ -1800,6 +1819,41 @@ static bool signed_sub_overflows(s64 a, s64 b)
1800 return res > a; 1819 return res > a;
1801} 1820}
1802 1821
1822static bool check_reg_sane_offset(struct bpf_verifier_env *env,
1823 const struct bpf_reg_state *reg,
1824 enum bpf_reg_type type)
1825{
1826 bool known = tnum_is_const(reg->var_off);
1827 s64 val = reg->var_off.value;
1828 s64 smin = reg->smin_value;
1829
1830 if (known && (val >= BPF_MAX_VAR_OFF || val <= -BPF_MAX_VAR_OFF)) {
1831 verbose(env, "math between %s pointer and %lld is not allowed\n",
1832 reg_type_str[type], val);
1833 return false;
1834 }
1835
1836 if (reg->off >= BPF_MAX_VAR_OFF || reg->off <= -BPF_MAX_VAR_OFF) {
1837 verbose(env, "%s pointer offset %d is not allowed\n",
1838 reg_type_str[type], reg->off);
1839 return false;
1840 }
1841
1842 if (smin == S64_MIN) {
1843 verbose(env, "math between %s pointer and register with unbounded min value is not allowed\n",
1844 reg_type_str[type]);
1845 return false;
1846 }
1847
1848 if (smin >= BPF_MAX_VAR_OFF || smin <= -BPF_MAX_VAR_OFF) {
1849 verbose(env, "value %lld makes %s pointer be out of bounds\n",
1850 smin, reg_type_str[type]);
1851 return false;
1852 }
1853
1854 return true;
1855}
1856
1803/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off. 1857/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
1804 * Caller should also handle BPF_MOV case separately. 1858 * Caller should also handle BPF_MOV case separately.
1805 * If we return -EACCES, caller may want to try again treating pointer as a 1859 * If we return -EACCES, caller may want to try again treating pointer as a
@@ -1836,29 +1890,25 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
1836 1890
1837 if (BPF_CLASS(insn->code) != BPF_ALU64) { 1891 if (BPF_CLASS(insn->code) != BPF_ALU64) {
1838 /* 32-bit ALU ops on pointers produce (meaningless) scalars */ 1892 /* 32-bit ALU ops on pointers produce (meaningless) scalars */
1839 if (!env->allow_ptr_leaks) 1893 verbose(env,
1840 verbose(env, 1894 "R%d 32-bit pointer arithmetic prohibited\n",
1841 "R%d 32-bit pointer arithmetic prohibited\n", 1895 dst);
1842 dst);
1843 return -EACCES; 1896 return -EACCES;
1844 } 1897 }
1845 1898
1846 if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) { 1899 if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
1847 if (!env->allow_ptr_leaks) 1900 verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
1848 verbose(env, "R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n", 1901 dst);
1849 dst);
1850 return -EACCES; 1902 return -EACCES;
1851 } 1903 }
1852 if (ptr_reg->type == CONST_PTR_TO_MAP) { 1904 if (ptr_reg->type == CONST_PTR_TO_MAP) {
1853 if (!env->allow_ptr_leaks) 1905 verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
1854 verbose(env, "R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n", 1906 dst);
1855 dst);
1856 return -EACCES; 1907 return -EACCES;
1857 } 1908 }
1858 if (ptr_reg->type == PTR_TO_PACKET_END) { 1909 if (ptr_reg->type == PTR_TO_PACKET_END) {
1859 if (!env->allow_ptr_leaks) 1910 verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
1860 verbose(env, "R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n", 1911 dst);
1861 dst);
1862 return -EACCES; 1912 return -EACCES;
1863 } 1913 }
1864 1914
@@ -1868,6 +1918,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
1868 dst_reg->type = ptr_reg->type; 1918 dst_reg->type = ptr_reg->type;
1869 dst_reg->id = ptr_reg->id; 1919 dst_reg->id = ptr_reg->id;
1870 1920
1921 if (!check_reg_sane_offset(env, off_reg, ptr_reg->type) ||
1922 !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
1923 return -EINVAL;
1924
1871 switch (opcode) { 1925 switch (opcode) {
1872 case BPF_ADD: 1926 case BPF_ADD:
1873 /* We can take a fixed offset as long as it doesn't overflow 1927 /* We can take a fixed offset as long as it doesn't overflow
@@ -1921,9 +1975,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
1921 case BPF_SUB: 1975 case BPF_SUB:
1922 if (dst_reg == off_reg) { 1976 if (dst_reg == off_reg) {
1923 /* scalar -= pointer. Creates an unknown scalar */ 1977 /* scalar -= pointer. Creates an unknown scalar */
1924 if (!env->allow_ptr_leaks) 1978 verbose(env, "R%d tried to subtract pointer from scalar\n",
1925 verbose(env, "R%d tried to subtract pointer from scalar\n", 1979 dst);
1926 dst);
1927 return -EACCES; 1980 return -EACCES;
1928 } 1981 }
1929 /* We don't allow subtraction from FP, because (according to 1982 /* We don't allow subtraction from FP, because (according to
@@ -1931,9 +1984,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
1931 * be able to deal with it. 1984 * be able to deal with it.
1932 */ 1985 */
1933 if (ptr_reg->type == PTR_TO_STACK) { 1986 if (ptr_reg->type == PTR_TO_STACK) {
1934 if (!env->allow_ptr_leaks) 1987 verbose(env, "R%d subtraction from stack pointer prohibited\n",
1935 verbose(env, "R%d subtraction from stack pointer prohibited\n", 1988 dst);
1936 dst);
1937 return -EACCES; 1989 return -EACCES;
1938 } 1990 }
1939 if (known && (ptr_reg->off - smin_val == 1991 if (known && (ptr_reg->off - smin_val ==
@@ -1982,28 +2034,30 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
1982 case BPF_AND: 2034 case BPF_AND:
1983 case BPF_OR: 2035 case BPF_OR:
1984 case BPF_XOR: 2036 case BPF_XOR:
1985 /* bitwise ops on pointers are troublesome, prohibit for now. 2037 /* bitwise ops on pointers are troublesome, prohibit. */
1986 * (However, in principle we could allow some cases, e.g. 2038 verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
1987 * ptr &= ~3 which would reduce min_value by 3.) 2039 dst, bpf_alu_string[opcode >> 4]);
1988 */
1989 if (!env->allow_ptr_leaks)
1990 verbose(env, "R%d bitwise operator %s on pointer prohibited\n",
1991 dst, bpf_alu_string[opcode >> 4]);
1992 return -EACCES; 2040 return -EACCES;
1993 default: 2041 default:
1994 /* other operators (e.g. MUL,LSH) produce non-pointer results */ 2042 /* other operators (e.g. MUL,LSH) produce non-pointer results */
1995 if (!env->allow_ptr_leaks) 2043 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n",
1996 verbose(env, "R%d pointer arithmetic with %s operator prohibited\n", 2044 dst, bpf_alu_string[opcode >> 4]);
1997 dst, bpf_alu_string[opcode >> 4]);
1998 return -EACCES; 2045 return -EACCES;
1999 } 2046 }
2000 2047
2048 if (!check_reg_sane_offset(env, dst_reg, ptr_reg->type))
2049 return -EINVAL;
2050
2001 __update_reg_bounds(dst_reg); 2051 __update_reg_bounds(dst_reg);
2002 __reg_deduce_bounds(dst_reg); 2052 __reg_deduce_bounds(dst_reg);
2003 __reg_bound_offset(dst_reg); 2053 __reg_bound_offset(dst_reg);
2004 return 0; 2054 return 0;
2005} 2055}
2006 2056
2057/* WARNING: This function does calculations on 64-bit values, but the actual
2058 * execution may occur on 32-bit values. Therefore, things like bitshifts
2059 * need extra checks in the 32-bit case.
2060 */
2007static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env, 2061static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
2008 struct bpf_insn *insn, 2062 struct bpf_insn *insn,
2009 struct bpf_reg_state *dst_reg, 2063 struct bpf_reg_state *dst_reg,
@@ -2014,12 +2068,8 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
2014 bool src_known, dst_known; 2068 bool src_known, dst_known;
2015 s64 smin_val, smax_val; 2069 s64 smin_val, smax_val;
2016 u64 umin_val, umax_val; 2070 u64 umin_val, umax_val;
2071 u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
2017 2072
2018 if (BPF_CLASS(insn->code) != BPF_ALU64) {
2019 /* 32-bit ALU ops are (32,32)->64 */
2020 coerce_reg_to_32(dst_reg);
2021 coerce_reg_to_32(&src_reg);
2022 }
2023 smin_val = src_reg.smin_value; 2073 smin_val = src_reg.smin_value;
2024 smax_val = src_reg.smax_value; 2074 smax_val = src_reg.smax_value;
2025 umin_val = src_reg.umin_value; 2075 umin_val = src_reg.umin_value;
@@ -2027,6 +2077,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
2027 src_known = tnum_is_const(src_reg.var_off); 2077 src_known = tnum_is_const(src_reg.var_off);
2028 dst_known = tnum_is_const(dst_reg->var_off); 2078 dst_known = tnum_is_const(dst_reg->var_off);
2029 2079
2080 if (!src_known &&
2081 opcode != BPF_ADD && opcode != BPF_SUB && opcode != BPF_AND) {
2082 __mark_reg_unknown(dst_reg);
2083 return 0;
2084 }
2085
2030 switch (opcode) { 2086 switch (opcode) {
2031 case BPF_ADD: 2087 case BPF_ADD:
2032 if (signed_add_overflows(dst_reg->smin_value, smin_val) || 2088 if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
@@ -2155,9 +2211,9 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
2155 __update_reg_bounds(dst_reg); 2211 __update_reg_bounds(dst_reg);
2156 break; 2212 break;
2157 case BPF_LSH: 2213 case BPF_LSH:
2158 if (umax_val > 63) { 2214 if (umax_val >= insn_bitness) {
2159 /* Shifts greater than 63 are undefined. This includes 2215 /* Shifts greater than 31 or 63 are undefined.
2160 * shifts by a negative number. 2216 * This includes shifts by a negative number.
2161 */ 2217 */
2162 mark_reg_unknown(env, regs, insn->dst_reg); 2218 mark_reg_unknown(env, regs, insn->dst_reg);
2163 break; 2219 break;
@@ -2183,27 +2239,29 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
2183 __update_reg_bounds(dst_reg); 2239 __update_reg_bounds(dst_reg);
2184 break; 2240 break;
2185 case BPF_RSH: 2241 case BPF_RSH:
2186 if (umax_val > 63) { 2242 if (umax_val >= insn_bitness) {
2187 /* Shifts greater than 63 are undefined. This includes 2243 /* Shifts greater than 31 or 63 are undefined.
2188 * shifts by a negative number. 2244 * This includes shifts by a negative number.
2189 */ 2245 */
2190 mark_reg_unknown(env, regs, insn->dst_reg); 2246 mark_reg_unknown(env, regs, insn->dst_reg);
2191 break; 2247 break;
2192 } 2248 }
2193 /* BPF_RSH is an unsigned shift, so make the appropriate casts */ 2249 /* BPF_RSH is an unsigned shift. If the value in dst_reg might
2194 if (dst_reg->smin_value < 0) { 2250 * be negative, then either:
2195 if (umin_val) { 2251 * 1) src_reg might be zero, so the sign bit of the result is
2196 /* Sign bit will be cleared */ 2252 * unknown, so we lose our signed bounds
2197 dst_reg->smin_value = 0; 2253 * 2) it's known negative, thus the unsigned bounds capture the
2198 } else { 2254 * signed bounds
2199 /* Lost sign bit information */ 2255 * 3) the signed bounds cross zero, so they tell us nothing
2200 dst_reg->smin_value = S64_MIN; 2256 * about the result
2201 dst_reg->smax_value = S64_MAX; 2257 * If the value in dst_reg is known nonnegative, then again the
2202 } 2258 * unsigned bounts capture the signed bounds.
2203 } else { 2259 * Thus, in all cases it suffices to blow away our signed bounds
2204 dst_reg->smin_value = 2260 * and rely on inferring new ones from the unsigned bounds and
2205 (u64)(dst_reg->smin_value) >> umax_val; 2261 * var_off of the result.
2206 } 2262 */
2263 dst_reg->smin_value = S64_MIN;
2264 dst_reg->smax_value = S64_MAX;
2207 if (src_known) 2265 if (src_known)
2208 dst_reg->var_off = tnum_rshift(dst_reg->var_off, 2266 dst_reg->var_off = tnum_rshift(dst_reg->var_off,
2209 umin_val); 2267 umin_val);
@@ -2219,6 +2277,12 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
2219 break; 2277 break;
2220 } 2278 }
2221 2279
2280 if (BPF_CLASS(insn->code) != BPF_ALU64) {
2281 /* 32-bit ALU ops are (32,32)->32 */
2282 coerce_reg_to_size(dst_reg, 4);
2283 coerce_reg_to_size(&src_reg, 4);
2284 }
2285
2222 __reg_deduce_bounds(dst_reg); 2286 __reg_deduce_bounds(dst_reg);
2223 __reg_bound_offset(dst_reg); 2287 __reg_bound_offset(dst_reg);
2224 return 0; 2288 return 0;
@@ -2233,7 +2297,6 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
2233 struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg; 2297 struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg;
2234 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0}; 2298 struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
2235 u8 opcode = BPF_OP(insn->code); 2299 u8 opcode = BPF_OP(insn->code);
2236 int rc;
2237 2300
2238 dst_reg = &regs[insn->dst_reg]; 2301 dst_reg = &regs[insn->dst_reg];
2239 src_reg = NULL; 2302 src_reg = NULL;
@@ -2244,43 +2307,29 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
2244 if (src_reg->type != SCALAR_VALUE) { 2307 if (src_reg->type != SCALAR_VALUE) {
2245 if (dst_reg->type != SCALAR_VALUE) { 2308 if (dst_reg->type != SCALAR_VALUE) {
2246 /* Combining two pointers by any ALU op yields 2309 /* Combining two pointers by any ALU op yields
2247 * an arbitrary scalar. 2310 * an arbitrary scalar. Disallow all math except
2311 * pointer subtraction
2248 */ 2312 */
2249 if (!env->allow_ptr_leaks) { 2313 if (opcode == BPF_SUB){
2250 verbose(env, "R%d pointer %s pointer prohibited\n", 2314 mark_reg_unknown(env, regs, insn->dst_reg);
2251 insn->dst_reg, 2315 return 0;
2252 bpf_alu_string[opcode >> 4]);
2253 return -EACCES;
2254 } 2316 }
2255 mark_reg_unknown(env, regs, insn->dst_reg); 2317 verbose(env, "R%d pointer %s pointer prohibited\n",
2256 return 0; 2318 insn->dst_reg,
2319 bpf_alu_string[opcode >> 4]);
2320 return -EACCES;
2257 } else { 2321 } else {
2258 /* scalar += pointer 2322 /* scalar += pointer
2259 * This is legal, but we have to reverse our 2323 * This is legal, but we have to reverse our
2260 * src/dest handling in computing the range 2324 * src/dest handling in computing the range
2261 */ 2325 */
2262 rc = adjust_ptr_min_max_vals(env, insn, 2326 return adjust_ptr_min_max_vals(env, insn,
2263 src_reg, dst_reg); 2327 src_reg, dst_reg);
2264 if (rc == -EACCES && env->allow_ptr_leaks) {
2265 /* scalar += unknown scalar */
2266 __mark_reg_unknown(&off_reg);
2267 return adjust_scalar_min_max_vals(
2268 env, insn,
2269 dst_reg, off_reg);
2270 }
2271 return rc;
2272 } 2328 }
2273 } else if (ptr_reg) { 2329 } else if (ptr_reg) {
2274 /* pointer += scalar */ 2330 /* pointer += scalar */
2275 rc = adjust_ptr_min_max_vals(env, insn, 2331 return adjust_ptr_min_max_vals(env, insn,
2276 dst_reg, src_reg); 2332 dst_reg, src_reg);
2277 if (rc == -EACCES && env->allow_ptr_leaks) {
2278 /* unknown scalar += scalar */
2279 __mark_reg_unknown(dst_reg);
2280 return adjust_scalar_min_max_vals(
2281 env, insn, dst_reg, *src_reg);
2282 }
2283 return rc;
2284 } 2333 }
2285 } else { 2334 } else {
2286 /* Pretend the src is a reg with a known value, since we only 2335 /* Pretend the src is a reg with a known value, since we only
@@ -2289,17 +2338,9 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
2289 off_reg.type = SCALAR_VALUE; 2338 off_reg.type = SCALAR_VALUE;
2290 __mark_reg_known(&off_reg, insn->imm); 2339 __mark_reg_known(&off_reg, insn->imm);
2291 src_reg = &off_reg; 2340 src_reg = &off_reg;
2292 if (ptr_reg) { /* pointer += K */ 2341 if (ptr_reg) /* pointer += K */
2293 rc = adjust_ptr_min_max_vals(env, insn, 2342 return adjust_ptr_min_max_vals(env, insn,
2294 ptr_reg, src_reg); 2343 ptr_reg, src_reg);
2295 if (rc == -EACCES && env->allow_ptr_leaks) {
2296 /* unknown scalar += K */
2297 __mark_reg_unknown(dst_reg);
2298 return adjust_scalar_min_max_vals(
2299 env, insn, dst_reg, off_reg);
2300 }
2301 return rc;
2302 }
2303 } 2344 }
2304 2345
2305 /* Got here implies adding two SCALAR_VALUEs */ 2346 /* Got here implies adding two SCALAR_VALUEs */
@@ -2396,17 +2437,20 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
2396 return -EACCES; 2437 return -EACCES;
2397 } 2438 }
2398 mark_reg_unknown(env, regs, insn->dst_reg); 2439 mark_reg_unknown(env, regs, insn->dst_reg);
2399 /* high 32 bits are known zero. */ 2440 coerce_reg_to_size(&regs[insn->dst_reg], 4);
2400 regs[insn->dst_reg].var_off = tnum_cast(
2401 regs[insn->dst_reg].var_off, 4);
2402 __update_reg_bounds(&regs[insn->dst_reg]);
2403 } 2441 }
2404 } else { 2442 } else {
2405 /* case: R = imm 2443 /* case: R = imm
2406 * remember the value we stored into this reg 2444 * remember the value we stored into this reg
2407 */ 2445 */
2408 regs[insn->dst_reg].type = SCALAR_VALUE; 2446 regs[insn->dst_reg].type = SCALAR_VALUE;
2409 __mark_reg_known(regs + insn->dst_reg, insn->imm); 2447 if (BPF_CLASS(insn->code) == BPF_ALU64) {
2448 __mark_reg_known(regs + insn->dst_reg,
2449 insn->imm);
2450 } else {
2451 __mark_reg_known(regs + insn->dst_reg,
2452 (u32)insn->imm);
2453 }
2410 } 2454 }
2411 2455
2412 } else if (opcode > BPF_END) { 2456 } else if (opcode > BPF_END) {
@@ -3437,15 +3481,14 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
3437 return range_within(rold, rcur) && 3481 return range_within(rold, rcur) &&
3438 tnum_in(rold->var_off, rcur->var_off); 3482 tnum_in(rold->var_off, rcur->var_off);
3439 } else { 3483 } else {
3440 /* if we knew anything about the old value, we're not 3484 /* We're trying to use a pointer in place of a scalar.
3441 * equal, because we can't know anything about the 3485 * Even if the scalar was unbounded, this could lead to
3442 * scalar value of the pointer in the new value. 3486 * pointer leaks because scalars are allowed to leak
3487 * while pointers are not. We could make this safe in
3488 * special cases if root is calling us, but it's
3489 * probably not worth the hassle.
3443 */ 3490 */
3444 return rold->umin_value == 0 && 3491 return false;
3445 rold->umax_value == U64_MAX &&
3446 rold->smin_value == S64_MIN &&
3447 rold->smax_value == S64_MAX &&
3448 tnum_is_unknown(rold->var_off);
3449 } 3492 }
3450 case PTR_TO_MAP_VALUE: 3493 case PTR_TO_MAP_VALUE:
3451 /* If the new min/max/var_off satisfy the old ones and 3494 /* If the new min/max/var_off satisfy the old ones and
diff --git a/tools/arch/s390/include/uapi/asm/bpf_perf_event.h b/tools/arch/s390/include/uapi/asm/bpf_perf_event.h
index cefe7c7cd4f6..0a8e37a519f2 100644
--- a/tools/arch/s390/include/uapi/asm/bpf_perf_event.h
+++ b/tools/arch/s390/include/uapi/asm/bpf_perf_event.h
@@ -2,7 +2,7 @@
2#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__ 2#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
3#define _UAPI__ASM_BPF_PERF_EVENT_H__ 3#define _UAPI__ASM_BPF_PERF_EVENT_H__
4 4
5#include <asm/ptrace.h> 5#include "ptrace.h"
6 6
7typedef user_pt_regs bpf_user_pt_regs_t; 7typedef user_pt_regs bpf_user_pt_regs_t;
8 8
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 792af7c3b74f..05fc4e2e7b3a 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -11,7 +11,7 @@ ifneq ($(wildcard $(GENHDR)),)
11endif 11endif
12 12
13CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include 13CFLAGS += -Wall -O2 -I$(APIDIR) -I$(LIBDIR) -I$(GENDIR) $(GENFLAGS) -I../../../include
14LDLIBS += -lcap -lelf 14LDLIBS += -lcap -lelf -lrt
15 15
16TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \ 16TEST_GEN_PROGS = test_verifier test_tag test_maps test_lru_map test_lpm_map test_progs \
17 test_align test_verifier_log test_dev_cgroup 17 test_align test_verifier_log test_dev_cgroup
diff --git a/tools/testing/selftests/bpf/test_progs.c b/tools/testing/selftests/bpf/test_progs.c
index 69427531408d..6761be18a91f 100644
--- a/tools/testing/selftests/bpf/test_progs.c
+++ b/tools/testing/selftests/bpf/test_progs.c
@@ -351,7 +351,7 @@ static void test_bpf_obj_id(void)
351 info_len != sizeof(struct bpf_map_info) || 351 info_len != sizeof(struct bpf_map_info) ||
352 strcmp((char *)map_infos[i].name, expected_map_name), 352 strcmp((char *)map_infos[i].name, expected_map_name),
353 "get-map-info(fd)", 353 "get-map-info(fd)",
354 "err %d errno %d type %d(%d) info_len %u(%lu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n", 354 "err %d errno %d type %d(%d) info_len %u(%Zu) key_size %u value_size %u max_entries %u map_flags %X name %s(%s)\n",
355 err, errno, 355 err, errno,
356 map_infos[i].type, BPF_MAP_TYPE_ARRAY, 356 map_infos[i].type, BPF_MAP_TYPE_ARRAY,
357 info_len, sizeof(struct bpf_map_info), 357 info_len, sizeof(struct bpf_map_info),
@@ -395,7 +395,7 @@ static void test_bpf_obj_id(void)
395 *(int *)prog_infos[i].map_ids != map_infos[i].id || 395 *(int *)prog_infos[i].map_ids != map_infos[i].id ||
396 strcmp((char *)prog_infos[i].name, expected_prog_name), 396 strcmp((char *)prog_infos[i].name, expected_prog_name),
397 "get-prog-info(fd)", 397 "get-prog-info(fd)",
398 "err %d errno %d i %d type %d(%d) info_len %u(%lu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n", 398 "err %d errno %d i %d type %d(%d) info_len %u(%Zu) jit_enabled %d jited_prog_len %u xlated_prog_len %u jited_prog %d xlated_prog %d load_time %lu(%lu) uid %u(%u) nr_map_ids %u(%u) map_id %u(%u) name %s(%s)\n",
399 err, errno, i, 399 err, errno, i,
400 prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER, 400 prog_infos[i].type, BPF_PROG_TYPE_SOCKET_FILTER,
401 info_len, sizeof(struct bpf_prog_info), 401 info_len, sizeof(struct bpf_prog_info),
@@ -463,7 +463,7 @@ static void test_bpf_obj_id(void)
463 memcmp(&prog_info, &prog_infos[i], info_len) || 463 memcmp(&prog_info, &prog_infos[i], info_len) ||
464 *(int *)prog_info.map_ids != saved_map_id, 464 *(int *)prog_info.map_ids != saved_map_id,
465 "get-prog-info(next_id->fd)", 465 "get-prog-info(next_id->fd)",
466 "err %d errno %d info_len %u(%lu) memcmp %d map_id %u(%u)\n", 466 "err %d errno %d info_len %u(%Zu) memcmp %d map_id %u(%u)\n",
467 err, errno, info_len, sizeof(struct bpf_prog_info), 467 err, errno, info_len, sizeof(struct bpf_prog_info),
468 memcmp(&prog_info, &prog_infos[i], info_len), 468 memcmp(&prog_info, &prog_infos[i], info_len),
469 *(int *)prog_info.map_ids, saved_map_id); 469 *(int *)prog_info.map_ids, saved_map_id);
@@ -509,7 +509,7 @@ static void test_bpf_obj_id(void)
509 memcmp(&map_info, &map_infos[i], info_len) || 509 memcmp(&map_info, &map_infos[i], info_len) ||
510 array_value != array_magic_value, 510 array_value != array_magic_value,
511 "check get-map-info(next_id->fd)", 511 "check get-map-info(next_id->fd)",
512 "err %d errno %d info_len %u(%lu) memcmp %d array_value %llu(%llu)\n", 512 "err %d errno %d info_len %u(%Zu) memcmp %d array_value %llu(%llu)\n",
513 err, errno, info_len, sizeof(struct bpf_map_info), 513 err, errno, info_len, sizeof(struct bpf_map_info),
514 memcmp(&map_info, &map_infos[i], info_len), 514 memcmp(&map_info, &map_infos[i], info_len),
515 array_value, array_magic_value); 515 array_value, array_magic_value);
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index b03ecfd7185b..b51017404c62 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -422,9 +422,7 @@ static struct bpf_test tests[] = {
422 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0), 422 BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
423 BPF_EXIT_INSN(), 423 BPF_EXIT_INSN(),
424 }, 424 },
425 .errstr_unpriv = "R1 subtraction from stack pointer", 425 .errstr = "R1 subtraction from stack pointer",
426 .result_unpriv = REJECT,
427 .errstr = "R1 invalid mem access",
428 .result = REJECT, 426 .result = REJECT,
429 }, 427 },
430 { 428 {
@@ -606,7 +604,6 @@ static struct bpf_test tests[] = {
606 }, 604 },
607 .errstr = "misaligned stack access", 605 .errstr = "misaligned stack access",
608 .result = REJECT, 606 .result = REJECT,
609 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
610 }, 607 },
611 { 608 {
612 "invalid map_fd for function call", 609 "invalid map_fd for function call",
@@ -1797,7 +1794,6 @@ static struct bpf_test tests[] = {
1797 }, 1794 },
1798 .result = REJECT, 1795 .result = REJECT,
1799 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8", 1796 .errstr = "misaligned stack access off (0x0; 0x0)+-8+2 size 8",
1800 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1801 }, 1797 },
1802 { 1798 {
1803 "PTR_TO_STACK store/load - bad alignment on reg", 1799 "PTR_TO_STACK store/load - bad alignment on reg",
@@ -1810,7 +1806,6 @@ static struct bpf_test tests[] = {
1810 }, 1806 },
1811 .result = REJECT, 1807 .result = REJECT,
1812 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8", 1808 .errstr = "misaligned stack access off (0x0; 0x0)+-10+8 size 8",
1813 .flags = F_LOAD_WITH_STRICT_ALIGNMENT,
1814 }, 1809 },
1815 { 1810 {
1816 "PTR_TO_STACK store/load - out of bounds low", 1811 "PTR_TO_STACK store/load - out of bounds low",
@@ -1862,9 +1857,8 @@ static struct bpf_test tests[] = {
1862 BPF_MOV64_IMM(BPF_REG_0, 0), 1857 BPF_MOV64_IMM(BPF_REG_0, 0),
1863 BPF_EXIT_INSN(), 1858 BPF_EXIT_INSN(),
1864 }, 1859 },
1865 .result = ACCEPT, 1860 .result = REJECT,
1866 .result_unpriv = REJECT, 1861 .errstr = "R1 pointer += pointer",
1867 .errstr_unpriv = "R1 pointer += pointer",
1868 }, 1862 },
1869 { 1863 {
1870 "unpriv: neg pointer", 1864 "unpriv: neg pointer",
@@ -2592,7 +2586,8 @@ static struct bpf_test tests[] = {
2592 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 2586 BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
2593 offsetof(struct __sk_buff, data)), 2587 offsetof(struct __sk_buff, data)),
2594 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4), 2588 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
2595 BPF_MOV64_REG(BPF_REG_2, BPF_REG_1), 2589 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
2590 offsetof(struct __sk_buff, len)),
2596 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49), 2591 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
2597 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49), 2592 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
2598 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2), 2593 BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
@@ -2899,7 +2894,7 @@ static struct bpf_test tests[] = {
2899 BPF_MOV64_IMM(BPF_REG_0, 0), 2894 BPF_MOV64_IMM(BPF_REG_0, 0),
2900 BPF_EXIT_INSN(), 2895 BPF_EXIT_INSN(),
2901 }, 2896 },
2902 .errstr = "invalid access to packet", 2897 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
2903 .result = REJECT, 2898 .result = REJECT,
2904 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 2899 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
2905 }, 2900 },
@@ -3885,9 +3880,7 @@ static struct bpf_test tests[] = {
3885 BPF_EXIT_INSN(), 3880 BPF_EXIT_INSN(),
3886 }, 3881 },
3887 .fixup_map2 = { 3, 11 }, 3882 .fixup_map2 = { 3, 11 },
3888 .errstr_unpriv = "R0 pointer += pointer", 3883 .errstr = "R0 pointer += pointer",
3889 .errstr = "R0 invalid mem access 'inv'",
3890 .result_unpriv = REJECT,
3891 .result = REJECT, 3884 .result = REJECT,
3892 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS, 3885 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
3893 }, 3886 },
@@ -3928,7 +3921,7 @@ static struct bpf_test tests[] = {
3928 BPF_EXIT_INSN(), 3921 BPF_EXIT_INSN(),
3929 }, 3922 },
3930 .fixup_map1 = { 4 }, 3923 .fixup_map1 = { 4 },
3931 .errstr = "R4 invalid mem access", 3924 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
3932 .result = REJECT, 3925 .result = REJECT,
3933 .prog_type = BPF_PROG_TYPE_SCHED_CLS 3926 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3934 }, 3927 },
@@ -3949,7 +3942,7 @@ static struct bpf_test tests[] = {
3949 BPF_EXIT_INSN(), 3942 BPF_EXIT_INSN(),
3950 }, 3943 },
3951 .fixup_map1 = { 4 }, 3944 .fixup_map1 = { 4 },
3952 .errstr = "R4 invalid mem access", 3945 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
3953 .result = REJECT, 3946 .result = REJECT,
3954 .prog_type = BPF_PROG_TYPE_SCHED_CLS 3947 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3955 }, 3948 },
@@ -3970,7 +3963,7 @@ static struct bpf_test tests[] = {
3970 BPF_EXIT_INSN(), 3963 BPF_EXIT_INSN(),
3971 }, 3964 },
3972 .fixup_map1 = { 4 }, 3965 .fixup_map1 = { 4 },
3973 .errstr = "R4 invalid mem access", 3966 .errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
3974 .result = REJECT, 3967 .result = REJECT,
3975 .prog_type = BPF_PROG_TYPE_SCHED_CLS 3968 .prog_type = BPF_PROG_TYPE_SCHED_CLS
3976 }, 3969 },
@@ -5195,10 +5188,8 @@ static struct bpf_test tests[] = {
5195 BPF_EXIT_INSN(), 5188 BPF_EXIT_INSN(),
5196 }, 5189 },
5197 .fixup_map2 = { 3 }, 5190 .fixup_map2 = { 3 },
5198 .errstr_unpriv = "R0 bitwise operator &= on pointer", 5191 .errstr = "R0 bitwise operator &= on pointer",
5199 .errstr = "invalid mem access 'inv'",
5200 .result = REJECT, 5192 .result = REJECT,
5201 .result_unpriv = REJECT,
5202 }, 5193 },
5203 { 5194 {
5204 "map element value illegal alu op, 2", 5195 "map element value illegal alu op, 2",
@@ -5214,10 +5205,8 @@ static struct bpf_test tests[] = {
5214 BPF_EXIT_INSN(), 5205 BPF_EXIT_INSN(),
5215 }, 5206 },
5216 .fixup_map2 = { 3 }, 5207 .fixup_map2 = { 3 },
5217 .errstr_unpriv = "R0 32-bit pointer arithmetic prohibited", 5208 .errstr = "R0 32-bit pointer arithmetic prohibited",
5218 .errstr = "invalid mem access 'inv'",
5219 .result = REJECT, 5209 .result = REJECT,
5220 .result_unpriv = REJECT,
5221 }, 5210 },
5222 { 5211 {
5223 "map element value illegal alu op, 3", 5212 "map element value illegal alu op, 3",
@@ -5233,10 +5222,8 @@ static struct bpf_test tests[] = {
5233 BPF_EXIT_INSN(), 5222 BPF_EXIT_INSN(),
5234 }, 5223 },
5235 .fixup_map2 = { 3 }, 5224 .fixup_map2 = { 3 },
5236 .errstr_unpriv = "R0 pointer arithmetic with /= operator", 5225 .errstr = "R0 pointer arithmetic with /= operator",
5237 .errstr = "invalid mem access 'inv'",
5238 .result = REJECT, 5226 .result = REJECT,
5239 .result_unpriv = REJECT,
5240 }, 5227 },
5241 { 5228 {
5242 "map element value illegal alu op, 4", 5229 "map element value illegal alu op, 4",
@@ -6019,8 +6006,7 @@ static struct bpf_test tests[] = {
6019 BPF_EXIT_INSN(), 6006 BPF_EXIT_INSN(),
6020 }, 6007 },
6021 .fixup_map_in_map = { 3 }, 6008 .fixup_map_in_map = { 3 },
6022 .errstr = "R1 type=inv expected=map_ptr", 6009 .errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
6023 .errstr_unpriv = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
6024 .result = REJECT, 6010 .result = REJECT,
6025 }, 6011 },
6026 { 6012 {
@@ -6324,7 +6310,7 @@ static struct bpf_test tests[] = {
6324 BPF_EXIT_INSN(), 6310 BPF_EXIT_INSN(),
6325 }, 6311 },
6326 .fixup_map1 = { 3 }, 6312 .fixup_map1 = { 3 },
6327 .errstr = "R0 min value is negative", 6313 .errstr = "unbounded min value",
6328 .result = REJECT, 6314 .result = REJECT,
6329 }, 6315 },
6330 { 6316 {
@@ -6348,7 +6334,7 @@ static struct bpf_test tests[] = {
6348 BPF_EXIT_INSN(), 6334 BPF_EXIT_INSN(),
6349 }, 6335 },
6350 .fixup_map1 = { 3 }, 6336 .fixup_map1 = { 3 },
6351 .errstr = "R0 min value is negative", 6337 .errstr = "unbounded min value",
6352 .result = REJECT, 6338 .result = REJECT,
6353 }, 6339 },
6354 { 6340 {
@@ -6374,7 +6360,7 @@ static struct bpf_test tests[] = {
6374 BPF_EXIT_INSN(), 6360 BPF_EXIT_INSN(),
6375 }, 6361 },
6376 .fixup_map1 = { 3 }, 6362 .fixup_map1 = { 3 },
6377 .errstr = "R8 invalid mem access 'inv'", 6363 .errstr = "unbounded min value",
6378 .result = REJECT, 6364 .result = REJECT,
6379 }, 6365 },
6380 { 6366 {
@@ -6399,7 +6385,7 @@ static struct bpf_test tests[] = {
6399 BPF_EXIT_INSN(), 6385 BPF_EXIT_INSN(),
6400 }, 6386 },
6401 .fixup_map1 = { 3 }, 6387 .fixup_map1 = { 3 },
6402 .errstr = "R8 invalid mem access 'inv'", 6388 .errstr = "unbounded min value",
6403 .result = REJECT, 6389 .result = REJECT,
6404 }, 6390 },
6405 { 6391 {
@@ -6447,7 +6433,7 @@ static struct bpf_test tests[] = {
6447 BPF_EXIT_INSN(), 6433 BPF_EXIT_INSN(),
6448 }, 6434 },
6449 .fixup_map1 = { 3 }, 6435 .fixup_map1 = { 3 },
6450 .errstr = "R0 min value is negative", 6436 .errstr = "unbounded min value",
6451 .result = REJECT, 6437 .result = REJECT,
6452 }, 6438 },
6453 { 6439 {
@@ -6518,7 +6504,7 @@ static struct bpf_test tests[] = {
6518 BPF_EXIT_INSN(), 6504 BPF_EXIT_INSN(),
6519 }, 6505 },
6520 .fixup_map1 = { 3 }, 6506 .fixup_map1 = { 3 },
6521 .errstr = "R0 min value is negative", 6507 .errstr = "unbounded min value",
6522 .result = REJECT, 6508 .result = REJECT,
6523 }, 6509 },
6524 { 6510 {
@@ -6569,7 +6555,7 @@ static struct bpf_test tests[] = {
6569 BPF_EXIT_INSN(), 6555 BPF_EXIT_INSN(),
6570 }, 6556 },
6571 .fixup_map1 = { 3 }, 6557 .fixup_map1 = { 3 },
6572 .errstr = "R0 min value is negative", 6558 .errstr = "unbounded min value",
6573 .result = REJECT, 6559 .result = REJECT,
6574 }, 6560 },
6575 { 6561 {
@@ -6596,7 +6582,7 @@ static struct bpf_test tests[] = {
6596 BPF_EXIT_INSN(), 6582 BPF_EXIT_INSN(),
6597 }, 6583 },
6598 .fixup_map1 = { 3 }, 6584 .fixup_map1 = { 3 },
6599 .errstr = "R0 min value is negative", 6585 .errstr = "unbounded min value",
6600 .result = REJECT, 6586 .result = REJECT,
6601 }, 6587 },
6602 { 6588 {
@@ -6622,7 +6608,7 @@ static struct bpf_test tests[] = {
6622 BPF_EXIT_INSN(), 6608 BPF_EXIT_INSN(),
6623 }, 6609 },
6624 .fixup_map1 = { 3 }, 6610 .fixup_map1 = { 3 },
6625 .errstr = "R0 min value is negative", 6611 .errstr = "unbounded min value",
6626 .result = REJECT, 6612 .result = REJECT,
6627 }, 6613 },
6628 { 6614 {
@@ -6651,7 +6637,7 @@ static struct bpf_test tests[] = {
6651 BPF_EXIT_INSN(), 6637 BPF_EXIT_INSN(),
6652 }, 6638 },
6653 .fixup_map1 = { 3 }, 6639 .fixup_map1 = { 3 },
6654 .errstr = "R0 min value is negative", 6640 .errstr = "unbounded min value",
6655 .result = REJECT, 6641 .result = REJECT,
6656 }, 6642 },
6657 { 6643 {
@@ -6681,7 +6667,7 @@ static struct bpf_test tests[] = {
6681 BPF_JMP_IMM(BPF_JA, 0, 0, -7), 6667 BPF_JMP_IMM(BPF_JA, 0, 0, -7),
6682 }, 6668 },
6683 .fixup_map1 = { 4 }, 6669 .fixup_map1 = { 4 },
6684 .errstr = "R0 min value is negative", 6670 .errstr = "unbounded min value",
6685 .result = REJECT, 6671 .result = REJECT,
6686 }, 6672 },
6687 { 6673 {
@@ -6709,8 +6695,7 @@ static struct bpf_test tests[] = {
6709 BPF_EXIT_INSN(), 6695 BPF_EXIT_INSN(),
6710 }, 6696 },
6711 .fixup_map1 = { 3 }, 6697 .fixup_map1 = { 3 },
6712 .errstr_unpriv = "R0 pointer comparison prohibited", 6698 .errstr = "unbounded min value",
6713 .errstr = "R0 min value is negative",
6714 .result = REJECT, 6699 .result = REJECT,
6715 .result_unpriv = REJECT, 6700 .result_unpriv = REJECT,
6716 }, 6701 },
@@ -6766,6 +6751,462 @@ static struct bpf_test tests[] = {
6766 .result = REJECT, 6751 .result = REJECT,
6767 }, 6752 },
6768 { 6753 {
6754 "bounds check based on zero-extended MOV",
6755 .insns = {
6756 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6757 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6758 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6759 BPF_LD_MAP_FD(BPF_REG_1, 0),
6760 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6761 BPF_FUNC_map_lookup_elem),
6762 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6763 /* r2 = 0x0000'0000'ffff'ffff */
6764 BPF_MOV32_IMM(BPF_REG_2, 0xffffffff),
6765 /* r2 = 0 */
6766 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
6767 /* no-op */
6768 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
6769 /* access at offset 0 */
6770 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6771 /* exit */
6772 BPF_MOV64_IMM(BPF_REG_0, 0),
6773 BPF_EXIT_INSN(),
6774 },
6775 .fixup_map1 = { 3 },
6776 .result = ACCEPT
6777 },
6778 {
6779 "bounds check based on sign-extended MOV. test1",
6780 .insns = {
6781 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6782 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6783 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6784 BPF_LD_MAP_FD(BPF_REG_1, 0),
6785 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6786 BPF_FUNC_map_lookup_elem),
6787 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6788 /* r2 = 0xffff'ffff'ffff'ffff */
6789 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
6790 /* r2 = 0xffff'ffff */
6791 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 32),
6792 /* r0 = <oob pointer> */
6793 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
6794 /* access to OOB pointer */
6795 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6796 /* exit */
6797 BPF_MOV64_IMM(BPF_REG_0, 0),
6798 BPF_EXIT_INSN(),
6799 },
6800 .fixup_map1 = { 3 },
6801 .errstr = "map_value pointer and 4294967295",
6802 .result = REJECT
6803 },
6804 {
6805 "bounds check based on sign-extended MOV. test2",
6806 .insns = {
6807 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6808 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6809 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6810 BPF_LD_MAP_FD(BPF_REG_1, 0),
6811 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6812 BPF_FUNC_map_lookup_elem),
6813 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6814 /* r2 = 0xffff'ffff'ffff'ffff */
6815 BPF_MOV64_IMM(BPF_REG_2, 0xffffffff),
6816 /* r2 = 0xfff'ffff */
6817 BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 36),
6818 /* r0 = <oob pointer> */
6819 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_2),
6820 /* access to OOB pointer */
6821 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6822 /* exit */
6823 BPF_MOV64_IMM(BPF_REG_0, 0),
6824 BPF_EXIT_INSN(),
6825 },
6826 .fixup_map1 = { 3 },
6827 .errstr = "R0 min value is outside of the array range",
6828 .result = REJECT
6829 },
6830 {
6831 "bounds check based on reg_off + var_off + insn_off. test1",
6832 .insns = {
6833 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
6834 offsetof(struct __sk_buff, mark)),
6835 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6836 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6837 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6838 BPF_LD_MAP_FD(BPF_REG_1, 0),
6839 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6840 BPF_FUNC_map_lookup_elem),
6841 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6842 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
6843 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 29) - 1),
6844 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
6845 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
6846 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
6847 BPF_MOV64_IMM(BPF_REG_0, 0),
6848 BPF_EXIT_INSN(),
6849 },
6850 .fixup_map1 = { 4 },
6851 .errstr = "value_size=8 off=1073741825",
6852 .result = REJECT,
6853 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6854 },
6855 {
6856 "bounds check based on reg_off + var_off + insn_off. test2",
6857 .insns = {
6858 BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
6859 offsetof(struct __sk_buff, mark)),
6860 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6861 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6862 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6863 BPF_LD_MAP_FD(BPF_REG_1, 0),
6864 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6865 BPF_FUNC_map_lookup_elem),
6866 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
6867 BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 1),
6868 BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, (1 << 30) - 1),
6869 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_6),
6870 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, (1 << 29) - 1),
6871 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 3),
6872 BPF_MOV64_IMM(BPF_REG_0, 0),
6873 BPF_EXIT_INSN(),
6874 },
6875 .fixup_map1 = { 4 },
6876 .errstr = "value 1073741823",
6877 .result = REJECT,
6878 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
6879 },
6880 {
6881 "bounds check after truncation of non-boundary-crossing range",
6882 .insns = {
6883 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6884 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6885 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6886 BPF_LD_MAP_FD(BPF_REG_1, 0),
6887 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6888 BPF_FUNC_map_lookup_elem),
6889 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6890 /* r1 = [0x00, 0xff] */
6891 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6892 BPF_MOV64_IMM(BPF_REG_2, 1),
6893 /* r2 = 0x10'0000'0000 */
6894 BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 36),
6895 /* r1 = [0x10'0000'0000, 0x10'0000'00ff] */
6896 BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
6897 /* r1 = [0x10'7fff'ffff, 0x10'8000'00fe] */
6898 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
6899 /* r1 = [0x00, 0xff] */
6900 BPF_ALU32_IMM(BPF_SUB, BPF_REG_1, 0x7fffffff),
6901 /* r1 = 0 */
6902 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
6903 /* no-op */
6904 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6905 /* access at offset 0 */
6906 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6907 /* exit */
6908 BPF_MOV64_IMM(BPF_REG_0, 0),
6909 BPF_EXIT_INSN(),
6910 },
6911 .fixup_map1 = { 3 },
6912 .result = ACCEPT
6913 },
6914 {
6915 "bounds check after truncation of boundary-crossing range (1)",
6916 .insns = {
6917 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6918 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6919 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6920 BPF_LD_MAP_FD(BPF_REG_1, 0),
6921 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6922 BPF_FUNC_map_lookup_elem),
6923 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6924 /* r1 = [0x00, 0xff] */
6925 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6926 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6927 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
6928 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6929 /* r1 = [0xffff'ff80, 0xffff'ffff] or
6930 * [0x0000'0000, 0x0000'007f]
6931 */
6932 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 0),
6933 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6934 /* r1 = [0x00, 0xff] or
6935 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
6936 */
6937 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6938 /* r1 = 0 or
6939 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
6940 */
6941 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
6942 /* no-op or OOB pointer computation */
6943 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6944 /* potentially OOB access */
6945 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6946 /* exit */
6947 BPF_MOV64_IMM(BPF_REG_0, 0),
6948 BPF_EXIT_INSN(),
6949 },
6950 .fixup_map1 = { 3 },
6951 /* not actually fully unbounded, but the bound is very high */
6952 .errstr = "R0 unbounded memory access",
6953 .result = REJECT
6954 },
6955 {
6956 "bounds check after truncation of boundary-crossing range (2)",
6957 .insns = {
6958 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6959 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6960 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6961 BPF_LD_MAP_FD(BPF_REG_1, 0),
6962 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6963 BPF_FUNC_map_lookup_elem),
6964 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
6965 /* r1 = [0x00, 0xff] */
6966 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
6967 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6968 /* r1 = [0xffff'ff80, 0x1'0000'007f] */
6969 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0xffffff80 >> 1),
6970 /* r1 = [0xffff'ff80, 0xffff'ffff] or
6971 * [0x0000'0000, 0x0000'007f]
6972 * difference to previous test: truncation via MOV32
6973 * instead of ALU32.
6974 */
6975 BPF_MOV32_REG(BPF_REG_1, BPF_REG_1),
6976 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6977 /* r1 = [0x00, 0xff] or
6978 * [0xffff'ffff'0000'0080, 0xffff'ffff'ffff'ffff]
6979 */
6980 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 0xffffff80 >> 1),
6981 /* r1 = 0 or
6982 * [0x00ff'ffff'ff00'0000, 0x00ff'ffff'ffff'ffff]
6983 */
6984 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
6985 /* no-op or OOB pointer computation */
6986 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6987 /* potentially OOB access */
6988 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
6989 /* exit */
6990 BPF_MOV64_IMM(BPF_REG_0, 0),
6991 BPF_EXIT_INSN(),
6992 },
6993 .fixup_map1 = { 3 },
6994 /* not actually fully unbounded, but the bound is very high */
6995 .errstr = "R0 unbounded memory access",
6996 .result = REJECT
6997 },
6998 {
6999 "bounds check after wrapping 32-bit addition",
7000 .insns = {
7001 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7002 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7003 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7004 BPF_LD_MAP_FD(BPF_REG_1, 0),
7005 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7006 BPF_FUNC_map_lookup_elem),
7007 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
7008 /* r1 = 0x7fff'ffff */
7009 BPF_MOV64_IMM(BPF_REG_1, 0x7fffffff),
7010 /* r1 = 0xffff'fffe */
7011 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7012 /* r1 = 0 */
7013 BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 2),
7014 /* no-op */
7015 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7016 /* access at offset 0 */
7017 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7018 /* exit */
7019 BPF_MOV64_IMM(BPF_REG_0, 0),
7020 BPF_EXIT_INSN(),
7021 },
7022 .fixup_map1 = { 3 },
7023 .result = ACCEPT
7024 },
7025 {
7026 "bounds check after shift with oversized count operand",
7027 .insns = {
7028 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7029 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7030 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7031 BPF_LD_MAP_FD(BPF_REG_1, 0),
7032 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7033 BPF_FUNC_map_lookup_elem),
7034 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7035 BPF_MOV64_IMM(BPF_REG_2, 32),
7036 BPF_MOV64_IMM(BPF_REG_1, 1),
7037 /* r1 = (u32)1 << (u32)32 = ? */
7038 BPF_ALU32_REG(BPF_LSH, BPF_REG_1, BPF_REG_2),
7039 /* r1 = [0x0000, 0xffff] */
7040 BPF_ALU64_IMM(BPF_AND, BPF_REG_1, 0xffff),
7041 /* computes unknown pointer, potentially OOB */
7042 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7043 /* potentially OOB access */
7044 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7045 /* exit */
7046 BPF_MOV64_IMM(BPF_REG_0, 0),
7047 BPF_EXIT_INSN(),
7048 },
7049 .fixup_map1 = { 3 },
7050 .errstr = "R0 max value is outside of the array range",
7051 .result = REJECT
7052 },
7053 {
7054 "bounds check after right shift of maybe-negative number",
7055 .insns = {
7056 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7057 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7058 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7059 BPF_LD_MAP_FD(BPF_REG_1, 0),
7060 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7061 BPF_FUNC_map_lookup_elem),
7062 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 6),
7063 /* r1 = [0x00, 0xff] */
7064 BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7065 /* r1 = [-0x01, 0xfe] */
7066 BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 1),
7067 /* r1 = 0 or 0xff'ffff'ffff'ffff */
7068 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7069 /* r1 = 0 or 0xffff'ffff'ffff */
7070 BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 8),
7071 /* computes unknown pointer, potentially OOB */
7072 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7073 /* potentially OOB access */
7074 BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_0, 0),
7075 /* exit */
7076 BPF_MOV64_IMM(BPF_REG_0, 0),
7077 BPF_EXIT_INSN(),
7078 },
7079 .fixup_map1 = { 3 },
7080 .errstr = "R0 unbounded memory access",
7081 .result = REJECT
7082 },
7083 {
7084 "bounds check map access with off+size signed 32bit overflow. test1",
7085 .insns = {
7086 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7087 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7088 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7089 BPF_LD_MAP_FD(BPF_REG_1, 0),
7090 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7091 BPF_FUNC_map_lookup_elem),
7092 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7093 BPF_EXIT_INSN(),
7094 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x7ffffffe),
7095 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7096 BPF_JMP_A(0),
7097 BPF_EXIT_INSN(),
7098 },
7099 .fixup_map1 = { 3 },
7100 .errstr = "map_value pointer and 2147483646",
7101 .result = REJECT
7102 },
7103 {
7104 "bounds check map access with off+size signed 32bit overflow. test2",
7105 .insns = {
7106 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7107 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7108 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7109 BPF_LD_MAP_FD(BPF_REG_1, 0),
7110 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7111 BPF_FUNC_map_lookup_elem),
7112 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7113 BPF_EXIT_INSN(),
7114 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7115 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7116 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 0x1fffffff),
7117 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7118 BPF_JMP_A(0),
7119 BPF_EXIT_INSN(),
7120 },
7121 .fixup_map1 = { 3 },
7122 .errstr = "pointer offset 1073741822",
7123 .result = REJECT
7124 },
7125 {
7126 "bounds check map access with off+size signed 32bit overflow. test3",
7127 .insns = {
7128 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7129 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7130 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7131 BPF_LD_MAP_FD(BPF_REG_1, 0),
7132 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7133 BPF_FUNC_map_lookup_elem),
7134 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7135 BPF_EXIT_INSN(),
7136 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
7137 BPF_ALU64_IMM(BPF_SUB, BPF_REG_0, 0x1fffffff),
7138 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
7139 BPF_JMP_A(0),
7140 BPF_EXIT_INSN(),
7141 },
7142 .fixup_map1 = { 3 },
7143 .errstr = "pointer offset -1073741822",
7144 .result = REJECT
7145 },
7146 {
7147 "bounds check map access with off+size signed 32bit overflow. test4",
7148 .insns = {
7149 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7150 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7151 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7152 BPF_LD_MAP_FD(BPF_REG_1, 0),
7153 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7154 BPF_FUNC_map_lookup_elem),
7155 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 1),
7156 BPF_EXIT_INSN(),
7157 BPF_MOV64_IMM(BPF_REG_1, 1000000),
7158 BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 1000000),
7159 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
7160 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 2),
7161 BPF_JMP_A(0),
7162 BPF_EXIT_INSN(),
7163 },
7164 .fixup_map1 = { 3 },
7165 .errstr = "map_value pointer and 1000000000000",
7166 .result = REJECT
7167 },
7168 {
7169 "pointer/scalar confusion in state equality check (way 1)",
7170 .insns = {
7171 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7172 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7173 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7174 BPF_LD_MAP_FD(BPF_REG_1, 0),
7175 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7176 BPF_FUNC_map_lookup_elem),
7177 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
7178 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7179 BPF_JMP_A(1),
7180 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
7181 BPF_JMP_A(0),
7182 BPF_EXIT_INSN(),
7183 },
7184 .fixup_map1 = { 3 },
7185 .result = ACCEPT,
7186 .result_unpriv = REJECT,
7187 .errstr_unpriv = "R0 leaks addr as return value"
7188 },
7189 {
7190 "pointer/scalar confusion in state equality check (way 2)",
7191 .insns = {
7192 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7193 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
7194 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
7195 BPF_LD_MAP_FD(BPF_REG_1, 0),
7196 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7197 BPF_FUNC_map_lookup_elem),
7198 BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
7199 BPF_MOV64_REG(BPF_REG_0, BPF_REG_10),
7200 BPF_JMP_A(1),
7201 BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),
7202 BPF_EXIT_INSN(),
7203 },
7204 .fixup_map1 = { 3 },
7205 .result = ACCEPT,
7206 .result_unpriv = REJECT,
7207 .errstr_unpriv = "R0 leaks addr as return value"
7208 },
7209 {
6769 "variable-offset ctx access", 7210 "variable-offset ctx access",
6770 .insns = { 7211 .insns = {
6771 /* Get an unknown value */ 7212 /* Get an unknown value */
@@ -6807,6 +7248,71 @@ static struct bpf_test tests[] = {
6807 .prog_type = BPF_PROG_TYPE_LWT_IN, 7248 .prog_type = BPF_PROG_TYPE_LWT_IN,
6808 }, 7249 },
6809 { 7250 {
7251 "indirect variable-offset stack access",
7252 .insns = {
7253 /* Fill the top 8 bytes of the stack */
7254 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
7255 /* Get an unknown value */
7256 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
7257 /* Make it small and 4-byte aligned */
7258 BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 4),
7259 BPF_ALU64_IMM(BPF_SUB, BPF_REG_2, 8),
7260 /* add it to fp. We now have either fp-4 or fp-8, but
7261 * we don't know which
7262 */
7263 BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_10),
7264 /* dereference it indirectly */
7265 BPF_LD_MAP_FD(BPF_REG_1, 0),
7266 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
7267 BPF_FUNC_map_lookup_elem),
7268 BPF_MOV64_IMM(BPF_REG_0, 0),
7269 BPF_EXIT_INSN(),
7270 },
7271 .fixup_map1 = { 5 },
7272 .errstr = "variable stack read R2",
7273 .result = REJECT,
7274 .prog_type = BPF_PROG_TYPE_LWT_IN,
7275 },
7276 {
7277 "direct stack access with 32-bit wraparound. test1",
7278 .insns = {
7279 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7280 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7281 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
7282 BPF_MOV32_IMM(BPF_REG_0, 0),
7283 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7284 BPF_EXIT_INSN()
7285 },
7286 .errstr = "fp pointer and 2147483647",
7287 .result = REJECT
7288 },
7289 {
7290 "direct stack access with 32-bit wraparound. test2",
7291 .insns = {
7292 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7293 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
7294 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
7295 BPF_MOV32_IMM(BPF_REG_0, 0),
7296 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7297 BPF_EXIT_INSN()
7298 },
7299 .errstr = "fp pointer and 1073741823",
7300 .result = REJECT
7301 },
7302 {
7303 "direct stack access with 32-bit wraparound. test3",
7304 .insns = {
7305 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
7306 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
7307 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
7308 BPF_MOV32_IMM(BPF_REG_0, 0),
7309 BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
7310 BPF_EXIT_INSN()
7311 },
7312 .errstr = "fp pointer offset 1073741822",
7313 .result = REJECT
7314 },
7315 {
6810 "liveness pruning and write screening", 7316 "liveness pruning and write screening",
6811 .insns = { 7317 .insns = {
6812 /* Get an unknown value */ 7318 /* Get an unknown value */
@@ -7128,6 +7634,19 @@ static struct bpf_test tests[] = {
7128 .prog_type = BPF_PROG_TYPE_SCHED_CLS, 7634 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7129 }, 7635 },
7130 { 7636 {
7637 "pkt_end - pkt_start is allowed",
7638 .insns = {
7639 BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
7640 offsetof(struct __sk_buff, data_end)),
7641 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
7642 offsetof(struct __sk_buff, data)),
7643 BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
7644 BPF_EXIT_INSN(),
7645 },
7646 .result = ACCEPT,
7647 .prog_type = BPF_PROG_TYPE_SCHED_CLS,
7648 },
7649 {
7131 "XDP pkt read, pkt_end mangling, bad access 1", 7650 "XDP pkt read, pkt_end mangling, bad access 1",
7132 .insns = { 7651 .insns = {
7133 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 7652 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -7142,7 +7661,7 @@ static struct bpf_test tests[] = {
7142 BPF_MOV64_IMM(BPF_REG_0, 0), 7661 BPF_MOV64_IMM(BPF_REG_0, 0),
7143 BPF_EXIT_INSN(), 7662 BPF_EXIT_INSN(),
7144 }, 7663 },
7145 .errstr = "R1 offset is outside of the packet", 7664 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
7146 .result = REJECT, 7665 .result = REJECT,
7147 .prog_type = BPF_PROG_TYPE_XDP, 7666 .prog_type = BPF_PROG_TYPE_XDP,
7148 }, 7667 },
@@ -7161,7 +7680,7 @@ static struct bpf_test tests[] = {
7161 BPF_MOV64_IMM(BPF_REG_0, 0), 7680 BPF_MOV64_IMM(BPF_REG_0, 0),
7162 BPF_EXIT_INSN(), 7681 BPF_EXIT_INSN(),
7163 }, 7682 },
7164 .errstr = "R1 offset is outside of the packet", 7683 .errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
7165 .result = REJECT, 7684 .result = REJECT,
7166 .prog_type = BPF_PROG_TYPE_XDP, 7685 .prog_type = BPF_PROG_TYPE_XDP,
7167 }, 7686 },