aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2019-06-20 00:06:27 -0400
committerDavid S. Miller <davem@davemloft.net>2019-06-20 00:06:27 -0400
commitdca73a65a68329ee386d3ff473152bac66eaab39 (patch)
tree97c41afb932bdd6cbe67e7ffc38bfe5952c97798 /include/linux
parent497ad9f5b2dc86b733761b9afa44ecfa2f17be65 (diff)
parent94079b64255fe40b9b53fd2e4081f68b9b14f54a (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says: ==================== pull-request: bpf-next 2019-06-19 The following pull-request contains BPF updates for your *net-next* tree. The main changes are: 1) new SO_REUSEPORT_DETACH_BPF setsocktopt, from Martin. 2) BTF based map definition, from Andrii. 3) support bpf_map_lookup_elem for xskmap, from Jonathan. 4) bounded loops and scalar precision logic in the verifier, from Alexei. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/linux')
-rw-r--r--include/linux/bpf.h25
-rw-r--r--include/linux/bpf_verifier.h69
2 files changed, 93 insertions, 1 deletion
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 9f7c453db70c..a62e7889b0b6 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -277,6 +277,7 @@ enum bpf_reg_type {
277 PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */ 277 PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */
278 PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */ 278 PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
279 PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */ 279 PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */
280 PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */
280}; 281};
281 282
282/* The information passed from prog-specific *_is_valid_access 283/* The information passed from prog-specific *_is_valid_access
@@ -1098,6 +1099,15 @@ u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
1098 struct bpf_insn *insn_buf, 1099 struct bpf_insn *insn_buf,
1099 struct bpf_prog *prog, 1100 struct bpf_prog *prog,
1100 u32 *target_size); 1101 u32 *target_size);
1102
1103bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
1104 struct bpf_insn_access_aux *info);
1105
1106u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
1107 const struct bpf_insn *si,
1108 struct bpf_insn *insn_buf,
1109 struct bpf_prog *prog,
1110 u32 *target_size);
1101#else 1111#else
1102static inline bool bpf_tcp_sock_is_valid_access(int off, int size, 1112static inline bool bpf_tcp_sock_is_valid_access(int off, int size,
1103 enum bpf_access_type type, 1113 enum bpf_access_type type,
@@ -1114,6 +1124,21 @@ static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type,
1114{ 1124{
1115 return 0; 1125 return 0;
1116} 1126}
1127static inline bool bpf_xdp_sock_is_valid_access(int off, int size,
1128 enum bpf_access_type type,
1129 struct bpf_insn_access_aux *info)
1130{
1131 return false;
1132}
1133
1134static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
1135 const struct bpf_insn *si,
1136 struct bpf_insn *insn_buf,
1137 struct bpf_prog *prog,
1138 u32 *target_size)
1139{
1140 return 0;
1141}
1117#endif /* CONFIG_INET */ 1142#endif /* CONFIG_INET */
1118 1143
1119#endif /* _LINUX_BPF_H */ 1144#endif /* _LINUX_BPF_H */
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 026ba8b81e88..5fe99f322b1c 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -136,6 +136,8 @@ struct bpf_reg_state {
136 */ 136 */
137 s32 subreg_def; 137 s32 subreg_def;
138 enum bpf_reg_liveness live; 138 enum bpf_reg_liveness live;
139 /* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
140 bool precise;
139}; 141};
140 142
141enum bpf_stack_slot_type { 143enum bpf_stack_slot_type {
@@ -187,14 +189,77 @@ struct bpf_func_state {
187 struct bpf_stack_state *stack; 189 struct bpf_stack_state *stack;
188}; 190};
189 191
192struct bpf_idx_pair {
193 u32 prev_idx;
194 u32 idx;
195};
196
190#define MAX_CALL_FRAMES 8 197#define MAX_CALL_FRAMES 8
191struct bpf_verifier_state { 198struct bpf_verifier_state {
192 /* call stack tracking */ 199 /* call stack tracking */
193 struct bpf_func_state *frame[MAX_CALL_FRAMES]; 200 struct bpf_func_state *frame[MAX_CALL_FRAMES];
201 struct bpf_verifier_state *parent;
202 /*
203 * 'branches' field is the number of branches left to explore:
204 * 0 - all possible paths from this state reached bpf_exit or
205 * were safely pruned
206 * 1 - at least one path is being explored.
207 * This state hasn't reached bpf_exit
208 * 2 - at least two paths are being explored.
209 * This state is an immediate parent of two children.
210 * One is fallthrough branch with branches==1 and another
211 * state is pushed into stack (to be explored later) also with
212 * branches==1. The parent of this state has branches==1.
213 * The verifier state tree connected via 'parent' pointer looks like:
214 * 1
215 * 1
216 * 2 -> 1 (first 'if' pushed into stack)
217 * 1
218 * 2 -> 1 (second 'if' pushed into stack)
219 * 1
220 * 1
221 * 1 bpf_exit.
222 *
223 * Once do_check() reaches bpf_exit, it calls update_branch_counts()
224 * and the verifier state tree will look:
225 * 1
226 * 1
227 * 2 -> 1 (first 'if' pushed into stack)
228 * 1
229 * 1 -> 1 (second 'if' pushed into stack)
230 * 0
231 * 0
232 * 0 bpf_exit.
233 * After pop_stack() the do_check() will resume at second 'if'.
234 *
235 * If is_state_visited() sees a state with branches > 0 it means
236 * there is a loop. If such state is exactly equal to the current state
237 * it's an infinite loop. Note states_equal() checks for states
238 * equvalency, so two states being 'states_equal' does not mean
239 * infinite loop. The exact comparison is provided by
240 * states_maybe_looping() function. It's a stronger pre-check and
241 * much faster than states_equal().
242 *
243 * This algorithm may not find all possible infinite loops or
244 * loop iteration count may be too high.
245 * In such cases BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
246 */
247 u32 branches;
194 u32 insn_idx; 248 u32 insn_idx;
195 u32 curframe; 249 u32 curframe;
196 u32 active_spin_lock; 250 u32 active_spin_lock;
197 bool speculative; 251 bool speculative;
252
253 /* first and last insn idx of this verifier state */
254 u32 first_insn_idx;
255 u32 last_insn_idx;
256 /* jmp history recorded from first to last.
257 * backtracking is using it to go from last to first.
258 * For most states jmp_history_cnt is [0-3].
259 * For loops can go up to ~40.
260 */
261 struct bpf_idx_pair *jmp_history;
262 u32 jmp_history_cnt;
198}; 263};
199 264
200#define bpf_get_spilled_reg(slot, frame) \ 265#define bpf_get_spilled_reg(slot, frame) \
@@ -309,7 +374,9 @@ struct bpf_verifier_env {
309 } cfg; 374 } cfg;
310 u32 subprog_cnt; 375 u32 subprog_cnt;
311 /* number of instructions analyzed by the verifier */ 376 /* number of instructions analyzed by the verifier */
312 u32 insn_processed; 377 u32 prev_insn_processed, insn_processed;
378 /* number of jmps, calls, exits analyzed so far */
379 u32 prev_jmps_processed, jmps_processed;
313 /* total verification time */ 380 /* total verification time */
314 u64 verification_time; 381 u64 verification_time;
315 /* maximum number of verifier states kept in 'branching' instructions */ 382 /* maximum number of verifier states kept in 'branching' instructions */