aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDaniel Borkmann <daniel@iogearbox.net>2017-05-17 21:00:06 -0400
committerDavid S. Miller <davem@davemloft.net>2017-05-17 22:55:27 -0400
commit3c2ce60bdd3d57051bf85615deec04a694473840 (patch)
tree1eccf24afe708484072dc80c378201b92be0732d
parent7dd7eb9513bd02184d45f000ab69d78cb1fa1531 (diff)
bpf: adjust verifier heuristics
Current limits with regards to processing program paths do not really reflect today's needs anymore due to programs becoming more complex and verifier smarter, keeping track of more data such as const ALU operations, alignment tracking, spilling of PTR_TO_MAP_VALUE_ADJ registers, and other features allowing for smarter matching of what LLVM generates. This also comes with the side-effect that we result in fewer opportunities to prune search states and thus often need to do more work to prove safety than in the past due to different register states and stack layout where we mismatch. Generally, it's quite hard to determine what caused a sudden increase in complexity, it could be caused by something as trivial as a single branch somewhere at the beginning of the program where LLVM assigned a stack slot that is marked differently throughout other branches and thus causing a mismatch, where verifier then needs to prove safety for the whole rest of the program. Subsequently, programs with even less than half the insn size limit can get rejected. We noticed that while some programs load fine under pre 4.11, they get rejected due to hitting limits on more recent kernels. We saw that in the vast majority of cases (90+%) pruning failed due to register mismatches. In case of stack mismatches, majority of cases failed due to different stack slot types (invalid, spill, misc) rather than differences in spilled registers. This patch makes pruning more aggressive by also adding markers that sit at conditional jumps as well. Currently, we only mark jump targets for pruning. For example in direct packet access, these are usually error paths where we bail out. We found that adding these markers, it can reduce number of processed insns by up to 30%. Another option is to ignore reg->id in probing PTR_TO_MAP_VALUE_OR_NULL registers, which can help pruning slightly as well by up to 7% observed complexity reduction as stand-alone. 
Meaning, if a previous path with register type PTR_TO_MAP_VALUE_OR_NULL for map X was found to be safe, then in the current state a PTR_TO_MAP_VALUE_OR_NULL register for the same map X must be safe as well. Last but not least, the patch also adds a scheduling point and bumps the current limit for instructions to be processed to a more adequate value. Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Acked-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--kernel/bpf/verifier.c12
1 files changed, 11 insertions, 1 deletions
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 39f2dcbc4cbc..1eddb713b815 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -140,7 +140,7 @@ struct bpf_verifier_stack_elem {
140 struct bpf_verifier_stack_elem *next; 140 struct bpf_verifier_stack_elem *next;
141}; 141};
142 142
143#define BPF_COMPLEXITY_LIMIT_INSNS 65536 143#define BPF_COMPLEXITY_LIMIT_INSNS 98304
144#define BPF_COMPLEXITY_LIMIT_STACK 1024 144#define BPF_COMPLEXITY_LIMIT_STACK 1024
145 145
146#define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA) 146#define BPF_MAP_PTR_POISON ((void *)0xeB9F + POISON_POINTER_DELTA)
@@ -2640,6 +2640,7 @@ peek_stack:
2640 env->explored_states[t + 1] = STATE_LIST_MARK; 2640 env->explored_states[t + 1] = STATE_LIST_MARK;
2641 } else { 2641 } else {
2642 /* conditional jump with two edges */ 2642 /* conditional jump with two edges */
2643 env->explored_states[t] = STATE_LIST_MARK;
2643 ret = push_insn(t, t + 1, FALLTHROUGH, env); 2644 ret = push_insn(t, t + 1, FALLTHROUGH, env);
2644 if (ret == 1) 2645 if (ret == 1)
2645 goto peek_stack; 2646 goto peek_stack;
@@ -2798,6 +2799,12 @@ static bool states_equal(struct bpf_verifier_env *env,
2798 rcur->type != NOT_INIT)) 2799 rcur->type != NOT_INIT))
2799 continue; 2800 continue;
2800 2801
2802 /* Don't care about the reg->id in this case. */
2803 if (rold->type == PTR_TO_MAP_VALUE_OR_NULL &&
2804 rcur->type == PTR_TO_MAP_VALUE_OR_NULL &&
2805 rold->map_ptr == rcur->map_ptr)
2806 continue;
2807
2801 if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET && 2808 if (rold->type == PTR_TO_PACKET && rcur->type == PTR_TO_PACKET &&
2802 compare_ptrs_to_packet(rold, rcur)) 2809 compare_ptrs_to_packet(rold, rcur))
2803 continue; 2810 continue;
@@ -2932,6 +2939,9 @@ static int do_check(struct bpf_verifier_env *env)
2932 goto process_bpf_exit; 2939 goto process_bpf_exit;
2933 } 2940 }
2934 2941
2942 if (need_resched())
2943 cond_resched();
2944
2935 if (log_level > 1 || (log_level && do_print_state)) { 2945 if (log_level > 1 || (log_level && do_print_state)) {
2936 if (log_level > 1) 2946 if (log_level > 1)
2937 verbose("%d:", insn_idx); 2947 verbose("%d:", insn_idx);