about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2017-08-24 01:38:08 -0400
committerDavid S. Miller <davem@davemloft.net>2017-08-24 01:38:08 -0400
commit81152518e985e2db22f22ca89b12169851644af6 (patch)
tree180afe2c34f0bd7901b27138d06c6ede1d1456c5
parent60890e046081aef61980dbc812ac5100ad078a87 (diff)
parent8e9cd9ce90d48369b2c5ddd79fe3d4a4cb1ccb56 (diff)
Merge branch 'bpf-verifier-fixes'
Edward Cree says: ==================== bpf: verifier fixes Fix a couple of bugs introduced in my recent verifier patches. Patch #2 does slightly increase the insn count on bpf_lxc.o, but only by about a hundred insns (i.e. 0.2%). v2: added test for write-marks bug (patch #1); reworded comment on propagate_liveness() for clarity. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--include/linux/bpf_verifier.h14
-rw-r--r--kernel/bpf/verifier.c78
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c44
3 files changed, 103 insertions, 33 deletions
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 91d07efed2ba..b8d200f60a40 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -21,6 +21,19 @@
21 */ 21 */
22#define BPF_MAX_VAR_SIZ INT_MAX 22#define BPF_MAX_VAR_SIZ INT_MAX
23 23
24/* Liveness marks, used for registers and spilled-regs (in stack slots).
25 * Read marks propagate upwards until they find a write mark; they record that
26 * "one of this state's descendants read this reg" (and therefore the reg is
27 * relevant for states_equal() checks).
28 * Write marks collect downwards and do not propagate; they record that "the
29 * straight-line code that reached this state (from its parent) wrote this reg"
30 * (and therefore that reads propagated from this state or its descendants
31 * should not propagate to its parent).
32 * A state with a write mark can receive read marks; it just won't propagate
33 * them to its parent, since the write mark is a property, not of the state,
34 * but of the link between it and its parent. See mark_reg_read() and
35 * mark_stack_slot_read() in kernel/bpf/verifier.c.
36 */
24enum bpf_reg_liveness { 37enum bpf_reg_liveness {
25 REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */ 38 REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
26 REG_LIVE_READ, /* reg was read, so we're sensitive to initial value */ 39 REG_LIVE_READ, /* reg was read, so we're sensitive to initial value */
@@ -125,7 +138,6 @@ struct bpf_verifier_env {
125 u32 id_gen; /* used to generate unique reg IDs */ 138 u32 id_gen; /* used to generate unique reg IDs */
126 bool allow_ptr_leaks; 139 bool allow_ptr_leaks;
127 bool seen_direct_write; 140 bool seen_direct_write;
128 bool varlen_map_value_access;
129 struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ 141 struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
130}; 142};
131 143
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index e42c096ba20d..d690c7dd1f1a 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -832,11 +832,6 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno,
832 */ 832 */
833 if (log_level) 833 if (log_level)
834 print_verifier_state(state); 834 print_verifier_state(state);
835 /* If the offset is variable, we will need to be stricter in state
836 * pruning from now on.
837 */
838 if (!tnum_is_const(reg->var_off))
839 env->varlen_map_value_access = true;
840 /* The minimum value is only important with signed 835 /* The minimum value is only important with signed
841 * comparisons where we can't assume the floor of a 836 * comparisons where we can't assume the floor of a
842 * value is 0. If we are using signed variables for our 837 * value is 0. If we are using signed variables for our
@@ -3247,9 +3242,8 @@ static bool check_ids(u32 old_id, u32 cur_id, struct idpair *idmap)
3247} 3242}
3248 3243
3249/* Returns true if (rold safe implies rcur safe) */ 3244/* Returns true if (rold safe implies rcur safe) */
3250static bool regsafe(struct bpf_reg_state *rold, 3245static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur,
3251 struct bpf_reg_state *rcur, 3246 struct idpair *idmap)
3252 bool varlen_map_access, struct idpair *idmap)
3253{ 3247{
3254 if (!(rold->live & REG_LIVE_READ)) 3248 if (!(rold->live & REG_LIVE_READ))
3255 /* explored state didn't use this */ 3249 /* explored state didn't use this */
@@ -3281,22 +3275,14 @@ static bool regsafe(struct bpf_reg_state *rold,
3281 tnum_is_unknown(rold->var_off); 3275 tnum_is_unknown(rold->var_off);
3282 } 3276 }
3283 case PTR_TO_MAP_VALUE: 3277 case PTR_TO_MAP_VALUE:
3284 if (varlen_map_access) { 3278 /* If the new min/max/var_off satisfy the old ones and
3285 /* If the new min/max/var_off satisfy the old ones and 3279 * everything else matches, we are OK.
3286 * everything else matches, we are OK. 3280 * We don't care about the 'id' value, because nothing
3287 * We don't care about the 'id' value, because nothing 3281 * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL)
3288 * uses it for PTR_TO_MAP_VALUE (only for ..._OR_NULL) 3282 */
3289 */ 3283 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 &&
3290 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0 && 3284 range_within(rold, rcur) &&
3291 range_within(rold, rcur) && 3285 tnum_in(rold->var_off, rcur->var_off);
3292 tnum_in(rold->var_off, rcur->var_off);
3293 } else {
3294 /* If the ranges/var_off were not the same, but
3295 * everything else was and we didn't do a variable
3296 * access into a map then we are a-ok.
3297 */
3298 return memcmp(rold, rcur, offsetof(struct bpf_reg_state, id)) == 0;
3299 }
3300 case PTR_TO_MAP_VALUE_OR_NULL: 3286 case PTR_TO_MAP_VALUE_OR_NULL:
3301 /* a PTR_TO_MAP_VALUE could be safe to use as a 3287 /* a PTR_TO_MAP_VALUE could be safe to use as a
3302 * PTR_TO_MAP_VALUE_OR_NULL into the same map. 3288 * PTR_TO_MAP_VALUE_OR_NULL into the same map.
@@ -3380,7 +3366,6 @@ static bool states_equal(struct bpf_verifier_env *env,
3380 struct bpf_verifier_state *old, 3366 struct bpf_verifier_state *old,
3381 struct bpf_verifier_state *cur) 3367 struct bpf_verifier_state *cur)
3382{ 3368{
3383 bool varlen_map_access = env->varlen_map_value_access;
3384 struct idpair *idmap; 3369 struct idpair *idmap;
3385 bool ret = false; 3370 bool ret = false;
3386 int i; 3371 int i;
@@ -3391,8 +3376,7 @@ static bool states_equal(struct bpf_verifier_env *env,
3391 return false; 3376 return false;
3392 3377
3393 for (i = 0; i < MAX_BPF_REG; i++) { 3378 for (i = 0; i < MAX_BPF_REG; i++) {
3394 if (!regsafe(&old->regs[i], &cur->regs[i], varlen_map_access, 3379 if (!regsafe(&old->regs[i], &cur->regs[i], idmap))
3395 idmap))
3396 goto out_free; 3380 goto out_free;
3397 } 3381 }
3398 3382
@@ -3412,7 +3396,7 @@ static bool states_equal(struct bpf_verifier_env *env,
3412 continue; 3396 continue;
3413 if (!regsafe(&old->spilled_regs[i / BPF_REG_SIZE], 3397 if (!regsafe(&old->spilled_regs[i / BPF_REG_SIZE],
3414 &cur->spilled_regs[i / BPF_REG_SIZE], 3398 &cur->spilled_regs[i / BPF_REG_SIZE],
3415 varlen_map_access, idmap)) 3399 idmap))
3416 /* when explored and current stack slot are both storing 3400 /* when explored and current stack slot are both storing
3417 * spilled registers, check that stored pointers types 3401 * spilled registers, check that stored pointers types
3418 * are the same as well. 3402 * are the same as well.
@@ -3433,9 +3417,16 @@ out_free:
3433 return ret; 3417 return ret;
3434} 3418}
3435 3419
3420/* A write screens off any subsequent reads; but write marks come from the
3421 * straight-line code between a state and its parent. When we arrive at a
3422 * jump target (in the first iteration of the propagate_liveness() loop),
3423 * we didn't arrive by the straight-line code, so read marks in state must
3424 * propagate to parent regardless of state's write marks.
3425 */
3436static bool do_propagate_liveness(const struct bpf_verifier_state *state, 3426static bool do_propagate_liveness(const struct bpf_verifier_state *state,
3437 struct bpf_verifier_state *parent) 3427 struct bpf_verifier_state *parent)
3438{ 3428{
3429 bool writes = parent == state->parent; /* Observe write marks */
3439 bool touched = false; /* any changes made? */ 3430 bool touched = false; /* any changes made? */
3440 int i; 3431 int i;
3441 3432
@@ -3447,7 +3438,9 @@ static bool do_propagate_liveness(const struct bpf_verifier_state *state,
3447 for (i = 0; i < BPF_REG_FP; i++) { 3438 for (i = 0; i < BPF_REG_FP; i++) {
3448 if (parent->regs[i].live & REG_LIVE_READ) 3439 if (parent->regs[i].live & REG_LIVE_READ)
3449 continue; 3440 continue;
3450 if (state->regs[i].live == REG_LIVE_READ) { 3441 if (writes && (state->regs[i].live & REG_LIVE_WRITTEN))
3442 continue;
3443 if (state->regs[i].live & REG_LIVE_READ) {
3451 parent->regs[i].live |= REG_LIVE_READ; 3444 parent->regs[i].live |= REG_LIVE_READ;
3452 touched = true; 3445 touched = true;
3453 } 3446 }
@@ -3460,7 +3453,9 @@ static bool do_propagate_liveness(const struct bpf_verifier_state *state,
3460 continue; 3453 continue;
3461 if (parent->spilled_regs[i].live & REG_LIVE_READ) 3454 if (parent->spilled_regs[i].live & REG_LIVE_READ)
3462 continue; 3455 continue;
3463 if (state->spilled_regs[i].live == REG_LIVE_READ) { 3456 if (writes && (state->spilled_regs[i].live & REG_LIVE_WRITTEN))
3457 continue;
3458 if (state->spilled_regs[i].live & REG_LIVE_READ) {
3464 parent->spilled_regs[i].live |= REG_LIVE_READ; 3459 parent->spilled_regs[i].live |= REG_LIVE_READ;
3465 touched = true; 3460 touched = true;
3466 } 3461 }
@@ -3468,6 +3463,15 @@ static bool do_propagate_liveness(const struct bpf_verifier_state *state,
3468 return touched; 3463 return touched;
3469} 3464}
3470 3465
3466/* "parent" is "a state from which we reach the current state", but initially
3467 * it is not the state->parent (i.e. "the state whose straight-line code leads
3468 * to the current state"), instead it is the state that happened to arrive at
3469 * a (prunable) equivalent of the current state. See comment above
3470 * do_propagate_liveness() for consequences of this.
3471 * This function is just a more efficient way of calling mark_reg_read() or
3472 * mark_stack_slot_read() on each reg in "parent" that is read in "state",
3473 * though it requires that parent != state->parent in the call arguments.
3474 */
3471static void propagate_liveness(const struct bpf_verifier_state *state, 3475static void propagate_liveness(const struct bpf_verifier_state *state,
3472 struct bpf_verifier_state *parent) 3476 struct bpf_verifier_state *parent)
3473{ 3477{
@@ -3496,6 +3500,12 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
3496 /* reached equivalent register/stack state, 3500 /* reached equivalent register/stack state,
3497 * prune the search. 3501 * prune the search.
3498 * Registers read by the continuation are read by us. 3502 * Registers read by the continuation are read by us.
3503 * If we have any write marks in env->cur_state, they
3504 * will prevent corresponding reads in the continuation
3505 * from reaching our parent (an explored_state). Our
3506 * own state will get the read marks recorded, but
3507 * they'll be immediately forgotten as we're pruning
3508 * this state and will pop a new one.
3499 */ 3509 */
3500 propagate_liveness(&sl->state, &env->cur_state); 3510 propagate_liveness(&sl->state, &env->cur_state);
3501 return 1; 3511 return 1;
@@ -3519,7 +3529,12 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
3519 env->explored_states[insn_idx] = new_sl; 3529 env->explored_states[insn_idx] = new_sl;
3520 /* connect new state to parentage chain */ 3530 /* connect new state to parentage chain */
3521 env->cur_state.parent = &new_sl->state; 3531 env->cur_state.parent = &new_sl->state;
3522 /* clear liveness marks in current state */ 3532 /* clear write marks in current state: the writes we did are not writes
3533 * our child did, so they don't screen off its reads from us.
3534 * (There are no read marks in current state, because reads always mark
3535 * their parent and current state never has children yet. Only
3536 * explored_states can get read marks.)
3537 */
3523 for (i = 0; i < BPF_REG_FP; i++) 3538 for (i = 0; i < BPF_REG_FP; i++)
3524 env->cur_state.regs[i].live = REG_LIVE_NONE; 3539 env->cur_state.regs[i].live = REG_LIVE_NONE;
3525 for (i = 0; i < MAX_BPF_STACK / BPF_REG_SIZE; i++) 3540 for (i = 0; i < MAX_BPF_STACK / BPF_REG_SIZE; i++)
@@ -3550,7 +3565,6 @@ static int do_check(struct bpf_verifier_env *env)
3550 init_reg_state(regs); 3565 init_reg_state(regs);
3551 state->parent = NULL; 3566 state->parent = NULL;
3552 insn_idx = 0; 3567 insn_idx = 0;
3553 env->varlen_map_value_access = false;
3554 for (;;) { 3568 for (;;) {
3555 struct bpf_insn *insn; 3569 struct bpf_insn *insn;
3556 u8 class; 3570 u8 class;
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index c03542c417db..353d17015641 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -6487,6 +6487,50 @@ static struct bpf_test tests[] = {
6487 .result = REJECT, 6487 .result = REJECT,
6488 .prog_type = BPF_PROG_TYPE_LWT_IN, 6488 .prog_type = BPF_PROG_TYPE_LWT_IN,
6489 }, 6489 },
6490 {
6491 "liveness pruning and write screening",
6492 .insns = {
6493 /* Get an unknown value */
6494 BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 0),
6495 /* branch conditions teach us nothing about R2 */
6496 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
6497 BPF_MOV64_IMM(BPF_REG_0, 0),
6498 BPF_JMP_IMM(BPF_JGE, BPF_REG_2, 0, 1),
6499 BPF_MOV64_IMM(BPF_REG_0, 0),
6500 BPF_EXIT_INSN(),
6501 },
6502 .errstr = "R0 !read_ok",
6503 .result = REJECT,
6504 .prog_type = BPF_PROG_TYPE_LWT_IN,
6505 },
6506 {
6507 "varlen_map_value_access pruning",
6508 .insns = {
6509 BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
6510 BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
6511 BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
6512 BPF_LD_MAP_FD(BPF_REG_1, 0),
6513 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
6514 BPF_FUNC_map_lookup_elem),
6515 BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 8),
6516 BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
6517 BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
6518 BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
6519 BPF_MOV32_IMM(BPF_REG_1, 0),
6520 BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
6521 BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
6522 BPF_JMP_IMM(BPF_JA, 0, 0, 0),
6523 BPF_ST_MEM(BPF_DW, BPF_REG_0, 0,
6524 offsetof(struct test_val, foo)),
6525 BPF_EXIT_INSN(),
6526 },
6527 .fixup_map2 = { 3 },
6528 .errstr_unpriv = "R0 leaks addr",
6529 .errstr = "R0 unbounded memory access",
6530 .result_unpriv = REJECT,
6531 .result = REJECT,
6532 .flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
6533 },
6490}; 6534};
6491 6535
6492static int probe_filter_length(const struct bpf_insn *fp) 6536static int probe_filter_length(const struct bpf_insn *fp)