aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/bpf
diff options
context:
space:
mode:
authorJoe Stringer <joe@wand.net.nz>2018-10-02 16:35:35 -0400
committerDaniel Borkmann <daniel@iogearbox.net>2018-10-02 20:53:47 -0400
commitfd978bf7fd312581a7ca454a991f0ffb34c4204b (patch)
tree5f5cd39c65ff6f3839c78ce39684da93e1252cf3 /kernel/bpf
parent84dbf3507349696b505b6a500722538b0683e4ac (diff)
bpf: Add reference tracking to verifier
Allow helper functions to acquire a reference and return it into a register. Specific pointer types such as the PTR_TO_SOCKET will implicitly represent such a reference. The verifier must ensure that these references are released exactly once in each path through the program. To achieve this, this commit assigns an id to the pointer and tracks it in the 'bpf_func_state', then when the function or program exits, verifies that all of the acquired references have been freed. When the pointer is passed to a function that frees the reference, it is removed from the 'bpf_func_state' and all existing copies of the pointer in registers are marked invalid. Signed-off-by: Joe Stringer <joe@wand.net.nz> Acked-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Diffstat (limited to 'kernel/bpf')
-rw-r--r--kernel/bpf/verifier.c306
1 file changed, 287 insertions, 19 deletions
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 11e982381061..cd0d8bc00bd1 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1,5 +1,6 @@
1/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com 1/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
2 * Copyright (c) 2016 Facebook 2 * Copyright (c) 2016 Facebook
3 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
3 * 4 *
4 * This program is free software; you can redistribute it and/or 5 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public 6 * modify it under the terms of version 2 of the GNU General Public
@@ -140,6 +141,18 @@ static const struct bpf_verifier_ops * const bpf_verifier_ops[] = {
140 * 141 *
141 * After the call R0 is set to return type of the function and registers R1-R5 142 * After the call R0 is set to return type of the function and registers R1-R5
142 * are set to NOT_INIT to indicate that they are no longer readable. 143 * are set to NOT_INIT to indicate that they are no longer readable.
144 *
145 * The following reference types represent a potential reference to a kernel
146 * resource which, after first being allocated, must be checked and freed by
147 * the BPF program:
148 * - PTR_TO_SOCKET_OR_NULL, PTR_TO_SOCKET
149 *
150 * When the verifier sees a helper call return a reference type, it allocates a
151 * pointer id for the reference and stores it in the current function state.
152 * Similar to the way that PTR_TO_MAP_VALUE_OR_NULL is converted into
153 * PTR_TO_MAP_VALUE, PTR_TO_SOCKET_OR_NULL becomes PTR_TO_SOCKET when the type
154 * passes through a NULL-check conditional. For the branch wherein the state is
155 * changed to CONST_IMM, the verifier releases the reference.
143 */ 156 */
144 157
145/* verifier_state + insn_idx are pushed to stack when branch is encountered */ 158/* verifier_state + insn_idx are pushed to stack when branch is encountered */
@@ -189,6 +202,7 @@ struct bpf_call_arg_meta {
189 int access_size; 202 int access_size;
190 s64 msize_smax_value; 203 s64 msize_smax_value;
191 u64 msize_umax_value; 204 u64 msize_umax_value;
205 int ptr_id;
192}; 206};
193 207
194static DEFINE_MUTEX(bpf_verifier_lock); 208static DEFINE_MUTEX(bpf_verifier_lock);
@@ -251,7 +265,42 @@ static bool type_is_pkt_pointer(enum bpf_reg_type type)
251 265
252static bool reg_type_may_be_null(enum bpf_reg_type type) 266static bool reg_type_may_be_null(enum bpf_reg_type type)
253{ 267{
254 return type == PTR_TO_MAP_VALUE_OR_NULL; 268 return type == PTR_TO_MAP_VALUE_OR_NULL ||
269 type == PTR_TO_SOCKET_OR_NULL;
270}
271
272static bool type_is_refcounted(enum bpf_reg_type type)
273{
274 return type == PTR_TO_SOCKET;
275}
276
277static bool type_is_refcounted_or_null(enum bpf_reg_type type)
278{
279 return type == PTR_TO_SOCKET || type == PTR_TO_SOCKET_OR_NULL;
280}
281
282static bool reg_is_refcounted(const struct bpf_reg_state *reg)
283{
284 return type_is_refcounted(reg->type);
285}
286
287static bool reg_is_refcounted_or_null(const struct bpf_reg_state *reg)
288{
289 return type_is_refcounted_or_null(reg->type);
290}
291
292static bool arg_type_is_refcounted(enum bpf_arg_type type)
293{
294 return type == ARG_PTR_TO_SOCKET;
295}
296
297/* Determine whether the function releases some resources allocated by another
298 * function call. The first reference type argument will be assumed to be
299 * released by release_reference().
300 */
301static bool is_release_function(enum bpf_func_id func_id)
302{
303 return false;
255} 304}
256 305
257/* string representation of 'enum bpf_reg_type' */ 306/* string representation of 'enum bpf_reg_type' */
@@ -385,6 +434,12 @@ static void print_verifier_state(struct bpf_verifier_env *env,
385 else 434 else
386 verbose(env, "=%s", types_buf); 435 verbose(env, "=%s", types_buf);
387 } 436 }
437 if (state->acquired_refs && state->refs[0].id) {
438 verbose(env, " refs=%d", state->refs[0].id);
439 for (i = 1; i < state->acquired_refs; i++)
440 if (state->refs[i].id)
441 verbose(env, ",%d", state->refs[i].id);
442 }
388 verbose(env, "\n"); 443 verbose(env, "\n");
389} 444}
390 445
@@ -403,6 +458,8 @@ static int copy_##NAME##_state(struct bpf_func_state *dst, \
403 sizeof(*src->FIELD) * (src->COUNT / SIZE)); \ 458 sizeof(*src->FIELD) * (src->COUNT / SIZE)); \
404 return 0; \ 459 return 0; \
405} 460}
461/* copy_reference_state() */
462COPY_STATE_FN(reference, acquired_refs, refs, 1)
406/* copy_stack_state() */ 463/* copy_stack_state() */
407COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE) 464COPY_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
408#undef COPY_STATE_FN 465#undef COPY_STATE_FN
@@ -441,6 +498,8 @@ static int realloc_##NAME##_state(struct bpf_func_state *state, int size, \
441 state->FIELD = new_##FIELD; \ 498 state->FIELD = new_##FIELD; \
442 return 0; \ 499 return 0; \
443} 500}
501/* realloc_reference_state() */
502REALLOC_STATE_FN(reference, acquired_refs, refs, 1)
444/* realloc_stack_state() */ 503/* realloc_stack_state() */
445REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE) 504REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
446#undef REALLOC_STATE_FN 505#undef REALLOC_STATE_FN
@@ -452,16 +511,89 @@ REALLOC_STATE_FN(stack, allocated_stack, stack, BPF_REG_SIZE)
452 * which realloc_stack_state() copies over. It points to previous 511 * which realloc_stack_state() copies over. It points to previous
453 * bpf_verifier_state which is never reallocated. 512 * bpf_verifier_state which is never reallocated.
454 */ 513 */
455static int realloc_func_state(struct bpf_func_state *state, int size, 514static int realloc_func_state(struct bpf_func_state *state, int stack_size,
456 bool copy_old) 515 int refs_size, bool copy_old)
457{ 516{
458 return realloc_stack_state(state, size, copy_old); 517 int err = realloc_reference_state(state, refs_size, copy_old);
518 if (err)
519 return err;
520 return realloc_stack_state(state, stack_size, copy_old);
521}
522
523/* Acquire a pointer id from the env and update the state->refs to include
524 * this new pointer reference.
525 * On success, returns a valid pointer id to associate with the register
526 * On failure, returns a negative errno.
527 */
528static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx)
529{
530 struct bpf_func_state *state = cur_func(env);
531 int new_ofs = state->acquired_refs;
532 int id, err;
533
534 err = realloc_reference_state(state, state->acquired_refs + 1, true);
535 if (err)
536 return err;
537 id = ++env->id_gen;
538 state->refs[new_ofs].id = id;
539 state->refs[new_ofs].insn_idx = insn_idx;
540
541 return id;
542}
543
544/* release function corresponding to acquire_reference_state(). Idempotent. */
545static int __release_reference_state(struct bpf_func_state *state, int ptr_id)
546{
547 int i, last_idx;
548
549 if (!ptr_id)
550 return -EFAULT;
551
552 last_idx = state->acquired_refs - 1;
553 for (i = 0; i < state->acquired_refs; i++) {
554 if (state->refs[i].id == ptr_id) {
555 if (last_idx && i != last_idx)
556 memcpy(&state->refs[i], &state->refs[last_idx],
557 sizeof(*state->refs));
558 memset(&state->refs[last_idx], 0, sizeof(*state->refs));
559 state->acquired_refs--;
560 return 0;
561 }
562 }
563 return -EFAULT;
564}
565
566/* variation on the above for cases where we expect that there must be an
567 * outstanding reference for the specified ptr_id.
568 */
569static int release_reference_state(struct bpf_verifier_env *env, int ptr_id)
570{
571 struct bpf_func_state *state = cur_func(env);
572 int err;
573
574 err = __release_reference_state(state, ptr_id);
575 if (WARN_ON_ONCE(err != 0))
576 verbose(env, "verifier internal error: can't release reference\n");
577 return err;
578}
579
580static int transfer_reference_state(struct bpf_func_state *dst,
581 struct bpf_func_state *src)
582{
583 int err = realloc_reference_state(dst, src->acquired_refs, false);
584 if (err)
585 return err;
586 err = copy_reference_state(dst, src);
587 if (err)
588 return err;
589 return 0;
459} 590}
460 591
461static void free_func_state(struct bpf_func_state *state) 592static void free_func_state(struct bpf_func_state *state)
462{ 593{
463 if (!state) 594 if (!state)
464 return; 595 return;
596 kfree(state->refs);
465 kfree(state->stack); 597 kfree(state->stack);
466 kfree(state); 598 kfree(state);
467} 599}
@@ -487,10 +619,14 @@ static int copy_func_state(struct bpf_func_state *dst,
487{ 619{
488 int err; 620 int err;
489 621
490 err = realloc_func_state(dst, src->allocated_stack, false); 622 err = realloc_func_state(dst, src->allocated_stack, src->acquired_refs,
623 false);
624 if (err)
625 return err;
626 memcpy(dst, src, offsetof(struct bpf_func_state, acquired_refs));
627 err = copy_reference_state(dst, src);
491 if (err) 628 if (err)
492 return err; 629 return err;
493 memcpy(dst, src, offsetof(struct bpf_func_state, allocated_stack));
494 return copy_stack_state(dst, src); 630 return copy_stack_state(dst, src);
495} 631}
496 632
@@ -1015,7 +1151,7 @@ static int check_stack_write(struct bpf_verifier_env *env,
1015 enum bpf_reg_type type; 1151 enum bpf_reg_type type;
1016 1152
1017 err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE), 1153 err = realloc_func_state(state, round_up(slot + 1, BPF_REG_SIZE),
1018 true); 1154 state->acquired_refs, true);
1019 if (err) 1155 if (err)
1020 return err; 1156 return err;
1021 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0, 1157 /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
@@ -1399,7 +1535,8 @@ static bool is_ctx_reg(struct bpf_verifier_env *env, int regno)
1399{ 1535{
1400 const struct bpf_reg_state *reg = cur_regs(env) + regno; 1536 const struct bpf_reg_state *reg = cur_regs(env) + regno;
1401 1537
1402 return reg->type == PTR_TO_CTX; 1538 return reg->type == PTR_TO_CTX ||
1539 reg->type == PTR_TO_SOCKET;
1403} 1540}
1404 1541
1405static bool is_pkt_reg(struct bpf_verifier_env *env, int regno) 1542static bool is_pkt_reg(struct bpf_verifier_env *env, int regno)
@@ -2003,6 +2140,12 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
2003 expected_type = PTR_TO_SOCKET; 2140 expected_type = PTR_TO_SOCKET;
2004 if (type != expected_type) 2141 if (type != expected_type)
2005 goto err_type; 2142 goto err_type;
2143 if (meta->ptr_id || !reg->id) {
2144 verbose(env, "verifier internal error: mismatched references meta=%d, reg=%d\n",
2145 meta->ptr_id, reg->id);
2146 return -EFAULT;
2147 }
2148 meta->ptr_id = reg->id;
2006 } else if (arg_type_is_mem_ptr(arg_type)) { 2149 } else if (arg_type_is_mem_ptr(arg_type)) {
2007 expected_type = PTR_TO_STACK; 2150 expected_type = PTR_TO_STACK;
2008 /* One exception here. In case function allows for NULL to be 2151 /* One exception here. In case function allows for NULL to be
@@ -2292,10 +2435,32 @@ static bool check_arg_pair_ok(const struct bpf_func_proto *fn)
2292 return true; 2435 return true;
2293} 2436}
2294 2437
2438static bool check_refcount_ok(const struct bpf_func_proto *fn)
2439{
2440 int count = 0;
2441
2442 if (arg_type_is_refcounted(fn->arg1_type))
2443 count++;
2444 if (arg_type_is_refcounted(fn->arg2_type))
2445 count++;
2446 if (arg_type_is_refcounted(fn->arg3_type))
2447 count++;
2448 if (arg_type_is_refcounted(fn->arg4_type))
2449 count++;
2450 if (arg_type_is_refcounted(fn->arg5_type))
2451 count++;
2452
2453 /* We only support one arg being unreferenced at the moment,
2454 * which is sufficient for the helper functions we have right now.
2455 */
2456 return count <= 1;
2457}
2458
2295static int check_func_proto(const struct bpf_func_proto *fn) 2459static int check_func_proto(const struct bpf_func_proto *fn)
2296{ 2460{
2297 return check_raw_mode_ok(fn) && 2461 return check_raw_mode_ok(fn) &&
2298 check_arg_pair_ok(fn) ? 0 : -EINVAL; 2462 check_arg_pair_ok(fn) &&
2463 check_refcount_ok(fn) ? 0 : -EINVAL;
2299} 2464}
2300 2465
2301/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END] 2466/* Packet data might have moved, any old PTR_TO_PACKET[_META,_END]
@@ -2328,12 +2493,45 @@ static void clear_all_pkt_pointers(struct bpf_verifier_env *env)
2328 __clear_all_pkt_pointers(env, vstate->frame[i]); 2493 __clear_all_pkt_pointers(env, vstate->frame[i]);
2329} 2494}
2330 2495
2496static void release_reg_references(struct bpf_verifier_env *env,
2497 struct bpf_func_state *state, int id)
2498{
2499 struct bpf_reg_state *regs = state->regs, *reg;
2500 int i;
2501
2502 for (i = 0; i < MAX_BPF_REG; i++)
2503 if (regs[i].id == id)
2504 mark_reg_unknown(env, regs, i);
2505
2506 bpf_for_each_spilled_reg(i, state, reg) {
2507 if (!reg)
2508 continue;
2509 if (reg_is_refcounted(reg) && reg->id == id)
2510 __mark_reg_unknown(reg);
2511 }
2512}
2513
2514/* The pointer with the specified id has released its reference to kernel
2515 * resources. Identify all copies of the same pointer and clear the reference.
2516 */
2517static int release_reference(struct bpf_verifier_env *env,
2518 struct bpf_call_arg_meta *meta)
2519{
2520 struct bpf_verifier_state *vstate = env->cur_state;
2521 int i;
2522
2523 for (i = 0; i <= vstate->curframe; i++)
2524 release_reg_references(env, vstate->frame[i], meta->ptr_id);
2525
2526 return release_reference_state(env, meta->ptr_id);
2527}
2528
2331static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, 2529static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
2332 int *insn_idx) 2530 int *insn_idx)
2333{ 2531{
2334 struct bpf_verifier_state *state = env->cur_state; 2532 struct bpf_verifier_state *state = env->cur_state;
2335 struct bpf_func_state *caller, *callee; 2533 struct bpf_func_state *caller, *callee;
2336 int i, subprog, target_insn; 2534 int i, err, subprog, target_insn;
2337 2535
2338 if (state->curframe + 1 >= MAX_CALL_FRAMES) { 2536 if (state->curframe + 1 >= MAX_CALL_FRAMES) {
2339 verbose(env, "the call stack of %d frames is too deep\n", 2537 verbose(env, "the call stack of %d frames is too deep\n",
@@ -2371,6 +2569,11 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
2371 state->curframe + 1 /* frameno within this callchain */, 2569 state->curframe + 1 /* frameno within this callchain */,
2372 subprog /* subprog number within this prog */); 2570 subprog /* subprog number within this prog */);
2373 2571
2572 /* Transfer references to the callee */
2573 err = transfer_reference_state(callee, caller);
2574 if (err)
2575 return err;
2576
2374 /* copy r1 - r5 args that callee can access. The copy includes parent 2577 /* copy r1 - r5 args that callee can access. The copy includes parent
2375 * pointers, which connects us up to the liveness chain 2578 * pointers, which connects us up to the liveness chain
2376 */ 2579 */
@@ -2403,6 +2606,7 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
2403 struct bpf_verifier_state *state = env->cur_state; 2606 struct bpf_verifier_state *state = env->cur_state;
2404 struct bpf_func_state *caller, *callee; 2607 struct bpf_func_state *caller, *callee;
2405 struct bpf_reg_state *r0; 2608 struct bpf_reg_state *r0;
2609 int err;
2406 2610
2407 callee = state->frame[state->curframe]; 2611 callee = state->frame[state->curframe];
2408 r0 = &callee->regs[BPF_REG_0]; 2612 r0 = &callee->regs[BPF_REG_0];
@@ -2422,6 +2626,11 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
2422 /* return to the caller whatever r0 had in the callee */ 2626 /* return to the caller whatever r0 had in the callee */
2423 caller->regs[BPF_REG_0] = *r0; 2627 caller->regs[BPF_REG_0] = *r0;
2424 2628
2629 /* Transfer references to the caller */
2630 err = transfer_reference_state(caller, callee);
2631 if (err)
2632 return err;
2633
2425 *insn_idx = callee->callsite + 1; 2634 *insn_idx = callee->callsite + 1;
2426 if (env->log.level) { 2635 if (env->log.level) {
2427 verbose(env, "returning from callee:\n"); 2636 verbose(env, "returning from callee:\n");
@@ -2478,6 +2687,18 @@ record_func_map(struct bpf_verifier_env *env, struct bpf_call_arg_meta *meta,
2478 return 0; 2687 return 0;
2479} 2688}
2480 2689
2690static int check_reference_leak(struct bpf_verifier_env *env)
2691{
2692 struct bpf_func_state *state = cur_func(env);
2693 int i;
2694
2695 for (i = 0; i < state->acquired_refs; i++) {
2696 verbose(env, "Unreleased reference id=%d alloc_insn=%d\n",
2697 state->refs[i].id, state->refs[i].insn_idx);
2698 }
2699 return state->acquired_refs ? -EINVAL : 0;
2700}
2701
2481static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx) 2702static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
2482{ 2703{
2483 const struct bpf_func_proto *fn = NULL; 2704 const struct bpf_func_proto *fn = NULL;
@@ -2556,6 +2777,18 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
2556 return err; 2777 return err;
2557 } 2778 }
2558 2779
2780 if (func_id == BPF_FUNC_tail_call) {
2781 err = check_reference_leak(env);
2782 if (err) {
2783 verbose(env, "tail_call would lead to reference leak\n");
2784 return err;
2785 }
2786 } else if (is_release_function(func_id)) {
2787 err = release_reference(env, &meta);
2788 if (err)
2789 return err;
2790 }
2791
2559 regs = cur_regs(env); 2792 regs = cur_regs(env);
2560 2793
2561 /* check that flags argument in get_local_storage(map, flags) is 0, 2794 /* check that flags argument in get_local_storage(map, flags) is 0,
@@ -2599,9 +2832,12 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn
2599 regs[BPF_REG_0].map_ptr = meta.map_ptr; 2832 regs[BPF_REG_0].map_ptr = meta.map_ptr;
2600 regs[BPF_REG_0].id = ++env->id_gen; 2833 regs[BPF_REG_0].id = ++env->id_gen;
2601 } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) { 2834 } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) {
2835 int id = acquire_reference_state(env, insn_idx);
2836 if (id < 0)
2837 return id;
2602 mark_reg_known_zero(env, regs, BPF_REG_0); 2838 mark_reg_known_zero(env, regs, BPF_REG_0);
2603 regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL; 2839 regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL;
2604 regs[BPF_REG_0].id = ++env->id_gen; 2840 regs[BPF_REG_0].id = id;
2605 } else { 2841 } else {
2606 verbose(env, "unknown return type %d of func %s#%d\n", 2842 verbose(env, "unknown return type %d of func %s#%d\n",
2607 fn->ret_type, func_id_name(func_id), func_id); 2843 fn->ret_type, func_id_name(func_id), func_id);
@@ -3665,7 +3901,8 @@ static void reg_combine_min_max(struct bpf_reg_state *true_src,
3665 } 3901 }
3666} 3902}
3667 3903
3668static void mark_ptr_or_null_reg(struct bpf_reg_state *reg, u32 id, 3904static void mark_ptr_or_null_reg(struct bpf_func_state *state,
3905 struct bpf_reg_state *reg, u32 id,
3669 bool is_null) 3906 bool is_null)
3670{ 3907{
3671 if (reg_type_may_be_null(reg->type) && reg->id == id) { 3908 if (reg_type_may_be_null(reg->type) && reg->id == id) {
@@ -3691,11 +3928,13 @@ static void mark_ptr_or_null_reg(struct bpf_reg_state *reg, u32 id,
3691 } else if (reg->type == PTR_TO_SOCKET_OR_NULL) { 3928 } else if (reg->type == PTR_TO_SOCKET_OR_NULL) {
3692 reg->type = PTR_TO_SOCKET; 3929 reg->type = PTR_TO_SOCKET;
3693 } 3930 }
3694 /* We don't need id from this point onwards anymore, thus we 3931 if (is_null || !reg_is_refcounted(reg)) {
3695 * should better reset it, so that state pruning has chances 3932 /* We don't need id from this point onwards anymore,
3696 * to take effect. 3933 * thus we should better reset it, so that state
3697 */ 3934 * pruning has chances to take effect.
3698 reg->id = 0; 3935 */
3936 reg->id = 0;
3937 }
3699 } 3938 }
3700} 3939}
3701 3940
@@ -3710,15 +3949,18 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno,
3710 u32 id = regs[regno].id; 3949 u32 id = regs[regno].id;
3711 int i, j; 3950 int i, j;
3712 3951
3952 if (reg_is_refcounted_or_null(&regs[regno]) && is_null)
3953 __release_reference_state(state, id);
3954
3713 for (i = 0; i < MAX_BPF_REG; i++) 3955 for (i = 0; i < MAX_BPF_REG; i++)
3714 mark_ptr_or_null_reg(&regs[i], id, is_null); 3956 mark_ptr_or_null_reg(state, &regs[i], id, is_null);
3715 3957
3716 for (j = 0; j <= vstate->curframe; j++) { 3958 for (j = 0; j <= vstate->curframe; j++) {
3717 state = vstate->frame[j]; 3959 state = vstate->frame[j];
3718 bpf_for_each_spilled_reg(i, state, reg) { 3960 bpf_for_each_spilled_reg(i, state, reg) {
3719 if (!reg) 3961 if (!reg)
3720 continue; 3962 continue;
3721 mark_ptr_or_null_reg(reg, id, is_null); 3963 mark_ptr_or_null_reg(state, reg, id, is_null);
3722 } 3964 }
3723 } 3965 }
3724} 3966}
@@ -4050,6 +4292,16 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
4050 if (err) 4292 if (err)
4051 return err; 4293 return err;
4052 4294
4295 /* Disallow usage of BPF_LD_[ABS|IND] with reference tracking, as
4296 * gen_ld_abs() may terminate the program at runtime, leading to
4297 * reference leak.
4298 */
4299 err = check_reference_leak(env);
4300 if (err) {
4301 verbose(env, "BPF_LD_[ABS|IND] cannot be mixed with socket references\n");
4302 return err;
4303 }
4304
4053 if (regs[BPF_REG_6].type != PTR_TO_CTX) { 4305 if (regs[BPF_REG_6].type != PTR_TO_CTX) {
4054 verbose(env, 4306 verbose(env,
4055 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n"); 4307 "at the time of BPF_LD_ABS|IND R6 != pointer to skb\n");
@@ -4542,6 +4794,14 @@ static bool stacksafe(struct bpf_func_state *old,
4542 return true; 4794 return true;
4543} 4795}
4544 4796
4797static bool refsafe(struct bpf_func_state *old, struct bpf_func_state *cur)
4798{
4799 if (old->acquired_refs != cur->acquired_refs)
4800 return false;
4801 return !memcmp(old->refs, cur->refs,
4802 sizeof(*old->refs) * old->acquired_refs);
4803}
4804
4545/* compare two verifier states 4805/* compare two verifier states
4546 * 4806 *
4547 * all states stored in state_list are known to be valid, since 4807 * all states stored in state_list are known to be valid, since
@@ -4587,6 +4847,9 @@ static bool func_states_equal(struct bpf_func_state *old,
4587 4847
4588 if (!stacksafe(old, cur, idmap)) 4848 if (!stacksafe(old, cur, idmap))
4589 goto out_free; 4849 goto out_free;
4850
4851 if (!refsafe(old, cur))
4852 goto out_free;
4590 ret = true; 4853 ret = true;
4591out_free: 4854out_free:
4592 kfree(idmap); 4855 kfree(idmap);
@@ -4868,6 +5131,7 @@ static int do_check(struct bpf_verifier_env *env)
4868 5131
4869 regs = cur_regs(env); 5132 regs = cur_regs(env);
4870 env->insn_aux_data[insn_idx].seen = true; 5133 env->insn_aux_data[insn_idx].seen = true;
5134
4871 if (class == BPF_ALU || class == BPF_ALU64) { 5135 if (class == BPF_ALU || class == BPF_ALU64) {
4872 err = check_alu_op(env, insn); 5136 err = check_alu_op(env, insn);
4873 if (err) 5137 if (err)
@@ -5032,6 +5296,10 @@ static int do_check(struct bpf_verifier_env *env)
5032 continue; 5296 continue;
5033 } 5297 }
5034 5298
5299 err = check_reference_leak(env);
5300 if (err)
5301 return err;
5302
5035 /* eBPF calling convetion is such that R0 is used 5303 /* eBPF calling convetion is such that R0 is used
5036 * to return the value from eBPF program. 5304 * to return the value from eBPF program.
5037 * Make sure that it's readable at this time 5305 * Make sure that it's readable at this time