Diffstat (limited to 'kernel/bpf/verifier.c')

 kernel/bpf/verifier.c | 247
 1 file changed, 169 insertions(+), 78 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index eb1a596aebd3..d5e1a6c4165d 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -22,6 +22,7 @@
 #include <linux/stringify.h>
 #include <linux/bsearch.h>
 #include <linux/sort.h>
+#include <linux/perf_event.h>
 
 #include "disasm.h"
 
@@ -164,6 +165,8 @@ struct bpf_call_arg_meta {
 	bool pkt_access;
 	int regno;
 	int access_size;
+	s64 msize_smax_value;
+	u64 msize_umax_value;
 };
 
 static DEFINE_MUTEX(bpf_verifier_lock);
@@ -738,18 +741,19 @@ enum reg_arg_type {
 
 static int cmp_subprogs(const void *a, const void *b)
 {
-	return *(int *)a - *(int *)b;
+	return ((struct bpf_subprog_info *)a)->start -
+	       ((struct bpf_subprog_info *)b)->start;
 }
 
 static int find_subprog(struct bpf_verifier_env *env, int off)
 {
-	u32 *p;
+	struct bpf_subprog_info *p;
 
-	p = bsearch(&off, env->subprog_starts, env->subprog_cnt,
-		    sizeof(env->subprog_starts[0]), cmp_subprogs);
+	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
+		    sizeof(env->subprog_info[0]), cmp_subprogs);
 	if (!p)
 		return -ENOENT;
-	return p - env->subprog_starts;
+	return p - env->subprog_info;
 
 }
 
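An aside on the bsearch() in find_subprog() above: the key handed in is a bare integer (&off) while the array elements are struct bpf_subprog_info, yet cmp_subprogs() casts both sides to the struct type and compares ->start. That is only well-defined because start is the first member of the struct, so the integer key aliases it. A standalone userspace sketch of the same pattern (types and values here are illustrative, not from the patch):

#include <stdio.h>
#include <stdlib.h>

struct subprog_info { int start; int stack_depth; };

/* compares the 'start' of two (possibly fake) array elements */
static int cmp_subprogs(const void *a, const void *b)
{
	return ((const struct subprog_info *)a)->start -
	       ((const struct subprog_info *)b)->start;
}

int main(void)
{
	struct subprog_info arr[] = { { 0, 0 }, { 8, 16 }, { 14, 32 } };
	int off = 8; /* aliases ->start of a pretend element */
	struct subprog_info *p;

	p = bsearch(&off, arr, 3, sizeof(arr[0]), cmp_subprogs);
	printf("found index %ld\n", (long)(p ? p - arr : -1));
	return 0;
}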
@@ -769,18 +773,24 @@ static int add_subprog(struct bpf_verifier_env *env, int off)
 		verbose(env, "too many subprograms\n");
 		return -E2BIG;
 	}
-	env->subprog_starts[env->subprog_cnt++] = off;
-	sort(env->subprog_starts, env->subprog_cnt,
-	     sizeof(env->subprog_starts[0]), cmp_subprogs, NULL);
+	env->subprog_info[env->subprog_cnt++].start = off;
+	sort(env->subprog_info, env->subprog_cnt,
+	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
 	return 0;
 }
 
 static int check_subprogs(struct bpf_verifier_env *env)
 {
 	int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
+	struct bpf_subprog_info *subprog = env->subprog_info;
 	struct bpf_insn *insn = env->prog->insnsi;
 	int insn_cnt = env->prog->len;
 
+	/* Add entry function. */
+	ret = add_subprog(env, 0);
+	if (ret < 0)
+		return ret;
+
 	/* determine subprog starts. The end is one before the next starts */
 	for (i = 0; i < insn_cnt; i++) {
 		if (insn[i].code != (BPF_JMP | BPF_CALL))
@@ -800,16 +810,18 @@ static int check_subprogs(struct bpf_verifier_env *env)
 			return ret;
 	}
 
+	/* Add a fake 'exit' subprog which could simplify subprog iteration
+	 * logic. 'subprog_cnt' should not be increased.
+	 */
+	subprog[env->subprog_cnt].start = insn_cnt;
+
 	if (env->log.level > 1)
 		for (i = 0; i < env->subprog_cnt; i++)
-			verbose(env, "func#%d @%d\n", i, env->subprog_starts[i]);
+			verbose(env, "func#%d @%d\n", i, subprog[i].start);
 
 	/* now check that all jumps are within the same subprog */
-	subprog_start = 0;
-	if (env->subprog_cnt == cur_subprog)
-		subprog_end = insn_cnt;
-	else
-		subprog_end = env->subprog_starts[cur_subprog++];
+	subprog_start = subprog[cur_subprog].start;
+	subprog_end = subprog[cur_subprog + 1].start;
 	for (i = 0; i < insn_cnt; i++) {
 		u8 code = insn[i].code;
 
@@ -834,10 +846,9 @@ next:
 				return -EINVAL;
 			}
 			subprog_start = subprog_end;
-			if (env->subprog_cnt == cur_subprog)
-				subprog_end = insn_cnt;
-			else
-				subprog_end = env->subprog_starts[cur_subprog++];
+			cur_subprog++;
+			if (cur_subprog < env->subprog_cnt)
+				subprog_end = subprog[cur_subprog + 1].start;
 		}
 	}
 	return 0;
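The fake 'exit' subprog added above is a sentinel: with subprog[subprog_cnt].start set to insn_cnt, every subprogram's end is simply the next entry's start, and the "last subprog" special case disappears from this loop and from the later iterations in this patch. A minimal standalone sketch of the iteration the sentinel enables (data invented for illustration):

#include <stdio.h>

struct subprog_info { int start; };

int main(void)
{
	/* three subprogs in a 20-insn program; index 3 is the sentinel */
	struct subprog_info subprog[] = { { 0 }, { 8 }, { 14 }, { 20 } };
	int subprog_cnt = 3; /* the sentinel is not counted */

	for (int i = 0; i < subprog_cnt; i++)
		printf("func#%d: insns [%d, %d)\n",
		       i, subprog[i].start, subprog[i + 1].start);
	return 0;
}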
@@ -1470,13 +1481,13 @@ static int update_stack_depth(struct bpf_verifier_env *env,
 			      const struct bpf_func_state *func,
 			      int off)
 {
-	u16 stack = env->subprog_stack_depth[func->subprogno];
+	u16 stack = env->subprog_info[func->subprogno].stack_depth;
 
 	if (stack >= -off)
 		return 0;
 
 	/* update known max for given subprogram */
-	env->subprog_stack_depth[func->subprogno] = -off;
+	env->subprog_info[func->subprogno].stack_depth = -off;
 	return 0;
 }
 
@@ -1488,9 +1499,9 @@ static int update_stack_depth(struct bpf_verifier_env *env,
  */
 static int check_max_stack_depth(struct bpf_verifier_env *env)
 {
-	int depth = 0, frame = 0, subprog = 0, i = 0, subprog_end;
+	int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
+	struct bpf_subprog_info *subprog = env->subprog_info;
 	struct bpf_insn *insn = env->prog->insnsi;
-	int insn_cnt = env->prog->len;
 	int ret_insn[MAX_CALL_FRAMES];
 	int ret_prog[MAX_CALL_FRAMES];
 
@@ -1498,17 +1509,14 @@ process_func:
 	/* round up to 32-bytes, since this is granularity
 	 * of interpreter stack size
 	 */
-	depth += round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32);
+	depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
 	if (depth > MAX_BPF_STACK) {
 		verbose(env, "combined stack size of %d calls is %d. Too large\n",
 			frame + 1, depth);
 		return -EACCES;
 	}
 continue_func:
-	if (env->subprog_cnt == subprog)
-		subprog_end = insn_cnt;
-	else
-		subprog_end = env->subprog_starts[subprog];
+	subprog_end = subprog[idx + 1].start;
 	for (; i < subprog_end; i++) {
 		if (insn[i].code != (BPF_JMP | BPF_CALL))
 			continue;
@@ -1516,17 +1524,16 @@ continue_func:
 			continue;
 		/* remember insn and function to return to */
 		ret_insn[frame] = i + 1;
-		ret_prog[frame] = subprog;
+		ret_prog[frame] = idx;
 
 		/* find the callee */
 		i = i + insn[i].imm + 1;
-		subprog = find_subprog(env, i);
-		if (subprog < 0) {
+		idx = find_subprog(env, i);
+		if (idx < 0) {
 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
 				  i);
 			return -EFAULT;
 		}
-		subprog++;
 		frame++;
 		if (frame >= MAX_CALL_FRAMES) {
 			WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
@@ -1539,10 +1546,10 @@
 	 */
 	if (frame == 0)
 		return 0;
-	depth -= round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32);
+	depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
 	frame--;
 	i = ret_insn[frame];
-	subprog = ret_prog[frame];
+	idx = ret_prog[frame];
 	goto continue_func;
 }
 
@@ -1558,8 +1565,7 @@ static int get_callee_stack_depth(struct bpf_verifier_env *env,
 			  start);
 		return -EFAULT;
 	}
-	subprog++;
-	return env->subprog_stack_depth[subprog];
+	return env->subprog_info[subprog].stack_depth;
 }
 #endif
 
@@ -1984,6 +1990,12 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno,
 	} else if (arg_type_is_mem_size(arg_type)) {
 		bool zero_size_allowed = (arg_type == ARG_CONST_SIZE_OR_ZERO);
 
+		/* remember the mem_size which may be used later
+		 * to refine return values.
+		 */
+		meta->msize_smax_value = reg->smax_value;
+		meta->msize_umax_value = reg->umax_value;
+
 		/* The register is SCALAR_VALUE; the access check
 		 * happens using its boundaries.
 		 */
@@ -2061,8 +2073,11 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
 		if (func_id != BPF_FUNC_redirect_map)
 			goto error;
 		break;
-	/* Restrict bpf side of cpumap, open when use-cases appear */
+	/* Restrict bpf side of cpumap and xskmap, open when use-cases
+	 * appear.
+	 */
 	case BPF_MAP_TYPE_CPUMAP:
+	case BPF_MAP_TYPE_XSKMAP:
 		if (func_id != BPF_FUNC_redirect_map)
 			goto error;
 		break;
@@ -2087,7 +2102,7 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
 	case BPF_FUNC_tail_call:
 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
 			goto error;
-		if (env->subprog_cnt) {
+		if (env->subprog_cnt > 1) {
 			verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
 			return -EINVAL;
 		}
@@ -2109,7 +2124,8 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
 		break;
 	case BPF_FUNC_redirect_map:
 		if (map->map_type != BPF_MAP_TYPE_DEVMAP &&
-		    map->map_type != BPF_MAP_TYPE_CPUMAP)
+		    map->map_type != BPF_MAP_TYPE_CPUMAP &&
+		    map->map_type != BPF_MAP_TYPE_XSKMAP)
 			goto error;
 		break;
 	case BPF_FUNC_sk_redirect_map:
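For context, the XSKMAP entries above make bpf_redirect_map() usable for steering packets to AF_XDP sockets from XDP. A hypothetical program sketch, written against modern libbpf conventions (which postdate this patch; map and section names are illustrative):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, 64);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} xsks_map SEC(".maps");

SEC("xdp")
int redirect_to_xsk(struct xdp_md *ctx)
{
	/* key by receive queue; aborts if no socket is bound there */
	return bpf_redirect_map(&xsks_map, ctx->rx_queue_index, 0);
}

char _license[] SEC("license") = "GPL";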
@@ -2259,7 +2275,7 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 			/* remember the callsite, it will be used by bpf_exit */
 			*insn_idx /* callsite */,
 			state->curframe + 1 /* frameno within this callchain */,
-			subprog + 1 /* subprog number within this prog */);
+			subprog /* subprog number within this prog */);
 
 	/* copy r1 - r5 args that callee can access */
 	for (i = BPF_REG_1; i <= BPF_REG_5; i++)
@@ -2323,6 +2339,23 @@ static int prepare_func_exit(struct bpf_verifier_env *env, int *insn_idx)
 	return 0;
 }
 
+static void do_refine_retval_range(struct bpf_reg_state *regs, int ret_type,
+				   int func_id,
+				   struct bpf_call_arg_meta *meta)
+{
+	struct bpf_reg_state *ret_reg = &regs[BPF_REG_0];
+
+	if (ret_type != RET_INTEGER ||
+	    (func_id != BPF_FUNC_get_stack &&
+	     func_id != BPF_FUNC_probe_read_str))
+		return;
+
+	ret_reg->smax_value = meta->msize_smax_value;
+	ret_reg->umax_value = meta->msize_umax_value;
+	__reg_deduce_bounds(ret_reg);
+	__reg_bound_offset(ret_reg);
+}
+
 static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 {
 	const struct bpf_func_proto *fn = NULL;
@@ -2446,10 +2479,30 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn_idx)
 		return -EINVAL;
 	}
 
+	do_refine_retval_range(regs, fn->ret_type, func_id, &meta);
+
 	err = check_map_func_compatibility(env, meta.map_ptr, func_id);
 	if (err)
 		return err;
 
+	if (func_id == BPF_FUNC_get_stack && !env->prog->has_callchain_buf) {
+		const char *err_str;
+
+#ifdef CONFIG_PERF_EVENTS
+		err = get_callchain_buffers(sysctl_perf_event_max_stack);
+		err_str = "cannot get callchain buffer for func %s#%d\n";
+#else
+		err = -ENOTSUPP;
+		err_str = "func %s#%d not supported without CONFIG_PERF_EVENTS\n";
+#endif
+		if (err) {
+			verbose(env, err_str, func_id_name(func_id), func_id);
+			return err;
+		}
+
+		env->prog->has_callchain_buf = true;
+	}
+
 	if (changes_data)
 		clear_all_pkt_pointers(env);
 	return 0;
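Together, do_refine_retval_range() and the callchain-buffer setup above are what make a pattern like the following verifiable: once BPF_FUNC_get_stack's return value is clamped to the size argument that was passed in, it can be fed back to another helper as a variable length. A hypothetical tracing program sketch, using modern libbpf conventions that postdate this patch (names are illustrative; a real program would use a per-CPU scratch buffer rather than a shared global):

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define MAX_STACK_BYTES 512

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} perf_map SEC(".maps");

char stack[MAX_STACK_BYTES];

SEC("raw_tracepoint/sys_enter")
int grab_stack(void *ctx)
{
	long usize = bpf_get_stack(ctx, stack, MAX_STACK_BYTES,
				   BPF_F_USER_STACK);

	if (usize <= 0)
		return 0;
	/* usize is now known to lie in [0, MAX_STACK_BYTES], so the
	 * verifier accepts it as a variable-size helper argument.
	 */
	bpf_perf_event_output(ctx, &perf_map, BPF_F_CURRENT_CPU,
			      stack, usize);
	return 0;
}

char _license[] SEC("license") = "GPL";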
@@ -2894,10 +2947,7 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 			dst_reg->umin_value <<= umin_val;
 			dst_reg->umax_value <<= umax_val;
 		}
-		if (src_known)
-			dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
-		else
-			dst_reg->var_off = tnum_lshift(tnum_unknown, umin_val);
+		dst_reg->var_off = tnum_lshift(dst_reg->var_off, umin_val);
 		/* We may learn something more from the var_off */
 		__update_reg_bounds(dst_reg);
 		break;
@@ -2925,16 +2975,35 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		 */
 		dst_reg->smin_value = S64_MIN;
 		dst_reg->smax_value = S64_MAX;
-		if (src_known)
-			dst_reg->var_off = tnum_rshift(dst_reg->var_off,
-						       umin_val);
-		else
-			dst_reg->var_off = tnum_rshift(tnum_unknown, umin_val);
+		dst_reg->var_off = tnum_rshift(dst_reg->var_off, umin_val);
 		dst_reg->umin_value >>= umax_val;
 		dst_reg->umax_value >>= umin_val;
 		/* We may learn something more from the var_off */
 		__update_reg_bounds(dst_reg);
 		break;
+	case BPF_ARSH:
+		if (umax_val >= insn_bitness) {
+			/* Shifts greater than 31 or 63 are undefined.
+			 * This includes shifts by a negative number.
+			 */
+			mark_reg_unknown(env, regs, insn->dst_reg);
+			break;
+		}
+
+		/* Upon reaching here, src_known is true and
+		 * umax_val is equal to umin_val.
+		 */
+		dst_reg->smin_value >>= umin_val;
+		dst_reg->smax_value >>= umin_val;
+		dst_reg->var_off = tnum_arshift(dst_reg->var_off, umin_val);
+
+		/* blow away the dst_reg umin_value/umax_value and rely on
+		 * dst_reg var_off to refine the result.
+		 */
+		dst_reg->umin_value = 0;
+		dst_reg->umax_value = U64_MAX;
+		__update_reg_bounds(dst_reg);
+		break;
 	default:
 		mark_reg_unknown(env, regs, insn->dst_reg);
 		break;
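The new BPF_ARSH case tracks bounds through an arithmetic right shift by a known amount: the shift preserves ordering, so the signed min/max shift directly, while the unsigned bounds are discarded and re-derived from var_off. A standalone illustration of the signed half, assuming arithmetic shift semantics for negative operands (implementation-defined in C but universal on relevant targets, and exactly what eBPF's ARSH specifies; values invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t smin = -100, smax = 100; /* tracked signed bounds */
	unsigned int k = 2;              /* known shift amount */

	/* arithmetic shift is monotonic, so bounds shift in place */
	smin >>= k; /* -100 >> 2 == -25 */
	smax >>= k; /*  100 >> 2 ==  25 */
	printf("after ARSH by %u: [%lld, %lld]\n",
	       k, (long long)smin, (long long)smax);
	return 0;
}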
@@ -3818,7 +3887,12 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
 		return -EINVAL;
 	}
 
-	if (env->subprog_cnt) {
+	if (!env->ops->gen_ld_abs) {
+		verbose(env, "bpf verifier is misconfigured\n");
+		return -EINVAL;
+	}
+
+	if (env->subprog_cnt > 1) {
 		/* when program has LD_ABS insn JITs and interpreter assume
 		 * that r1 == ctx == skb which is not the case for callees
 		 * that can have arbitrary arguments. It's problematic
@@ -4849,15 +4923,15 @@ process_bpf_exit:
 
 	verbose(env, "processed %d insns (limit %d), stack depth ",
 		insn_processed, BPF_COMPLEXITY_LIMIT_INSNS);
-	for (i = 0; i < env->subprog_cnt + 1; i++) {
-		u32 depth = env->subprog_stack_depth[i];
+	for (i = 0; i < env->subprog_cnt; i++) {
+		u32 depth = env->subprog_info[i].stack_depth;
 
 		verbose(env, "%d", depth);
-		if (i + 1 < env->subprog_cnt + 1)
+		if (i + 1 < env->subprog_cnt)
 			verbose(env, "+");
 	}
 	verbose(env, "\n");
-	env->prog->aux->stack_depth = env->subprog_stack_depth[0];
+	env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
 	return 0;
 }
 
@@ -4981,7 +5055,7 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
 			/* hold the map. If the program is rejected by verifier,
 			 * the map will be released by release_maps() or it
 			 * will be used by the valid program until it's unloaded
-			 * and all maps are released in free_bpf_prog_info()
+			 * and all maps are released in free_used_maps()
 			 */
 			map = bpf_map_inc(map, false);
 			if (IS_ERR(map)) {
@@ -5063,10 +5137,11 @@ static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len)
 
 	if (len == 1)
 		return;
-	for (i = 0; i < env->subprog_cnt; i++) {
-		if (env->subprog_starts[i] < off)
+	/* NOTE: fake 'exit' subprog should be updated as well. */
+	for (i = 0; i <= env->subprog_cnt; i++) {
+		if (env->subprog_info[i].start < off)
 			continue;
-		env->subprog_starts[i] += len - 1;
+		env->subprog_info[i].start += len - 1;
 	}
 }
 
@@ -5230,7 +5305,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 	void *old_bpf_func;
 	int err = -ENOMEM;
 
-	if (env->subprog_cnt == 0)
+	if (env->subprog_cnt <= 1)
 		return 0;
 
 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
@@ -5246,7 +5321,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 		/* temporarily remember subprog id inside insn instead of
 		 * aux_data, since next loop will split up all insns into funcs
 		 */
-		insn->off = subprog + 1;
+		insn->off = subprog;
 		/* remember original imm in case JIT fails and fallback
 		 * to interpreter will be needed
 		 */
@@ -5255,16 +5330,13 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 		insn->imm = 1;
 	}
 
-	func = kzalloc(sizeof(prog) * (env->subprog_cnt + 1), GFP_KERNEL);
+	func = kzalloc(sizeof(prog) * env->subprog_cnt, GFP_KERNEL);
 	if (!func)
 		return -ENOMEM;
 
-	for (i = 0; i <= env->subprog_cnt; i++) {
+	for (i = 0; i < env->subprog_cnt; i++) {
 		subprog_start = subprog_end;
-		if (env->subprog_cnt == i)
-			subprog_end = prog->len;
-		else
-			subprog_end = env->subprog_starts[i];
+		subprog_end = env->subprog_info[i + 1].start;
 
 		len = subprog_end - subprog_start;
 		func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER);
@@ -5281,7 +5353,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 		 * Long term would need debug info to populate names
 		 */
 		func[i]->aux->name[0] = 'F';
-		func[i]->aux->stack_depth = env->subprog_stack_depth[i];
+		func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
 		func[i]->jit_requested = 1;
 		func[i] = bpf_int_jit_compile(func[i]);
 		if (!func[i]->jited) {
@@ -5294,7 +5366,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 	 * now populate all bpf_calls with correct addresses and
 	 * run last pass of JIT
 	 */
-	for (i = 0; i <= env->subprog_cnt; i++) {
+	for (i = 0; i < env->subprog_cnt; i++) {
 		insn = func[i]->insnsi;
 		for (j = 0; j < func[i]->len; j++, insn++) {
 			if (insn->code != (BPF_JMP | BPF_CALL) ||
@@ -5307,7 +5379,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 				__bpf_call_base;
 		}
 	}
-	for (i = 0; i <= env->subprog_cnt; i++) {
+	for (i = 0; i < env->subprog_cnt; i++) {
 		old_bpf_func = func[i]->bpf_func;
 		tmp = bpf_int_jit_compile(func[i]);
 		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
@@ -5321,7 +5393,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 	/* finally lock prog and jit images for all functions and
 	 * populate kallsysm
 	 */
-	for (i = 0; i <= env->subprog_cnt; i++) {
+	for (i = 0; i < env->subprog_cnt; i++) {
 		bpf_prog_lock_ro(func[i]);
 		bpf_prog_kallsyms_add(func[i]);
 	}
@@ -5338,7 +5410,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 			continue;
 		insn->off = env->insn_aux_data[i].call_imm;
 		subprog = find_subprog(env, i + insn->off + 1);
-		addr = (unsigned long)func[subprog + 1]->bpf_func;
+		addr = (unsigned long)func[subprog]->bpf_func;
 		addr &= PAGE_MASK;
 		insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
 			    addr - __bpf_call_base;
@@ -5347,10 +5419,10 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 	prog->jited = 1;
 	prog->bpf_func = func[0]->bpf_func;
 	prog->aux->func = func;
-	prog->aux->func_cnt = env->subprog_cnt + 1;
+	prog->aux->func_cnt = env->subprog_cnt;
 	return 0;
 out_free:
-	for (i = 0; i <= env->subprog_cnt; i++)
+	for (i = 0; i < env->subprog_cnt; i++)
 		if (func[i])
 			bpf_jit_free(func[i]);
 	kfree(func);
@@ -5453,6 +5525,25 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
 			continue;
 		}
 
+		if (BPF_CLASS(insn->code) == BPF_LD &&
+		    (BPF_MODE(insn->code) == BPF_ABS ||
+		     BPF_MODE(insn->code) == BPF_IND)) {
+			cnt = env->ops->gen_ld_abs(insn, insn_buf);
+			if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
+				verbose(env, "bpf verifier is misconfigured\n");
+				return -EINVAL;
+			}
+
+			new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
+			if (!new_prog)
+				return -ENOMEM;
+
+			delta += cnt - 1;
+			env->prog = prog = new_prog;
+			insn = new_prog->insnsi + i + delta;
+			continue;
+		}
+
 		if (insn->code != (BPF_JMP | BPF_CALL))
 			continue;
 		if (insn->src_reg == BPF_PSEUDO_CALL)
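The 'delta += cnt - 1' bookkeeping in the new LD_ABS/LD_IND rewrite follows the usual patching pattern in this function: replacing one instruction with cnt instructions shifts every later instruction index by cnt - 1, and the running delta maps original indices onto the patched program. A standalone sketch of the arithmetic (numbers invented):

#include <stdio.h>

int main(void)
{
	int patch_at[] = { 3, 7 }; /* original insn indices to expand */
	int cnt[]      = { 4, 2 }; /* each expands to this many insns */
	int delta = 0;

	for (int i = 0; i < 2; i++) {
		printf("insn %d now at %d, becomes %d insns\n",
		       patch_at[i], patch_at[i] + delta, cnt[i]);
		delta += cnt[i] - 1;
	}
	return 0;
}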
@@ -5650,16 +5741,16 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr)
 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
 		env->strict_alignment = true;
 
+	ret = replace_map_fd_with_map_ptr(env);
+	if (ret < 0)
+		goto skip_full_check;
+
 	if (bpf_prog_is_dev_bound(env->prog->aux)) {
 		ret = bpf_prog_offload_verifier_prep(env);
 		if (ret)
-			goto err_unlock;
+			goto skip_full_check;
 	}
 
-	ret = replace_map_fd_with_map_ptr(env);
-	if (ret < 0)
-		goto skip_full_check;
-
 	env->explored_states = kcalloc(env->prog->len,
 				       sizeof(struct bpf_verifier_state_list *),
 				       GFP_USER);
@@ -5730,7 +5821,7 @@ skip_full_check:
 err_release_maps:
 	if (!env->prog->aux->used_maps)
 		/* if we didn't copy map pointers into bpf_prog_info, release
-		 * them now. Otherwise free_bpf_prog_info() will release them.
+		 * them now. Otherwise free_used_maps() will release them.
 		 */
 		release_maps(env);
 	*prog = env->prog;