-rw-r--r--	include/linux/bpf_verifier.h	 9
-rw-r--r--	kernel/bpf/verifier.c	62
2 files changed, 38 insertions, 33 deletions
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index f655b926e432..8f70dc181e23 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -173,6 +173,11 @@ static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
 
 #define BPF_MAX_SUBPROGS 256
 
+struct bpf_subprog_info {
+	u32 start; /* insn idx of function entry point */
+	u16 stack_depth; /* max. stack depth used by this function */
+};
+
 /* single container for all structs
  * one verifier_env per bpf_check() call
  */
@@ -191,9 +196,7 @@ struct bpf_verifier_env {
 	bool seen_direct_write;
 	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
 	struct bpf_verifier_log log;
-	u32 subprog_starts[BPF_MAX_SUBPROGS + 1];
-	/* computes the stack depth of each bpf function */
-	u16 subprog_stack_depth[BPF_MAX_SUBPROGS + 1];
+	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
 	u32 subprog_cnt;
 };
 
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 8e8e582a7c03..5b293b4abb70 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -741,18 +741,19 @@ enum reg_arg_type {
 
 static int cmp_subprogs(const void *a, const void *b)
 {
-	return *(int *)a - *(int *)b;
+	return ((struct bpf_subprog_info *)a)->start -
+	       ((struct bpf_subprog_info *)b)->start;
 }
 
 static int find_subprog(struct bpf_verifier_env *env, int off)
 {
-	u32 *p;
+	struct bpf_subprog_info *p;
 
-	p = bsearch(&off, env->subprog_starts, env->subprog_cnt,
-		    sizeof(env->subprog_starts[0]), cmp_subprogs);
+	p = bsearch(&off, env->subprog_info, env->subprog_cnt,
+		    sizeof(env->subprog_info[0]), cmp_subprogs);
 	if (!p)
 		return -ENOENT;
-	return p - env->subprog_starts;
+	return p - env->subprog_info;
 
 }
 
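Illustration (not part of the patch): the refactor replaces two parallel arrays with one array of per-subprogram records that is sorted and binary-searched by its .start field. A minimal user-space sketch of that pattern, using libc qsort/bsearch in place of the kernel's sort/bsearch; the struct and values below are invented for the example:

```c
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* hypothetical user-space stand-in for struct bpf_subprog_info */
struct subprog_info {
	uint32_t start;       /* insn idx of function entry point */
	uint16_t stack_depth; /* max. stack depth used by this function */
};

/* order records by entry point, as cmp_subprogs() does after the patch */
static int cmp_start(const void *a, const void *b)
{
	return (int)((const struct subprog_info *)a)->start -
	       (int)((const struct subprog_info *)b)->start;
}

int main(void)
{
	struct subprog_info info[] = {
		{ .start = 40, .stack_depth = 64 },
		{ .start = 0,  .stack_depth = 96 },
		{ .start = 12, .stack_depth = 32 },
	};
	struct subprog_info key = { .start = 12 };
	struct subprog_info *p;

	/* add_subprog() keeps the array sorted after each insertion */
	qsort(info, 3, sizeof(info[0]), cmp_start);

	/* find_subprog() maps an insn offset back to a subprogram index */
	p = bsearch(&key, info, 3, sizeof(info[0]), cmp_start);
	if (p)
		printf("subprog #%ld starts at %u, stack depth %u\n",
		       (long)(p - info), p->start, p->stack_depth);
	return 0;
}
```

One subtlety in the kernel version: find_subprog() still passes &off (an int pointer) as the bsearch key, and cmp_subprogs() reinterprets it as a struct bpf_subprog_info pointer. That only works because start remains the first member of the struct; the sketch above sidesteps the issue by building a full key record.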
@@ -772,15 +773,16 @@ static int add_subprog(struct bpf_verifier_env *env, int off)
 		verbose(env, "too many subprograms\n");
 		return -E2BIG;
 	}
-	env->subprog_starts[env->subprog_cnt++] = off;
-	sort(env->subprog_starts, env->subprog_cnt,
-	     sizeof(env->subprog_starts[0]), cmp_subprogs, NULL);
+	env->subprog_info[env->subprog_cnt++].start = off;
+	sort(env->subprog_info, env->subprog_cnt,
+	     sizeof(env->subprog_info[0]), cmp_subprogs, NULL);
 	return 0;
 }
 
 static int check_subprogs(struct bpf_verifier_env *env)
 {
 	int i, ret, subprog_start, subprog_end, off, cur_subprog = 0;
+	struct bpf_subprog_info *subprog = env->subprog_info;
 	struct bpf_insn *insn = env->prog->insnsi;
 	int insn_cnt = env->prog->len;
 
@@ -810,14 +812,14 @@ static int check_subprogs(struct bpf_verifier_env *env)
 
 	if (env->log.level > 1)
 		for (i = 0; i < env->subprog_cnt; i++)
-			verbose(env, "func#%d @%d\n", i, env->subprog_starts[i]);
+			verbose(env, "func#%d @%d\n", i, subprog[i].start);
 
 	/* now check that all jumps are within the same subprog */
 	subprog_start = 0;
 	if (env->subprog_cnt == cur_subprog + 1)
 		subprog_end = insn_cnt;
 	else
-		subprog_end = env->subprog_starts[cur_subprog + 1];
+		subprog_end = subprog[cur_subprog + 1].start;
 	for (i = 0; i < insn_cnt; i++) {
 		u8 code = insn[i].code;
 
@@ -846,8 +848,7 @@ next:
 			if (env->subprog_cnt == cur_subprog + 1)
 				subprog_end = insn_cnt;
 			else
-				subprog_end =
-					env->subprog_starts[cur_subprog + 1];
+				subprog_end = subprog[cur_subprog + 1].start;
 		}
 	}
 	return 0;
@@ -1480,13 +1481,13 @@ static int update_stack_depth(struct bpf_verifier_env *env,
 			      const struct bpf_func_state *func,
 			      int off)
 {
-	u16 stack = env->subprog_stack_depth[func->subprogno];
+	u16 stack = env->subprog_info[func->subprogno].stack_depth;
 
 	if (stack >= -off)
 		return 0;
 
 	/* update known max for given subprogram */
-	env->subprog_stack_depth[func->subprogno] = -off;
+	env->subprog_info[func->subprogno].stack_depth = -off;
 	return 0;
 }
 
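For context on what the stack_depth field records: update_stack_depth() keeps, per subprogram, the deepest frame-pointer-relative offset the verifier has seen the program access. A hedged, stand-alone sketch of that bookkeeping (offsets invented, not from the patch):

```c
#include <stdio.h>

/* sketch of the max-tracking in update_stack_depth(): 'off' is a negative,
 * fp-relative stack offset; keep the largest magnitude seen so far
 */
static unsigned short track_depth(unsigned short cur_depth, int off)
{
	unsigned short need = (unsigned short)(-off);

	return cur_depth >= need ? cur_depth : need;
}

int main(void)
{
	unsigned short depth = 0;

	depth = track_depth(depth, -64);	/* access at fp-64 -> depth 64  */
	depth = track_depth(depth, -96);	/* access at fp-96 -> depth 96  */
	depth = track_depth(depth, -32);	/* shallower access, unchanged  */
	printf("max stack depth: %u\n", depth);	/* prints 96 */
	return 0;
}
```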
@@ -1498,7 +1499,8 @@ static int update_stack_depth(struct bpf_verifier_env *env,
  */
 static int check_max_stack_depth(struct bpf_verifier_env *env)
 {
-	int depth = 0, frame = 0, subprog = 0, i = 0, subprog_end;
+	int depth = 0, frame = 0, idx = 0, i = 0, subprog_end;
+	struct bpf_subprog_info *subprog = env->subprog_info;
 	struct bpf_insn *insn = env->prog->insnsi;
 	int insn_cnt = env->prog->len;
 	int ret_insn[MAX_CALL_FRAMES];
@@ -1508,17 +1510,17 @@ process_func:
 	/* round up to 32-bytes, since this is granularity
 	 * of interpreter stack size
 	 */
-	depth += round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32);
+	depth += round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
 	if (depth > MAX_BPF_STACK) {
 		verbose(env, "combined stack size of %d calls is %d. Too large\n",
 			frame + 1, depth);
 		return -EACCES;
 	}
 continue_func:
-	if (env->subprog_cnt == subprog + 1)
+	if (env->subprog_cnt == idx + 1)
 		subprog_end = insn_cnt;
 	else
-		subprog_end = env->subprog_starts[subprog + 1];
+		subprog_end = subprog[idx + 1].start;
 	for (; i < subprog_end; i++) {
 		if (insn[i].code != (BPF_JMP | BPF_CALL))
 			continue;
@@ -1526,12 +1528,12 @@ continue_func:
 			continue;
 		/* remember insn and function to return to */
 		ret_insn[frame] = i + 1;
-		ret_prog[frame] = subprog;
+		ret_prog[frame] = idx;
 
 		/* find the callee */
 		i = i + insn[i].imm + 1;
-		subprog = find_subprog(env, i);
-		if (subprog < 0) {
+		idx = find_subprog(env, i);
+		if (idx < 0) {
 			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
 				  i);
 			return -EFAULT;
@@ -1548,10 +1550,10 @@ continue_func:
 	 */
 	if (frame == 0)
 		return 0;
-	depth -= round_up(max_t(u32, env->subprog_stack_depth[subprog], 1), 32);
+	depth -= round_up(max_t(u32, subprog[idx].stack_depth, 1), 32);
 	frame--;
 	i = ret_insn[frame];
-	subprog = ret_prog[frame];
+	idx = ret_prog[frame];
 	goto continue_func;
 }
 
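The walk above sums the per-frame depths along every possible call chain. A hedged arithmetic sketch of that accounting (depths invented; each frame is rounded up to the interpreter's 32-byte granularity as in the loop above, and MAX_BPF_STACK is 512 bytes):

```c
#include <stdio.h>

#define MAX_BPF_STACK 512

/* same per-frame rounding the verifier applies: at least one 32-byte granule */
static unsigned int frame_depth(unsigned int stack_depth)
{
	unsigned int d = stack_depth ? stack_depth : 1;

	return (d + 31) & ~31u;
}

int main(void)
{
	/* hypothetical depths along one chain: main prog -> subprog1 -> subprog2 */
	unsigned int depths[] = { 100, 50, 0 };
	unsigned int total = 0;

	for (int i = 0; i < 3; i++)
		total += frame_depth(depths[i]);	/* 128 + 64 + 32 = 224 */

	printf("combined stack size of %d calls is %u (%s)\n", 3, total,
	       total > MAX_BPF_STACK ? "too large" : "ok");
	return 0;
}
```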
@@ -1567,7 +1569,7 @@ static int get_callee_stack_depth(struct bpf_verifier_env *env,
 			  start);
 		return -EFAULT;
 	}
-	return env->subprog_stack_depth[subprog];
+	return env->subprog_info[subprog].stack_depth;
 }
 #endif
 
@@ -4926,14 +4928,14 @@ process_bpf_exit:
 	verbose(env, "processed %d insns (limit %d), stack depth ",
 		insn_processed, BPF_COMPLEXITY_LIMIT_INSNS);
 	for (i = 0; i < env->subprog_cnt; i++) {
-		u32 depth = env->subprog_stack_depth[i];
+		u32 depth = env->subprog_info[i].stack_depth;
 
 		verbose(env, "%d", depth);
 		if (i + 1 < env->subprog_cnt)
 			verbose(env, "+");
 	}
 	verbose(env, "\n");
-	env->prog->aux->stack_depth = env->subprog_stack_depth[0];
+	env->prog->aux->stack_depth = env->subprog_info[0].stack_depth;
 	return 0;
 }
 
@@ -5140,9 +5142,9 @@ static void adjust_subprog_starts(struct bpf_verifier_env *env, u32 off, u32 len
 	if (len == 1)
 		return;
 	for (i = 0; i < env->subprog_cnt; i++) {
-		if (env->subprog_starts[i] < off)
+		if (env->subprog_info[i].start < off)
 			continue;
-		env->subprog_starts[i] += len - 1;
+		env->subprog_info[i].start += len - 1;
 	}
 }
 
@@ -5340,7 +5342,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 		if (env->subprog_cnt == i + 1)
 			subprog_end = prog->len;
 		else
-			subprog_end = env->subprog_starts[i + 1];
+			subprog_end = env->subprog_info[i + 1].start;
 
 		len = subprog_end - subprog_start;
 		func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER);
@@ -5357,7 +5359,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 		 * Long term would need debug info to populate names
 		 */
 		func[i]->aux->name[0] = 'F';
-		func[i]->aux->stack_depth = env->subprog_stack_depth[i];
+		func[i]->aux->stack_depth = env->subprog_info[i].stack_depth;
 		func[i]->jit_requested = 1;
 		func[i] = bpf_int_jit_compile(func[i]);
 		if (!func[i]->jited) {
