author	Jiong Wang <jiong.wang@netronome.com>	2018-05-02 16:17:17 -0400
committer	Daniel Borkmann <daniel@iogearbox.net>	2018-05-04 05:58:35 -0400
commit	f910cefa32b6cdabc96b126bcfc46d8940b1dc45 (patch)
tree	9749a7702e13b451ed514d4d8711bb28af91c8ea /kernel
parent	5234ccf2be0e51b2cd052fe5e2fcc978e67aebc7 (diff)
bpf: unify main prog and subprog
Currently, the verifier treats the main prog and subprogs differently. All detected subprogs are kept in env->subprog_starts, while the main prog is not kept there; instead, it is implicitly defined as the prog starting at instruction 0. There is actually no difference between the main prog and a subprog, so it is better to unify them and register all detected progs in env->subprog_starts. This also helps simplify some of the code logic.

Signed-off-by: Jiong Wang <jiong.wang@netronome.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
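The net effect of the diff below is a new indexing convention: the entry function is registered as subprog 0 and every bpf-to-bpf callee follows at index 1..n, so env->subprog_cnt now counts all functions including the main one, and the index returned by find_subprog() is used directly without the former "+ 1" adjustment. A minimal, illustrative sketch of iterating functions under this convention (not part of the patch; it only reuses the env->subprog_starts / env->subprog_cnt fields touched below):

static void walk_functions(struct bpf_verifier_env *env)
{
	int i, start, end;

	/* subprog 0 is the main program; check_subprogs() now calls
	 * add_subprog(env, 0) first, so subprog_starts[0] == 0.
	 */
	for (i = 0; i < env->subprog_cnt; i++) {
		start = env->subprog_starts[i];
		/* a function ends where the next one starts, or at prog->len */
		if (i + 1 == env->subprog_cnt)
			end = env->prog->len;
		else
			end = env->subprog_starts[i + 1];
		verbose(env, "func %d: insns [%d, %d)\n", i, start, end);
	}
}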
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/bpf/verifier.c	57
1 file changed, 31 insertions(+), 26 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 6ba10a83909d..8e8e582a7c03 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -768,7 +768,7 @@ static int add_subprog(struct bpf_verifier_env *env, int off)
 	ret = find_subprog(env, off);
 	if (ret >= 0)
 		return 0;
-	if (env->subprog_cnt >= BPF_MAX_SUBPROGS) {
+	if (env->subprog_cnt > BPF_MAX_SUBPROGS) {
 		verbose(env, "too many subprograms\n");
 		return -E2BIG;
 	}
@@ -784,6 +784,11 @@ static int check_subprogs(struct bpf_verifier_env *env)
 	struct bpf_insn *insn = env->prog->insnsi;
 	int insn_cnt = env->prog->len;
 
+	/* Add entry function. */
+	ret = add_subprog(env, 0);
+	if (ret < 0)
+		return ret;
+
 	/* determine subprog starts. The end is one before the next starts */
 	for (i = 0; i < insn_cnt; i++) {
 		if (insn[i].code != (BPF_JMP | BPF_CALL))
@@ -809,10 +814,10 @@ static int check_subprogs(struct bpf_verifier_env *env)
 
 	/* now check that all jumps are within the same subprog */
 	subprog_start = 0;
-	if (env->subprog_cnt == cur_subprog)
+	if (env->subprog_cnt == cur_subprog + 1)
 		subprog_end = insn_cnt;
 	else
-		subprog_end = env->subprog_starts[cur_subprog++];
+		subprog_end = env->subprog_starts[cur_subprog + 1];
 	for (i = 0; i < insn_cnt; i++) {
 		u8 code = insn[i].code;
 
@@ -836,11 +841,13 @@ next:
 				verbose(env, "last insn is not an exit or jmp\n");
 				return -EINVAL;
 			}
+			cur_subprog++;
 			subprog_start = subprog_end;
-			if (env->subprog_cnt == cur_subprog)
+			if (env->subprog_cnt == cur_subprog + 1)
 				subprog_end = insn_cnt;
 			else
-				subprog_end = env->subprog_starts[cur_subprog++];
+				subprog_end =
+					env->subprog_starts[cur_subprog + 1];
 		}
 	}
 	return 0;
@@ -1508,10 +1515,10 @@ process_func:
 		return -EACCES;
 	}
 continue_func:
-	if (env->subprog_cnt == subprog)
+	if (env->subprog_cnt == subprog + 1)
 		subprog_end = insn_cnt;
 	else
-		subprog_end = env->subprog_starts[subprog];
+		subprog_end = env->subprog_starts[subprog + 1];
 	for (; i < subprog_end; i++) {
 		if (insn[i].code != (BPF_JMP | BPF_CALL))
 			continue;
@@ -1529,7 +1536,6 @@ continue_func:
 				  i);
 			return -EFAULT;
 		}
-		subprog++;
 		frame++;
 		if (frame >= MAX_CALL_FRAMES) {
 			WARN_ONCE(1, "verifier bug. Call stack is too deep\n");
@@ -1561,7 +1567,6 @@ static int get_callee_stack_depth(struct bpf_verifier_env *env,
 			  start);
 		return -EFAULT;
 	}
-	subprog++;
 	return env->subprog_stack_depth[subprog];
 }
 #endif
@@ -2099,7 +2104,7 @@ static int check_map_func_compatibility(struct bpf_verifier_env *env,
 	case BPF_FUNC_tail_call:
 		if (map->map_type != BPF_MAP_TYPE_PROG_ARRAY)
 			goto error;
-		if (env->subprog_cnt) {
+		if (env->subprog_cnt > 1) {
 			verbose(env, "tail_calls are not allowed in programs with bpf-to-bpf calls\n");
 			return -EINVAL;
 		}
@@ -2272,7 +2277,7 @@ static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn,
 			/* remember the callsite, it will be used by bpf_exit */
 			*insn_idx /* callsite */,
 			state->curframe + 1 /* frameno within this callchain */,
-			subprog + 1 /* subprog number within this prog */);
+			subprog /* subprog number within this prog */);
 
 	/* copy r1 - r5 args that callee can access */
 	for (i = BPF_REG_1; i <= BPF_REG_5; i++)
@@ -3889,7 +3894,7 @@ static int check_ld_abs(struct bpf_verifier_env *env, struct bpf_insn *insn)
 		return -EINVAL;
 	}
 
-	if (env->subprog_cnt) {
+	if (env->subprog_cnt > 1) {
 		/* when program has LD_ABS insn JITs and interpreter assume
 		 * that r1 == ctx == skb which is not the case for callees
 		 * that can have arbitrary arguments. It's problematic
@@ -4920,11 +4925,11 @@ process_bpf_exit:
 
 	verbose(env, "processed %d insns (limit %d), stack depth ",
 		insn_processed, BPF_COMPLEXITY_LIMIT_INSNS);
-	for (i = 0; i < env->subprog_cnt + 1; i++) {
+	for (i = 0; i < env->subprog_cnt; i++) {
 		u32 depth = env->subprog_stack_depth[i];
 
 		verbose(env, "%d", depth);
-		if (i + 1 < env->subprog_cnt + 1)
+		if (i + 1 < env->subprog_cnt)
 			verbose(env, "+");
 	}
 	verbose(env, "\n");
@@ -5301,7 +5306,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 	void *old_bpf_func;
 	int err = -ENOMEM;
 
-	if (env->subprog_cnt == 0)
+	if (env->subprog_cnt <= 1)
 		return 0;
 
 	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
@@ -5317,7 +5322,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 		/* temporarily remember subprog id inside insn instead of
 		 * aux_data, since next loop will split up all insns into funcs
 		 */
-		insn->off = subprog + 1;
+		insn->off = subprog;
 		/* remember original imm in case JIT fails and fallback
 		 * to interpreter will be needed
 		 */
@@ -5326,16 +5331,16 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 		insn->imm = 1;
 	}
 
-	func = kzalloc(sizeof(prog) * (env->subprog_cnt + 1), GFP_KERNEL);
+	func = kzalloc(sizeof(prog) * env->subprog_cnt, GFP_KERNEL);
 	if (!func)
 		return -ENOMEM;
 
-	for (i = 0; i <= env->subprog_cnt; i++) {
+	for (i = 0; i < env->subprog_cnt; i++) {
 		subprog_start = subprog_end;
-		if (env->subprog_cnt == i)
+		if (env->subprog_cnt == i + 1)
 			subprog_end = prog->len;
 		else
-			subprog_end = env->subprog_starts[i];
+			subprog_end = env->subprog_starts[i + 1];
 
 		len = subprog_end - subprog_start;
 		func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER);
@@ -5365,7 +5370,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 	 * now populate all bpf_calls with correct addresses and
 	 * run last pass of JIT
 	 */
-	for (i = 0; i <= env->subprog_cnt; i++) {
+	for (i = 0; i < env->subprog_cnt; i++) {
 		insn = func[i]->insnsi;
 		for (j = 0; j < func[i]->len; j++, insn++) {
 			if (insn->code != (BPF_JMP | BPF_CALL) ||
@@ -5378,7 +5383,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 				__bpf_call_base;
 		}
 	}
-	for (i = 0; i <= env->subprog_cnt; i++) {
+	for (i = 0; i < env->subprog_cnt; i++) {
 		old_bpf_func = func[i]->bpf_func;
 		tmp = bpf_int_jit_compile(func[i]);
 		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
@@ -5392,7 +5397,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 	/* finally lock prog and jit images for all functions and
 	 * populate kallsysm
 	 */
-	for (i = 0; i <= env->subprog_cnt; i++) {
+	for (i = 0; i < env->subprog_cnt; i++) {
 		bpf_prog_lock_ro(func[i]);
 		bpf_prog_kallsyms_add(func[i]);
 	}
@@ -5409,7 +5414,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 			continue;
 		insn->off = env->insn_aux_data[i].call_imm;
 		subprog = find_subprog(env, i + insn->off + 1);
-		addr = (unsigned long)func[subprog + 1]->bpf_func;
+		addr = (unsigned long)func[subprog]->bpf_func;
 		addr &= PAGE_MASK;
 		insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
 			addr - __bpf_call_base;
@@ -5418,10 +5423,10 @@ static int jit_subprogs(struct bpf_verifier_env *env)
 	prog->jited = 1;
 	prog->bpf_func = func[0]->bpf_func;
 	prog->aux->func = func;
-	prog->aux->func_cnt = env->subprog_cnt + 1;
+	prog->aux->func_cnt = env->subprog_cnt;
 	return 0;
 out_free:
-	for (i = 0; i <= env->subprog_cnt; i++)
+	for (i = 0; i < env->subprog_cnt; i++)
 		if (func[i])
 			bpf_jit_free(func[i]);
 	kfree(func);