about summary refs log tree commit diff stats
path: root/kernel/bpf
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/bpf')
-rw-r--r--kernel/bpf/syscall.c56
-rw-r--r--kernel/bpf/verifier.c57
2 files changed, 57 insertions, 56 deletions
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 7af0dcc5d755..48c914b983bd 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -586,59 +586,6 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl)
586 list_add(&tl->list_node, &bpf_prog_types); 586 list_add(&tl->list_node, &bpf_prog_types);
587} 587}
588 588
589/* fixup insn->imm field of bpf_call instructions:
590 * if (insn->imm == BPF_FUNC_map_lookup_elem)
591 * insn->imm = bpf_map_lookup_elem - __bpf_call_base;
592 * else if (insn->imm == BPF_FUNC_map_update_elem)
593 * insn->imm = bpf_map_update_elem - __bpf_call_base;
594 * else ...
595 *
596 * this function is called after eBPF program passed verification
597 */
598static void fixup_bpf_calls(struct bpf_prog *prog)
599{
600 const struct bpf_func_proto *fn;
601 int i;
602
603 for (i = 0; i < prog->len; i++) {
604 struct bpf_insn *insn = &prog->insnsi[i];
605
606 if (insn->code == (BPF_JMP | BPF_CALL)) {
607 /* we reach here when program has bpf_call instructions
608 * and it passed bpf_check(), means that
609 * ops->get_func_proto must have been supplied, check it
610 */
611 BUG_ON(!prog->aux->ops->get_func_proto);
612
613 if (insn->imm == BPF_FUNC_get_route_realm)
614 prog->dst_needed = 1;
615 if (insn->imm == BPF_FUNC_get_prandom_u32)
616 bpf_user_rnd_init_once();
617 if (insn->imm == BPF_FUNC_xdp_adjust_head)
618 prog->xdp_adjust_head = 1;
619 if (insn->imm == BPF_FUNC_tail_call) {
620 /* mark bpf_tail_call as different opcode
621 * to avoid conditional branch in
622 * interpreter for every normal call
623 * and to prevent accidental JITing by
624 * JIT compiler that doesn't support
625 * bpf_tail_call yet
626 */
627 insn->imm = 0;
628 insn->code |= BPF_X;
629 continue;
630 }
631
632 fn = prog->aux->ops->get_func_proto(insn->imm);
633 /* all functions that have prototype and verifier allowed
634 * programs to call them, must be real in-kernel functions
635 */
636 BUG_ON(!fn->func);
637 insn->imm = fn->func - __bpf_call_base;
638 }
639 }
640}
641
642/* drop refcnt on maps used by eBPF program and free auxiliary data */ 589/* drop refcnt on maps used by eBPF program and free auxiliary data */
643static void free_used_maps(struct bpf_prog_aux *aux) 590static void free_used_maps(struct bpf_prog_aux *aux)
644{ 591{
@@ -892,9 +839,6 @@ static int bpf_prog_load(union bpf_attr *attr)
892 if (err < 0) 839 if (err < 0)
893 goto free_used_maps; 840 goto free_used_maps;
894 841
895 /* fixup BPF_CALL->imm field */
896 fixup_bpf_calls(prog);
897
898 /* eBPF program is ready to be JITed */ 842 /* eBPF program is ready to be JITed */
899 prog = bpf_prog_select_runtime(prog, &err); 843 prog = bpf_prog_select_runtime(prog, &err);
900 if (err < 0) 844 if (err < 0)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 796b68d00119..e41da6c57053 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -3233,6 +3233,60 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
3233 return 0; 3233 return 0;
3234} 3234}
3235 3235
3236/* fixup insn->imm field of bpf_call instructions:
3237 * if (insn->imm == BPF_FUNC_map_lookup_elem)
3238 * insn->imm = bpf_map_lookup_elem - __bpf_call_base;
3239 * else if (insn->imm == BPF_FUNC_map_update_elem)
3240 * insn->imm = bpf_map_update_elem - __bpf_call_base;
3241 * else ...
3242 *
3243 * this function is called after eBPF program passed verification
3244 */
3245static void fixup_bpf_calls(struct bpf_prog *prog)
3246{
3247 const struct bpf_func_proto *fn;
3248 int i;
3249
3250 for (i = 0; i < prog->len; i++) {
3251 struct bpf_insn *insn = &prog->insnsi[i];
3252
3253 if (insn->code == (BPF_JMP | BPF_CALL)) {
3254 /* we reach here when program has bpf_call instructions
3255 * and it passed bpf_check(), means that
3256 * ops->get_func_proto must have been supplied, check it
3257 */
3258 BUG_ON(!prog->aux->ops->get_func_proto);
3259
3260 if (insn->imm == BPF_FUNC_get_route_realm)
3261 prog->dst_needed = 1;
3262 if (insn->imm == BPF_FUNC_get_prandom_u32)
3263 bpf_user_rnd_init_once();
3264 if (insn->imm == BPF_FUNC_xdp_adjust_head)
3265 prog->xdp_adjust_head = 1;
3266 if (insn->imm == BPF_FUNC_tail_call) {
3267 /* mark bpf_tail_call as different opcode
3268 * to avoid conditional branch in
3269 * interpreter for every normal call
3270 * and to prevent accidental JITing by
3271 * JIT compiler that doesn't support
3272 * bpf_tail_call yet
3273 */
3274 insn->imm = 0;
3275 insn->code |= BPF_X;
3276 continue;
3277 }
3278
3279 fn = prog->aux->ops->get_func_proto(insn->imm);
3280 /* all functions that have prototype and verifier allowed
3281 * programs to call them, must be real in-kernel functions
3282 */
3283 BUG_ON(!fn->func);
3284 insn->imm = fn->func - __bpf_call_base;
3285 }
3286 }
3287}
3288
3289
3236static void free_states(struct bpf_verifier_env *env) 3290static void free_states(struct bpf_verifier_env *env)
3237{ 3291{
3238 struct bpf_verifier_state_list *sl, *sln; 3292 struct bpf_verifier_state_list *sl, *sln;
@@ -3328,6 +3382,9 @@ skip_full_check:
3328 /* program is valid, convert *(u32*)(ctx + off) accesses */ 3382 /* program is valid, convert *(u32*)(ctx + off) accesses */
3329 ret = convert_ctx_accesses(env); 3383 ret = convert_ctx_accesses(env);
3330 3384
3385 if (ret == 0)
3386 fixup_bpf_calls(env->prog);
3387
3331 if (log_level && log_len >= log_size - 1) { 3388 if (log_level && log_len >= log_size - 1) {
3332 BUG_ON(log_len >= log_size); 3389 BUG_ON(log_len >= log_size);
3333 /* verifier log exceeded user supplied buffer */ 3390 /* verifier log exceeded user supplied buffer */