author	Alexei Starovoitov <ast@fb.com>	2017-03-15 21:26:39 -0400
committer	David S. Miller <davem@davemloft.net>	2017-03-16 23:44:11 -0400
commit	e245c5c6a5656e4d61aa7bb08e9694fd6e5b2b9d
tree	e6767b1b7f1ccb7affc6f7ed325249870db84370 /kernel/bpf/syscall.c
parent	4396e46187ca5070219b81773c4e65088dac50cc
bpf: move fixup_bpf_calls() function
no functional change.
move fixup_bpf_calls() to verifier.c
it's being refactored in the next patch

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
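Editorial note (not part of the commit message): before the function leaves syscall.c, here is the gist of what fixup_bpf_calls() does. A BPF_JMP|BPF_CALL instruction carries a helper id in insn->imm, and after verification that id is rewritten to the helper's offset from a common call base (insn->imm = fn->func - __bpf_call_base in the removed hunk), so the interpreter and JITs can dispatch without a lookup. The code below is a minimal userspace-only sketch of that idea, not the kernel implementation: struct insn, the FUNC_* ids, get_helper() and helper_base() are invented stand-ins for struct bpf_insn, the BPF_FUNC_* ids, ops->get_func_proto() and __bpf_call_base.

/* Standalone sketch of the imm-rewriting idea; all names are stand-ins. */
#include <stdint.h>
#include <stdio.h>

#define BPF_JMP  0x05
#define BPF_CALL 0x80

struct insn {
	unsigned char code;	/* opcode, e.g. BPF_JMP | BPF_CALL */
	int imm;		/* helper id before fixup, offset after */
};

/* stand-ins for in-kernel helpers and for the common call base */
static int helper_base(void)   { return 0; }
static int helper_lookup(void) { return 1; }
static int helper_update(void) { return 2; }

enum { FUNC_lookup = 1, FUNC_update = 2 };

typedef int (*helper_fn)(void);

/* stand-in for ops->get_func_proto(): map a helper id to a function */
static helper_fn get_helper(int imm)
{
	switch (imm) {
	case FUNC_lookup: return helper_lookup;
	case FUNC_update: return helper_update;
	default:          return NULL;
	}
}

static void fixup_calls(struct insn *insns, int len)
{
	for (int i = 0; i < len; i++) {
		struct insn *in = &insns[i];
		helper_fn fn;

		if (in->code != (BPF_JMP | BPF_CALL))
			continue;
		fn = get_helper(in->imm);
		if (!fn)
			continue;	/* a verifier would have rejected this id */
		/* mirror insn->imm = fn->func - __bpf_call_base */
		in->imm = (int)((uintptr_t)fn - (uintptr_t)helper_base);
	}
}

int main(void)
{
	struct insn prog[] = { { BPF_JMP | BPF_CALL, FUNC_lookup } };

	fixup_calls(prog, 1);
	printf("imm rewritten to offset %d\n", prog[0].imm);
	return 0;
}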
Diffstat (limited to 'kernel/bpf/syscall.c')
-rw-r--r--	kernel/bpf/syscall.c	56
1 file changed, 0 insertions(+), 56 deletions(-)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 7af0dcc5d755..48c914b983bd 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -586,59 +586,6 @@ void bpf_register_prog_type(struct bpf_prog_type_list *tl)
 	list_add(&tl->list_node, &bpf_prog_types);
 }
 
-/* fixup insn->imm field of bpf_call instructions:
- * if (insn->imm == BPF_FUNC_map_lookup_elem)
- *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
- * else if (insn->imm == BPF_FUNC_map_update_elem)
- *      insn->imm = bpf_map_update_elem - __bpf_call_base;
- * else ...
- *
- * this function is called after eBPF program passed verification
- */
-static void fixup_bpf_calls(struct bpf_prog *prog)
-{
-	const struct bpf_func_proto *fn;
-	int i;
-
-	for (i = 0; i < prog->len; i++) {
-		struct bpf_insn *insn = &prog->insnsi[i];
-
-		if (insn->code == (BPF_JMP | BPF_CALL)) {
-			/* we reach here when program has bpf_call instructions
-			 * and it passed bpf_check(), means that
-			 * ops->get_func_proto must have been supplied, check it
-			 */
-			BUG_ON(!prog->aux->ops->get_func_proto);
-
-			if (insn->imm == BPF_FUNC_get_route_realm)
-				prog->dst_needed = 1;
-			if (insn->imm == BPF_FUNC_get_prandom_u32)
-				bpf_user_rnd_init_once();
-			if (insn->imm == BPF_FUNC_xdp_adjust_head)
-				prog->xdp_adjust_head = 1;
-			if (insn->imm == BPF_FUNC_tail_call) {
-				/* mark bpf_tail_call as different opcode
-				 * to avoid conditional branch in
-				 * interpeter for every normal call
-				 * and to prevent accidental JITing by
-				 * JIT compiler that doesn't support
-				 * bpf_tail_call yet
-				 */
-				insn->imm = 0;
-				insn->code |= BPF_X;
-				continue;
-			}
-
-			fn = prog->aux->ops->get_func_proto(insn->imm);
-			/* all functions that have prototype and verifier allowed
-			 * programs to call them, must be real in-kernel functions
-			 */
-			BUG_ON(!fn->func);
-			insn->imm = fn->func - __bpf_call_base;
-		}
-	}
-}
-
 /* drop refcnt on maps used by eBPF program and free auxilary data */
 static void free_used_maps(struct bpf_prog_aux *aux)
 {
@@ -892,9 +839,6 @@ static int bpf_prog_load(union bpf_attr *attr)
 	if (err < 0)
 		goto free_used_maps;
 
-	/* fixup BPF_CALL->imm field */
-	fixup_bpf_calls(prog);
-
 	/* eBPF program is ready to be JITed */
 	prog = bpf_prog_select_runtime(prog, &err);
 	if (err < 0)
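The diffstat above is limited to kernel/bpf/syscall.c, so the verifier.c hunk that receives the function is not shown on this page. The design point of the move is only the ordering: the fixup now runs inside the verifier, after a successful check, instead of in bpf_prog_load() here. A tiny stand-in sketch of that ordering follows; check_prog(), verify_and_fixup() and struct prog are invented names, not the kernel's API, and the real call site belongs to the verifier.c half of this commit and the refactor in the next patch.

/* Invented stand-ins -- only the ordering is the point: fix up call imms
 * only after verification has succeeded. */
#include <stdio.h>

struct prog { int len; };

static int check_prog(struct prog *p)            { (void)p; return 0; }	/* the verifier's passes */
static void fixup_bpf_calls_stub(struct prog *p) { (void)p; }		/* the moved function */

static int verify_and_fixup(struct prog *p)
{
	int ret = check_prog(p);

	if (ret == 0)
		fixup_bpf_calls_stub(p);	/* verified programs only */
	return ret;
}

int main(void)
{
	struct prog p = { .len = 1 };

	printf("verify+fixup returned %d\n", verify_and_fixup(&p));
	return 0;
}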