about summary refs log tree commit diff stats
path: root/kernel/bpf/verifier.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/bpf/verifier.c')
-rw-r--r-- kernel/bpf/verifier.c | 126
1 file changed, 126 insertions, 0 deletions
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 8e0e4cd0d5e4..48b2901cf483 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5012,12 +5012,138 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
5012 return 0; 5012 return 0;
5013} 5013}
5014 5014
5015static int jit_subprogs(struct bpf_verifier_env *env)
5016{
5017 struct bpf_prog *prog = env->prog, **func, *tmp;
5018 int i, j, subprog_start, subprog_end = 0, len, subprog;
5019 struct bpf_insn *insn = prog->insnsi;
5020 void *old_bpf_func;
5021 int err = -ENOMEM;
5022
5023 if (env->subprog_cnt == 0)
5024 return 0;
5025
5026 for (i = 0; i < prog->len; i++, insn++) {
5027 if (insn->code != (BPF_JMP | BPF_CALL) ||
5028 insn->src_reg != BPF_PSEUDO_CALL)
5029 continue;
5030 subprog = find_subprog(env, i + insn->imm + 1);
5031 if (subprog < 0) {
5032 WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
5033 i + insn->imm + 1);
5034 return -EFAULT;
5035 }
5036 /* temporarily remember subprog id inside insn instead of
5037 * aux_data, since next loop will split up all insns into funcs
5038 */
5039 insn->off = subprog + 1;
5040 /* remember original imm in case JIT fails and fallback
5041 * to interpreter will be needed
5042 */
5043 env->insn_aux_data[i].call_imm = insn->imm;
5044 /* point imm to __bpf_call_base+1 from JITs point of view */
5045 insn->imm = 1;
5046 }
5047
5048 func = kzalloc(sizeof(prog) * (env->subprog_cnt + 1), GFP_KERNEL);
5049 if (!func)
5050 return -ENOMEM;
5051
5052 for (i = 0; i <= env->subprog_cnt; i++) {
5053 subprog_start = subprog_end;
5054 if (env->subprog_cnt == i)
5055 subprog_end = prog->len;
5056 else
5057 subprog_end = env->subprog_starts[i];
5058
5059 len = subprog_end - subprog_start;
5060 func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER);
5061 if (!func[i])
5062 goto out_free;
5063 memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
5064 len * sizeof(struct bpf_insn));
5065 func[i]->len = len;
5066 func[i]->is_func = 1;
5067 /* Use bpf_prog_F_tag to indicate functions in stack traces.
5068 * Long term would need debug info to populate names
5069 */
5070 func[i]->aux->name[0] = 'F';
5071 func[i]->aux->stack_depth = env->subprog_stack_depth[i];
5072 func[i]->jit_requested = 1;
5073 func[i] = bpf_int_jit_compile(func[i]);
5074 if (!func[i]->jited) {
5075 err = -ENOTSUPP;
5076 goto out_free;
5077 }
5078 cond_resched();
5079 }
5080 /* at this point all bpf functions were successfully JITed
5081 * now populate all bpf_calls with correct addresses and
5082 * run last pass of JIT
5083 */
5084 for (i = 0; i <= env->subprog_cnt; i++) {
5085 insn = func[i]->insnsi;
5086 for (j = 0; j < func[i]->len; j++, insn++) {
5087 if (insn->code != (BPF_JMP | BPF_CALL) ||
5088 insn->src_reg != BPF_PSEUDO_CALL)
5089 continue;
5090 subprog = insn->off;
5091 insn->off = 0;
5092 insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
5093 func[subprog]->bpf_func -
5094 __bpf_call_base;
5095 }
5096 }
5097 for (i = 0; i <= env->subprog_cnt; i++) {
5098 old_bpf_func = func[i]->bpf_func;
5099 tmp = bpf_int_jit_compile(func[i]);
5100 if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
5101 verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
5102 err = -EFAULT;
5103 goto out_free;
5104 }
5105 cond_resched();
5106 }
5107
5108 /* finally lock prog and jit images for all functions and
5109 * populate kallsysm
5110 */
5111 for (i = 0; i <= env->subprog_cnt; i++) {
5112 bpf_prog_lock_ro(func[i]);
5113 bpf_prog_kallsyms_add(func[i]);
5114 }
5115 prog->jited = 1;
5116 prog->bpf_func = func[0]->bpf_func;
5117 prog->aux->func = func;
5118 prog->aux->func_cnt = env->subprog_cnt + 1;
5119 return 0;
5120out_free:
5121 for (i = 0; i <= env->subprog_cnt; i++)
5122 if (func[i])
5123 bpf_jit_free(func[i]);
5124 kfree(func);
5125 /* cleanup main prog to be interpreted */
5126 prog->jit_requested = 0;
5127 for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
5128 if (insn->code != (BPF_JMP | BPF_CALL) ||
5129 insn->src_reg != BPF_PSEUDO_CALL)
5130 continue;
5131 insn->off = 0;
5132 insn->imm = env->insn_aux_data[i].call_imm;
5133 }
5134 return err;
5135}
5136
5015static int fixup_call_args(struct bpf_verifier_env *env) 5137static int fixup_call_args(struct bpf_verifier_env *env)
5016{ 5138{
5017 struct bpf_prog *prog = env->prog; 5139 struct bpf_prog *prog = env->prog;
5018 struct bpf_insn *insn = prog->insnsi; 5140 struct bpf_insn *insn = prog->insnsi;
5019 int i, depth; 5141 int i, depth;
5020 5142
5143 if (env->prog->jit_requested)
5144 if (jit_subprogs(env) == 0)
5145 return 0;
5146
5021 for (i = 0; i < prog->len; i++, insn++) { 5147 for (i = 0; i < prog->len; i++, insn++) {
5022 if (insn->code != (BPF_JMP | BPF_CALL) || 5148 if (insn->code != (BPF_JMP | BPF_CALL) ||
5023 insn->src_reg != BPF_PSEUDO_CALL) 5149 insn->src_reg != BPF_PSEUDO_CALL)