 arch/x86/net/bpf_jit_comp.c  |  47 +++++++++++---
 include/linux/bpf.h          |   3 +
 include/linux/bpf_verifier.h |   1 +
 include/linux/filter.h       |   2 +
 kernel/bpf/core.c            |  13 +++-
 kernel/bpf/syscall.c         |   3 +-
 kernel/bpf/verifier.c        | 126 ++++++++++++++++++++++++++++++++++
 7 files changed, 189 insertions(+), 6 deletions(-)
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 68859b58ab84..87f214fbe66e 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -1109,13 +1109,23 @@ common_load:
 	return proglen;
 }
 
+struct x64_jit_data {
+	struct bpf_binary_header *header;
+	int *addrs;
+	u8 *image;
+	int proglen;
+	struct jit_context ctx;
+};
+
 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 {
 	struct bpf_binary_header *header = NULL;
 	struct bpf_prog *tmp, *orig_prog = prog;
+	struct x64_jit_data *jit_data;
 	int proglen, oldproglen = 0;
 	struct jit_context ctx = {};
 	bool tmp_blinded = false;
+	bool extra_pass = false;
 	u8 *image = NULL;
 	int *addrs;
 	int pass;
@@ -1135,10 +1145,28 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		prog = tmp;
 	}
 
+	jit_data = prog->aux->jit_data;
+	if (!jit_data) {
+		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+		if (!jit_data) {
+			prog = orig_prog;
+			goto out;
+		}
+		prog->aux->jit_data = jit_data;
+	}
+	addrs = jit_data->addrs;
+	if (addrs) {
+		ctx = jit_data->ctx;
+		oldproglen = jit_data->proglen;
+		image = jit_data->image;
+		header = jit_data->header;
+		extra_pass = true;
+		goto skip_init_addrs;
+	}
 	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
 	if (!addrs) {
 		prog = orig_prog;
-		goto out;
+		goto out_addrs;
 	}
 
 	/* Before first pass, make a rough estimation of addrs[]
@@ -1149,6 +1177,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		addrs[i] = proglen;
 	}
 	ctx.cleanup_addr = proglen;
+skip_init_addrs:
 
 	/* JITed image shrinks with every pass and the loop iterates
 	 * until the image stops shrinking. Very large bpf programs
@@ -1189,7 +1218,15 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 
 	if (image) {
 		bpf_flush_icache(header, image + proglen);
-		bpf_jit_binary_lock_ro(header);
+		if (!prog->is_func || extra_pass) {
+			bpf_jit_binary_lock_ro(header);
+		} else {
+			jit_data->addrs = addrs;
+			jit_data->ctx = ctx;
+			jit_data->proglen = proglen;
+			jit_data->image = image;
+			jit_data->header = header;
+		}
 		prog->bpf_func = (void *)image;
 		prog->jited = 1;
 		prog->jited_len = proglen;
@@ -1197,8 +1234,12 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 		prog = orig_prog;
 	}
 
+	if (!prog->is_func || extra_pass) {
 out_addrs:
-	kfree(addrs);
+		kfree(addrs);
+		kfree(jit_data);
+		prog->aux->jit_data = NULL;
+	}
 out:
 	if (tmp_blinded)
 		bpf_jit_prog_release_other(prog, prog == orig_prog ?
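
The bpf_jit_comp.c changes above make bpf_int_jit_compile() restartable: the first invocation for a subprogram stashes its addrs[], context, and still-unlocked image in prog->aux->jit_data, and a later extra pass (driven by the verifier once all call addresses are known) restores that state, re-emits the image, and only then locks it read-only and frees the stash. A minimal userspace sketch of that cache-and-resume shape, assuming invented names (struct jit_state, struct fn, compile_fn) rather than kernel APIs:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented stand-ins for x64_jit_data and bpf_prog, illustration only. */
struct jit_state { int proglen; bool have_state; };
struct fn { struct jit_state *jit_data; bool is_func; bool finalized; };

/* Models bpf_int_jit_compile(): pass 1 caches state, pass 2 finalizes. */
static void compile_fn(struct fn *f)
{
	bool extra_pass = false;

	if (!f->jit_data) {
		f->jit_data = calloc(1, sizeof(*f->jit_data));
		if (!f->jit_data)
			return;
	}
	if (f->jit_data->have_state)
		extra_pass = true;		/* resuming from cached state */

	f->jit_data->proglen = 42;		/* pretend code was emitted */

	if (!f->is_func || extra_pass) {
		f->finalized = true;		/* i.e. bpf_jit_binary_lock_ro() */
		free(f->jit_data);
		f->jit_data = NULL;
	} else {
		f->jit_data->have_state = true;	/* keep state for the extra pass */
	}
}

int main(void)
{
	struct fn f = { .is_func = true };

	compile_fn(&f);		/* pass 1: image emitted but not locked */
	printf("after pass 1: finalized=%d\n", f.finalized);
	compile_fn(&f);		/* extra pass: call addresses final, lock image */
	printf("after pass 2: finalized=%d\n", f.finalized);
	return 0;
}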
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 8935f6f63d5f..da54ef644fcd 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -200,6 +200,9 @@ struct bpf_prog_aux {
 	u32 max_ctx_offset;
 	u32 stack_depth;
 	u32 id;
+	u32 func_cnt;
+	struct bpf_prog **func;
+	void *jit_data; /* JIT specific data. arch dependent */
 	struct latch_tree_node ksym_tnode;
 	struct list_head ksym_lnode;
 	const struct bpf_prog_ops *ops;
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 585d4e17ea88..aaac589e490c 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -143,6 +143,7 @@ struct bpf_insn_aux_data {
 	union {
 		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
 		struct bpf_map *map_ptr;	/* pointer for call insn into lookup_elem */
+		s32 call_imm;			/* saved imm field of call insn */
 	};
 	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
 	bool seen; /* this insn was processed by the verifier */
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 3d6edc34932c..e872b4ebaa57 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -463,6 +463,8 @@ struct bpf_prog {
 				gpl_compatible:1, /* Is filter GPL compatible? */
 				cb_access:1,	/* Is control block accessed? */
 				dst_needed:1,	/* Do we need dst entry? */
+				blinded:1,	/* Was blinded */
+				is_func:1,	/* program is a bpf function */
 				kprobe_override:1; /* Do we override a kprobe? */
 	enum bpf_prog_type	type;		/* Type of BPF program */
 	u32			len;		/* Number of filter blocks */
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index bda911644b1c..768e0a02d8c8 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -722,7 +722,7 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
 	struct bpf_insn *insn;
 	int i, rewritten;
 
-	if (!bpf_jit_blinding_enabled(prog))
+	if (!bpf_jit_blinding_enabled(prog) || prog->blinded)
 		return prog;
 
 	clone = bpf_prog_clone_create(prog, GFP_USER);
@@ -764,6 +764,7 @@ struct bpf_prog *bpf_jit_blind_constants(struct bpf_prog *prog)
 		i += insn_delta;
 	}
 
+	clone->blinded = 1;
 	return clone;
 }
 #endif /* CONFIG_BPF_JIT */
@@ -1629,11 +1630,19 @@ int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
 static void bpf_prog_free_deferred(struct work_struct *work)
 {
 	struct bpf_prog_aux *aux;
+	int i;
 
 	aux = container_of(work, struct bpf_prog_aux, work);
 	if (bpf_prog_is_dev_bound(aux))
 		bpf_prog_offload_destroy(aux->prog);
-	bpf_jit_free(aux->prog);
+	for (i = 0; i < aux->func_cnt; i++)
+		bpf_jit_free(aux->func[i]);
+	if (aux->func_cnt) {
+		kfree(aux->func);
+		bpf_prog_unlock_free(aux->prog);
+	} else {
+		bpf_jit_free(aux->prog);
+	}
 }
 
 /* Free internal BPF program */
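
The bpf_prog_free_deferred() change above encodes the new ownership rule: when func_cnt is non-zero the JITed images belong to the subprograms in aux->func[], and the main program's bpf_func only aliases func[0]'s image, so the main program must be released without a second bpf_jit_free(). A hedged userspace sketch of that asymmetric teardown, with invented types (struct prog, jit_free) standing in for the kernel ones:

#include <stdlib.h>

/* Invented model: a prog owns either its own image or an array of subprogs. */
struct prog {
	void *image;		/* like the image behind prog->bpf_func */
	struct prog **func;	/* like aux->func */
	unsigned int func_cnt;	/* like aux->func_cnt */
};

static void jit_free(struct prog *p)	/* frees image and prog together */
{
	free(p->image);
	free(p);
}

static void prog_free_deferred(struct prog *p)
{
	unsigned int i;

	for (i = 0; i < p->func_cnt; i++)
		jit_free(p->func[i]);	/* subprogs own the images */
	if (p->func_cnt) {
		free(p->func);
		free(p);		/* p->image is an alias: do NOT free it again */
	} else {
		jit_free(p);		/* single-function case: p owns its image */
	}
}

int main(void)
{
	struct prog *sub = calloc(1, sizeof(*sub));
	struct prog *top = calloc(1, sizeof(*top));

	sub->image = malloc(16);
	top->func = calloc(1, sizeof(*top->func));
	top->func[0] = sub;
	top->func_cnt = 1;
	top->image = sub->image;	/* alias, like prog->bpf_func = func[0]->bpf_func */

	prog_free_deferred(top);	/* frees everything exactly once */
	return 0;
}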
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 2c4cfeaa8d5e..e2e1c78ce1dc 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1194,7 +1194,8 @@ static int bpf_prog_load(union bpf_attr *attr)
 		goto free_used_maps;
 
 	/* eBPF program is ready to be JITed */
-	prog = bpf_prog_select_runtime(prog, &err);
+	if (!prog->bpf_func)
+		prog = bpf_prog_select_runtime(prog, &err);
 	if (err < 0)
 		goto free_used_maps;
 
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 8e0e4cd0d5e4..48b2901cf483 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5012,12 +5012,138 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
 	return 0;
 }
 
+static int jit_subprogs(struct bpf_verifier_env *env)
+{
+	struct bpf_prog *prog = env->prog, **func, *tmp;
+	int i, j, subprog_start, subprog_end = 0, len, subprog;
+	struct bpf_insn *insn = prog->insnsi;
+	void *old_bpf_func;
+	int err = -ENOMEM;
+
+	if (env->subprog_cnt == 0)
+		return 0;
+
+	for (i = 0; i < prog->len; i++, insn++) {
+		if (insn->code != (BPF_JMP | BPF_CALL) ||
+		    insn->src_reg != BPF_PSEUDO_CALL)
+			continue;
+		subprog = find_subprog(env, i + insn->imm + 1);
+		if (subprog < 0) {
+			WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
+				  i + insn->imm + 1);
+			return -EFAULT;
+		}
+		/* temporarily remember subprog id inside insn instead of
+		 * aux_data, since next loop will split up all insns into funcs
+		 */
+		insn->off = subprog + 1;
+		/* remember original imm in case JIT fails and fallback
+		 * to interpreter will be needed
+		 */
+		env->insn_aux_data[i].call_imm = insn->imm;
+		/* point imm to __bpf_call_base+1 from JIT's point of view */
+		insn->imm = 1;
+	}
+
+	func = kzalloc(sizeof(prog) * (env->subprog_cnt + 1), GFP_KERNEL);
+	if (!func)
+		return -ENOMEM;
+
+	for (i = 0; i <= env->subprog_cnt; i++) {
+		subprog_start = subprog_end;
+		if (env->subprog_cnt == i)
+			subprog_end = prog->len;
+		else
+			subprog_end = env->subprog_starts[i];
+
+		len = subprog_end - subprog_start;
+		func[i] = bpf_prog_alloc(bpf_prog_size(len), GFP_USER);
+		if (!func[i])
+			goto out_free;
+		memcpy(func[i]->insnsi, &prog->insnsi[subprog_start],
+		       len * sizeof(struct bpf_insn));
+		func[i]->len = len;
+		func[i]->is_func = 1;
+		/* Use bpf_prog_F_tag to indicate functions in stack traces.
+		 * Long term would need debug info to populate names
+		 */
+		func[i]->aux->name[0] = 'F';
+		func[i]->aux->stack_depth = env->subprog_stack_depth[i];
+		func[i]->jit_requested = 1;
+		func[i] = bpf_int_jit_compile(func[i]);
+		if (!func[i]->jited) {
+			err = -ENOTSUPP;
+			goto out_free;
+		}
+		cond_resched();
+	}
+	/* at this point all bpf functions were successfully JITed
+	 * now populate all bpf_calls with correct addresses and
+	 * run last pass of JIT
+	 */
+	for (i = 0; i <= env->subprog_cnt; i++) {
+		insn = func[i]->insnsi;
+		for (j = 0; j < func[i]->len; j++, insn++) {
+			if (insn->code != (BPF_JMP | BPF_CALL) ||
+			    insn->src_reg != BPF_PSEUDO_CALL)
+				continue;
+			subprog = insn->off;
+			insn->off = 0;
+			insn->imm = (u64 (*)(u64, u64, u64, u64, u64))
+				func[subprog]->bpf_func -
+				__bpf_call_base;
+		}
+	}
+	for (i = 0; i <= env->subprog_cnt; i++) {
+		old_bpf_func = func[i]->bpf_func;
+		tmp = bpf_int_jit_compile(func[i]);
+		if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
+			verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
+			err = -EFAULT;
+			goto out_free;
+		}
+		cond_resched();
+	}
+
+	/* finally lock prog and jit images for all functions and
+	 * populate kallsyms
+	 */
+	for (i = 0; i <= env->subprog_cnt; i++) {
+		bpf_prog_lock_ro(func[i]);
+		bpf_prog_kallsyms_add(func[i]);
+	}
+	prog->jited = 1;
+	prog->bpf_func = func[0]->bpf_func;
+	prog->aux->func = func;
+	prog->aux->func_cnt = env->subprog_cnt + 1;
+	return 0;
+out_free:
+	for (i = 0; i <= env->subprog_cnt; i++)
+		if (func[i])
+			bpf_jit_free(func[i]);
+	kfree(func);
+	/* cleanup main prog to be interpreted */
+	prog->jit_requested = 0;
+	for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
+		if (insn->code != (BPF_JMP | BPF_CALL) ||
+		    insn->src_reg != BPF_PSEUDO_CALL)
+			continue;
+		insn->off = 0;
+		insn->imm = env->insn_aux_data[i].call_imm;
+	}
+	return err;
+}
+
 static int fixup_call_args(struct bpf_verifier_env *env)
 {
 	struct bpf_prog *prog = env->prog;
 	struct bpf_insn *insn = prog->insnsi;
 	int i, depth;
 
+	if (env->prog->jit_requested)
+		if (jit_subprogs(env) == 0)
+			return 0;
+
 	for (i = 0; i < prog->len; i++, insn++) {
 		if (insn->code != (BPF_JMP | BPF_CALL) ||
 		    insn->src_reg != BPF_PSEUDO_CALL)
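
jit_subprogs() above relies on one arithmetic trick twice: a callee is stored in the 32-bit imm field of the call insn as an offset from a common anchor, first as a placeholder (imm = 1, i.e. __bpf_call_base + 1) so image sizes converge, then as func[subprog]->bpf_func - __bpf_call_base before the final JIT pass, which must not move any code. A hedged standalone sketch of that base-relative encoding; callee and anchor are illustrative functions, not kernel symbols, and the cast through uintptr_t assumes both live within +/-2 GiB of each other, as the kernel guarantees by construction for __bpf_call_base:

#include <stdint.h>
#include <stdio.h>

static uint64_t callee(uint64_t a) { return a + 1; }
static uint64_t anchor(uint64_t a) { return a; }	/* plays __bpf_call_base */

int main(void)
{
	/* encode: the distance from the anchor must fit the insn's s32 imm */
	int32_t imm = (int32_t)((uintptr_t)callee - (uintptr_t)anchor);

	/* decode: the interpreter/JIT adds the anchor back before calling */
	uint64_t (*fn)(uint64_t) =
		(uint64_t (*)(uint64_t))((uintptr_t)anchor + (intptr_t)imm);

	printf("%llu\n", (unsigned long long)fn(41));	/* prints 42 */
	return 0;
}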