about summary refs log tree commit diff stats
path: root/kernel/bpf/core.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/bpf/core.c')
-rw-r--r-- | kernel/bpf/core.c | 100
1 file changed, 75 insertions(+), 25 deletions(-)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index ba03ec39efb3..6ef6746a7871 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -218,47 +218,84 @@ int bpf_prog_calc_tag(struct bpf_prog *fp)
 	return 0;
 }
 
221static void bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta) 221static int bpf_adj_delta_to_imm(struct bpf_insn *insn, u32 pos, u32 delta,
222 u32 curr, const bool probe_pass)
222{ 223{
224 const s64 imm_min = S32_MIN, imm_max = S32_MAX;
225 s64 imm = insn->imm;
226
227 if (curr < pos && curr + imm + 1 > pos)
228 imm += delta;
229 else if (curr > pos + delta && curr + imm + 1 <= pos + delta)
230 imm -= delta;
231 if (imm < imm_min || imm > imm_max)
232 return -ERANGE;
233 if (!probe_pass)
234 insn->imm = imm;
235 return 0;
236}
237
238static int bpf_adj_delta_to_off(struct bpf_insn *insn, u32 pos, u32 delta,
239 u32 curr, const bool probe_pass)
240{
241 const s32 off_min = S16_MIN, off_max = S16_MAX;
242 s32 off = insn->off;
243
244 if (curr < pos && curr + off + 1 > pos)
245 off += delta;
246 else if (curr > pos + delta && curr + off + 1 <= pos + delta)
247 off -= delta;
248 if (off < off_min || off > off_max)
249 return -ERANGE;
250 if (!probe_pass)
251 insn->off = off;
252 return 0;
253}
254
255static int bpf_adj_branches(struct bpf_prog *prog, u32 pos, u32 delta,
256 const bool probe_pass)
257{
258 u32 i, insn_cnt = prog->len + (probe_pass ? delta : 0);
223 struct bpf_insn *insn = prog->insnsi; 259 struct bpf_insn *insn = prog->insnsi;
224 u32 i, insn_cnt = prog->len; 260 int ret = 0;
225 bool pseudo_call;
226 u8 code;
227 int off;
228 261
229 for (i = 0; i < insn_cnt; i++, insn++) { 262 for (i = 0; i < insn_cnt; i++, insn++) {
263 u8 code;
264
265 /* In the probing pass we still operate on the original,
266 * unpatched image in order to check overflows before we
267 * do any other adjustments. Therefore skip the patchlet.
268 */
269 if (probe_pass && i == pos) {
270 i += delta + 1;
271 insn++;
272 }
230 code = insn->code; 273 code = insn->code;
231 if (BPF_CLASS(code) != BPF_JMP) 274 if (BPF_CLASS(code) != BPF_JMP ||
232 continue; 275 BPF_OP(code) == BPF_EXIT)
233 if (BPF_OP(code) == BPF_EXIT)
234 continue; 276 continue;
277 /* Adjust offset of jmps if we cross patch boundaries. */
235 if (BPF_OP(code) == BPF_CALL) { 278 if (BPF_OP(code) == BPF_CALL) {
236 if (insn->src_reg == BPF_PSEUDO_CALL) 279 if (insn->src_reg != BPF_PSEUDO_CALL)
237 pseudo_call = true;
238 else
239 continue; 280 continue;
281 ret = bpf_adj_delta_to_imm(insn, pos, delta, i,
282 probe_pass);
240 } else { 283 } else {
241 pseudo_call = false; 284 ret = bpf_adj_delta_to_off(insn, pos, delta, i,
285 probe_pass);
242 } 286 }
243 off = pseudo_call ? insn->imm : insn->off; 287 if (ret)
244 288 break;
245 /* Adjust offset of jmps if we cross boundaries. */
246 if (i < pos && i + off + 1 > pos)
247 off += delta;
248 else if (i > pos + delta && i + off + 1 <= pos + delta)
249 off -= delta;
250
251 if (pseudo_call)
252 insn->imm = off;
253 else
254 insn->off = off;
255 } 289 }
290
291 return ret;
256} 292}
257 293
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 				       const struct bpf_insn *patch, u32 len)
 {
 	u32 insn_adj_cnt, insn_rest, insn_delta = len - 1;
+	const u32 cnt_max = S16_MAX;
 	struct bpf_prog *prog_adj;
 
 	/* Since our patchlet doesn't expand the image, we're done. */
@@ -269,6 +306,15 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 
 	insn_adj_cnt = prog->len + insn_delta;
 
+	/* Reject anything that would potentially let the insn->off
+	 * target overflow when we have excessive program expansions.
+	 * We need to probe here before we do any reallocation where
+	 * we afterwards may not fail anymore.
+	 */
+	if (insn_adj_cnt > cnt_max &&
+	    bpf_adj_branches(prog, off, insn_delta, true))
+		return NULL;
+
 	/* Several new instructions need to be inserted. Make room
 	 * for them. Likely, there's no need for a new allocation as
 	 * last page could have large enough tailroom.
@@ -294,7 +340,11 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 	       sizeof(*patch) * insn_rest);
 	memcpy(prog_adj->insnsi + off, patch, sizeof(*patch) * len);
 
-	bpf_adj_branches(prog_adj, off, insn_delta);
+	/* We are guaranteed to not fail at this point, otherwise
+	 * the ship has sailed to reverse to the original state. An
+	 * overflow cannot happen at this point.
+	 */
+	BUG_ON(bpf_adj_branches(prog_adj, off, insn_delta, false));
 
 	return prog_adj;
 }