author		Alexei Starovoitov <ast@plumgrid.com>	2014-09-26 03:17:04 -0400
committer	David S. Miller <davem@davemloft.net>	2014-09-26 15:05:15 -0400
commit		0246e64d9a5fcd4805198de59b9b5cf1f974eb41 (patch)
tree		3a7b78d9f3dcfb255d0ac9ce5350cef1e195eddf /kernel/bpf
parent		cbd357008604925355ae7b54a09137dabb81b580 (diff)
bpf: handle pseudo BPF_LD_IMM64 insn
eBPF programs passed from userspace are using pseudo BPF_LD_IMM64 instructions to refer to process-local map_fd. Scan the program for such instructions and if FDs are valid, convert them to 'struct bpf_map' pointers which will be used by verifier to check access to maps in bpf_map_lookup/update() calls. If program passes verifier, convert pseudo BPF_LD_IMM64 into generic by dropping BPF_PSEUDO_MAP_FD flag.

Note that eBPF interpreter is generic and knows nothing about pseudo insns.

Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
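For context, the userspace side of this contract looks roughly as follows: the loader emits a two-instruction BPF_LD_IMM64 whose src_reg is BPF_PSEUDO_MAP_FD and whose low 32-bit immediate carries the map file descriptor. The helper below is only an illustrative sketch (emit_ld_map_fd is a made-up name, not an existing API); it assumes struct bpf_insn and the BPF_LD/BPF_DW/BPF_IMM/BPF_PSEUDO_MAP_FD definitions from the linux/bpf.h uapi header.

#include <linux/bpf.h>
#include <string.h>

/* Illustrative sketch only: build the two-insn pseudo BPF_LD_IMM64 that
 * asks the kernel to load the map referenced by 'map_fd' into register
 * 'dst'.  insn[0] carries the fd in the low 32 bits of the immediate and
 * BPF_PSEUDO_MAP_FD in src_reg; insn[1] must be all zeroes apart from the
 * high 32 bits of the immediate, which is exactly the shape that
 * replace_map_fd_with_map_ptr() below checks for.
 */
static void emit_ld_map_fd(struct bpf_insn *insn, int dst, int map_fd)
{
	memset(insn, 0, 2 * sizeof(*insn));
	insn[0].code    = BPF_LD | BPF_DW | BPF_IMM;
	insn[0].dst_reg = dst;
	insn[0].src_reg = BPF_PSEUDO_MAP_FD;
	insn[0].imm     = map_fd;	/* low 32 bits of the 64-bit immediate */
	/* insn[1] stays zeroed: an fd fits in 32 bits, so the high half is 0 */
}
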
Diffstat (limited to 'kernel/bpf')
-rw-r--r--	kernel/bpf/verifier.c	147
1 file changed, 147 insertions(+), 0 deletions(-)
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 871edc1f2e1f..7227543e474b 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -125,10 +125,15 @@
  * are set to NOT_INIT to indicate that they are no longer readable.
  */
 
+#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
+
 /* single container for all structs
  * one verifier_env per bpf_check() call
  */
 struct verifier_env {
+	struct bpf_prog *prog; /* eBPF program being verified */
+	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */
+	u32 used_map_cnt; /* number of used maps */
 };
 
 /* verbose verifier prints what it's seeing
@@ -300,6 +305,115 @@ static void print_bpf_insn(struct bpf_insn *insn)
 	}
 }
 
+/* return the map pointer stored inside BPF_LD_IMM64 instruction */
+static struct bpf_map *ld_imm64_to_map_ptr(struct bpf_insn *insn)
+{
+	u64 imm64 = ((u64) (u32) insn[0].imm) | ((u64) (u32) insn[1].imm) << 32;
+
+	return (struct bpf_map *) (unsigned long) imm64;
+}
+
+/* look for pseudo eBPF instructions that access map FDs and
+ * replace them with actual map pointers
+ */
+static int replace_map_fd_with_map_ptr(struct verifier_env *env)
+{
+	struct bpf_insn *insn = env->prog->insnsi;
+	int insn_cnt = env->prog->len;
+	int i, j;
+
+	for (i = 0; i < insn_cnt; i++, insn++) {
+		if (insn[0].code == (BPF_LD | BPF_IMM | BPF_DW)) {
+			struct bpf_map *map;
+			struct fd f;
+
+			if (i == insn_cnt - 1 || insn[1].code != 0 ||
+			    insn[1].dst_reg != 0 || insn[1].src_reg != 0 ||
+			    insn[1].off != 0) {
+				verbose("invalid bpf_ld_imm64 insn\n");
+				return -EINVAL;
+			}
+
+			if (insn->src_reg == 0)
+				/* valid generic load 64-bit imm */
+				goto next_insn;
+
+			if (insn->src_reg != BPF_PSEUDO_MAP_FD) {
+				verbose("unrecognized bpf_ld_imm64 insn\n");
+				return -EINVAL;
+			}
+
+			f = fdget(insn->imm);
+
+			map = bpf_map_get(f);
+			if (IS_ERR(map)) {
+				verbose("fd %d is not pointing to valid bpf_map\n",
+					insn->imm);
+				fdput(f);
+				return PTR_ERR(map);
+			}
+
+			/* store map pointer inside BPF_LD_IMM64 instruction */
+			insn[0].imm = (u32) (unsigned long) map;
+			insn[1].imm = ((u64) (unsigned long) map) >> 32;
+
+			/* check whether we recorded this map already */
+			for (j = 0; j < env->used_map_cnt; j++)
+				if (env->used_maps[j] == map) {
+					fdput(f);
+					goto next_insn;
+				}
+
+			if (env->used_map_cnt >= MAX_USED_MAPS) {
+				fdput(f);
+				return -E2BIG;
+			}
+
+			/* remember this map */
+			env->used_maps[env->used_map_cnt++] = map;
+
+			/* hold the map. If the program is rejected by verifier,
+			 * the map will be released by release_maps() or it
+			 * will be used by the valid program until it's unloaded
+			 * and all maps are released in free_bpf_prog_info()
+			 */
+			atomic_inc(&map->refcnt);
+
+			fdput(f);
+next_insn:
+			insn++;
+			i++;
+		}
+	}
+
+	/* now all pseudo BPF_LD_IMM64 instructions load valid
+	 * 'struct bpf_map *' into a register instead of user map_fd.
+	 * These pointers will be used later by verifier to validate map access.
+	 */
+	return 0;
+}
+
+/* drop refcnt of maps used by the rejected program */
+static void release_maps(struct verifier_env *env)
+{
+	int i;
+
+	for (i = 0; i < env->used_map_cnt; i++)
+		bpf_map_put(env->used_maps[i]);
+}
+
+/* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */
+static void convert_pseudo_ld_imm64(struct verifier_env *env)
+{
+	struct bpf_insn *insn = env->prog->insnsi;
+	int insn_cnt = env->prog->len;
+	int i;
+
+	for (i = 0; i < insn_cnt; i++, insn++)
+		if (insn->code == (BPF_LD | BPF_IMM | BPF_DW))
+			insn->src_reg = 0;
+}
+
 int bpf_check(struct bpf_prog *prog, union bpf_attr *attr)
 {
 	char __user *log_ubuf = NULL;
@@ -316,6 +430,8 @@ int bpf_check(struct bpf_prog *prog, union bpf_attr *attr)
 	if (!env)
 		return -ENOMEM;
 
+	env->prog = prog;
+
 	/* grab the mutex to protect few globals used by verifier */
 	mutex_lock(&bpf_verifier_lock);
 
@@ -342,8 +458,14 @@ int bpf_check(struct bpf_prog *prog, union bpf_attr *attr)
 		log_level = 0;
 	}
 
+	ret = replace_map_fd_with_map_ptr(env);
+	if (ret < 0)
+		goto skip_full_check;
+
 	/* ret = do_check(env); */
 
+skip_full_check:
+
 	if (log_level && log_len >= log_size - 1) {
 		BUG_ON(log_len >= log_size);
 		/* verifier log exceeded user supplied buffer */
@@ -357,11 +479,36 @@ int bpf_check(struct bpf_prog *prog, union bpf_attr *attr)
 		goto free_log_buf;
 	}
 
+	if (ret == 0 && env->used_map_cnt) {
+		/* if program passed verifier, update used_maps in bpf_prog_info */
+		prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
+						     sizeof(env->used_maps[0]),
+						     GFP_KERNEL);
+
+		if (!prog->aux->used_maps) {
+			ret = -ENOMEM;
+			goto free_log_buf;
+		}
+
+		memcpy(prog->aux->used_maps, env->used_maps,
+		       sizeof(env->used_maps[0]) * env->used_map_cnt);
+		prog->aux->used_map_cnt = env->used_map_cnt;
+
+		/* program is valid. Convert pseudo bpf_ld_imm64 into generic
+		 * bpf_ld_imm64 instructions
+		 */
+		convert_pseudo_ld_imm64(env);
+	}
 
 free_log_buf:
 	if (log_level)
 		vfree(log_buf);
 free_env:
+	if (!prog->aux->used_maps)
+		/* if we didn't copy map pointers into bpf_prog_info, release
+		 * them now. Otherwise free_bpf_prog_info() will release them.
+		 */
+		release_maps(env);
 	kfree(env);
 	mutex_unlock(&bpf_verifier_lock);
 	return ret;
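
The cleanup counterpart referenced in the comments above, free_bpf_prog_info(), is not part of this diff. Based only on the fields the patch fills in (prog->aux->used_maps and prog->aux->used_map_cnt), a minimal sketch of that release step at program-unload time might look like the following; treat the body as an assumption, not the committed implementation.

/* Sketch, not from this patch: drop the references the verifier took on
 * each map and free the used_maps array that bpf_check() copied into
 * prog->aux for a program that passed verification.
 */
static void free_bpf_prog_info(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}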