path: root/kernel/bpf/syscall.c
author    Brian Norris <computersforpeace@gmail.com>  2017-05-15 14:19:19 -0400
committer Brian Norris <computersforpeace@gmail.com>  2017-05-15 14:19:19 -0400
commit    c316cf670491def52a396d3bdc5a63ad01f7fefa (patch)
tree      bf22299ce777088d190b532629b1bd647d28fab6 /kernel/bpf/syscall.c
parent    6c51a52eeb58befd2e9be2ed7ee2c4c04139b336 (diff)
parent    2ea659a9ef488125eb46da6eb571de5eae5c43f6 (diff)
Merge 'v4.12-rc1' into MTD
Bring a few queued patches in sync for -next development.
Diffstat (limited to 'kernel/bpf/syscall.c')
-rw-r--r--  kernel/bpf/syscall.c | 181
1 file changed, 78 insertions(+), 103 deletions(-)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 7af0dcc5d755..fd2411fd6914 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -27,30 +27,29 @@ DEFINE_PER_CPU(int, bpf_prog_active);
 
 int sysctl_unprivileged_bpf_disabled __read_mostly;
 
-static LIST_HEAD(bpf_map_types);
+static const struct bpf_map_ops * const bpf_map_types[] = {
+#define BPF_PROG_TYPE(_id, _ops)
+#define BPF_MAP_TYPE(_id, _ops) \
+	[_id] = &_ops,
+#include <linux/bpf_types.h>
+#undef BPF_PROG_TYPE
+#undef BPF_MAP_TYPE
+};
 
 static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
 {
-	struct bpf_map_type_list *tl;
 	struct bpf_map *map;
 
-	list_for_each_entry(tl, &bpf_map_types, list_node) {
-		if (tl->type == attr->map_type) {
-			map = tl->ops->map_alloc(attr);
-			if (IS_ERR(map))
-				return map;
-			map->ops = tl->ops;
-			map->map_type = attr->map_type;
-			return map;
-		}
-	}
-	return ERR_PTR(-EINVAL);
-}
+	if (attr->map_type >= ARRAY_SIZE(bpf_map_types) ||
+	    !bpf_map_types[attr->map_type])
+		return ERR_PTR(-EINVAL);
 
-/* boot time registration of different map implementations */
-void bpf_register_map_type(struct bpf_map_type_list *tl)
-{
-	list_add(&tl->list_node, &bpf_map_types);
+	map = bpf_map_types[attr->map_type]->map_alloc(attr);
+	if (IS_ERR(map))
+		return map;
+	map->ops = bpf_map_types[attr->map_type];
+	map->map_type = attr->map_type;
+	return map;
 }
 
 void *bpf_map_area_alloc(size_t size)
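This hunk replaces run-time registration of map implementations (a linked list that each map type appended to via bpf_register_map_type()) with a table filled in at build time from <linux/bpf_types.h>. As a rough illustration of the X-macro trick, using two entries that really do appear in that header, the include above expands to something like:

	static const struct bpf_map_ops * const bpf_map_types[] = {
		[BPF_MAP_TYPE_HASH]  = &htab_map_ops,
		[BPF_MAP_TYPE_ARRAY] = &array_map_ops,
		/* ... one initializer per BPF_MAP_TYPE() line ... */
	};

BPF_PROG_TYPE() is defined to expand to nothing here, so program-type entries in the shared header are simply skipped; map types not listed leave NULL holes, which is why find_and_alloc_map() checks both the array bound and a non-NULL entry before dereferencing.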
@@ -68,8 +67,7 @@ void *bpf_map_area_alloc(size_t size)
 		return area;
 	}
 
-	return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
-			 PAGE_KERNEL);
+	return __vmalloc(size, GFP_KERNEL | flags, PAGE_KERNEL);
 }
 
 void bpf_map_area_free(void *area)
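Dropping __GFP_HIGHMEM is not a behavioural change here: as of this same merge window, __vmalloc() applies __GFP_HIGHMEM implicitly, so spelling the flag out became redundant (and the call now fits on one line).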
@@ -215,7 +213,7 @@ int bpf_map_new_fd(struct bpf_map *map)
 		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
 		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
 
-#define BPF_MAP_CREATE_LAST_FIELD map_flags
+#define BPF_MAP_CREATE_LAST_FIELD inner_map_fd
 /* called via syscall */
 static int map_create(union bpf_attr *attr)
 {
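Raising BPF_MAP_CREATE_LAST_FIELD from map_flags to inner_map_fd widens the slice of union bpf_attr that BPF_MAP_CREATE may use; CHECK_ATTR() (visible in the context above) still demands that everything past the last field be zero. A hedged userspace sketch of the new attribute, creating one of the v4.12 map-in-map types; create_outer_map() is a hypothetical helper and error handling is elided:

	#include <linux/bpf.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Sketch: create an outer BPF_MAP_TYPE_ARRAY_OF_MAPS. The new
	 * inner_map_fd field names a template map; every map later
	 * stored in a slot must be compatible with it.
	 */
	static int create_outer_map(int inner_map_fd)
	{
		union bpf_attr attr;

		memset(&attr, 0, sizeof(attr));	/* tail bytes must be zero */
		attr.map_type     = BPF_MAP_TYPE_ARRAY_OF_MAPS;
		attr.key_size     = sizeof(int);
		attr.value_size   = sizeof(int);	/* slots hold map fds */
		attr.max_entries  = 8;
		attr.inner_map_fd = inner_map_fd;

		return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
	}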
@@ -352,6 +350,9 @@ static int map_lookup_elem(union bpf_attr *attr)
 		err = bpf_percpu_array_copy(map, key, value);
 	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
 		err = bpf_stackmap_copy(map, key, value);
+	} else if (map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
+		   map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
+		err = -ENOTSUPP;
 	} else {
 		rcu_read_lock();
 		ptr = map->ops->map_lookup_elem(map, key);
@@ -438,11 +439,17 @@ static int map_update_elem(union bpf_attr *attr)
 		err = bpf_percpu_array_update(map, key, value, attr->flags);
 	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
 		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
-		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY) {
+		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY ||
+		   map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
 		rcu_read_lock();
 		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
 						   attr->flags);
 		rcu_read_unlock();
+	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
+		rcu_read_lock();
+		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
+						  attr->flags);
+		rcu_read_unlock();
 	} else {
 		rcu_read_lock();
 		err = map->ops->map_update_elem(map, key, value, attr->flags);
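In all of the fd-backed branches the "value" coming from userspace is a file descriptor that the kernel resolves to the underlying object (perf event, program, cgroup, or now another map); the new HASH_OF_MAPS branch routes through bpf_fd_htab_map_update_elem() to do the same for hash-keyed slots. Continuing the hypothetical sketch above, plugging an inner map into slot 0 of the outer array might look like:

	/* Sketch: store a map fd, not raw bytes, as the element value. */
	static int plug_inner_map(int outer_fd, int inner_map_fd)
	{
		union bpf_attr attr;
		int key = 0;

		memset(&attr, 0, sizeof(attr));
		attr.map_fd = outer_fd;
		attr.key    = (__u64)(unsigned long)&key;
		attr.value  = (__u64)(unsigned long)&inner_map_fd;
		attr.flags  = BPF_ANY;

		return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
	}

Note the matching lookup hunk above returns -ENOTSUPP for both *_OF_MAPS types: reading a slot back from the syscall side is not wired up at this point.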
@@ -528,14 +535,18 @@ static int map_get_next_key(union bpf_attr *attr)
 	if (IS_ERR(map))
 		return PTR_ERR(map);
 
-	err = -ENOMEM;
-	key = kmalloc(map->key_size, GFP_USER);
-	if (!key)
-		goto err_put;
+	if (ukey) {
+		err = -ENOMEM;
+		key = kmalloc(map->key_size, GFP_USER);
+		if (!key)
+			goto err_put;
 
-	err = -EFAULT;
-	if (copy_from_user(key, ukey, map->key_size) != 0)
-		goto free_key;
+		err = -EFAULT;
+		if (copy_from_user(key, ukey, map->key_size) != 0)
+			goto free_key;
+	} else {
+		key = NULL;
+	}
 
 	err = -ENOMEM;
 	next_key = kmalloc(map->key_size, GFP_USER);
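Accepting a NULL user key turns BPF_MAP_GET_NEXT_KEY into a "give me the first key" operation, which makes it possible to iterate a whole map without knowing any valid key up front. A sketch of the resulting idiom, with the same includes as the earlier sketch; for_each_key() is a hypothetical helper, and a failing syscall (typically -ENOENT) ends the walk:

	/* Sketch: enumerate every key in a map. */
	static void for_each_key(int map_fd, size_t key_size)
	{
		unsigned char cur[key_size], next[key_size];
		union bpf_attr attr;
		void *key = NULL;		/* NULL: fetch the first key */

		for (;;) {
			memset(&attr, 0, sizeof(attr));
			attr.map_fd   = map_fd;
			attr.key      = (__u64)(unsigned long)key;
			attr.next_key = (__u64)(unsigned long)next;
			if (syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr,
				    sizeof(attr)) < 0)
				break;
			memcpy(cur, next, key_size);
			key = cur;		/* continue from this key */
			/* ... use cur ... */
		}
	}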
@@ -564,79 +575,23 @@ err_put:
 	return err;
 }
 
-static LIST_HEAD(bpf_prog_types);
+static const struct bpf_verifier_ops * const bpf_prog_types[] = {
+#define BPF_PROG_TYPE(_id, _ops) \
+	[_id] = &_ops,
+#define BPF_MAP_TYPE(_id, _ops)
+#include <linux/bpf_types.h>
+#undef BPF_PROG_TYPE
+#undef BPF_MAP_TYPE
+};
 
 static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
 {
-	struct bpf_prog_type_list *tl;
-
-	list_for_each_entry(tl, &bpf_prog_types, list_node) {
-		if (tl->type == type) {
-			prog->aux->ops = tl->ops;
-			prog->type = type;
-			return 0;
-		}
-	}
-
-	return -EINVAL;
-}
-
-void bpf_register_prog_type(struct bpf_prog_type_list *tl)
-{
-	list_add(&tl->list_node, &bpf_prog_types);
-}
-
-/* fixup insn->imm field of bpf_call instructions:
- * if (insn->imm == BPF_FUNC_map_lookup_elem)
- *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
- * else if (insn->imm == BPF_FUNC_map_update_elem)
- *      insn->imm = bpf_map_update_elem - __bpf_call_base;
- * else ...
- *
- * this function is called after eBPF program passed verification
- */
-static void fixup_bpf_calls(struct bpf_prog *prog)
-{
-	const struct bpf_func_proto *fn;
-	int i;
+	if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
+		return -EINVAL;
 
-	for (i = 0; i < prog->len; i++) {
-		struct bpf_insn *insn = &prog->insnsi[i];
-
-		if (insn->code == (BPF_JMP | BPF_CALL)) {
-			/* we reach here when program has bpf_call instructions
-			 * and it passed bpf_check(), means that
-			 * ops->get_func_proto must have been supplied, check it
-			 */
-			BUG_ON(!prog->aux->ops->get_func_proto);
-
-			if (insn->imm == BPF_FUNC_get_route_realm)
-				prog->dst_needed = 1;
-			if (insn->imm == BPF_FUNC_get_prandom_u32)
-				bpf_user_rnd_init_once();
-			if (insn->imm == BPF_FUNC_xdp_adjust_head)
-				prog->xdp_adjust_head = 1;
-			if (insn->imm == BPF_FUNC_tail_call) {
-				/* mark bpf_tail_call as different opcode
-				 * to avoid conditional branch in
-				 * interpeter for every normal call
-				 * and to prevent accidental JITing by
-				 * JIT compiler that doesn't support
-				 * bpf_tail_call yet
-				 */
-				insn->imm = 0;
-				insn->code |= BPF_X;
-				continue;
-			}
-
-			fn = prog->aux->ops->get_func_proto(insn->imm);
-			/* all functions that have prototype and verifier allowed
-			 * programs to call them, must be real in-kernel functions
-			 */
-			BUG_ON(!fn->func);
-			insn->imm = fn->func - __bpf_call_base;
-		}
-	}
+	prog->aux->ops = bpf_prog_types[type];
+	prog->type = type;
+	return 0;
 }
 
 /* drop refcnt on maps used by eBPF program and free auxilary data */
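The removal of fixup_bpf_calls() here is a relocation rather than a deletion: elsewhere in this merge the function moves into kernel/bpf/verifier.c, where the same BPF_CALL rewriting (mapping helper IDs to in-kernel function addresses, special-casing bpf_tail_call(), and so on) now runs as one of the verifier's fixup passes.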
@@ -892,9 +847,6 @@ static int bpf_prog_load(union bpf_attr *attr)
 	if (err < 0)
 		goto free_used_maps;
 
-	/* fixup BPF_CALL->imm field */
-	fixup_bpf_calls(prog);
-
 	/* eBPF program is ready to be JITed */
 	prog = bpf_prog_select_runtime(prog, &err);
 	if (err < 0)
@@ -1020,6 +972,28 @@ static int bpf_prog_detach(const union bpf_attr *attr)
 }
 #endif /* CONFIG_CGROUP_BPF */
 
+#define BPF_PROG_TEST_RUN_LAST_FIELD test.duration
+
+static int bpf_prog_test_run(const union bpf_attr *attr,
+			     union bpf_attr __user *uattr)
+{
+	struct bpf_prog *prog;
+	int ret = -ENOTSUPP;
+
+	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
+		return -EINVAL;
+
+	prog = bpf_prog_get(attr->test.prog_fd);
+	if (IS_ERR(prog))
+		return PTR_ERR(prog);
+
+	if (prog->aux->ops->test_run)
+		ret = prog->aux->ops->test_run(prog, attr, uattr);
+
+	bpf_prog_put(prog);
+	return ret;
+}
+
 SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
 {
 	union bpf_attr attr = {};
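BPF_PROG_TEST_RUN is new in v4.12: it executes an already-loaded program over caller-supplied input and reports the program's return value and average runtime without attaching it anywhere. Only program types whose bpf_verifier_ops supply a test_run callback support it; everything else gets the -ENOTSUPP default above. A hedged userspace sketch, again reusing the earlier includes; run_once() is a hypothetical helper, and field names follow the v4.12 test section of union bpf_attr:

	/* Sketch: test-run a loaded program against a synthetic packet. */
	static int run_once(int prog_fd, void *pkt, __u32 pkt_len)
	{
		unsigned char out[1500];	/* kernel writes modified data here */
		union bpf_attr attr;
		int err;

		memset(&attr, 0, sizeof(attr));
		attr.test.prog_fd      = prog_fd;
		attr.test.data_in      = (__u64)(unsigned long)pkt;
		attr.test.data_size_in = pkt_len;
		attr.test.data_out     = (__u64)(unsigned long)out;
		attr.test.repeat       = 1000;	/* duration is averaged */

		err = syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr));
		/* on success, attr.test.retval and attr.test.duration
		 * (nanoseconds) have been filled in by the kernel
		 */
		return err;
	}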
@@ -1086,7 +1060,6 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
 	case BPF_OBJ_GET:
 		err = bpf_obj_get(&attr);
 		break;
-
 #ifdef CONFIG_CGROUP_BPF
 	case BPF_PROG_ATTACH:
 		err = bpf_prog_attach(&attr);
@@ -1095,7 +1068,9 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
 		err = bpf_prog_detach(&attr);
 		break;
 #endif
-
+	case BPF_PROG_TEST_RUN:
+		err = bpf_prog_test_run(&attr, uattr);
+		break;
 	default:
 		err = -EINVAL;
 		break;