author:    Alexei Starovoitov <ast@plumgrid.com>  2015-10-08 01:23:22 -0400
committer: David S. Miller <davem@davemloft.net>  2015-10-12 22:13:36 -0400
commit:    aaac3ba95e4c8b496d22f68bd1bc01cfbf525eca
tree:      fa5df3122c2576bd2df76f1494c88619b22b6f08 /kernel/bpf/syscall.c
parent:    1be7f75d1668d6296b80bf35dcf6762393530afc
bpf: charge user for creation of BPF maps and programs
Since eBPF programs and maps use kernel memory, consider it 'locked' memory from the user accounting point of view and charge it against the RLIMIT_MEMLOCK limit. This limit is typically set to 64 KB by distros, so almost all bpf+tracing programs will need to increase it, since they use maps and the kernel charges the maximum map size upfront. For example, a hash map of 1024 elements will be charged as 64 KB. This is inconvenient for current users and changes current behavior for root, but it is probably worth doing to keep root and non-root consistent. Similar accounting logic is done by mmap of perf_event.

Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
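Because the kernel now charges the maximum map size upfront, a loader that may run under the default limit typically raises RLIMIT_MEMLOCK before calling bpf(2). A minimal userspace sketch, not part of the patch; the hash-map parameters are illustrative assumptions:

/*
 * Hedged sketch: once map creation is charged against RLIMIT_MEMLOCK,
 * loaders raise the limit first. Map parameters below are illustrative.
 */
#include <errno.h>
#include <linux/bpf.h>
#include <stdio.h>
#include <string.h>
#include <sys/resource.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };
	union bpf_attr attr;
	int fd;

	/* Lift the (typically 64 KB) memlock cap before creating maps. */
	if (setrlimit(RLIMIT_MEMLOCK, &r))
		perror("setrlimit(RLIMIT_MEMLOCK)");

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_HASH;
	attr.key_size = 4;
	attr.value_size = 8;
	attr.max_entries = 1024;	/* the 1024-element example from the log */

	/* With this patch, an exhausted memlock budget surfaces as EPERM. */
	fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
	if (fd < 0 && errno == EPERM)
		fprintf(stderr, "map charge exceeded RLIMIT_MEMLOCK\n");
	return fd < 0 ? 1 : 0;
}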
Diffstat (limited to 'kernel/bpf/syscall.c')
 kernel/bpf/syscall.c | 63
 1 file changed, 63 insertions(+), 0 deletions(-)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 83697bc8e574..f640e5f7afbd 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -46,11 +46,38 @@ void bpf_register_map_type(struct bpf_map_type_list *tl)
 	list_add(&tl->list_node, &bpf_map_types);
 }
 
+static int bpf_map_charge_memlock(struct bpf_map *map)
+{
+	struct user_struct *user = get_current_user();
+	unsigned long memlock_limit;
+
+	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+	atomic_long_add(map->pages, &user->locked_vm);
+
+	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
+		atomic_long_sub(map->pages, &user->locked_vm);
+		free_uid(user);
+		return -EPERM;
+	}
+	map->user = user;
+	return 0;
+}
+
+static void bpf_map_uncharge_memlock(struct bpf_map *map)
+{
+	struct user_struct *user = map->user;
+
+	atomic_long_sub(map->pages, &user->locked_vm);
+	free_uid(user);
+}
+
 /* called from workqueue */
 static void bpf_map_free_deferred(struct work_struct *work)
 {
 	struct bpf_map *map = container_of(work, struct bpf_map, work);
 
+	bpf_map_uncharge_memlock(map);
 	/* implementation dependent freeing */
 	map->ops->map_free(map);
 }
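For scale, bpf_map_charge_memlock() compares page counts: assuming a 4 KB page size (PAGE_SHIFT = 12), the typical 64 KB default yields memlock_limit = 65536 >> 12 = 16 pages, so a single map charged 16 pages exhausts the allowance and the next charge by the same user fails with -EPERM.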
@@ -110,6 +137,10 @@ static int map_create(union bpf_attr *attr)
 
 	atomic_set(&map->refcnt, 1);
 
+	err = bpf_map_charge_memlock(map);
+	if (err)
+		goto free_map;
+
 	err = anon_inode_getfd("bpf-map", &bpf_map_fops, map, O_RDWR | O_CLOEXEC);
 
 	if (err < 0)
@@ -442,11 +473,37 @@ static void free_used_maps(struct bpf_prog_aux *aux)
 	kfree(aux->used_maps);
 }
 
+static int bpf_prog_charge_memlock(struct bpf_prog *prog)
+{
+	struct user_struct *user = get_current_user();
+	unsigned long memlock_limit;
+
+	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+	atomic_long_add(prog->pages, &user->locked_vm);
+	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
+		atomic_long_sub(prog->pages, &user->locked_vm);
+		free_uid(user);
+		return -EPERM;
+	}
+	prog->aux->user = user;
+	return 0;
+}
+
+static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
+{
+	struct user_struct *user = prog->aux->user;
+
+	atomic_long_sub(prog->pages, &user->locked_vm);
+	free_uid(user);
+}
+
 static void __prog_put_rcu(struct rcu_head *rcu)
 {
 	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
 
 	free_used_maps(aux);
+	bpf_prog_uncharge_memlock(aux->prog);
 	bpf_prog_free(aux->prog);
 }
 
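As with bpf_map_free_deferred() above, the program-side uncharge runs only in __prog_put_rcu(), i.e. after the last reference is dropped and an RCU grace period has passed, so the user's locked_vm stays charged for the program's entire lifetime.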
@@ -554,6 +611,10 @@ static int bpf_prog_load(union bpf_attr *attr)
 	if (!prog)
 		return -ENOMEM;
 
+	err = bpf_prog_charge_memlock(prog);
+	if (err)
+		goto free_prog_nouncharge;
+
 	prog->len = attr->insn_cnt;
 
 	err = -EFAULT;
@@ -595,6 +656,8 @@ static int bpf_prog_load(union bpf_attr *attr)
 free_used_maps:
 	free_used_maps(prog->aux);
 free_prog:
+	bpf_prog_uncharge_memlock(prog);
+free_prog_nouncharge:
 	bpf_prog_free(prog);
 	return err;
 }
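Note the error-path split in bpf_prog_load(): free_prog_nouncharge is the target when bpf_prog_charge_memlock() itself fails, so the unwind never subtracts pages that were never added, while any later failure falls through free_prog and uncharges first.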