author    Daniel Borkmann <daniel@iogearbox.net>    2016-12-17 19:52:58 -0500
committer David S. Miller <davem@davemloft.net>     2016-12-17 21:27:44 -0500
commit    5ccb071e97fbd9ffe623a0d3977cc6d013bee93c (patch)
tree      88eefdce0d63cacb938dffcabdf0291e2a450233
parent    aafe6ae9cee32df85eb5e8bb6dd1d918e6807b09 (diff)
bpf: fix overflow in prog accounting
Commit aaac3ba95e4c ("bpf: charge user for creation of BPF maps and programs") made a wrong assumption of charging against prog->pages. Unlike map->pages, prog->pages is still subject to change when we need to expand the program through bpf_prog_realloc().

This can, for example, happen during the verification stage when we need to expand and rewrite parts of the program. Should the required space cross a page boundary, then prog->pages is no longer the same as the original value that we used for bpf_prog_charge_memlock(). Thus, we hit a wrap-around during bpf_prog_uncharge_memlock() when the prog is eventually freed. I noticed this when, despite having an unlimited memlock limit, programs suddenly refused to load with an EPERM error due to insufficient memlock.

There are two ways to fix this issue. One would be to add a cached variable to struct bpf_prog that takes a snapshot of prog->pages at the time of charging. The other approach is to also account for resizes. I chose the latter for a couple of reasons: i) we want the accounting to be more accurate rather than further fooling the limits, and ii) adding yet another page counter to struct bpf_prog just for this purpose would be a waste. We also want to charge as early as possible, to avoid going into the verifier only to find out later that we crossed the limits. The only place that needs fixing is bpf_prog_realloc(), since it is the only place where we expand the program: we try to account for the needed delta there, and should that fail, call-sites check the outcome anyway. On cBPF to eBPF migrations, we don't grab a reference to the user, as they are charged differently. With that in place, my test case worked fine.

Fixes: aaac3ba95e4c ("bpf: charge user for creation of BPF maps and programs")
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
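For illustration, a minimal user-space sketch of the wrap-around described above; charge()/uncharge() and the limit value are demo stand-ins that mirror the memlock accounting logic, not kernel API:

#include <stdio.h>

static unsigned long locked_vm;                 /* mirrors user->locked_vm   */
static const unsigned long memlock_limit = 16;  /* pages; arbitrary demo cap */

static int charge(unsigned long pages)
{
        locked_vm += pages;
        if (locked_vm > memlock_limit) {
                locked_vm -= pages;
                return -1;                      /* stands in for -EPERM */
        }
        return 0;
}

static void uncharge(unsigned long pages)
{
        locked_vm -= pages;
}

int main(void)
{
        charge(2);   /* prog charged at its initial 2 pages */
        uncharge(4); /* verifier grew it to 4 pages without recharging, so
                      * the final uncharge subtracts 4: locked_vm wraps to
                      * a huge unsigned value */
        printf("locked_vm = %lu\n", locked_vm);
        printf("next charge = %d\n", charge(1)); /* -1: every later load
                                                  * now looks over limit */
        return 0;
}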
-rw-r--r--  include/linux/bpf.h   11
-rw-r--r--  kernel/bpf/core.c     16
-rw-r--r--  kernel/bpf/syscall.c  36
3 files changed, 52 insertions(+), 11 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 201eb483c46f..f74ae68086dc 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -238,6 +238,8 @@ struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
 void bpf_prog_sub(struct bpf_prog *prog, int i);
 struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
 void bpf_prog_put(struct bpf_prog *prog);
+int __bpf_prog_charge(struct user_struct *user, u32 pages);
+void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
 
 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 struct bpf_map *__bpf_map_get(struct fd f);
@@ -318,6 +320,15 @@ static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog)
 {
         return ERR_PTR(-EOPNOTSUPP);
 }
+
+static inline int __bpf_prog_charge(struct user_struct *user, u32 pages)
+{
+        return 0;
+}
+
+static inline void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
+{
+}
 #endif /* CONFIG_BPF_SYSCALL */
 
 /* verifier prototypes for helper functions called from eBPF programs */
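A side note on the header change: the !CONFIG_BPF_SYSCALL stubs above keep call-sites free of #ifdefs. A small stand-alone sketch of that pattern (DEMO_FEATURE and demo_charge() are hypothetical names, not kernel identifiers):

#include <stdio.h>

/* #define DEMO_FEATURE 1 -- the real implementation would live elsewhere */

#ifdef DEMO_FEATURE
int demo_charge(unsigned int pages);            /* out-of-line version */
#else
static inline int demo_charge(unsigned int pages)
{
        (void)pages;
        return 0;                               /* trivially succeeds */
}
#endif

int main(void)
{
        /* The call-site is identical either way, just as bpf_prog_realloc()
         * can call __bpf_prog_charge() unconditionally.
         */
        if (demo_charge(4))
                return 1;
        puts("charged");
        return 0;
}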
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 75c08b8068d6..1eb4f1303756 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -105,19 +105,29 @@ struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size,
         gfp_t gfp_flags = GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO |
                           gfp_extra_flags;
         struct bpf_prog *fp;
+        u32 pages, delta;
+        int ret;
 
         BUG_ON(fp_old == NULL);
 
         size = round_up(size, PAGE_SIZE);
-        if (size <= fp_old->pages * PAGE_SIZE)
+        pages = size / PAGE_SIZE;
+        if (pages <= fp_old->pages)
                 return fp_old;
 
+        delta = pages - fp_old->pages;
+        ret = __bpf_prog_charge(fp_old->aux->user, delta);
+        if (ret)
+                return NULL;
+
         fp = __vmalloc(size, gfp_flags, PAGE_KERNEL);
-        if (fp != NULL) {
+        if (fp == NULL) {
+                __bpf_prog_uncharge(fp_old->aux->user, delta);
+        } else {
                 kmemcheck_annotate_bitfield(fp, meta);
 
                 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
-                fp->pages = size / PAGE_SIZE;
+                fp->pages = pages;
                 fp->aux->prog = fp;
 
                 /* We keep fp->aux from fp_old around in the new
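The ordering in this hunk generalizes: charge the page delta first, allocate second, and roll the charge back if the allocation fails, so the counter always matches ->pages. A minimal user-space sketch under the assumption of a trivially succeeding charge(); struct buf, grow_buf() and DEMO_PAGE_SIZE are demo names:

#include <stdio.h>
#include <stdlib.h>

#define DEMO_PAGE_SIZE 4096UL

static unsigned long charged;                   /* stands in for locked_vm */

static int charge(unsigned long pages)    { charged += pages; return 0; }
static void uncharge(unsigned long pages) { charged -= pages; }

struct buf {
        unsigned long pages;
        char *data;
};

/* Grow b to at least size bytes, keeping the page accounting exact. */
static int grow_buf(struct buf *b, size_t size)
{
        unsigned long pages, delta;
        char *data;

        size = (size + DEMO_PAGE_SIZE - 1) & ~(DEMO_PAGE_SIZE - 1);
        pages = size / DEMO_PAGE_SIZE;
        if (pages <= b->pages)          /* no growth, like returning fp_old */
                return 0;

        delta = pages - b->pages;
        if (charge(delta))              /* account for the delta up front */
                return -1;

        data = realloc(b->data, size);
        if (!data) {
                uncharge(delta);        /* roll back on allocation failure */
                return -1;
        }
        b->data  = data;
        b->pages = pages;               /* counter and ->pages now agree */
        return 0;
}

int main(void)
{
        struct buf b = { .pages = 1, .data = malloc(DEMO_PAGE_SIZE) };

        charge(b.pages);
        grow_buf(&b, 3 * DEMO_PAGE_SIZE);
        printf("pages=%lu charged=%lu\n", b.pages, charged); /* both 3 */
        free(b.data);
        return 0;
}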
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 35d674c1f12e..e89acea22ecf 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -615,19 +615,39 @@ static void free_used_maps(struct bpf_prog_aux *aux)
         kfree(aux->used_maps);
 }
 
+int __bpf_prog_charge(struct user_struct *user, u32 pages)
+{
+        unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+        unsigned long user_bufs;
+
+        if (user) {
+                user_bufs = atomic_long_add_return(pages, &user->locked_vm);
+                if (user_bufs > memlock_limit) {
+                        atomic_long_sub(pages, &user->locked_vm);
+                        return -EPERM;
+                }
+        }
+
+        return 0;
+}
+
+void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
+{
+        if (user)
+                atomic_long_sub(pages, &user->locked_vm);
+}
+
 static int bpf_prog_charge_memlock(struct bpf_prog *prog)
 {
         struct user_struct *user = get_current_user();
-        unsigned long memlock_limit;
-
-        memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+        int ret;
 
-        atomic_long_add(prog->pages, &user->locked_vm);
-        if (atomic_long_read(&user->locked_vm) > memlock_limit) {
-                atomic_long_sub(prog->pages, &user->locked_vm);
+        ret = __bpf_prog_charge(user, prog->pages);
+        if (ret) {
                 free_uid(user);
-                return -EPERM;
+                return ret;
         }
+
         prog->aux->user = user;
         return 0;
 }
@@ -636,7 +656,7 @@ static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
 {
         struct user_struct *user = prog->aux->user;
 
-        atomic_long_sub(prog->pages, &user->locked_vm);
+        __bpf_prog_uncharge(user, prog->pages);
         free_uid(user);
 }
 
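Note that __bpf_prog_charge() tests the value returned by the add itself via atomic_long_add_return(), rather than doing a separate read after a plain add as the old bpf_prog_charge_memlock() body did. A minimal sketch of that add-then-check-then-rollback idiom, with C11 atomics as stand-ins for the kernel's atomic_long_* helpers and an arbitrary demo limit:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long locked_vm;
static const long memlock_limit = 16;   /* pages; arbitrary demo cap */

static int charge(long pages)
{
        /* fetch_add returns the old value; adding pages again yields the
         * new value, matching atomic_long_add_return() semantics.
         */
        long user_bufs = atomic_fetch_add(&locked_vm, pages) + pages;

        if (user_bufs > memlock_limit) {
                atomic_fetch_sub(&locked_vm, pages);
                return -1;              /* stands in for -EPERM */
        }
        return 0;
}

int main(void)
{
        printf("charge(10) = %d\n", charge(10)); /* within the cap: 0   */
        printf("charge(10) = %d\n", charge(10)); /* would exceed it: -1 */
        printf("locked_vm  = %ld\n", atomic_load(&locked_vm)); /* still 10 */
        return 0;
}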