author		Björn Töpel <bjorn.topel@intel.com>	2019-10-29 11:43:07 -0400
committer	Daniel Borkmann <daniel@iogearbox.net>	2019-10-31 16:41:33 -0400
commit		ff1c08e1f74b6864854c39be48aa799a6a2e4d2b
tree		7d4474d2002747c76fcc5b312d60bbd59edd59f5
parent		04ec044b7d30800296824783df7d9728d16d7567
bpf: Change size to u64 for bpf_map_{area_alloc, charge_init}()
Prior to this commit, the functions bpf_map_area_alloc() and
bpf_map_charge_init() took their size parameter as size_t. This commit
changes it to u64.
All users of these functions avoid size_t overflows on 32-bit systems
by explicitly using u64 when calculating the allocation size and
memory charge cost. However, since the result was narrowed back to
size_t when size and cost were passed to the functions, the overflow
handling was in vain.
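To make the failure mode concrete, here is a small user-space sketch of
the narrowing (illustration only; the names are hypothetical and this is
not code from the patch). uint32_t stands in for a 32-bit size_t:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical allocator whose parameter models a 32-bit size_t. */
static void *alloc_map(uint32_t size)
{
	printf("allocating %u bytes\n", size);
	return NULL;	/* actual allocation elided */
}

int main(void)
{
	uint64_t max_entries = 1ULL << 20;	/* e.g. from a map attribute */
	uint64_t elem_size = 1ULL << 12;
	uint64_t cost = max_entries * elem_size;	/* 2^32, computed safely in u64 */

	/* The caller's u64 arithmetic is correct, but the implicit
	 * conversion at the call silently narrows 2^32 to 0:
	 */
	alloc_map(cost);	/* prints "allocating 0 bytes" */
	return 0;
}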
Instead of changing all call sites to check for overflow before the
call, the parameter is changed to u64 and the check is done inside the
functions themselves.
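A minimal user-space sketch of the resulting pattern (area_alloc is a
hypothetical stand-in, not the kernel implementation): the function now
receives the full 64-bit value, rejects anything a size_t cannot
represent, and only then narrows it for the underlying allocator.

#include <stdint.h>
#include <stdlib.h>

static void *area_alloc(uint64_t size)
{
	/* Reject sizes that do not fit in size_t (and SIZE_MAX itself);
	 * on 32-bit this catches every value the old code truncated.
	 */
	if (size >= SIZE_MAX)
		return NULL;
	return malloc((size_t)size);	/* narrowing is now known to be safe */
}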
Fixes: d407bd25a204 ("bpf: don't trigger OOM killer under pressure with map alloc")
Fixes: c85d69135a91 ("bpf: move memory size checks to bpf_map_charge_init()")
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Link: https://lore.kernel.org/bpf/20191029154307.23053-1-bjorn.topel@gmail.com
-rw-r--r--	include/linux/bpf.h	| 4 ++--
-rw-r--r--	kernel/bpf/syscall.c	| 7 ++++++-
2 files changed, 7 insertions(+), 4 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 5b9d22338606..3bf3835d0e86 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -656,11 +656,11 @@ void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
-int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size);
+int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size);
 void bpf_map_charge_finish(struct bpf_map_memory *mem);
 void bpf_map_charge_move(struct bpf_map_memory *dst,
			 struct bpf_map_memory *src);
-void *bpf_map_area_alloc(size_t size, int numa_node);
+void *bpf_map_area_alloc(u64 size, int numa_node);
 void bpf_map_area_free(void *base);
 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
 
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 0937719b87e2..ace1cfaa24b6 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -126,7 +126,7 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
 	return map;
 }
 
-void *bpf_map_area_alloc(size_t size, int numa_node)
+void *bpf_map_area_alloc(u64 size, int numa_node)
 {
 	/* We really just want to fail instead of triggering OOM killer
 	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
@@ -141,6 +141,9 @@ void *bpf_map_area_alloc(size_t size, int numa_node)
 	const gfp_t flags = __GFP_NOWARN | __GFP_ZERO;
 	void *area;
 
+	if (size >= SIZE_MAX)
+		return NULL;
+
 	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
 		area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags,
 				    numa_node);
@@ -197,7 +200,7 @@ static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
 	atomic_long_sub(pages, &user->locked_vm);
 }
 
-int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size)
+int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
 {
 	u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
 	struct user_struct *user;