diff options
-rw-r--r--  include/linux/bpf.h  | 4 ++--
-rw-r--r--  kernel/bpf/syscall.c | 7 +++++--
2 files changed, 7 insertions(+), 4 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 5b9d22338606..3bf3835d0e86 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -656,11 +656,11 @@ void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
-int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size);
+int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size);
 void bpf_map_charge_finish(struct bpf_map_memory *mem);
 void bpf_map_charge_move(struct bpf_map_memory *dst,
 			 struct bpf_map_memory *src);
-void *bpf_map_area_alloc(size_t size, int numa_node);
+void *bpf_map_area_alloc(u64 size, int numa_node);
 void bpf_map_area_free(void *base);
 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
 
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 0937719b87e2..ace1cfaa24b6 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -126,7 +126,7 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
 	return map;
 }
 
-void *bpf_map_area_alloc(size_t size, int numa_node)
+void *bpf_map_area_alloc(u64 size, int numa_node)
 {
 	/* We really just want to fail instead of triggering OOM killer
 	 * under memory pressure, therefore we set __GFP_NORETRY to kmalloc,
@@ -141,6 +141,9 @@ void *bpf_map_area_alloc(u64 size, int numa_node)
 	const gfp_t flags = __GFP_NOWARN | __GFP_ZERO;
 	void *area;
 
+	if (size >= SIZE_MAX)
+		return NULL;
+
 	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
 		area = kmalloc_node(size, GFP_USER | __GFP_NORETRY | flags,
 				    numa_node);
@@ -197,7 +200,7 @@ static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
 	atomic_long_sub(pages, &user->locked_vm);
 }
 
-int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size)
+int bpf_map_charge_init(struct bpf_map_memory *mem, u64 size)
 {
 	u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
 	struct user_struct *user;