 include/linux/bpf.h           |  2 +-
 kernel/bpf/arraymap.c         |  8 +-------
 kernel/bpf/cpumap.c           |  5 +----
 kernel/bpf/devmap.c           |  5 +----
 kernel/bpf/hashtab.c          |  7 +------
 kernel/bpf/local_storage.c    |  5 +----
 kernel/bpf/lpm_trie.c         |  7 +------
 kernel/bpf/queue_stack_maps.c |  4 ----
 kernel/bpf/reuseport_array.c  | 10 ++--------
 kernel/bpf/stackmap.c         |  8 +-------
 kernel/bpf/syscall.c          |  9 +++++++--
 kernel/bpf/xskmap.c           |  5 +----
 net/core/bpf_sk_storage.c     |  4 +---
 net/core/sock_map.c           |  8 +-------
 14 files changed, 20 insertions(+), 67 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 3c8f24f402bf..e5a309e6a400 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -652,7 +652,7 @@ void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
-int bpf_map_charge_init(struct bpf_map_memory *mem, u32 pages);
+int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size);
 void bpf_map_charge_finish(struct bpf_map_memory *mem);
 void bpf_map_charge_move(struct bpf_map_memory *dst,
 			 struct bpf_map_memory *src);
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 3552da4407d9..0349cbf23cdb 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -117,14 +117,8 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
 	/* make sure there is no u32 overflow later in round_up() */
 	cost = array_size;
-	if (cost >= U32_MAX - PAGE_SIZE)
-		return ERR_PTR(-ENOMEM);
-	if (percpu) {
+	if (percpu)
 		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();
-		if (cost >= U32_MAX - PAGE_SIZE)
-			return ERR_PTR(-ENOMEM);
-	}
-	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
 	ret = bpf_map_charge_init(&mem, cost);
 	if (ret < 0)
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index c633c8d68023..b31a71909307 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -106,12 +106,9 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
 	/* make sure page count doesn't overflow */
 	cost = (u64) cmap->map.max_entries * sizeof(struct bpf_cpu_map_entry *);
 	cost += cpu_map_bitmap_size(attr) * num_possible_cpus();
-	if (cost >= U32_MAX - PAGE_SIZE)
-		goto free_cmap;
 
 	/* Notice returns -EPERM on if map size is larger than memlock limit */
-	ret = bpf_map_charge_init(&cmap->map.memory,
-				  round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
+	ret = bpf_map_charge_init(&cmap->map.memory, cost);
 	if (ret) {
 		err = ret;
 		goto free_cmap;
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 371bd880ed58..5ae7cce5ef16 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -108,12 +108,9 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 	/* make sure page count doesn't overflow */
 	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
 	cost += dev_map_bitmap_size(attr) * num_possible_cpus();
-	if (cost >= U32_MAX - PAGE_SIZE)
-		goto free_dtab;
 
 	/* if map size is larger than memlock limit, reject it */
-	err = bpf_map_charge_init(&dtab->map.memory,
-				  round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
+	err = bpf_map_charge_init(&dtab->map.memory, cost);
 	if (err)
 		goto free_dtab;
 
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index b0bdc7b040ad..d92e05d9979b 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -360,13 +360,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	else
 		cost += (u64) htab->elem_size * num_possible_cpus();
 
-	if (cost >= U32_MAX - PAGE_SIZE)
-		/* make sure page count doesn't overflow */
-		goto free_htab;
-
 	/* if map size is larger than memlock limit, reject it */
-	err = bpf_map_charge_init(&htab->map.memory,
-				  round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
+	err = bpf_map_charge_init(&htab->map.memory, cost);
 	if (err)
 		goto free_htab;
 
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index e49bfd4f4f6d..addd6fdceec8 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -273,7 +273,6 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
 	int numa_node = bpf_map_attr_numa_node(attr);
 	struct bpf_cgroup_storage_map *map;
 	struct bpf_map_memory mem;
-	u32 pages;
 	int ret;
 
 	if (attr->key_size != sizeof(struct bpf_cgroup_storage_key))
@@ -293,9 +292,7 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
 		/* max_entries is not used and enforced to be 0 */
 		return ERR_PTR(-EINVAL);
 
-	pages = round_up(sizeof(struct bpf_cgroup_storage_map), PAGE_SIZE) >>
-		PAGE_SHIFT;
-	ret = bpf_map_charge_init(&mem, pages);
+	ret = bpf_map_charge_init(&mem, sizeof(struct bpf_cgroup_storage_map));
 	if (ret < 0)
 		return ERR_PTR(ret);
 
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 6345a8d2dcd0..09334f13a8a0 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -573,13 +573,8 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
 	cost_per_node = sizeof(struct lpm_trie_node) +
 			attr->value_size + trie->data_size;
 	cost += (u64) attr->max_entries * cost_per_node;
-	if (cost >= U32_MAX - PAGE_SIZE) {
-		ret = -E2BIG;
-		goto out_err;
-	}
 
-	ret = bpf_map_charge_init(&trie->map.memory,
-				  round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
+	ret = bpf_map_charge_init(&trie->map.memory, cost);
 	if (ret)
 		goto out_err;
 
diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
index 224cb0fd8f03..f697647ceb54 100644
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -73,10 +73,6 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
 
 	size = (u64) attr->max_entries + 1;
 	cost = queue_size = sizeof(*qs) + size * attr->value_size;
-	if (cost >= U32_MAX - PAGE_SIZE)
-		return ERR_PTR(-E2BIG);
-
-	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
 	ret = bpf_map_charge_init(&mem, cost);
 	if (ret < 0)
diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c
index 5c6e25b1b9b1..50c083ba978c 100644
--- a/kernel/bpf/reuseport_array.c
+++ b/kernel/bpf/reuseport_array.c
@@ -152,7 +152,7 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
 	int err, numa_node = bpf_map_attr_numa_node(attr);
 	struct reuseport_array *array;
 	struct bpf_map_memory mem;
-	u64 cost, array_size;
+	u64 array_size;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return ERR_PTR(-EPERM);
@@ -160,13 +160,7 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
 	array_size = sizeof(*array);
 	array_size += (u64)attr->max_entries * sizeof(struct sock *);
 
-	/* make sure there is no u32 overflow later in round_up() */
-	cost = array_size;
-	if (cost >= U32_MAX - PAGE_SIZE)
-		return ERR_PTR(-ENOMEM);
-	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
-
-	err = bpf_map_charge_init(&mem, cost);
+	err = bpf_map_charge_init(&mem, array_size);
 	if (err)
 		return ERR_PTR(err);
 
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 8da24ca65d97..3d86072d8e32 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -117,14 +117,8 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 	n_buckets = roundup_pow_of_two(attr->max_entries);
 
 	cost = n_buckets * sizeof(struct stack_map_bucket *) + sizeof(*smap);
-	if (cost >= U32_MAX - PAGE_SIZE)
-		return ERR_PTR(-E2BIG);
 	cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
-	if (cost >= U32_MAX - PAGE_SIZE)
-		return ERR_PTR(-E2BIG);
-
-	err = bpf_map_charge_init(&mem,
-				  round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
+	err = bpf_map_charge_init(&mem, cost);
 	if (err)
 		return ERR_PTR(err);
 
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 4a5ebad99154..4c53cbd3329d 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -205,11 +205,16 @@ static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
 	atomic_long_sub(pages, &user->locked_vm);
 }
 
-int bpf_map_charge_init(struct bpf_map_memory *mem, u32 pages)
+int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size)
 {
-	struct user_struct *user = get_current_user();
+	u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
+	struct user_struct *user;
 	int ret;
 
+	if (size >= U32_MAX - PAGE_SIZE)
+		return -E2BIG;
+
+	user = get_current_user();
 	ret = bpf_charge_memlock(user, pages);
 	if (ret) {
 		free_uid(user);
diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c
index a329dab7c7a4..22066c28ba61 100644
--- a/kernel/bpf/xskmap.c
+++ b/kernel/bpf/xskmap.c
@@ -37,12 +37,9 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
 
 	cost = (u64)m->map.max_entries * sizeof(struct xdp_sock *);
 	cost += sizeof(struct list_head) * num_possible_cpus();
-	if (cost >= U32_MAX - PAGE_SIZE)
-		goto free_m;
 
 	/* Notice returns -EPERM on if map size is larger than memlock limit */
-	err = bpf_map_charge_init(&m->map.memory,
-				  round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
+	err = bpf_map_charge_init(&m->map.memory, cost);
 	if (err)
 		goto free_m;
 
diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c
index 621a0b07ff11..f40e3d35fd9c 100644
--- a/net/core/bpf_sk_storage.c
+++ b/net/core/bpf_sk_storage.c
@@ -626,7 +626,6 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
 	struct bpf_sk_storage_map *smap;
 	unsigned int i;
 	u32 nbuckets;
-	u32 pages;
 	u64 cost;
 	int ret;
 
@@ -638,9 +637,8 @@ static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
 	smap->bucket_log = ilog2(roundup_pow_of_two(num_possible_cpus()));
 	nbuckets = 1U << smap->bucket_log;
 	cost = sizeof(*smap->buckets) * nbuckets + sizeof(*smap);
-	pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
-	ret = bpf_map_charge_init(&smap->map.memory, pages);
+	ret = bpf_map_charge_init(&smap->map.memory, cost);
 	if (ret < 0) {
 		kfree(smap);
 		return ERR_PTR(ret);
diff --git a/net/core/sock_map.c b/net/core/sock_map.c
index 1028c922a149..52d4faeee18b 100644
--- a/net/core/sock_map.c
+++ b/net/core/sock_map.c
@@ -44,13 +44,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 
 	/* Make sure page count doesn't overflow. */
 	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
-	if (cost >= U32_MAX - PAGE_SIZE) {
-		err = -EINVAL;
-		goto free_stab;
-	}
-
-	err = bpf_map_charge_init(&stab->map.memory,
-				  round_up(cost, PAGE_SIZE) >> PAGE_SHIFT);
+	err = bpf_map_charge_init(&stab->map.memory, cost);
 	if (err)
 		goto free_stab;
 
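
Taken together, the patch moves the byte-count overflow check and the byte-to-page conversion out of every map allocator and into bpf_map_charge_init() itself. A minimal sketch of the resulting caller pattern follows; the helper name and parameters are illustrative, not taken from the tree.

/*
 * Illustrative sketch only, not part of the patch.  It mirrors the
 * pattern the diff converts each map type to: compute the cost in
 * bytes and hand it straight to bpf_map_charge_init(), which now does
 * the U32_MAX - PAGE_SIZE overflow check (returning -E2BIG) and the
 * round_up(cost, PAGE_SIZE) >> PAGE_SHIFT conversion internally, and
 * still returns -EPERM when the memlock limit would be exceeded.
 */
static int example_charge(struct bpf_map *map, const union bpf_attr *attr,
			  size_t elem_size)
{
	u64 cost;

	/* raw byte cost; no manual overflow check or page rounding */
	cost = sizeof(*map) + (u64)attr->max_entries * elem_size;

	return bpf_map_charge_init(&map->memory, cost);
}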