Diffstat (limited to 'kernel/bpf/syscall.c'):
 kernel/bpf/syscall.c | 103 ++++++++++++++++++++++++++++++++-----------------------
 1 file changed, 60 insertions(+), 43 deletions(-)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index cb5440b02e82..4c53cbd3329d 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -188,19 +188,6 @@ void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
 	map->numa_node = bpf_map_attr_numa_node(attr);
 }
 
-int bpf_map_precharge_memlock(u32 pages)
-{
-	struct user_struct *user = get_current_user();
-	unsigned long memlock_limit, cur;
-
-	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-	cur = atomic_long_read(&user->locked_vm);
-	free_uid(user);
-	if (cur + pages > memlock_limit)
-		return -EPERM;
-	return 0;
-}
-
 static int bpf_charge_memlock(struct user_struct *user, u32 pages)
 {
 	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
@@ -214,45 +201,62 @@ static int bpf_charge_memlock(struct user_struct *user, u32 pages)
 
 static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
 {
-	atomic_long_sub(pages, &user->locked_vm);
+	if (user)
+		atomic_long_sub(pages, &user->locked_vm);
 }
 
-static int bpf_map_init_memlock(struct bpf_map *map)
+int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size)
 {
-	struct user_struct *user = get_current_user();
+	u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
+	struct user_struct *user;
 	int ret;
 
-	ret = bpf_charge_memlock(user, map->pages);
+	if (size >= U32_MAX - PAGE_SIZE)
+		return -E2BIG;
+
+	user = get_current_user();
+	ret = bpf_charge_memlock(user, pages);
 	if (ret) {
 		free_uid(user);
 		return ret;
 	}
-	map->user = user;
-	return ret;
+
+	mem->pages = pages;
+	mem->user = user;
+
+	return 0;
 }
 
-static void bpf_map_release_memlock(struct bpf_map *map)
+void bpf_map_charge_finish(struct bpf_map_memory *mem)
 {
-	struct user_struct *user = map->user;
-	bpf_uncharge_memlock(user, map->pages);
-	free_uid(user);
+	bpf_uncharge_memlock(mem->user, mem->pages);
+	free_uid(mem->user);
+}
+
+void bpf_map_charge_move(struct bpf_map_memory *dst,
+			 struct bpf_map_memory *src)
+{
+	*dst = *src;
+
+	/* Make sure src will not be used for the redundant uncharging. */
+	memset(src, 0, sizeof(struct bpf_map_memory));
 }
 
 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
 {
 	int ret;
 
-	ret = bpf_charge_memlock(map->user, pages);
+	ret = bpf_charge_memlock(map->memory.user, pages);
 	if (ret)
 		return ret;
-	map->pages += pages;
+	map->memory.pages += pages;
 	return ret;
 }
 
 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
 {
-	bpf_uncharge_memlock(map->user, pages);
-	map->pages -= pages;
+	bpf_uncharge_memlock(map->memory.user, pages);
+	map->memory.pages -= pages;
 }
 
 static int bpf_map_alloc_id(struct bpf_map *map)
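
For orientation, here is a minimal usage sketch (not part of this patch) of how a map implementation is expected to use the new charge API: charge the memlock cost up front, allocate, and on success hand the charge over to the map. The map type, its alloc callback, and the cost calculation below are hypothetical.

	/* Hypothetical map type wrapping struct bpf_map. */
	struct my_map {
		struct bpf_map map;
		/* per-map data would follow here */
	};

	static struct bpf_map *my_map_alloc(union bpf_attr *attr)
	{
		struct bpf_map_memory mem;
		struct my_map *m;
		u64 cost;
		int err;

		/* Hypothetical cost estimate for this map's memory. */
		cost = sizeof(*m) + (u64)attr->max_entries * attr->value_size;

		/* Charge the current user *before* allocating anything. */
		err = bpf_map_charge_init(&mem, cost);
		if (err)
			return ERR_PTR(err);

		m = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
		if (!m) {
			/* Allocation failed: drop the charge again. */
			bpf_map_charge_finish(&mem);
			return ERR_PTR(-ENOMEM);
		}

		/* Success: transfer the charge into the map itself. */
		bpf_map_charge_move(&m->map.memory, &mem);
		return &m->map;
	}
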
@@ -303,11 +307,13 @@ void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
 static void bpf_map_free_deferred(struct work_struct *work)
 {
 	struct bpf_map *map = container_of(work, struct bpf_map, work);
+	struct bpf_map_memory mem;
 
-	bpf_map_release_memlock(map);
+	bpf_map_charge_move(&mem, &map->memory);
 	security_bpf_map_free(map);
 	/* implementation dependent freeing */
 	map->ops->map_free(map);
+	bpf_map_charge_finish(&mem);
 }
 
 static void bpf_map_put_uref(struct bpf_map *map)
@@ -395,7 +401,7 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
 		   map->value_size,
 		   map->max_entries,
 		   map->map_flags,
-		   map->pages * 1ULL << PAGE_SHIFT,
+		   map->memory.pages * 1ULL << PAGE_SHIFT,
 		   map->id,
 		   READ_ONCE(map->frozen));
 
@@ -549,6 +555,7 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
 static int map_create(union bpf_attr *attr)
 {
 	int numa_node = bpf_map_attr_numa_node(attr);
+	struct bpf_map_memory mem;
 	struct bpf_map *map;
 	int f_flags;
 	int err;
@@ -573,7 +580,7 @@ static int map_create(union bpf_attr *attr)
 
 	err = bpf_obj_name_cpy(map->name, attr->map_name);
 	if (err)
-		goto free_map_nouncharge;
+		goto free_map;
 
 	atomic_set(&map->refcnt, 1);
 	atomic_set(&map->usercnt, 1);
@@ -583,20 +590,20 @@ static int map_create(union bpf_attr *attr)
 
 	if (!attr->btf_value_type_id) {
 		err = -EINVAL;
-		goto free_map_nouncharge;
+		goto free_map;
 	}
 
 	btf = btf_get_by_fd(attr->btf_fd);
 	if (IS_ERR(btf)) {
 		err = PTR_ERR(btf);
-		goto free_map_nouncharge;
+		goto free_map;
 	}
 
 	err = map_check_btf(map, btf, attr->btf_key_type_id,
 			    attr->btf_value_type_id);
 	if (err) {
 		btf_put(btf);
-		goto free_map_nouncharge;
+		goto free_map;
 	}
 
 	map->btf = btf;
@@ -608,15 +615,11 @@ static int map_create(union bpf_attr *attr)
 
 	err = security_bpf_map_alloc(map);
 	if (err)
-		goto free_map_nouncharge;
-
-	err = bpf_map_init_memlock(map);
-	if (err)
-		goto free_map_sec;
+		goto free_map;
 
 	err = bpf_map_alloc_id(map);
 	if (err)
-		goto free_map;
+		goto free_map_sec;
 
 	err = bpf_map_new_fd(map, f_flags);
 	if (err < 0) {
@@ -632,13 +635,13 @@ static int map_create(union bpf_attr *attr)
 
 	return err;
 
-free_map:
-	bpf_map_release_memlock(map);
 free_map_sec:
 	security_bpf_map_free(map);
-free_map_nouncharge:
+free_map:
 	btf_put(map->btf);
+	bpf_map_charge_move(&mem, &map->memory);
 	map->ops->map_free(map);
+	bpf_map_charge_finish(&mem);
 	return err;
 }
 
@@ -1585,6 +1588,14 @@ bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type,
 	default:
 		return -EINVAL;
 	}
+	case BPF_PROG_TYPE_CGROUP_SKB:
+		switch (expected_attach_type) {
+		case BPF_CGROUP_INET_INGRESS:
+		case BPF_CGROUP_INET_EGRESS:
+			return 0;
+		default:
+			return -EINVAL;
+		}
 	default:
 		return 0;
 	}
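
From userspace, this case is exercised by setting expected_attach_type when loading a cgroup/skb program. A minimal sketch (not part of this patch) using the raw bpf() syscall; the instruction buffer, license string, and error handling are left to the caller:

	#include <linux/bpf.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int load_cgroup_skb_prog(const struct bpf_insn *insns, __u32 insn_cnt,
					const char *license)
	{
		union bpf_attr attr = {};

		attr.prog_type = BPF_PROG_TYPE_CGROUP_SKB;
		/* Only BPF_CGROUP_INET_INGRESS/EGRESS pass the new check. */
		attr.expected_attach_type = BPF_CGROUP_INET_EGRESS;
		attr.insns = (__u64)(unsigned long)insns;
		attr.insn_cnt = insn_cnt;
		attr.license = (__u64)(unsigned long)license;

		return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
	}
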
@@ -1604,7 +1615,9 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
 	if (CHECK_ATTR(BPF_PROG_LOAD))
 		return -EINVAL;
 
-	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | BPF_F_ANY_ALIGNMENT))
+	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
+				 BPF_F_ANY_ALIGNMENT |
+				 BPF_F_TEST_RND_HI32))
 		return -EINVAL;
 
 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
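
A loader that wants the newly allowed test-only behaviour just sets the flag in the same attribute; a one-line sketch continuing the hypothetical loader above (the flag's effect, randomizing the upper 32 bits of registers for verifier/JIT testing, is implemented outside this hunk):

	/* Opt into test-only randomization of upper register halves. */
	attr.prog_flags |= BPF_F_TEST_RND_HI32;
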
@@ -1834,6 +1847,10 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
 	case BPF_PROG_TYPE_CGROUP_SOCK:
 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
 		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
+	case BPF_PROG_TYPE_CGROUP_SKB:
+		return prog->enforce_expected_attach_type &&
+			prog->expected_attach_type != attach_type ?
+			-EINVAL : 0;
+	default:
 	default:
 		return 0;
 	}
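
The attach side of the same flow, continuing the userspace sketch above (not part of this patch): the mismatch check only bites when expected_attach_type was set at load time (prog->enforce_expected_attach_type in the hunk above), so legacy loaders that never set it keep attaching to either direction.

	static int attach_cgroup_skb(int cgroup_fd, int prog_fd)
	{
		union bpf_attr attr = {};

		attr.target_fd = cgroup_fd;	/* cgroup directory fd */
		attr.attach_bpf_fd = prog_fd;
		/* Must match the load-time expected_attach_type, if one was set. */
		attr.attach_type = BPF_CGROUP_INET_EGRESS;

		return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
	}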