Diffstat (limited to 'kernel/bpf/syscall.c')
-rw-r--r--  kernel/bpf/syscall.c | 122
1 file changed, 79 insertions(+), 43 deletions(-)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 5b30f8baaf02..5d141f16f6fa 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -180,19 +180,6 @@ void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
 	map->numa_node = bpf_map_attr_numa_node(attr);
 }
 
-int bpf_map_precharge_memlock(u32 pages)
-{
-	struct user_struct *user = get_current_user();
-	unsigned long memlock_limit, cur;
-
-	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-	cur = atomic_long_read(&user->locked_vm);
-	free_uid(user);
-	if (cur + pages > memlock_limit)
-		return -EPERM;
-	return 0;
-}
-
 static int bpf_charge_memlock(struct user_struct *user, u32 pages)
 {
 	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
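
Note: the removed bpf_map_precharge_memlock() only compared the current locked_vm
value against RLIMIT_MEMLOCK without charging anything, so its verdict could go
stale between the precharge and the later real charge. It is superseded by
bpf_map_charge_init() below, which charges the full amount up front.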
@@ -206,45 +193,62 @@ static int bpf_charge_memlock(struct user_struct *user, u32 pages)
 
 static void bpf_uncharge_memlock(struct user_struct *user, u32 pages)
 {
-	atomic_long_sub(pages, &user->locked_vm);
+	if (user)
+		atomic_long_sub(pages, &user->locked_vm);
 }
 
-static int bpf_map_init_memlock(struct bpf_map *map)
+int bpf_map_charge_init(struct bpf_map_memory *mem, size_t size)
 {
-	struct user_struct *user = get_current_user();
+	u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT;
+	struct user_struct *user;
 	int ret;
 
-	ret = bpf_charge_memlock(user, map->pages);
+	if (size >= U32_MAX - PAGE_SIZE)
+		return -E2BIG;
+
+	user = get_current_user();
+	ret = bpf_charge_memlock(user, pages);
 	if (ret) {
 		free_uid(user);
 		return ret;
 	}
-	map->user = user;
-	return ret;
+
+	mem->pages = pages;
+	mem->user = user;
+
+	return 0;
 }
 
-static void bpf_map_release_memlock(struct bpf_map *map)
+void bpf_map_charge_finish(struct bpf_map_memory *mem)
 {
-	struct user_struct *user = map->user;
-	bpf_uncharge_memlock(user, map->pages);
-	free_uid(user);
+	bpf_uncharge_memlock(mem->user, mem->pages);
+	free_uid(mem->user);
+}
+
+void bpf_map_charge_move(struct bpf_map_memory *dst,
+			 struct bpf_map_memory *src)
+{
+	*dst = *src;
+
+	/* Make sure src will not be used for the redundant uncharging. */
+	memset(src, 0, sizeof(struct bpf_map_memory));
 }
 
 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
 {
 	int ret;
 
-	ret = bpf_charge_memlock(map->user, pages);
+	ret = bpf_charge_memlock(map->memory.user, pages);
 	if (ret)
 		return ret;
-	map->pages += pages;
+	map->memory.pages += pages;
 	return ret;
 }
 
 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
 {
-	bpf_uncharge_memlock(map->user, pages);
-	map->pages -= pages;
+	bpf_uncharge_memlock(map->memory.user, pages);
+	map->memory.pages -= pages;
 }
 
 static int bpf_map_alloc_id(struct bpf_map *map)
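
With the charge tracked in a standalone struct bpf_map_memory, a map
implementation can charge before allocating and hand the charge over to the map
only on success. A minimal sketch of the intended call pattern; the map type,
cost math, and names here are illustrative assumptions, not part of this diff:

	struct example_map {
		struct bpf_map map;	/* embedded generic map; holds map.memory */
		void *data;		/* hypothetical per-map storage */
	};

	static struct bpf_map *example_map_alloc(union bpf_attr *attr)
	{
		struct bpf_map_memory mem;
		struct example_map *emap;
		u64 cost = sizeof(*emap) +
			   (u64)attr->max_entries * attr->value_size;
		int err;

		/* Charge the whole cost up front: -EPERM past the rlimit,
		 * -E2BIG on overflow, before any memory is allocated.
		 */
		err = bpf_map_charge_init(&mem, cost);
		if (err)
			return ERR_PTR(err);

		emap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
		if (!emap) {
			bpf_map_charge_finish(&mem);	/* roll back the charge */
			return ERR_PTR(-ENOMEM);
		}

		/* Success: transfer the charge into the map object. */
		bpf_map_charge_move(&emap->map.memory, &mem);
		return &emap->map;
	}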
@@ -295,11 +299,13 @@ void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
 static void bpf_map_free_deferred(struct work_struct *work)
 {
 	struct bpf_map *map = container_of(work, struct bpf_map, work);
+	struct bpf_map_memory mem;
 
-	bpf_map_release_memlock(map);
+	bpf_map_charge_move(&mem, &map->memory);
 	security_bpf_map_free(map);
 	/* implementation dependent freeing */
 	map->ops->map_free(map);
+	bpf_map_charge_finish(&mem);
 }
 
 static void bpf_map_put_uref(struct bpf_map *map)
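
The ordering in bpf_map_free_deferred() is deliberate: map->ops->map_free()
typically frees the structure that embeds map->memory, so the charge is moved
to an on-stack copy first and uncharged only after the backing memory is really
gone. Annotated for emphasis (the comments are mine, not from the diff):

	struct bpf_map_memory mem;

	bpf_map_charge_move(&mem, &map->memory);	/* map->memory is zeroed  */
	security_bpf_map_free(map);
	map->ops->map_free(map);			/* may free 'map' itself  */
	bpf_map_charge_finish(&mem);			/* uncharge via the copy  */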
@@ -387,7 +393,7 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
 		   map->value_size,
 		   map->max_entries,
 		   map->map_flags,
-		   map->pages * 1ULL << PAGE_SHIFT,
+		   map->memory.pages * 1ULL << PAGE_SHIFT,
 		   map->id,
 		   READ_ONCE(map->frozen));
 
@@ -541,6 +547,7 @@ static int map_check_btf(struct bpf_map *map, const struct btf *btf,
 static int map_create(union bpf_attr *attr)
 {
 	int numa_node = bpf_map_attr_numa_node(attr);
+	struct bpf_map_memory mem;
 	struct bpf_map *map;
 	int f_flags;
 	int err;
@@ -565,7 +572,7 @@ static int map_create(union bpf_attr *attr)
 
 	err = bpf_obj_name_cpy(map->name, attr->map_name);
 	if (err)
-		goto free_map_nouncharge;
+		goto free_map;
 
 	atomic_set(&map->refcnt, 1);
 	atomic_set(&map->usercnt, 1);
@@ -575,20 +582,20 @@ static int map_create(union bpf_attr *attr)
 
 	if (!attr->btf_value_type_id) {
 		err = -EINVAL;
-		goto free_map_nouncharge;
+		goto free_map;
 	}
 
 	btf = btf_get_by_fd(attr->btf_fd);
 	if (IS_ERR(btf)) {
 		err = PTR_ERR(btf);
-		goto free_map_nouncharge;
+		goto free_map;
 	}
 
 	err = map_check_btf(map, btf, attr->btf_key_type_id,
 			    attr->btf_value_type_id);
 	if (err) {
 		btf_put(btf);
-		goto free_map_nouncharge;
+		goto free_map;
 	}
 
 	map->btf = btf;
@@ -600,15 +607,11 @@ static int map_create(union bpf_attr *attr)
 
 	err = security_bpf_map_alloc(map);
 	if (err)
-		goto free_map_nouncharge;
-
-	err = bpf_map_init_memlock(map);
-	if (err)
-		goto free_map_sec;
+		goto free_map;
 
 	err = bpf_map_alloc_id(map);
 	if (err)
-		goto free_map;
+		goto free_map_sec;
 
 	err = bpf_map_new_fd(map, f_flags);
 	if (err < 0) {
@@ -624,13 +627,13 @@ static int map_create(union bpf_attr *attr)
 
 	return err;
 
-free_map:
-	bpf_map_release_memlock(map);
 free_map_sec:
 	security_bpf_map_free(map);
-free_map_nouncharge:
+free_map:
 	btf_put(map->btf);
+	bpf_map_charge_move(&mem, &map->memory);
 	map->ops->map_free(map);
+	bpf_map_charge_finish(&mem);
 	return err;
 }
 
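
As the new error path implies, charging now happens inside the map
implementation's allocation callback rather than in map_create() itself, so the
cleanup mirrors bpf_map_free_deferred(): the charge is pulled out of the map
before ops->map_free() releases it and finished afterwards. The old
free_map/free_map_nouncharge split disappears because there is no longer a
point in map_create() at which the map exists but is uncharged.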
@@ -1579,6 +1582,22 @@ bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type,
 	default:
 		return -EINVAL;
 	}
+	case BPF_PROG_TYPE_CGROUP_SKB:
+		switch (expected_attach_type) {
+		case BPF_CGROUP_INET_INGRESS:
+		case BPF_CGROUP_INET_EGRESS:
+			return 0;
+		default:
+			return -EINVAL;
+		}
+	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
+		switch (expected_attach_type) {
+		case BPF_CGROUP_SETSOCKOPT:
+		case BPF_CGROUP_GETSOCKOPT:
+			return 0;
+		default:
+			return -EINVAL;
+		}
 	default:
 		return 0;
 	}
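
With these cases, a BPF_PROG_TYPE_CGROUP_SOCKOPT program must declare one of
the two sockopt attach points at load time, and a cgroup-skb program may pin
itself to ingress or egress. A minimal user-space sketch using the raw bpf(2)
syscall; this is illustrative, not a complete loader:

	#include <linux/bpf.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int load_sockopt_prog(const struct bpf_insn *insns, __u32 insn_cnt)
	{
		union bpf_attr attr = {};

		attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCKOPT;
		attr.expected_attach_type = BPF_CGROUP_SETSOCKOPT;
		attr.insns = (__u64)(unsigned long)insns;
		attr.insn_cnt = insn_cnt;
		attr.license = (__u64)(unsigned long)"GPL";

		return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
	}

The later sketches assume the same headers and a prog_fd/cgroup_fd obtained
this way.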
@@ -1598,7 +1617,9 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
 	if (CHECK_ATTR(BPF_PROG_LOAD))
 		return -EINVAL;
 
-	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | BPF_F_ANY_ALIGNMENT))
+	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
+				 BPF_F_ANY_ALIGNMENT |
+				 BPF_F_TEST_RND_HI32))
 		return -EINVAL;
 
 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
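
BPF_F_TEST_RND_HI32 is a testing-only load flag: it asks the verifier to
randomize the upper 32 bits of registers after 32-bit ALU operations, so test
suites can flush out JITs that wrongly assume those bits are zeroed. In a
loader it is just another prog_flags bit (illustrative):

	attr.prog_flags |= BPF_F_TEST_RND_HI32;	/* only meaningful for testing */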
@@ -1827,7 +1848,12 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
 	switch (prog->type) {
 	case BPF_PROG_TYPE_CGROUP_SOCK:
 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
+	case BPF_PROG_TYPE_CGROUP_SOCKOPT:
 		return attach_type == prog->expected_attach_type ? 0 : -EINVAL;
+	case BPF_PROG_TYPE_CGROUP_SKB:
+		return prog->enforce_expected_attach_type &&
+		       prog->expected_attach_type != attach_type ?
+		       -EINVAL : 0;
 	default:
 		return 0;
 	}
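
For cgroup-skb programs the check is deliberately laxer than the strict
equality used for the sock/sockopt types: the attach type is only enforced when
the program opted in at load time via enforce_expected_attach_type, so older
loaders that leave expected_attach_type at zero keep working. Spelled out as an
if, equivalent to the ternary above:

	if (prog->enforce_expected_attach_type &&
	    prog->expected_attach_type != attach_type)
		return -EINVAL;
	return 0;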
@@ -1895,6 +1921,10 @@ static int bpf_prog_attach(const union bpf_attr *attr)
 	case BPF_CGROUP_SYSCTL:
 		ptype = BPF_PROG_TYPE_CGROUP_SYSCTL;
 		break;
+	case BPF_CGROUP_GETSOCKOPT:
+	case BPF_CGROUP_SETSOCKOPT:
+		ptype = BPF_PROG_TYPE_CGROUP_SOCKOPT;
+		break;
 	default:
 		return -EINVAL;
 	}
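
Attaching a loaded sockopt program then goes through the usual BPF_PROG_ATTACH
path with the new attach types. A raw-syscall sketch, with cgroup_fd and
prog_fd assumed to be valid descriptors:

	static int attach_sockopt_prog(int cgroup_fd, int prog_fd)
	{
		union bpf_attr attr = {};

		attr.target_fd = cgroup_fd;	/* fd of the cgroup directory */
		attr.attach_bpf_fd = prog_fd;	/* from BPF_PROG_LOAD above */
		attr.attach_type = BPF_CGROUP_SETSOCKOPT;

		return syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
	}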
@@ -1978,6 +2008,10 @@ static int bpf_prog_detach(const union bpf_attr *attr)
 	case BPF_CGROUP_SYSCTL:
 		ptype = BPF_PROG_TYPE_CGROUP_SYSCTL;
 		break;
+	case BPF_CGROUP_GETSOCKOPT:
+	case BPF_CGROUP_SETSOCKOPT:
+		ptype = BPF_PROG_TYPE_CGROUP_SOCKOPT;
+		break;
 	default:
 		return -EINVAL;
 	}
@@ -2014,6 +2048,8 @@ static int bpf_prog_query(const union bpf_attr *attr,
 	case BPF_CGROUP_SOCK_OPS:
 	case BPF_CGROUP_DEVICE:
 	case BPF_CGROUP_SYSCTL:
+	case BPF_CGROUP_GETSOCKOPT:
+	case BPF_CGROUP_SETSOCKOPT:
 		break;
 	case BPF_LIRC_MODE2:
 		return lirc_prog_query(attr, uattr);
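
And BPF_PROG_QUERY accepts the sockopt attach points as well; a sketch querying
the programs attached at the getsockopt hook (buffer size arbitrary):

	static int query_sockopt_progs(int cgroup_fd, __u32 *ids, __u32 *cnt)
	{
		union bpf_attr attr = {};
		int err;

		attr.query.target_fd = cgroup_fd;
		attr.query.attach_type = BPF_CGROUP_GETSOCKOPT;
		attr.query.prog_ids = (__u64)(unsigned long)ids;
		attr.query.prog_cnt = *cnt;	/* in: capacity, out: count */

		err = syscall(__NR_bpf, BPF_PROG_QUERY, &attr, sizeof(attr));
		if (!err)
			*cnt = attr.query.prog_cnt;
		return err;
	}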