Diffstat (limited to 'kernel/bpf/syscall.c')
-rw-r--r--  kernel/bpf/syscall.c  160
 1 file changed, 137 insertions(+), 23 deletions(-)
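Note on the loader-facing side of this change: with BPF_PROG_LOAD_LAST_FIELD moving to line_info_cnt, BPF_PROG_LOAD now accepts BTF-described func_info and line_info records alongside the instructions. A minimal, hypothetical userspace sketch of filling those union bpf_attr fields is below; the attr field names come from the UAPI this series extends (a uapi header from this kernel or newer is assumed), while prog_load_with_btf() and its arguments (btf_fd, insns, funcs, lines) are placeholders invented for illustration, not part of this patch.

/* Hypothetical loader-side sketch: pass BTF-described func/line info
 * with BPF_PROG_LOAD.  Assumes btf_fd, insns/insn_cnt, funcs/func_cnt
 * and lines/line_cnt were prepared elsewhere (e.g. from an object's
 * .BTF and .BTF.ext sections).
 */
#include <linux/bpf.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

static int prog_load_with_btf(int btf_fd,
			      const struct bpf_insn *insns, __u32 insn_cnt,
			      const struct bpf_func_info *funcs, __u32 func_cnt,
			      const struct bpf_line_info *lines, __u32 line_cnt)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	attr.insns = (__u64)(unsigned long)insns;
	attr.insn_cnt = insn_cnt;
	attr.license = (__u64)(unsigned long)"GPL";
	attr.prog_btf_fd = btf_fd;
	attr.func_info_rec_size = sizeof(struct bpf_func_info);
	attr.func_info = (__u64)(unsigned long)funcs;
	attr.func_info_cnt = func_cnt;
	attr.line_info_rec_size = sizeof(struct bpf_line_info);
	attr.line_info = (__u64)(unsigned long)lines;
	attr.line_info_cnt = line_cnt;	/* new last field checked by CHECK_ATTR() */

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}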
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index cf5040fd5434..0607db304def 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -456,6 +456,7 @@ static int bpf_obj_name_cpy(char *dst, const char *src)
 }
 
 int map_check_no_btf(const struct bpf_map *map,
+		     const struct btf *btf,
 		     const struct btf_type *key_type,
 		     const struct btf_type *value_type)
 {
@@ -478,7 +479,7 @@ static int map_check_btf(const struct bpf_map *map, const struct btf *btf,
 		return -EINVAL;
 
 	if (map->ops->map_check_btf)
-		ret = map->ops->map_check_btf(map, key_type, value_type);
+		ret = map->ops->map_check_btf(map, btf, key_type, value_type);
 
 	return ret;
 }
@@ -1213,6 +1214,9 @@ static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 		/* bpf_prog_free_id() must be called first */
 		bpf_prog_free_id(prog, do_idr_lock);
 		bpf_prog_kallsyms_del_all(prog);
+		btf_put(prog->aux->btf);
+		kvfree(prog->aux->func_info);
+		bpf_prog_free_linfo(prog);
 
 		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
 	}
@@ -1437,9 +1441,9 @@ bpf_prog_load_check_attach_type(enum bpf_prog_type prog_type,
 }
 
 /* last field in 'union bpf_attr' used by this command */
-#define BPF_PROG_LOAD_LAST_FIELD expected_attach_type
+#define BPF_PROG_LOAD_LAST_FIELD line_info_cnt
 
-static int bpf_prog_load(union bpf_attr *attr)
+static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
 {
 	enum bpf_prog_type type = attr->prog_type;
 	struct bpf_prog *prog;
@@ -1450,9 +1454,14 @@ static int bpf_prog_load(union bpf_attr *attr)
 	if (CHECK_ATTR(BPF_PROG_LOAD))
 		return -EINVAL;
 
-	if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT)
+	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | BPF_F_ANY_ALIGNMENT))
 		return -EINVAL;
 
+	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
+	    (attr->prog_flags & BPF_F_ANY_ALIGNMENT) &&
+	    !capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
 	/* copy eBPF program license from user space */
 	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
 			      sizeof(license) - 1) < 0)
@@ -1464,11 +1473,6 @@ static int bpf_prog_load(union bpf_attr *attr)
 
 	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
 		return -E2BIG;
-
-	if (type == BPF_PROG_TYPE_KPROBE &&
-	    attr->kern_version != LINUX_VERSION_CODE)
-		return -EINVAL;
-
 	if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
 	    type != BPF_PROG_TYPE_CGROUP_SKB &&
 	    !capable(CAP_SYS_ADMIN))
@@ -1525,7 +1529,7 @@ static int bpf_prog_load(union bpf_attr *attr)
 		goto free_prog;
 
 	/* run eBPF verifier */
-	err = bpf_check(&prog, attr);
+	err = bpf_check(&prog, attr, uattr);
 	if (err < 0)
 		goto free_used_maps;
 
@@ -1553,6 +1557,9 @@ static int bpf_prog_load(union bpf_attr *attr)
 	return err;
 
 free_used_maps:
+	bpf_prog_free_linfo(prog);
+	kvfree(prog->aux->func_info);
+	btf_put(prog->aux->btf);
 	bpf_prog_kallsyms_del_subprogs(prog);
 	free_used_maps(prog->aux);
 free_prog:
@@ -1597,6 +1604,7 @@ static int bpf_raw_tracepoint_release(struct inode *inode, struct file *filp)
 		bpf_probe_unregister(raw_tp->btp, raw_tp->prog);
 		bpf_prog_put(raw_tp->prog);
 	}
+	bpf_put_raw_tracepoint(raw_tp->btp);
 	kfree(raw_tp);
 	return 0;
 }
@@ -1622,13 +1630,15 @@ static int bpf_raw_tracepoint_open(const union bpf_attr *attr)
 		return -EFAULT;
 	tp_name[sizeof(tp_name) - 1] = 0;
 
-	btp = bpf_find_raw_tracepoint(tp_name);
+	btp = bpf_get_raw_tracepoint(tp_name);
 	if (!btp)
 		return -ENOENT;
 
 	raw_tp = kzalloc(sizeof(*raw_tp), GFP_USER);
-	if (!raw_tp)
-		return -ENOMEM;
+	if (!raw_tp) {
+		err = -ENOMEM;
+		goto out_put_btp;
+	}
 	raw_tp->btp = btp;
 
 	prog = bpf_prog_get_type(attr->raw_tracepoint.prog_fd,
@@ -1656,6 +1666,8 @@ out_put_prog:
 	bpf_prog_put(prog);
 out_free_tp:
 	kfree(raw_tp);
+out_put_btp:
+	bpf_put_raw_tracepoint(btp);
 	return err;
 }
 
@@ -2020,18 +2032,42 @@ static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
 			insns[i + 1].imm = 0;
 			continue;
 		}
-
-		if (!bpf_dump_raw_ok() &&
-		    imm == (unsigned long)prog->aux) {
-			insns[i].imm = 0;
-			insns[i + 1].imm = 0;
-			continue;
-		}
 	}
 
 	return insns;
 }
 
+static int set_info_rec_size(struct bpf_prog_info *info)
+{
+	/*
+	 * Ensure info.*_rec_size is the same as kernel expected size
+	 *
+	 * or
+	 *
+	 * Only allow zero *_rec_size if both _rec_size and _cnt are
+	 * zero.  In this case, the kernel will set the expected
+	 * _rec_size back to the info.
+	 */
+
+	if ((info->nr_func_info || info->func_info_rec_size) &&
+	    info->func_info_rec_size != sizeof(struct bpf_func_info))
+		return -EINVAL;
+
+	if ((info->nr_line_info || info->line_info_rec_size) &&
+	    info->line_info_rec_size != sizeof(struct bpf_line_info))
+		return -EINVAL;
+
+	if ((info->nr_jited_line_info || info->jited_line_info_rec_size) &&
+	    info->jited_line_info_rec_size != sizeof(__u64))
+		return -EINVAL;
+
+	info->func_info_rec_size = sizeof(struct bpf_func_info);
+	info->line_info_rec_size = sizeof(struct bpf_line_info);
+	info->jited_line_info_rec_size = sizeof(__u64);
+
+	return 0;
+}
+
 static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 				   const union bpf_attr *attr,
 				   union bpf_attr __user *uattr)
@@ -2074,11 +2110,18 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 		return -EFAULT;
 	}
 
+	err = set_info_rec_size(&info);
+	if (err)
+		return err;
+
 	if (!capable(CAP_SYS_ADMIN)) {
 		info.jited_prog_len = 0;
 		info.xlated_prog_len = 0;
 		info.nr_jited_ksyms = 0;
 		info.nr_jited_func_lens = 0;
+		info.nr_func_info = 0;
+		info.nr_line_info = 0;
+		info.nr_jited_line_info = 0;
 		goto done;
 	}
 
@@ -2160,7 +2203,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 
 	ulen = info.nr_jited_ksyms;
 	info.nr_jited_ksyms = prog->aux->func_cnt ? : 1;
-	if (info.nr_jited_ksyms && ulen) {
+	if (ulen) {
 		if (bpf_dump_raw_ok()) {
 			unsigned long ksym_addr;
 			u64 __user *user_ksyms;
@@ -2191,7 +2234,7 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 
 	ulen = info.nr_jited_func_lens;
 	info.nr_jited_func_lens = prog->aux->func_cnt ? : 1;
-	if (info.nr_jited_func_lens && ulen) {
+	if (ulen) {
 		if (bpf_dump_raw_ok()) {
 			u32 __user *user_lens;
 			u32 func_len, i;
@@ -2216,6 +2259,77 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 		}
 	}
 
+	if (prog->aux->btf)
+		info.btf_id = btf_id(prog->aux->btf);
+
+	ulen = info.nr_func_info;
+	info.nr_func_info = prog->aux->func_info_cnt;
+	if (info.nr_func_info && ulen) {
+		char __user *user_finfo;
+
+		user_finfo = u64_to_user_ptr(info.func_info);
+		ulen = min_t(u32, info.nr_func_info, ulen);
+		if (copy_to_user(user_finfo, prog->aux->func_info,
+				 info.func_info_rec_size * ulen))
+			return -EFAULT;
+	}
+
+	ulen = info.nr_line_info;
+	info.nr_line_info = prog->aux->nr_linfo;
+	if (info.nr_line_info && ulen) {
+		__u8 __user *user_linfo;
+
+		user_linfo = u64_to_user_ptr(info.line_info);
+		ulen = min_t(u32, info.nr_line_info, ulen);
+		if (copy_to_user(user_linfo, prog->aux->linfo,
+				 info.line_info_rec_size * ulen))
+			return -EFAULT;
+	}
+
+	ulen = info.nr_jited_line_info;
+	if (prog->aux->jited_linfo)
+		info.nr_jited_line_info = prog->aux->nr_linfo;
+	else
+		info.nr_jited_line_info = 0;
+	if (info.nr_jited_line_info && ulen) {
+		if (bpf_dump_raw_ok()) {
+			__u64 __user *user_linfo;
+			u32 i;
+
+			user_linfo = u64_to_user_ptr(info.jited_line_info);
+			ulen = min_t(u32, info.nr_jited_line_info, ulen);
+			for (i = 0; i < ulen; i++) {
+				if (put_user((__u64)(long)prog->aux->jited_linfo[i],
+					     &user_linfo[i]))
+					return -EFAULT;
+			}
+		} else {
+			info.jited_line_info = 0;
+		}
+	}
+
+	ulen = info.nr_prog_tags;
+	info.nr_prog_tags = prog->aux->func_cnt ? : 1;
+	if (ulen) {
+		__u8 __user (*user_prog_tags)[BPF_TAG_SIZE];
+		u32 i;
+
+		user_prog_tags = u64_to_user_ptr(info.prog_tags);
+		ulen = min_t(u32, info.nr_prog_tags, ulen);
+		if (prog->aux->func_cnt) {
+			for (i = 0; i < ulen; i++) {
+				if (copy_to_user(user_prog_tags[i],
+						 prog->aux->func[i]->tag,
+						 BPF_TAG_SIZE))
+					return -EFAULT;
+			}
+		} else {
+			if (copy_to_user(user_prog_tags[0],
+					 prog->tag, BPF_TAG_SIZE))
+				return -EFAULT;
+		}
+	}
+
 done:
 	if (copy_to_user(uinfo, &info, info_len) ||
 	    put_user(info_len, &uattr->info.info_len))
@@ -2501,7 +2615,7 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
 		err = map_get_next_key(&attr);
 		break;
 	case BPF_PROG_LOAD:
-		err = bpf_prog_load(&attr);
+		err = bpf_prog_load(&attr, uattr);
 		break;
 	case BPF_OBJ_PIN:
 		err = bpf_obj_pin(&attr);
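For readers of the new bpf_prog_info fields, set_info_rec_size() above enforces a simple contract for BPF_OBJ_GET_INFO_BY_FD: either pass the exact record sizes, or pass zero for both *_rec_size and the corresponding count and let the kernel fill the expected sizes back in. A rough, hypothetical sketch of the usual two-call pattern for line_info follows; dump_line_info() is illustrative only and assumes a kernel and uapi header that carry these fields.

/* Hypothetical reader-side sketch: first call learns nr_line_info
 * (zero rec_size with zero count is accepted by set_info_rec_size()),
 * second call fetches the records with the kernel-expected record size.
 */
#include <linux/bpf.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

static int dump_line_info(int prog_fd)
{
	struct bpf_prog_info info;
	struct bpf_line_info *linfo;
	union bpf_attr attr;
	__u32 nr;
	int err;

	memset(&info, 0, sizeof(info));
	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd = prog_fd;
	attr.info.info_len = sizeof(info);
	attr.info.info = (__u64)(unsigned long)&info;

	/* first call: counts only */
	err = syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	if (err || !info.nr_line_info)
		return err;

	nr = info.nr_line_info;
	linfo = calloc(nr, sizeof(*linfo));
	if (!linfo)
		return -1;

	/* second call: rec_size must match sizeof(struct bpf_line_info) */
	memset(&info, 0, sizeof(info));
	info.nr_line_info = nr;
	info.line_info_rec_size = sizeof(struct bpf_line_info);
	info.line_info = (__u64)(unsigned long)linfo;

	err = syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	/* on success, linfo[i].insn_off/file_name_off/line_off index into the prog's BTF */
	free(linfo);
	return err;
}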