Diffstat (limited to 'kernel/bpf/syscall.c')
 kernel/bpf/syscall.c | 115 ++++++++++++++++++++++++++++++++++++++++++++-----
 1 file changed, 106 insertions(+), 9 deletions(-)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 5cb783fc8224..2bac0dc8baba 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -905,9 +905,13 @@ static int bpf_prog_alloc_id(struct bpf_prog *prog)
 	return id > 0 ? 0 : id;
 }
 
-static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
+void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
 {
-	/* cBPF to eBPF migrations are currently not in the idr store. */
+	/* cBPF to eBPF migrations are currently not in the idr store.
+	 * Offloaded programs are removed from the store when their device
+	 * disappears - even if someone grabs an fd to them they are unusable,
+	 * simply waiting for refcnt to drop to be freed.
+	 */
 	if (!prog->aux->id)
 		return;
 
@@ -917,6 +921,7 @@ static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
 		__acquire(&prog_idr_lock);
 
 	idr_remove(&prog_idr, prog->aux->id);
+	prog->aux->id = 0;
 
 	if (do_idr_lock)
 		spin_unlock_bh(&prog_idr_lock);
@@ -937,10 +942,16 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
 static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
 {
 	if (atomic_dec_and_test(&prog->aux->refcnt)) {
+		int i;
+
 		trace_bpf_prog_put_rcu(prog);
 		/* bpf_prog_free_id() must be called first */
 		bpf_prog_free_id(prog, do_idr_lock);
+
+		for (i = 0; i < prog->aux->func_cnt; i++)
+			bpf_prog_kallsyms_del(prog->aux->func[i]);
 		bpf_prog_kallsyms_del(prog);
+
 		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
 	}
 }
@@ -1151,6 +1162,8 @@ static int bpf_prog_load(union bpf_attr *attr)
 	if (!prog)
 		return -ENOMEM;
 
+	prog->aux->offload_requested = !!attr->prog_ifindex;
+
 	err = security_bpf_prog_alloc(prog->aux);
 	if (err)
 		goto free_prog_nouncharge;
@@ -1172,7 +1185,7 @@ static int bpf_prog_load(union bpf_attr *attr)
 	atomic_set(&prog->aux->refcnt, 1);
 	prog->gpl_compatible = is_gpl ? 1 : 0;
 
-	if (attr->prog_ifindex) {
+	if (bpf_prog_is_dev_bound(prog->aux)) {
 		err = bpf_prog_offload_init(prog, attr);
 		if (err)
 			goto free_prog;
@@ -1194,7 +1207,8 @@ static int bpf_prog_load(union bpf_attr *attr)
 		goto free_used_maps;
 
 	/* eBPF program is ready to be JITed */
-	prog = bpf_prog_select_runtime(prog, &err);
+	if (!prog->bpf_func)
+		prog = bpf_prog_select_runtime(prog, &err);
 	if (err < 0)
 		goto free_used_maps;
 
@@ -1551,6 +1565,67 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
 	return fd;
 }
 
+static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
+					      unsigned long addr)
+{
+	int i;
+
+	for (i = 0; i < prog->aux->used_map_cnt; i++)
+		if (prog->aux->used_maps[i] == (void *)addr)
+			return prog->aux->used_maps[i];
+	return NULL;
+}
+
+static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
+{
+	const struct bpf_map *map;
+	struct bpf_insn *insns;
+	u64 imm;
+	int i;
+
+	insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
+			GFP_USER);
+	if (!insns)
+		return insns;
+
+	for (i = 0; i < prog->len; i++) {
+		if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) {
+			insns[i].code = BPF_JMP | BPF_CALL;
+			insns[i].imm = BPF_FUNC_tail_call;
+			/* fall-through */
+		}
+		if (insns[i].code == (BPF_JMP | BPF_CALL) ||
+		    insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) {
+			if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS))
+				insns[i].code = BPF_JMP | BPF_CALL;
+			if (!bpf_dump_raw_ok())
+				insns[i].imm = 0;
+			continue;
+		}
+
+		if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW))
+			continue;
+
+		imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
+		map = bpf_map_from_imm(prog, imm);
+		if (map) {
+			insns[i].src_reg = BPF_PSEUDO_MAP_FD;
+			insns[i].imm = map->id;
+			insns[i + 1].imm = 0;
+			continue;
+		}
+
+		if (!bpf_dump_raw_ok() &&
+		    imm == (unsigned long)prog->aux) {
+			insns[i].imm = 0;
+			insns[i + 1].imm = 0;
+			continue;
+		}
+	}
+
+	return insns;
+}
+
 static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 				   const union bpf_attr *attr,
 				   union bpf_attr __user *uattr)
@@ -1601,21 +1676,43 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 	ulen = info.jited_prog_len;
 	info.jited_prog_len = prog->jited_len;
 	if (info.jited_prog_len && ulen) {
-		uinsns = u64_to_user_ptr(info.jited_prog_insns);
-		ulen = min_t(u32, info.jited_prog_len, ulen);
-		if (copy_to_user(uinsns, prog->bpf_func, ulen))
-			return -EFAULT;
+		if (bpf_dump_raw_ok()) {
+			uinsns = u64_to_user_ptr(info.jited_prog_insns);
+			ulen = min_t(u32, info.jited_prog_len, ulen);
+			if (copy_to_user(uinsns, prog->bpf_func, ulen))
+				return -EFAULT;
+		} else {
+			info.jited_prog_insns = 0;
+		}
 	}
 
 	ulen = info.xlated_prog_len;
 	info.xlated_prog_len = bpf_prog_insn_size(prog);
 	if (info.xlated_prog_len && ulen) {
+		struct bpf_insn *insns_sanitized;
+		bool fault;
+
+		if (prog->blinded && !bpf_dump_raw_ok()) {
+			info.xlated_prog_insns = 0;
+			goto done;
+		}
+		insns_sanitized = bpf_insn_prepare_dump(prog);
+		if (!insns_sanitized)
+			return -ENOMEM;
 		uinsns = u64_to_user_ptr(info.xlated_prog_insns);
 		ulen = min_t(u32, info.xlated_prog_len, ulen);
-		if (copy_to_user(uinsns, prog->insnsi, ulen))
+		fault = copy_to_user(uinsns, insns_sanitized, ulen);
+		kfree(insns_sanitized);
+		if (fault)
 			return -EFAULT;
 	}
 
+	if (bpf_prog_is_dev_bound(prog->aux)) {
+		err = bpf_prog_offload_info_fill(&info, prog);
+		if (err)
+			return err;
+	}
+
 done:
 	if (copy_to_user(uinfo, &info, info_len) ||
 	    put_user(info_len, &uattr->info.info_len))