about | summary | refs | log | tree | commit | diff | stats
path: root/kernel/bpf/syscall.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/bpf/syscall.c')
-rw-r--r--  kernel/bpf/syscall.c  87
1 file changed, 82 insertions, 5 deletions
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 30e728dcd35d..007802c5ca7d 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1558,6 +1558,67 @@ static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
1558 return fd; 1558 return fd;
1559} 1559}
1560 1560
/*
 * bpf_map_from_imm() - resolve a 64-bit immediate back to a used map.
 * @prog: program whose used_maps table is searched
 * @addr: kernel address reassembled from a BPF_LD_IMM64 instruction pair
 *
 * Scans prog->aux->used_maps for an entry whose pointer equals @addr.
 * Returns the matching struct bpf_map, or NULL when @addr is not the
 * address of any map this program references.
 */
1561static const struct bpf_map *bpf_map_from_imm(const struct bpf_prog *prog,
1562 unsigned long addr)
1563{
1564 int i;
1565
	/* Linear scan; used_map_cnt is the number of maps the verifier
	 * recorded for this program.
	 */
1566 for (i = 0; i < prog->aux->used_map_cnt; i++)
1567 if (prog->aux->used_maps[i] == (void *)addr)
1568 return prog->aux->used_maps[i];
1569 return NULL;
1570}
1571
/*
 * bpf_insn_prepare_dump() - build a sanitized copy of a program's
 * translated instructions, suitable for dumping to user space.
 * @prog: program whose insn array is copied and scrubbed
 *
 * Works on a kmemdup()'d copy so the live program is never modified.
 * Internal-only instruction forms are converted back to their
 * user-visible equivalents, and raw kernel addresses embedded in
 * immediates are either translated to stable identifiers (map IDs) or
 * zeroed when bpf_dump_raw_ok() denies exposing raw pointers.
 *
 * Returns the sanitized copy (caller must kfree()), or NULL when the
 * allocation fails.
 */
1572static struct bpf_insn *bpf_insn_prepare_dump(const struct bpf_prog *prog)
1573{
1574 const struct bpf_map *map;
1575 struct bpf_insn *insns;
1576 u64 imm;
1577 int i;
1578
	/* Duplicate the whole insn array; all edits below are on the copy. */
1579 insns = kmemdup(prog->insnsi, bpf_prog_insn_size(prog),
1580 GFP_USER);
1581 if (!insns)
1582 return insns;
1583
1584 for (i = 0; i < prog->len; i++) {
		/* Rewrite the internal tail-call opcode back into the
		 * bpf_tail_call() helper call the user originally wrote,
		 * then let the call-sanitizing branch below handle it.
		 */
1585 if (insns[i].code == (BPF_JMP | BPF_TAIL_CALL)) {
1586 insns[i].code = BPF_JMP | BPF_CALL;
1587 insns[i].imm = BPF_FUNC_tail_call;
1588 /* fall-through */
1589 }
1590 if (insns[i].code == (BPF_JMP | BPF_CALL) ||
1591 insns[i].code == (BPF_JMP | BPF_CALL_ARGS)) {
			/* CALL_ARGS is an internal variant; present it as a
			 * plain BPF_CALL to user space.
			 */
1592 if (insns[i].code == (BPF_JMP | BPF_CALL_ARGS))
1593 insns[i].code = BPF_JMP | BPF_CALL;
			/* The fixed-up imm encodes a call target; scrub it
			 * unless the caller is allowed to see raw values.
			 */
1594 if (!bpf_dump_raw_ok())
1595 insns[i].imm = 0;
1596 continue;
1597 }
1598
		/* Only 64-bit load-immediate pairs can carry pointers. */
1599 if (insns[i].code != (BPF_LD | BPF_IMM | BPF_DW))
1600 continue;
1601
		/* Reassemble the 64-bit immediate split across insn i and
		 * its second half at i + 1.
		 */
1602 imm = ((u64)insns[i + 1].imm << 32) | (u32)insns[i].imm;
1603 map = bpf_map_from_imm(prog, imm);
1604 if (map) {
			/* Replace the raw map pointer with the stable map ID
			 * in pseudo-fd form; user space can re-resolve it.
			 */
1605 insns[i].src_reg = BPF_PSEUDO_MAP_FD;
1606 insns[i].imm = map->id;
1607 insns[i + 1].imm = 0;
1608 continue;
1609 }
1610
		/* A load of prog->aux's own address (used internally) must
		 * not leak either; zero both halves when raw dumping is
		 * not permitted.
		 */
1611 if (!bpf_dump_raw_ok() &&
1612 imm == (unsigned long)prog->aux) {
1613 insns[i].imm = 0;
1614 insns[i + 1].imm = 0;
1615 continue;
1616 }
1617 }
1618
1619 return insns;
1620}
1621
1561static int bpf_prog_get_info_by_fd(struct bpf_prog *prog, 1622static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
1562 const union bpf_attr *attr, 1623 const union bpf_attr *attr,
1563 union bpf_attr __user *uattr) 1624 union bpf_attr __user *uattr)
@@ -1608,18 +1669,34 @@ static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
1608 ulen = info.jited_prog_len; 1669 ulen = info.jited_prog_len;
1609 info.jited_prog_len = prog->jited_len; 1670 info.jited_prog_len = prog->jited_len;
1610 if (info.jited_prog_len && ulen) { 1671 if (info.jited_prog_len && ulen) {
1611 uinsns = u64_to_user_ptr(info.jited_prog_insns); 1672 if (bpf_dump_raw_ok()) {
1612 ulen = min_t(u32, info.jited_prog_len, ulen); 1673 uinsns = u64_to_user_ptr(info.jited_prog_insns);
1613 if (copy_to_user(uinsns, prog->bpf_func, ulen)) 1674 ulen = min_t(u32, info.jited_prog_len, ulen);
1614 return -EFAULT; 1675 if (copy_to_user(uinsns, prog->bpf_func, ulen))
1676 return -EFAULT;
1677 } else {
1678 info.jited_prog_insns = 0;
1679 }
1615 } 1680 }
1616 1681
1617 ulen = info.xlated_prog_len; 1682 ulen = info.xlated_prog_len;
1618 info.xlated_prog_len = bpf_prog_insn_size(prog); 1683 info.xlated_prog_len = bpf_prog_insn_size(prog);
1619 if (info.xlated_prog_len && ulen) { 1684 if (info.xlated_prog_len && ulen) {
1685 struct bpf_insn *insns_sanitized;
1686 bool fault;
1687
1688 if (prog->blinded && !bpf_dump_raw_ok()) {
1689 info.xlated_prog_insns = 0;
1690 goto done;
1691 }
1692 insns_sanitized = bpf_insn_prepare_dump(prog);
1693 if (!insns_sanitized)
1694 return -ENOMEM;
1620 uinsns = u64_to_user_ptr(info.xlated_prog_insns); 1695 uinsns = u64_to_user_ptr(info.xlated_prog_insns);
1621 ulen = min_t(u32, info.xlated_prog_len, ulen); 1696 ulen = min_t(u32, info.xlated_prog_len, ulen);
1622 if (copy_to_user(uinsns, prog->insnsi, ulen)) 1697 fault = copy_to_user(uinsns, insns_sanitized, ulen);
1698 kfree(insns_sanitized);
1699 if (fault)
1623 return -EFAULT; 1700 return -EFAULT;
1624 } 1701 }
1625 1702