diff options
| author | David S. Miller <davem@davemloft.net> | 2019-02-17 01:56:34 -0500 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2019-02-17 01:56:34 -0500 |
| commit | 885e63195980ab25abc67336f0c44d4cb4e6e72b (patch) | |
| tree | e16c90fe7e0400646dc73771790b5be47d143012 /kernel | |
| parent | f186a82b10dc229f9cd1e9f27f90cb0ce86e879d (diff) | |
| parent | 5aab392c55c96f9bb26d9294f965f156a87ee81c (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Alexei Starovoitov says:
====================
pull-request: bpf-next 2019-02-16
The following pull-request contains BPF updates for your *net-next* tree.
The main changes are:
1) numerous libbpf API improvements, from Andrii, Andrey, Yonghong.
2) test all bpf progs in alu32 mode, from Jiong.
3) skb->sk access and bpf_sk_fullsock(), bpf_tcp_sock() helpers, from Martin.
4) support for IP encap in lwt bpf progs, from Peter.
5) remove XDP_QUERY_XSK_UMEM dead code, from Jan.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/bpf/offload.c | 10 | ||||
| -rw-r--r-- | kernel/bpf/verifier.c | 168 |
2 files changed, 134 insertions(+), 44 deletions(-)
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 39dba8c90331..ba635209ae9a 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
| @@ -35,6 +35,7 @@ static DECLARE_RWSEM(bpf_devs_lock); | |||
| 35 | struct bpf_offload_dev { | 35 | struct bpf_offload_dev { |
| 36 | const struct bpf_prog_offload_ops *ops; | 36 | const struct bpf_prog_offload_ops *ops; |
| 37 | struct list_head netdevs; | 37 | struct list_head netdevs; |
| 38 | void *priv; | ||
| 38 | }; | 39 | }; |
| 39 | 40 | ||
| 40 | struct bpf_offload_netdev { | 41 | struct bpf_offload_netdev { |
| @@ -669,7 +670,7 @@ unlock: | |||
| 669 | EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister); | 670 | EXPORT_SYMBOL_GPL(bpf_offload_dev_netdev_unregister); |
| 670 | 671 | ||
| 671 | struct bpf_offload_dev * | 672 | struct bpf_offload_dev * |
| 672 | bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops) | 673 | bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv) |
| 673 | { | 674 | { |
| 674 | struct bpf_offload_dev *offdev; | 675 | struct bpf_offload_dev *offdev; |
| 675 | int err; | 676 | int err; |
| @@ -688,6 +689,7 @@ bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops) | |||
| 688 | return ERR_PTR(-ENOMEM); | 689 | return ERR_PTR(-ENOMEM); |
| 689 | 690 | ||
| 690 | offdev->ops = ops; | 691 | offdev->ops = ops; |
| 692 | offdev->priv = priv; | ||
| 691 | INIT_LIST_HEAD(&offdev->netdevs); | 693 | INIT_LIST_HEAD(&offdev->netdevs); |
| 692 | 694 | ||
| 693 | return offdev; | 695 | return offdev; |
| @@ -700,3 +702,9 @@ void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev) | |||
| 700 | kfree(offdev); | 702 | kfree(offdev); |
| 701 | } | 703 | } |
| 702 | EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy); | 704 | EXPORT_SYMBOL_GPL(bpf_offload_dev_destroy); |
| 705 | |||
| 706 | void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev) | ||
| 707 | { | ||
| 708 | return offdev->priv; | ||
| 709 | } | ||
| 710 | EXPORT_SYMBOL_GPL(bpf_offload_dev_priv); | ||
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index b63bc77af2d1..1b9496c41383 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
| @@ -331,10 +331,19 @@ static bool type_is_pkt_pointer(enum bpf_reg_type type) | |||
| 331 | type == PTR_TO_PACKET_META; | 331 | type == PTR_TO_PACKET_META; |
| 332 | } | 332 | } |
| 333 | 333 | ||
| 334 | static bool type_is_sk_pointer(enum bpf_reg_type type) | ||
| 335 | { | ||
| 336 | return type == PTR_TO_SOCKET || | ||
| 337 | type == PTR_TO_SOCK_COMMON || | ||
| 338 | type == PTR_TO_TCP_SOCK; | ||
| 339 | } | ||
| 340 | |||
| 334 | static bool reg_type_may_be_null(enum bpf_reg_type type) | 341 | static bool reg_type_may_be_null(enum bpf_reg_type type) |
| 335 | { | 342 | { |
| 336 | return type == PTR_TO_MAP_VALUE_OR_NULL || | 343 | return type == PTR_TO_MAP_VALUE_OR_NULL || |
| 337 | type == PTR_TO_SOCKET_OR_NULL; | 344 | type == PTR_TO_SOCKET_OR_NULL || |
| 345 | type == PTR_TO_SOCK_COMMON_OR_NULL || | ||
| 346 | type == PTR_TO_TCP_SOCK_OR_NULL; | ||
| 338 | } | 347 | } |
| 339 | 348 | ||
| 340 | static bool type_is_refcounted(enum bpf_reg_type type) | 349 | static bool type_is_refcounted(enum bpf_reg_type type) |
| @@ -377,6 +386,12 @@ static bool is_release_function(enum bpf_func_id func_id) | |||
| 377 | return func_id == BPF_FUNC_sk_release; | 386 | return func_id == BPF_FUNC_sk_release; |
| 378 | } | 387 | } |
| 379 | 388 | ||
| 389 | static bool is_acquire_function(enum bpf_func_id func_id) | ||
| 390 | { | ||
| 391 | return func_id == BPF_FUNC_sk_lookup_tcp || | ||
| 392 | func_id == BPF_FUNC_sk_lookup_udp; | ||
| 393 | } | ||
| 394 | |||
| 380 | /* string representation of 'enum bpf_reg_type' */ | 395 | /* string representation of 'enum bpf_reg_type' */ |
| 381 | static const char * const reg_type_str[] = { | 396 | static const char * const reg_type_str[] = { |
| 382 | [NOT_INIT] = "?", | 397 | [NOT_INIT] = "?", |
| @@ -392,6 +407,10 @@ static const char * const reg_type_str[] = { | |||
| 392 | [PTR_TO_FLOW_KEYS] = "flow_keys", | 407 | [PTR_TO_FLOW_KEYS] = "flow_keys", |
| 393 | [PTR_TO_SOCKET] = "sock", | 408 | [PTR_TO_SOCKET] = "sock", |
| 394 | [PTR_TO_SOCKET_OR_NULL] = "sock_or_null", | 409 | [PTR_TO_SOCKET_OR_NULL] = "sock_or_null", |
| 410 | [PTR_TO_SOCK_COMMON] = "sock_common", | ||
| 411 | [PTR_TO_SOCK_COMMON_OR_NULL] = "sock_common_or_null", | ||
| 412 | [PTR_TO_TCP_SOCK] = "tcp_sock", | ||
| 413 | [PTR_TO_TCP_SOCK_OR_NULL] = "tcp_sock_or_null", | ||
| 395 | }; | 414 | }; |
| 396 | 415 | ||
| 397 | static char slot_type_char[] = { | 416 | static char slot_type_char[] = { |
| @@ -618,13 +637,10 @@ static int acquire_reference_state(struct bpf_verifier_env *env, int insn_idx) | |||
| 618 | } | 637 | } |
| 619 | 638 | ||
| 620 | /* release function corresponding to acquire_reference_state(). Idempotent. */ | 639 | /* release function corresponding to acquire_reference_state(). Idempotent. */ |
| 621 | static int __release_reference_state(struct bpf_func_state *state, int ptr_id) | 640 | static int release_reference_state(struct bpf_func_state *state, int ptr_id) |
| 622 | { | 641 | { |
| 623 | int i, last_idx; | 642 | int i, last_idx; |
| 624 | 643 | ||
| 625 | if (!ptr_id) | ||
| 626 | return -EFAULT; | ||
| 627 | |||
| 628 | last_idx = state->acquired_refs - 1; | 644 | last_idx = state->acquired_refs - 1; |
| 629 | for (i = 0; i < state->acquired_refs; i++) { | 645 | for (i = 0; i < state->acquired_refs; i++) { |
| 630 | if (state->refs[i].id == ptr_id) { | 646 | if (state->refs[i].id == ptr_id) { |
| @@ -636,21 +652,7 @@ static int __release_reference_state(struct bpf_func_state *state, int ptr_id) | |||
| 636 | return 0; | 652 | return 0; |
| 637 | } | 653 | } |
| 638 | } | 654 | } |
| 639 | return -EFAULT; | 655 | return -EINVAL; |
| 640 | } | ||
| 641 | |||
| 642 | /* variation on the above for cases where we expect that there must be an | ||
| 643 | * outstanding reference for the specified ptr_id. | ||
| 644 | */ | ||
| 645 | static int release_reference_state(struct bpf_verifier_env *env, int ptr_id) | ||
| 646 | { | ||
| 647 | struct bpf_func_state *state = cur_func(env); | ||
| 648 | int err; | ||
| 649 | |||
| 650 | err = __release_reference_state(state, ptr_id); | ||
| 651 | if (WARN_ON_ONCE(err != 0)) | ||
| 652 | verbose(env, "verifier internal error: can't release reference\n"); | ||
| 653 | return err; | ||
| 654 | } | 656 | } |
| 655 | 657 | ||
| 656 | static int transfer_reference_state(struct bpf_func_state *dst, | 658 | static int transfer_reference_state(struct bpf_func_state *dst, |
| @@ -1209,6 +1211,10 @@ static bool is_spillable_regtype(enum bpf_reg_type type) | |||
| 1209 | case CONST_PTR_TO_MAP: | 1211 | case CONST_PTR_TO_MAP: |
| 1210 | case PTR_TO_SOCKET: | 1212 | case PTR_TO_SOCKET: |
| 1211 | case PTR_TO_SOCKET_OR_NULL: | 1213 | case PTR_TO_SOCKET_OR_NULL: |
| 1214 | case PTR_TO_SOCK_COMMON: | ||
| 1215 | case PTR_TO_SOCK_COMMON_OR_NULL: | ||
| 1216 | case PTR_TO_TCP_SOCK: | ||
| 1217 | case PTR_TO_TCP_SOCK_OR_NULL: | ||
| 1212 | return true; | 1218 | return true; |
| 1213 | default: | 1219 | default: |
| 1214 | return false; | 1220 | return false; |
| @@ -1640,12 +1646,14 @@ static int check_flow_keys_access(struct bpf_verifier_env *env, int off, | |||
| 1640 | return 0; | 1646 | return 0; |
| 1641 | } | 1647 | } |
| 1642 | 1648 | ||
| 1643 | static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off, | 1649 | static int check_sock_access(struct bpf_verifier_env *env, int insn_idx, |
| 1644 | int size, enum bpf_access_type t) | 1650 | u32 regno, int off, int size, |
| 1651 | enum bpf_access_type t) | ||
| 1645 | { | 1652 | { |
| 1646 | struct bpf_reg_state *regs = cur_regs(env); | 1653 | struct bpf_reg_state *regs = cur_regs(env); |
| 1647 | struct bpf_reg_state *reg = ®s[regno]; | 1654 | struct bpf_reg_state *reg = ®s[regno]; |
| 1648 | struct bpf_insn_access_aux info; | 1655 | struct bpf_insn_access_aux info = {}; |
| 1656 | bool valid; | ||
| 1649 | 1657 | ||
| 1650 | if (reg->smin_value < 0) { | 1658 | if (reg->smin_value < 0) { |
| 1651 | verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", | 1659 | verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n", |
| @@ -1653,13 +1661,31 @@ static int check_sock_access(struct bpf_verifier_env *env, u32 regno, int off, | |||
| 1653 | return -EACCES; | 1661 | return -EACCES; |
| 1654 | } | 1662 | } |
| 1655 | 1663 | ||
| 1656 | if (!bpf_sock_is_valid_access(off, size, t, &info)) { | 1664 | switch (reg->type) { |
| 1657 | verbose(env, "invalid bpf_sock access off=%d size=%d\n", | 1665 | case PTR_TO_SOCK_COMMON: |
| 1658 | off, size); | 1666 | valid = bpf_sock_common_is_valid_access(off, size, t, &info); |
| 1659 | return -EACCES; | 1667 | break; |
| 1668 | case PTR_TO_SOCKET: | ||
| 1669 | valid = bpf_sock_is_valid_access(off, size, t, &info); | ||
| 1670 | break; | ||
| 1671 | case PTR_TO_TCP_SOCK: | ||
| 1672 | valid = bpf_tcp_sock_is_valid_access(off, size, t, &info); | ||
| 1673 | break; | ||
| 1674 | default: | ||
| 1675 | valid = false; | ||
| 1660 | } | 1676 | } |
| 1661 | 1677 | ||
| 1662 | return 0; | 1678 | |
| 1679 | if (valid) { | ||
| 1680 | env->insn_aux_data[insn_idx].ctx_field_size = | ||
| 1681 | info.ctx_field_size; | ||
| 1682 | return 0; | ||
| 1683 | } | ||
| 1684 | |||
| 1685 | verbose(env, "R%d invalid %s access off=%d size=%d\n", | ||
| 1686 | regno, reg_type_str[reg->type], off, size); | ||
| 1687 | |||
| 1688 | return -EACCES; | ||
| 1663 | } | 1689 | } |
| 1664 | 1690 | ||
| 1665 | static bool __is_pointer_value(bool allow_ptr_leaks, | 1691 | static bool __is_pointer_value(bool allow_ptr_leaks, |
| @@ -1685,8 +1711,14 @@ static bool is_ctx_reg(struct bpf_verifier_env *env, int regno) | |||
| 1685 | { | 1711 | { |
| 1686 | const struct bpf_reg_state *reg = reg_state(env, regno); | 1712 | const struct bpf_reg_state *reg = reg_state(env, regno); |
| 1687 | 1713 | ||
| 1688 | return reg->type == PTR_TO_CTX || | 1714 | return reg->type == PTR_TO_CTX; |
| 1689 | reg->type == PTR_TO_SOCKET; | 1715 | } |
| 1716 | |||
| 1717 | static bool is_sk_reg(struct bpf_verifier_env *env, int regno) | ||
| 1718 | { | ||
| 1719 | const struct bpf_reg_state *reg = reg_state(env, regno); | ||
| 1720 | |||
| 1721 | return type_is_sk_pointer(reg->type); | ||
| 1690 | } | 1722 | } |
| 1691 | 1723 | ||
| 1692 | static bool is_pkt_reg(struct bpf_verifier_env *env, int regno) | 1724 | static bool is_pkt_reg(struct bpf_verifier_env *env, int regno) |
| @@ -1797,6 +1829,12 @@ static int check_ptr_alignment(struct bpf_verifier_env *env, | |||
| 1797 | case PTR_TO_SOCKET: | 1829 | case PTR_TO_SOCKET: |
| 1798 | pointer_desc = "sock "; | 1830 | pointer_desc = "sock "; |
| 1799 | break; | 1831 | break; |
| 1832 | case PTR_TO_SOCK_COMMON: | ||
| 1833 | pointer_desc = "sock_common "; | ||
| 1834 | break; | ||
| 1835 | case PTR_TO_TCP_SOCK: | ||
| 1836 | pointer_desc = "tcp_sock "; | ||
| 1837 | break; | ||
| 1800 | default: | 1838 | default: |
| 1801 | break; | 1839 | break; |
| 1802 | } | 1840 | } |
| @@ -2000,11 +2038,14 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn | |||
| 2000 | * PTR_TO_PACKET[_META,_END]. In the latter | 2038 | * PTR_TO_PACKET[_META,_END]. In the latter |
| 2001 | * case, we know the offset is zero. | 2039 | * case, we know the offset is zero. |
| 2002 | */ | 2040 | */ |
| 2003 | if (reg_type == SCALAR_VALUE) | 2041 | if (reg_type == SCALAR_VALUE) { |
| 2004 | mark_reg_unknown(env, regs, value_regno); | 2042 | mark_reg_unknown(env, regs, value_regno); |
| 2005 | else | 2043 | } else { |
| 2006 | mark_reg_known_zero(env, regs, | 2044 | mark_reg_known_zero(env, regs, |
| 2007 | value_regno); | 2045 | value_regno); |
| 2046 | if (reg_type_may_be_null(reg_type)) | ||
| 2047 | regs[value_regno].id = ++env->id_gen; | ||
| 2048 | } | ||
| 2008 | regs[value_regno].type = reg_type; | 2049 | regs[value_regno].type = reg_type; |
| 2009 | } | 2050 | } |
| 2010 | 2051 | ||
| @@ -2050,12 +2091,13 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn | |||
| 2050 | err = check_flow_keys_access(env, off, size); | 2091 | err = check_flow_keys_access(env, off, size); |
| 2051 | if (!err && t == BPF_READ && value_regno >= 0) | 2092 | if (!err && t == BPF_READ && value_regno >= 0) |
| 2052 | mark_reg_unknown(env, regs, value_regno); | 2093 | mark_reg_unknown(env, regs, value_regno); |
| 2053 | } else if (reg->type == PTR_TO_SOCKET) { | 2094 | } else if (type_is_sk_pointer(reg->type)) { |
| 2054 | if (t == BPF_WRITE) { | 2095 | if (t == BPF_WRITE) { |
| 2055 | verbose(env, "cannot write into socket\n"); | 2096 | verbose(env, "R%d cannot write into %s\n", |
| 2097 | regno, reg_type_str[reg->type]); | ||
| 2056 | return -EACCES; | 2098 | return -EACCES; |
| 2057 | } | 2099 | } |
| 2058 | err = check_sock_access(env, regno, off, size, t); | 2100 | err = check_sock_access(env, insn_idx, regno, off, size, t); |
| 2059 | if (!err && value_regno >= 0) | 2101 | if (!err && value_regno >= 0) |
| 2060 | mark_reg_unknown(env, regs, value_regno); | 2102 | mark_reg_unknown(env, regs, value_regno); |
| 2061 | } else { | 2103 | } else { |
| @@ -2099,7 +2141,8 @@ static int check_xadd(struct bpf_verifier_env *env, int insn_idx, struct bpf_ins | |||
| 2099 | 2141 | ||
| 2100 | if (is_ctx_reg(env, insn->dst_reg) || | 2142 | if (is_ctx_reg(env, insn->dst_reg) || |
| 2101 | is_pkt_reg(env, insn->dst_reg) || | 2143 | is_pkt_reg(env, insn->dst_reg) || |
| 2102 | is_flow_key_reg(env, insn->dst_reg)) { | 2144 | is_flow_key_reg(env, insn->dst_reg) || |
| 2145 | is_sk_reg(env, insn->dst_reg)) { | ||
| 2103 | verbose(env, "BPF_XADD stores into R%d %s is not allowed\n", | 2146 | verbose(env, "BPF_XADD stores into R%d %s is not allowed\n", |
| 2104 | insn->dst_reg, | 2147 | insn->dst_reg, |
| 2105 | reg_type_str[reg_state(env, insn->dst_reg)->type]); | 2148 | reg_type_str[reg_state(env, insn->dst_reg)->type]); |
| @@ -2366,6 +2409,11 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 regno, | |||
| 2366 | err = check_ctx_reg(env, reg, regno); | 2409 | err = check_ctx_reg(env, reg, regno); |
| 2367 | if (err < 0) | 2410 | if (err < 0) |
| 2368 | return err; | 2411 | return err; |
| 2412 | } else if (arg_type == ARG_PTR_TO_SOCK_COMMON) { | ||
| 2413 | expected_type = PTR_TO_SOCK_COMMON; | ||
| 2414 | /* Any sk pointer can be ARG_PTR_TO_SOCK_COMMON */ | ||
| 2415 | if (!type_is_sk_pointer(type)) | ||
| 2416 | goto err_type; | ||
| 2369 | } else if (arg_type == ARG_PTR_TO_SOCKET) { | 2417 | } else if (arg_type == ARG_PTR_TO_SOCKET) { |
| 2370 | expected_type = PTR_TO_SOCKET; | 2418 | expected_type = PTR_TO_SOCKET; |
| 2371 | if (type != expected_type) | 2419 | if (type != expected_type) |
| @@ -2780,7 +2828,7 @@ static int release_reference(struct bpf_verifier_env *env, | |||
| 2780 | for (i = 0; i <= vstate->curframe; i++) | 2828 | for (i = 0; i <= vstate->curframe; i++) |
| 2781 | release_reg_references(env, vstate->frame[i], meta->ptr_id); | 2829 | release_reg_references(env, vstate->frame[i], meta->ptr_id); |
| 2782 | 2830 | ||
| 2783 | return release_reference_state(env, meta->ptr_id); | 2831 | return release_reference_state(cur_func(env), meta->ptr_id); |
| 2784 | } | 2832 | } |
| 2785 | 2833 | ||
| 2786 | static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, | 2834 | static int check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, |
| @@ -3046,8 +3094,11 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn | |||
| 3046 | } | 3094 | } |
| 3047 | } else if (is_release_function(func_id)) { | 3095 | } else if (is_release_function(func_id)) { |
| 3048 | err = release_reference(env, &meta); | 3096 | err = release_reference(env, &meta); |
| 3049 | if (err) | 3097 | if (err) { |
| 3098 | verbose(env, "func %s#%d reference has not been acquired before\n", | ||
| 3099 | func_id_name(func_id), func_id); | ||
| 3050 | return err; | 3100 | return err; |
| 3101 | } | ||
| 3051 | } | 3102 | } |
| 3052 | 3103 | ||
| 3053 | regs = cur_regs(env); | 3104 | regs = cur_regs(env); |
| @@ -3096,12 +3147,23 @@ static int check_helper_call(struct bpf_verifier_env *env, int func_id, int insn | |||
| 3096 | regs[BPF_REG_0].id = ++env->id_gen; | 3147 | regs[BPF_REG_0].id = ++env->id_gen; |
| 3097 | } | 3148 | } |
| 3098 | } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) { | 3149 | } else if (fn->ret_type == RET_PTR_TO_SOCKET_OR_NULL) { |
| 3099 | int id = acquire_reference_state(env, insn_idx); | ||
| 3100 | if (id < 0) | ||
| 3101 | return id; | ||
| 3102 | mark_reg_known_zero(env, regs, BPF_REG_0); | 3150 | mark_reg_known_zero(env, regs, BPF_REG_0); |
| 3103 | regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL; | 3151 | regs[BPF_REG_0].type = PTR_TO_SOCKET_OR_NULL; |
| 3104 | regs[BPF_REG_0].id = id; | 3152 | if (is_acquire_function(func_id)) { |
| 3153 | int id = acquire_reference_state(env, insn_idx); | ||
| 3154 | |||
| 3155 | if (id < 0) | ||
| 3156 | return id; | ||
| 3157 | /* For release_reference() */ | ||
| 3158 | regs[BPF_REG_0].id = id; | ||
| 3159 | } else { | ||
| 3160 | /* For mark_ptr_or_null_reg() */ | ||
| 3161 | regs[BPF_REG_0].id = ++env->id_gen; | ||
| 3162 | } | ||
| 3163 | } else if (fn->ret_type == RET_PTR_TO_TCP_SOCK_OR_NULL) { | ||
| 3164 | mark_reg_known_zero(env, regs, BPF_REG_0); | ||
| 3165 | regs[BPF_REG_0].type = PTR_TO_TCP_SOCK_OR_NULL; | ||
| 3166 | regs[BPF_REG_0].id = ++env->id_gen; | ||
| 3105 | } else { | 3167 | } else { |
| 3106 | verbose(env, "unknown return type %d of func %s#%d\n", | 3168 | verbose(env, "unknown return type %d of func %s#%d\n", |
| 3107 | fn->ret_type, func_id_name(func_id), func_id); | 3169 | fn->ret_type, func_id_name(func_id), func_id); |
| @@ -3361,6 +3423,10 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, | |||
| 3361 | case PTR_TO_PACKET_END: | 3423 | case PTR_TO_PACKET_END: |
| 3362 | case PTR_TO_SOCKET: | 3424 | case PTR_TO_SOCKET: |
| 3363 | case PTR_TO_SOCKET_OR_NULL: | 3425 | case PTR_TO_SOCKET_OR_NULL: |
| 3426 | case PTR_TO_SOCK_COMMON: | ||
| 3427 | case PTR_TO_SOCK_COMMON_OR_NULL: | ||
| 3428 | case PTR_TO_TCP_SOCK: | ||
| 3429 | case PTR_TO_TCP_SOCK_OR_NULL: | ||
| 3364 | verbose(env, "R%d pointer arithmetic on %s prohibited\n", | 3430 | verbose(env, "R%d pointer arithmetic on %s prohibited\n", |
| 3365 | dst, reg_type_str[ptr_reg->type]); | 3431 | dst, reg_type_str[ptr_reg->type]); |
| 3366 | return -EACCES; | 3432 | return -EACCES; |
| @@ -4594,6 +4660,10 @@ static void mark_ptr_or_null_reg(struct bpf_func_state *state, | |||
| 4594 | } | 4660 | } |
| 4595 | } else if (reg->type == PTR_TO_SOCKET_OR_NULL) { | 4661 | } else if (reg->type == PTR_TO_SOCKET_OR_NULL) { |
| 4596 | reg->type = PTR_TO_SOCKET; | 4662 | reg->type = PTR_TO_SOCKET; |
| 4663 | } else if (reg->type == PTR_TO_SOCK_COMMON_OR_NULL) { | ||
| 4664 | reg->type = PTR_TO_SOCK_COMMON; | ||
| 4665 | } else if (reg->type == PTR_TO_TCP_SOCK_OR_NULL) { | ||
| 4666 | reg->type = PTR_TO_TCP_SOCK; | ||
| 4597 | } | 4667 | } |
| 4598 | if (is_null || !(reg_is_refcounted(reg) || | 4668 | if (is_null || !(reg_is_refcounted(reg) || |
| 4599 | reg_may_point_to_spin_lock(reg))) { | 4669 | reg_may_point_to_spin_lock(reg))) { |
| @@ -4618,7 +4688,7 @@ static void mark_ptr_or_null_regs(struct bpf_verifier_state *vstate, u32 regno, | |||
| 4618 | int i, j; | 4688 | int i, j; |
| 4619 | 4689 | ||
| 4620 | if (reg_is_refcounted_or_null(®s[regno]) && is_null) | 4690 | if (reg_is_refcounted_or_null(®s[regno]) && is_null) |
| 4621 | __release_reference_state(state, id); | 4691 | release_reference_state(state, id); |
| 4622 | 4692 | ||
| 4623 | for (i = 0; i < MAX_BPF_REG; i++) | 4693 | for (i = 0; i < MAX_BPF_REG; i++) |
| 4624 | mark_ptr_or_null_reg(state, ®s[i], id, is_null); | 4694 | mark_ptr_or_null_reg(state, ®s[i], id, is_null); |
| @@ -5787,6 +5857,10 @@ static bool regsafe(struct bpf_reg_state *rold, struct bpf_reg_state *rcur, | |||
| 5787 | case PTR_TO_FLOW_KEYS: | 5857 | case PTR_TO_FLOW_KEYS: |
| 5788 | case PTR_TO_SOCKET: | 5858 | case PTR_TO_SOCKET: |
| 5789 | case PTR_TO_SOCKET_OR_NULL: | 5859 | case PTR_TO_SOCKET_OR_NULL: |
| 5860 | case PTR_TO_SOCK_COMMON: | ||
| 5861 | case PTR_TO_SOCK_COMMON_OR_NULL: | ||
| 5862 | case PTR_TO_TCP_SOCK: | ||
| 5863 | case PTR_TO_TCP_SOCK_OR_NULL: | ||
| 5790 | /* Only valid matches are exact, which memcmp() above | 5864 | /* Only valid matches are exact, which memcmp() above |
| 5791 | * would have accepted | 5865 | * would have accepted |
| 5792 | */ | 5866 | */ |
| @@ -6107,6 +6181,10 @@ static bool reg_type_mismatch_ok(enum bpf_reg_type type) | |||
| 6107 | case PTR_TO_CTX: | 6181 | case PTR_TO_CTX: |
| 6108 | case PTR_TO_SOCKET: | 6182 | case PTR_TO_SOCKET: |
| 6109 | case PTR_TO_SOCKET_OR_NULL: | 6183 | case PTR_TO_SOCKET_OR_NULL: |
| 6184 | case PTR_TO_SOCK_COMMON: | ||
| 6185 | case PTR_TO_SOCK_COMMON_OR_NULL: | ||
| 6186 | case PTR_TO_TCP_SOCK: | ||
| 6187 | case PTR_TO_TCP_SOCK_OR_NULL: | ||
| 6110 | return false; | 6188 | return false; |
| 6111 | default: | 6189 | default: |
| 6112 | return true; | 6190 | return true; |
| @@ -7109,8 +7187,12 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env) | |||
| 7109 | convert_ctx_access = ops->convert_ctx_access; | 7187 | convert_ctx_access = ops->convert_ctx_access; |
| 7110 | break; | 7188 | break; |
| 7111 | case PTR_TO_SOCKET: | 7189 | case PTR_TO_SOCKET: |
| 7190 | case PTR_TO_SOCK_COMMON: | ||
| 7112 | convert_ctx_access = bpf_sock_convert_ctx_access; | 7191 | convert_ctx_access = bpf_sock_convert_ctx_access; |
| 7113 | break; | 7192 | break; |
| 7193 | case PTR_TO_TCP_SOCK: | ||
| 7194 | convert_ctx_access = bpf_tcp_sock_convert_ctx_access; | ||
| 7195 | break; | ||
| 7114 | default: | 7196 | default: |
| 7115 | continue; | 7197 | continue; |
| 7116 | } | 7198 | } |
