author     David S. Miller <davem@davemloft.net>  2018-03-23 11:24:57 -0400
committer  David S. Miller <davem@davemloft.net>  2018-03-23 11:31:58 -0400
commit     03fe2debbb2771fb90881e4ce8109b09cf772a5c
tree       fbaf8738296b2e9dcba81c6daef2d515b6c4948c /kernel/trace/bpf_trace.c
parent     6686c459e1449a3ee5f3fd313b0a559ace7a700e
parent     f36b7534b83357cf52e747905de6d65b4f7c2512
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Fun set of conflict resolutions here...
For the mac80211 stuff, these were fortunately just parallel
adds. Trivially resolved.
In drivers/net/phy/phy.c we had a bug fix in 'net' that moved the
function phy_disable_interrupts() earlier in the file, whilst in
'net-next' the phy_error() call from this function was removed.
In net/ipv4/xfrm4_policy.c, David Ahern's changes to remove the
'rt_table_id' member of rtable collided with a bug fix in 'net' that
added a new struct member "rt_mtu_locked" which needs to be copied
over here.
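To make that concrete, here is a minimal sketch of the member copy the
resolution needs. The surrounding function (xfrm4_fill_dst()) and the
neighbouring rt_pmtu line are assumptions about the existing code, not a
quote of the resolved file:

	/* Sketch only: carry the new 'net' member over alongside the other
	 * rtable fields mirrored into the xfrm bundle; the rt_table_id copy
	 * is gone in net-next, so only rt_mtu_locked is added here.
	 */
	xdst->u.rt.rt_pmtu = rt->rt_pmtu;
	xdst->u.rt.rt_mtu_locked = rt->rt_mtu_locked;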
The mlxsw driver conflict consisted of net-next separating
the span code and definitions into separate files, whilst
a 'net' bug fix made some changes to that moved code.
The mlx5 infiniband conflict resolution was quite non-trivial; the RDMA
tree's merge commit was used as a guide here. Here are their notes,
followed by a sketch of the staged init/de-init pattern they refer to:
====================
Bug fixes found by the syzkaller bot were taken into the for-rc branch
after development for the 4.17 merge window had already started being
taken into the for-next branch, which left fairly non-trivial merge
conflicts to resolve between the for-rc branch and the for-next branch.
This merge resolves those conflicts and provides a unified base upon
which ongoing development for 4.17 can be based.
Conflicts:
drivers/infiniband/hw/mlx5/main.c - Commit 42cea83f9524
(IB/mlx5: Fix cleanup order on unload), added to for-rc, and
commit b5ca15ad7e61 (IB/mlx5: Add proper representors support),
added as part of the devel cycle, both needed to modify the
init/de-init functions used by mlx5. To support the new
representors, the new functions added by the cleanup patch
needed to be made non-static, and the init/de-init list
added by the representors patch needed to be modified to
match the init/de-init list changes made by the cleanup
patch.
Updates:
drivers/infiniband/hw/mlx5/mlx5_ib.h - Update function
prototypes added by representors patch to reflect new function
names as changed by cleanup patch
drivers/infiniband/hw/mlx5/ib_rep.c - Update init/de-init
stage list to match new order from cleanup patch
====================
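The "init/de-init list" in the notes above is the mlx5_ib staged profile
table, where each stage pairs an init function with its cleanup
counterpart, and the representors profile in ib_rep.c must mirror the
main profile's stage order. A minimal sketch of that pattern; the stage
and function names are illustrative assumptions drawn from the driver,
not taken from this merge:

	/* Illustrative sketch of the staged init/de-init table; entries
	 * and names are assumptions about the surrounding driver code.
	 */
	static const struct mlx5_ib_profile pf_profile = {
		STAGE_CREATE(MLX5_IB_STAGE_INIT,
			     mlx5_ib_stage_init_init,
			     mlx5_ib_stage_init_cleanup),
		STAGE_CREATE(MLX5_IB_STAGE_CAPS,
			     mlx5_ib_stage_caps_init,
			     NULL),
		/* ... further stages elided ... */
	};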
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel/trace/bpf_trace.c')
-rw-r--r--  kernel/trace/bpf_trace.c  68
1 file changed, 40 insertions(+), 28 deletions(-)
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index c634e093951f..7f9691c86b6e 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -661,7 +661,41 @@ static const struct bpf_func_proto bpf_get_stackid_proto_tp = {
 	.arg3_type	= ARG_ANYTHING,
 };
 
-BPF_CALL_3(bpf_perf_prog_read_value_tp, struct bpf_perf_event_data_kern *, ctx,
+static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
+{
+	switch (func_id) {
+	case BPF_FUNC_perf_event_output:
+		return &bpf_perf_event_output_proto_tp;
+	case BPF_FUNC_get_stackid:
+		return &bpf_get_stackid_proto_tp;
+	default:
+		return tracing_func_proto(func_id);
+	}
+}
+
+static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
+				    struct bpf_insn_access_aux *info)
+{
+	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
+		return false;
+	if (type != BPF_READ)
+		return false;
+	if (off % size != 0)
+		return false;
+
+	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
+	return true;
+}
+
+const struct bpf_verifier_ops tracepoint_verifier_ops = {
+	.get_func_proto  = tp_prog_func_proto,
+	.is_valid_access = tp_prog_is_valid_access,
+};
+
+const struct bpf_prog_ops tracepoint_prog_ops = {
+};
+
+BPF_CALL_3(bpf_perf_prog_read_value, struct bpf_perf_event_data_kern *, ctx,
 	   struct bpf_perf_event_value *, buf, u32, size)
 {
 	int err = -EINVAL;
@@ -678,8 +712,8 @@ clear:
 	return err;
 }
 
-static const struct bpf_func_proto bpf_perf_prog_read_value_proto_tp = {
-	.func		= bpf_perf_prog_read_value_tp,
+static const struct bpf_func_proto bpf_perf_prog_read_value_proto = {
+	.func		= bpf_perf_prog_read_value,
 	.gpl_only	= true,
 	.ret_type	= RET_INTEGER,
 	.arg1_type	= ARG_PTR_TO_CTX,
@@ -687,7 +721,7 @@ static const struct bpf_func_proto bpf_perf_prog_read_value_proto_tp = {
 	.arg3_type	= ARG_CONST_SIZE,
 };
 
-static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
+static const struct bpf_func_proto *pe_prog_func_proto(enum bpf_func_id func_id)
 {
 	switch (func_id) {
 	case BPF_FUNC_perf_event_output:
@@ -695,34 +729,12 @@ static const struct bpf_func_proto *tp_prog_func_proto(enum bpf_func_id func_id)
 	case BPF_FUNC_get_stackid:
 		return &bpf_get_stackid_proto_tp;
 	case BPF_FUNC_perf_prog_read_value:
-		return &bpf_perf_prog_read_value_proto_tp;
+		return &bpf_perf_prog_read_value_proto;
 	default:
 		return tracing_func_proto(func_id);
 	}
 }
 
-static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type,
-				    struct bpf_insn_access_aux *info)
-{
-	if (off < sizeof(void *) || off >= PERF_MAX_TRACE_SIZE)
-		return false;
-	if (type != BPF_READ)
-		return false;
-	if (off % size != 0)
-		return false;
-
-	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
-	return true;
-}
-
-const struct bpf_verifier_ops tracepoint_verifier_ops = {
-	.get_func_proto  = tp_prog_func_proto,
-	.is_valid_access = tp_prog_is_valid_access,
-};
-
-const struct bpf_prog_ops tracepoint_prog_ops = {
-};
-
 static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type,
 				    struct bpf_insn_access_aux *info)
 {
@@ -791,7 +803,7 @@ static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
 }
 
 const struct bpf_verifier_ops perf_event_verifier_ops = {
-	.get_func_proto		= tp_prog_func_proto,
+	.get_func_proto		= pe_prog_func_proto,
 	.is_valid_access	= pe_prog_is_valid_access,
 	.convert_ctx_access	= pe_prog_convert_ctx_access,
 };
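For orientation, the net effect of the bpf_trace.c hunks above is that
tracepoint and perf_event BPF programs now resolve helpers through
separate callbacks, so bpf_perf_prog_read_value (renamed from
bpf_perf_prog_read_value_tp) is only offered to perf_event programs. A
condensed restatement of the resulting ops tables, using only names that
appear in the diff:

	/* End state after the merge, condensed from the hunks above. */
	const struct bpf_verifier_ops tracepoint_verifier_ops = {
		.get_func_proto  = tp_prog_func_proto,   /* no perf_prog_read_value */
		.is_valid_access = tp_prog_is_valid_access,
	};

	const struct bpf_verifier_ops perf_event_verifier_ops = {
		.get_func_proto     = pe_prog_func_proto, /* offers perf_prog_read_value */
		.is_valid_access    = pe_prog_is_valid_access,
		.convert_ctx_access = pe_prog_convert_ctx_access,
	};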