author    David S. Miller <davem@davemloft.net>  2017-12-27 20:40:32 -0500
committer David S. Miller <davem@davemloft.net>  2017-12-27 20:40:32 -0500
commit    fcffe2edbd390cad499b27d20512ef000d7ecf54 (patch)
tree      13120e1efcf0ad226785b721f4b38272ffdd2028 /include
parent    4f83435ad777358d9cdc138868feebbe2a23f577 (diff)
parent    624588d9d6cc0a1a270a65fb4d5220f1ceddcf38 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/bpf/bpf-next
Daniel Borkmann says:

====================
pull-request: bpf-next 2017-12-28

The following pull-request contains BPF updates for your *net-next* tree.

The main changes are:

1) Fix incorrect state pruning related to recognition of zero initialized
   stack slots, where stacksafe exploration would mistakenly return a
   positive pruning verdict too early ignoring other slots, from Gianluca.

2) Various BPF to BPF calls related follow-up fixes. Fix an off-by-one
   in maximum call depth check, and rework maximum stack depth tracking
   logic to fix a bypass of the total stack size check reported by Jann.
   Also fix a bug in arm64 JIT where prog->jited_len was uninitialized.
   Addition of various test cases to BPF selftests, from Alexei.

3) Addition of a BPF selftest to test_verifier that is related to BPF to
   BPF calls which demonstrates a late caller stack size increase and
   thus out of bounds access. Fixed above in 2). Test case from Jann.

4) Addition of correlating BPF helper calls, BPF to BPF calls as well as
   BPF maps to bpftool xlated dump in order to allow for better BPF
   program introspection and debugging, from Daniel.

5) Fixing several bugs in BPF to BPF calls kallsyms handling in order to
   get it actually to work for subprogs, from Daniel.

6) Extending sparc64 JIT support for BPF to BPF calls and fix a couple
   of build errors for libbpf on sparc64, from David.

7) Allow narrower context access for BPF dev cgroup typed programs in
   order to adapt to LLVM code generation. Also adjust memlock rlimit in
   the test_dev_cgroup BPF selftest, from Yonghong.

8) Add netdevsim Kconfig entry to BPF selftests since test_offload.py
   relies on netdevsim device being available, from Jakub.

9) Reduce scope of xdp_do_generic_redirect_map() to being static, from
   Xiongwei.

10) Minor cleanups and spelling fixes in BPF verifier, from Colin.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include')
-rw-r--r--  include/linux/bpf_verifier.h  1
-rw-r--r--  include/linux/filter.h        9
-rw-r--r--  include/uapi/linux/bpf.h      3
3 files changed, 12 insertions, 1 deletion
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index c009e472f647..883a35d50cd5 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -194,6 +194,7 @@ struct bpf_verifier_env {
 	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
 	struct bpf_verifer_log log;
 	u32 subprog_starts[BPF_MAX_SUBPROGS];
+	/* computes the stack depth of each bpf function */
 	u16 subprog_stack_depth[BPF_MAX_SUBPROGS + 1];
 	u32 subprog_cnt;
 };
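The per-function stack depths recorded above feed the reworked total stack size check mentioned in item 2) of the pull request: the verifier has to bound the stack usage of an entire call chain, not just of one function. What follows is a minimal, illustrative sketch of that idea only; it is not the verifier's actual algorithm, and the linear chain[]/chain_len description of a single call path is an assumption made for brevity (the real code derives the paths from the call graph). MAX_BPF_STACK is the existing 512-byte limit from linux/filter.h.

/* Illustrative only: a call chain is acceptable if the summed stack
 * depth of every frame on it stays within MAX_BPF_STACK.
 * subprog_stack_depth[] corresponds to the array added above;
 * chain[]/chain_len are hypothetical inputs naming the subprograms on
 * one call path, caller first.
 */
static bool stack_chain_within_limit(const u16 *subprog_stack_depth,
				     const int *chain, int chain_len)
{
	int depth = 0;
	int i;

	for (i = 0; i < chain_len; i++) {
		depth += subprog_stack_depth[chain[i]];
		if (depth > MAX_BPF_STACK)
			return false;
	}
	return true;
}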
diff --git a/include/linux/filter.h b/include/linux/filter.h
index e872b4ebaa57..2b0df2703671 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -18,6 +18,7 @@
 #include <linux/capability.h>
 #include <linux/cryptohash.h>
 #include <linux/set_memory.h>
+#include <linux/kallsyms.h>
 
 #include <net/sch_generic.h>
 
@@ -724,6 +725,14 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog);
 void bpf_jit_compile(struct bpf_prog *prog);
 bool bpf_helper_changes_pkt_data(void *func);
 
+static inline bool bpf_dump_raw_ok(void)
+{
+	/* Reconstruction of call-sites is dependent on kallsyms,
+	 * thus make dump the same restriction.
+	 */
+	return kallsyms_show_value() == 1;
+}
+
 struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
 				       const struct bpf_insn *patch, u32 len);
 
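The new bpf_dump_raw_ok() helper above ties raw dumps to kallsyms_show_value(), i.e. to whether the requesting context is allowed to see kernel addresses at all. A hedged sketch of the intended caller pattern follows; fill_dump_insn() and its sanitization policy are illustrative assumptions written against the kernel's struct bpf_insn from linux/filter.h, not the actual syscall-side dump code.

/* Illustrative: copy an instruction into a dump buffer, but clear the
 * call target immediate when the requester must not see raw values.
 */
static void fill_dump_insn(struct bpf_insn *dst, const struct bpf_insn *src)
{
	*dst = *src;
	if (!bpf_dump_raw_ok() && dst->code == (BPF_JMP | BPF_CALL))
		dst->imm = 0;	/* hide the call destination */
}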
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index d01f1cb3cfc0..69eabfcb9bdb 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1012,7 +1012,8 @@ struct bpf_perf_event_value {
 #define BPF_DEVCG_DEV_CHAR	(1ULL << 1)
 
 struct bpf_cgroup_dev_ctx {
-	__u32 access_type; /* (access << 16) | type */
+	/* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
+	__u32 access_type;
 	__u32 major;
 	__u32 minor;
 };
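The clarified encoding comment above is what a BPF_PROG_TYPE_CGROUP_DEVICE program unpacks on every device access, and the narrower context access from item 7) of the pull request is what allows LLVM to emit the resulting sub-word loads of access_type. The sketch below is a made-up example policy, not a program from this series; only the bit split follows the header comment, and turning it into a loadable program would additionally need the usual clang/LLVM BPF build and a cgroup/dev section annotation.

#include <linux/bpf.h>

/* Illustrative cgroup/dev filter: the low 16 bits of access_type carry
 * the device type, the high 16 bits the requested access mask.
 * Example policy: permit only read/write on character devices.
 * Return 1 to allow the access, 0 to deny it.
 */
int dev_access_filter(struct bpf_cgroup_dev_ctx *ctx)
{
	unsigned short type = ctx->access_type & 0xFFFF;
	unsigned short access = ctx->access_type >> 16;

	if (type != BPF_DEVCG_DEV_CHAR)
		return 0;
	if (access & ~(BPF_DEVCG_ACC_READ | BPF_DEVCG_ACC_WRITE))
		return 0;
	return 1;
}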