summaryrefslogtreecommitdiffstats
path: root/kernel/bpf/syscall.c
diff options
context:
space:
mode:
authorJiong Wang <jiong.wang@netronome.com>2019-05-24 18:25:16 -0400
committerAlexei Starovoitov <ast@kernel.org>2019-05-24 21:58:37 -0400
commitc240eff63a1cf1c4edc768e0cfc374811c02f069 (patch)
treeff7a2b44d31ef19583b4ea657219866c4382887f /kernel/bpf/syscall.c
parenta4b1d3c1ddf6cb441187b6c130a473c16a05a356 (diff)
bpf: introduce new bpf prog load flags "BPF_F_TEST_RND_HI32"
x86_64 and AArch64 are perhaps the two arches that run the bpf testsuite most frequently; however, the zero extension insertion pass is not enabled for them because of their hardware support. It is critical to guarantee the correctness of this pass, as it is supposed to be enabled by default for a couple of other arches, for example PowerPC, SPARC, arm, NFP etc. Therefore, it would be very useful if there were a way to test this pass on, for example, x86_64. The test methodology employed by this set is "poisoning" useless bits. The high 32 bits of a definition are randomized if the definition is identified as not used by any later insn. Such randomization is only enabled under testing mode, which is gated by the new bpf prog load flag "BPF_F_TEST_RND_HI32". Suggested-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Jiong Wang <jiong.wang@netronome.com> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'kernel/bpf/syscall.c')
-rw-r--r--kernel/bpf/syscall.c4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index cb5440b02e82..3d546b6f4646 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1604,7 +1604,9 @@ static int bpf_prog_load(union bpf_attr *attr, union bpf_attr __user *uattr)
 	if (CHECK_ATTR(BPF_PROG_LOAD))
 		return -EINVAL;
 
-	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT | BPF_F_ANY_ALIGNMENT))
+	if (attr->prog_flags & ~(BPF_F_STRICT_ALIGNMENT |
+				 BPF_F_ANY_ALIGNMENT |
+				 BPF_F_TEST_RND_HI32))
 		return -EINVAL;
 
 	if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&