summaryrefslogtreecommitdiffstats
path: root/kernel/bpf
diff options
context:
space:
mode:
authorDaniel Borkmann <daniel@iogearbox.net>2015-10-07 19:20:39 -0400
committerDavid S. Miller <davem@davemloft.net>2015-10-08 08:26:39 -0400
commit3ad0040573b0c00f88488bc31958acd07a55ee2e (patch)
tree3afa13e8acbdf49a9f8c1a7993065c3836997a01 /kernel/bpf
parent897ece56e714a2cc64e6914cb89a362d7021b36e (diff)
bpf: split state from prandom_u32() and consolidate {c, e}BPF prngs
While recently arguing on a seccomp discussion that raw prandom_u32() access shouldn't be exposed to unprivileged user space, I forgot the fact that SKF_AD_RANDOM extension actually already does it for some time in cBPF via commit 4cd3675ebf74 ("filter: added BPF random opcode"). Since prandom_u32() is being used in a lot of critical networking code, let's be more conservative and split their states. Furthermore, consolidate eBPF and cBPF prandom handlers to use the new internal PRNG. For eBPF, bpf_get_prandom_u32() was only accessible for privileged users, but should that change one day, we also don't want to leak raw sequences through things like eBPF maps. One thought was also to have own per bpf_prog states, but due to ABI reasons this is not easily possible, i.e. the program code currently cannot access bpf_prog itself, and copying the rnd_state to/from the stack scratch space whenever a program uses the prng seems not really worth the trouble and seems too hacky. If needed, taus113 could in such cases be implemented within eBPF using a map entry to keep the state space, or get_random_bytes() could become a second helper in cases where performance would not be critical. Both sides can trigger a one-time late init via prandom_init_once() on the shared state. Performance-wise, there should even be a tiny gain as bpf_user_rnd_u32() saves one function call. The PRNG needs to live inside the BPF core since kernels could have a NET-less config as well. Signed-off-by: Daniel Borkmann <daniel@iogearbox.net> Acked-by: Hannes Frederic Sowa <hannes@stressinduktion.org> Acked-by: Alexei Starovoitov <ast@plumgrid.com> Cc: Chema Gonzalez <chema@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel/bpf')
-rw-r--r--kernel/bpf/core.c26
-rw-r--r--kernel/bpf/helpers.c7
-rw-r--r--kernel/bpf/syscall.c2
3 files changed, 29 insertions, 6 deletions
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index c8855c2a7a48..80864712d2c4 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -731,6 +731,32 @@ void bpf_prog_free(struct bpf_prog *fp)
731} 731}
732EXPORT_SYMBOL_GPL(bpf_prog_free); 732EXPORT_SYMBOL_GPL(bpf_prog_free);
733 733
734/* RNG for unpriviledged user space with separated state from prandom_u32(). */
735static DEFINE_PER_CPU(struct rnd_state, bpf_user_rnd_state);
736
737void bpf_user_rnd_init_once(void)
738{
739 prandom_init_once(&bpf_user_rnd_state);
740}
741
742u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
743{
744 /* Should someone ever have the rather unwise idea to use some
745 * of the registers passed into this function, then note that
746 * this function is called from native eBPF and classic-to-eBPF
747 * transformations. Register assignments from both sides are
748 * different, f.e. classic always sets fn(ctx, A, X) here.
749 */
750 struct rnd_state *state;
751 u32 res;
752
753 state = &get_cpu_var(bpf_user_rnd_state);
754 res = prandom_u32_state(state);
755 put_cpu_var(state);
756
757 return res;
758}
759
734/* Weak definitions of helper functions in case we don't have bpf syscall. */ 760/* Weak definitions of helper functions in case we don't have bpf syscall. */
735const struct bpf_func_proto bpf_map_lookup_elem_proto __weak; 761const struct bpf_func_proto bpf_map_lookup_elem_proto __weak;
736const struct bpf_func_proto bpf_map_update_elem_proto __weak; 762const struct bpf_func_proto bpf_map_update_elem_proto __weak;
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 1447ec09421e..4504ca66118d 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -93,13 +93,8 @@ const struct bpf_func_proto bpf_map_delete_elem_proto = {
93 .arg2_type = ARG_PTR_TO_MAP_KEY, 93 .arg2_type = ARG_PTR_TO_MAP_KEY,
94}; 94};
95 95
96static u64 bpf_get_prandom_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
97{
98 return prandom_u32();
99}
100
101const struct bpf_func_proto bpf_get_prandom_u32_proto = { 96const struct bpf_func_proto bpf_get_prandom_u32_proto = {
102 .func = bpf_get_prandom_u32, 97 .func = bpf_user_rnd_u32,
103 .gpl_only = false, 98 .gpl_only = false,
104 .ret_type = RET_INTEGER, 99 .ret_type = RET_INTEGER,
105}; 100};
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 5f35f420c12f..c868cafbc00c 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -404,6 +404,8 @@ static void fixup_bpf_calls(struct bpf_prog *prog)
404 404
405 if (insn->imm == BPF_FUNC_get_route_realm) 405 if (insn->imm == BPF_FUNC_get_route_realm)
406 prog->dst_needed = 1; 406 prog->dst_needed = 1;
407 if (insn->imm == BPF_FUNC_get_prandom_u32)
408 bpf_user_rnd_init_once();
407 if (insn->imm == BPF_FUNC_tail_call) { 409 if (insn->imm == BPF_FUNC_tail_call) {
408 /* mark bpf_tail_call as different opcode 410 /* mark bpf_tail_call as different opcode
409 * to avoid conditional branch in 411 * to avoid conditional branch in