diff options
author | Mauricio Vasquez B <mauricio.vasquez@polito.it> | 2018-10-18 09:16:25 -0400 |
---|---|---|
committer | Alexei Starovoitov <ast@kernel.org> | 2018-10-19 16:24:31 -0400 |
commit | f1a2e44a3aeccb3ff18d3ccc0b0203e70b95bd92 (patch) | |
tree | 454766bd47fa6030b9e60c96da4536413e661fb7 /kernel/bpf/helpers.c | |
parent | 2ea864c58f19bf70a0e2415f9f1c53814e07f1b4 (diff) |
bpf: add queue and stack maps
Queue/stack maps implement a FIFO/LIFO data storage for ebpf programs.
These maps support peek, pop and push operations that are exposed to eBPF
programs through the new bpf_map[peek/pop/push] helpers. Those operations
are exposed to userspace applications through the already existing
syscalls in the following way:
BPF_MAP_LOOKUP_ELEM -> peek
BPF_MAP_LOOKUP_AND_DELETE_ELEM -> pop
BPF_MAP_UPDATE_ELEM -> push
Queue/stack maps are implemented using a buffer, tail and head indexes,
hence BPF_F_NO_PREALLOC is not supported.
As opposed to other maps, queue and stack do not use RCU for protecting
map values; the bpf_map[peek/pop] helpers have an ARG_PTR_TO_UNINIT_MAP_VALUE
argument that is a pointer to a memory zone where the value of the map
is saved. Basically the same as ARG_PTR_TO_UNINIT_MEM, but the size does
not have to be passed as an extra argument.
Our main motivation for implementing queue/stack maps was to keep track
of a pool of elements, like network ports in a SNAT; however we foresee
other use cases, like for example saving the last N kernel events in a map
and then analysing them from userspace.
Signed-off-by: Mauricio Vasquez B <mauricio.vasquez@polito.it>
Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'kernel/bpf/helpers.c')
-rw-r--r-- | kernel/bpf/helpers.c | 43 |
1 files changed, 43 insertions, 0 deletions
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 6502115e8f55..ab0d5e3f9892 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c | |||
@@ -76,6 +76,49 @@ const struct bpf_func_proto bpf_map_delete_elem_proto = { | |||
76 | .arg2_type = ARG_PTR_TO_MAP_KEY, | 76 | .arg2_type = ARG_PTR_TO_MAP_KEY, |
77 | }; | 77 | }; |
78 | 78 | ||
79 | BPF_CALL_3(bpf_map_push_elem, struct bpf_map *, map, void *, value, u64, flags) | ||
80 | { | ||
81 | return map->ops->map_push_elem(map, value, flags); | ||
82 | } | ||
83 | |||
84 | const struct bpf_func_proto bpf_map_push_elem_proto = { | ||
85 | .func = bpf_map_push_elem, | ||
86 | .gpl_only = false, | ||
87 | .pkt_access = true, | ||
88 | .ret_type = RET_INTEGER, | ||
89 | .arg1_type = ARG_CONST_MAP_PTR, | ||
90 | .arg2_type = ARG_PTR_TO_MAP_VALUE, | ||
91 | .arg3_type = ARG_ANYTHING, | ||
92 | }; | ||
93 | |||
94 | BPF_CALL_2(bpf_map_pop_elem, struct bpf_map *, map, void *, value) | ||
95 | { | ||
96 | return map->ops->map_pop_elem(map, value); | ||
97 | } | ||
98 | |||
99 | const struct bpf_func_proto bpf_map_pop_elem_proto = { | ||
100 | .func = bpf_map_pop_elem, | ||
101 | .gpl_only = false, | ||
102 | .pkt_access = true, | ||
103 | .ret_type = RET_INTEGER, | ||
104 | .arg1_type = ARG_CONST_MAP_PTR, | ||
105 | .arg2_type = ARG_PTR_TO_UNINIT_MAP_VALUE, | ||
106 | }; | ||
107 | |||
108 | BPF_CALL_2(bpf_map_peek_elem, struct bpf_map *, map, void *, value) | ||
109 | { | ||
110 | return map->ops->map_peek_elem(map, value); | ||
111 | } | ||
112 | |||
113 | const struct bpf_func_proto bpf_map_peek_elem_proto = { | ||
114 | .func = bpf_map_pop_elem, | ||
115 | .gpl_only = false, | ||
116 | .pkt_access = true, | ||
117 | .ret_type = RET_INTEGER, | ||
118 | .arg1_type = ARG_CONST_MAP_PTR, | ||
119 | .arg2_type = ARG_PTR_TO_UNINIT_MAP_VALUE, | ||
120 | }; | ||
121 | |||
79 | const struct bpf_func_proto bpf_get_prandom_u32_proto = { | 122 | const struct bpf_func_proto bpf_get_prandom_u32_proto = { |
80 | .func = bpf_user_rnd_u32, | 123 | .func = bpf_user_rnd_u32, |
81 | .gpl_only = false, | 124 | .gpl_only = false, |