author		Mauricio Vasquez B <mauricio.vasquez@polito.it>	2018-10-18 09:16:25 -0400
committer	Alexei Starovoitov <ast@kernel.org>	2018-10-19 16:24:31 -0400
commit		f1a2e44a3aeccb3ff18d3ccc0b0203e70b95bd92 (patch)
tree		454766bd47fa6030b9e60c96da4536413e661fb7 /kernel/bpf/queue_stack_maps.c
parent		2ea864c58f19bf70a0e2415f9f1c53814e07f1b4 (diff)
bpf: add queue and stack maps
Queue/stack maps implement FIFO/LIFO data storage for eBPF programs.
These maps support peek, pop and push operations that are exposed to eBPF
programs through the new bpf_map[peek/pop/push] helpers. The same
operations are exposed to userspace applications through the already
existing syscall commands in the following way:
BPF_MAP_LOOKUP_ELEM -> peek
BPF_MAP_LOOKUP_AND_DELETE_ELEM -> pop
BPF_MAP_UPDATE_ELEM -> push
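As an illustration, a minimal userspace sketch of this mapping, assuming
the libbpf syscall wrappers; the function demo_queue and the
already-created map_fd are hypothetical:

#include <bpf/bpf.h>

/* map_fd refers to a BPF_MAP_TYPE_QUEUE with value_size == sizeof(__u32);
 * the key argument is always NULL because these maps have no keys.
 */
int demo_queue(int map_fd)
{
	__u32 val = 1234, out;

	/* push: BPF_MAP_UPDATE_ELEM */
	if (bpf_map_update_elem(map_fd, NULL, &val, 0))
		return -1;

	/* peek: BPF_MAP_LOOKUP_ELEM copies the front element, keeps it */
	if (bpf_map_lookup_elem(map_fd, NULL, &out))
		return -1;

	/* pop: BPF_MAP_LOOKUP_AND_DELETE_ELEM copies and removes it */
	return bpf_map_lookup_and_delete_elem(map_fd, NULL, &out);
}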
Queue/stack maps are implemented using a circular buffer with head and
tail indexes; hence BPF_F_NO_PREALLOC is not supported.
Unlike other maps, queue and stack maps do not use RCU for protecting
map values; the bpf_map[peek/pop] helpers take an
ARG_PTR_TO_UNINIT_MAP_VALUE argument, a pointer to a memory region where
the map value is copied. It is basically the same as
ARG_PTR_TO_UNINIT_MEM, except that the size does not have to be passed
as an extra argument, since it is known from the map's value_size.
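As a program-side sketch of the new helpers, using today's libbpf
map-declaration syntax rather than the struct bpf_map_def style
contemporary with this series; the map port_pool and the XDP hook are
illustrative only:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);
	__uint(max_entries, 256);
	__uint(value_size, sizeof(__u16));
} port_pool SEC(".maps");

SEC("xdp")
int grab_port(struct xdp_md *ctx)
{
	__u16 port;

	/* pop: take the next free port; fails when the pool is empty */
	if (bpf_map_pop_elem(&port_pool, &port))
		return XDP_PASS;

	/* ... use the port, then hand it back ... */

	/* push: passing BPF_EXIST instead of 0 would overwrite the
	 * oldest entry when the map is full
	 */
	bpf_map_push_elem(&port_pool, &port, 0);
	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";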
Our main motivation for implementing queue/stack maps was to keep track
of a pool of elements, like network ports in a SNAT, but we foresee
other use cases, such as saving the last N kernel events in a map and
then analysing them from userspace.
Signed-off-by: Mauricio Vasquez B <mauricio.vasquez@polito.it>
Acked-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'kernel/bpf/queue_stack_maps.c')
-rw-r--r--	kernel/bpf/queue_stack_maps.c	288
1 file changed, 288 insertions, 0 deletions
diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
new file mode 100644
index 000000000000..12a93fb37449
--- /dev/null
+++ b/kernel/bpf/queue_stack_maps.c
@@ -0,0 +1,288 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * queue_stack_maps.c: BPF queue and stack maps
 *
 * Copyright (c) 2018 Politecnico di Torino
 */
#include <linux/bpf.h>
#include <linux/list.h>
#include <linux/slab.h>
#include "percpu_freelist.h"

#define QUEUE_STACK_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_RDONLY | BPF_F_WRONLY)


struct bpf_queue_stack {
	struct bpf_map map;
	raw_spinlock_t lock;
	u32 head, tail;
	u32 size; /* max_entries + 1 */

	char elements[0] __aligned(8);
};

static struct bpf_queue_stack *bpf_queue_stack(struct bpf_map *map)
{
	return container_of(map, struct bpf_queue_stack, map);
}

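/* One slot of the ring is always left unused: with size = max_entries + 1,
 * head == tail unambiguously means "empty" and (head + 1) % size == tail
 * means "full", so no separate element count is needed.
 */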
static bool queue_stack_map_is_empty(struct bpf_queue_stack *qs)
{
	return qs->head == qs->tail;
}

static bool queue_stack_map_is_full(struct bpf_queue_stack *qs)
{
	u32 head = qs->head + 1;

	if (unlikely(head >= qs->size))
		head = 0;

	return head == qs->tail;
}

/* Called from syscall */
static int queue_stack_map_alloc_check(union bpf_attr *attr)
{
	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 0 ||
	    attr->map_flags & ~QUEUE_STACK_CREATE_FLAG_MASK)
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
{
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	struct bpf_queue_stack *qs;
	u32 size, value_size;
	u64 queue_size, cost;

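	/* One extra slot beyond max_entries backs the empty/full convention
	 * above; the allocation size is checked against the caller's
	 * RLIMIT_MEMLOCK in page units before any memory is allocated.
	 */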
	size = attr->max_entries + 1;
	value_size = attr->value_size;

	queue_size = sizeof(*qs) + (u64) value_size * size;

	cost = queue_size;
	if (cost >= U32_MAX - PAGE_SIZE)
		return ERR_PTR(-E2BIG);

	cost = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;

	ret = bpf_map_precharge_memlock(cost);
	if (ret < 0)
		return ERR_PTR(ret);

	qs = bpf_map_area_alloc(queue_size, numa_node);
	if (!qs)
		return ERR_PTR(-ENOMEM);

	memset(qs, 0, sizeof(*qs));

	bpf_map_init_from_attr(&qs->map, attr);

	qs->map.pages = cost;
	qs->size = size;

	raw_spin_lock_init(&qs->lock);

	return &qs->map;
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void queue_stack_map_free(struct bpf_map *map)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);

	/* at this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the programs (can be more than one that used this map) were
	 * disconnected from events. Wait for outstanding critical sections in
	 * these programs to complete
	 */
	synchronize_rcu();

	bpf_map_area_free(qs);
}

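/* A raw spinlock with interrupts disabled serializes all producers and
 * consumers below; BPF programs may invoke these helpers from contexts
 * where sleeping locks are not allowed.
 */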
static int __queue_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		err = -ENOENT;
		goto out;
	}

	ptr = &qs->elements[qs->tail * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete) {
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}


static int __stack_map_get(struct bpf_map *map, void *value, bool delete)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long flags;
	int err = 0;
	void *ptr;
	u32 index;

	raw_spin_lock_irqsave(&qs->lock, flags);

	if (queue_stack_map_is_empty(qs)) {
		err = -ENOENT;
		goto out;
	}

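	/* head points at the next free slot, so the top of the stack is at
	 * head - 1; when head is 0 the unsigned subtraction wraps far above
	 * qs->size and is folded back to the last slot of the ring.
	 */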
	index = qs->head - 1;
	if (unlikely(index >= qs->size))
		index = qs->size - 1;

	ptr = &qs->elements[index * qs->map.value_size];
	memcpy(value, ptr, qs->map.value_size);

	if (delete)
		qs->head = index;

out:
	raw_spin_unlock_irqrestore(&qs->lock, flags);
	return err;
}

/* Called from syscall or from eBPF program */
static int queue_map_peek_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int stack_map_peek_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, false);
}

/* Called from syscall or from eBPF program */
static int queue_map_pop_elem(struct bpf_map *map, void *value)
{
	return __queue_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int stack_map_pop_elem(struct bpf_map *map, void *value)
{
	return __stack_map_get(map, value, true);
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_push_elem(struct bpf_map *map, void *value,
				     u64 flags)
{
	struct bpf_queue_stack *qs = bpf_queue_stack(map);
	unsigned long irq_flags;
	int err = 0;
	void *dst;

	/* BPF_EXIST is used to force making room for a new element in case the
	 * map is full
	 */
	bool replace = (flags & BPF_EXIST);

	/* Check supported flags for queue and stack maps */
	if (flags & BPF_NOEXIST || flags > BPF_EXIST)
		return -EINVAL;

	raw_spin_lock_irqsave(&qs->lock, irq_flags);

	if (queue_stack_map_is_full(qs)) {
		if (!replace) {
			err = -E2BIG;
			goto out;
		}
		/* advance tail pointer to overwrite oldest element */
		if (unlikely(++qs->tail >= qs->size))
			qs->tail = 0;
	}

	dst = &qs->elements[qs->head * qs->map.value_size];
	memcpy(dst, value, qs->map.value_size);

	if (unlikely(++qs->head >= qs->size))
		qs->head = 0;

out:
	raw_spin_unlock_irqrestore(&qs->lock, irq_flags);
	return err;
}

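/* Queue/stack maps have no keys, so the generic key-based operations below
 * are rejected; the syscall commands listed in the commit message are routed
 * to the push/pop/peek callbacks wired up in the ops tables instead.
 */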
/* Called from syscall or from eBPF program */
static void *queue_stack_map_lookup_elem(struct bpf_map *map, void *key)
{
	return NULL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_update_elem(struct bpf_map *map, void *key,
				       void *value, u64 flags)
{
	return -EINVAL;
}

/* Called from syscall or from eBPF program */
static int queue_stack_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

/* Called from syscall */
static int queue_stack_map_get_next_key(struct bpf_map *map, void *key,
					void *next_key)
{
	return -EINVAL;
}

const struct bpf_map_ops queue_map_ops = {
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = queue_map_pop_elem,
	.map_peek_elem = queue_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
};

const struct bpf_map_ops stack_map_ops = {
	.map_alloc_check = queue_stack_map_alloc_check,
	.map_alloc = queue_stack_map_alloc,
	.map_free = queue_stack_map_free,
	.map_lookup_elem = queue_stack_map_lookup_elem,
	.map_update_elem = queue_stack_map_update_elem,
	.map_delete_elem = queue_stack_map_delete_elem,
	.map_push_elem = queue_stack_map_push_elem,
	.map_pop_elem = stack_map_pop_elem,
	.map_peek_elem = stack_map_peek_elem,
	.map_get_next_key = queue_stack_map_get_next_key,
};