about summary refs log tree commit diff stats
path: root/kernel/bpf/arraymap.c
diff options
context:
space:
mode:
authorAlexei Starovoitov <ast@kernel.org>2019-01-31 18:40:09 -0500
committerDaniel Borkmann <daniel@iogearbox.net>2019-02-01 14:55:39 -0500
commit96049f3afd50fe8db69fa0068cdca822e747b1e4 (patch)
treeb082ac077ea0bb78a073a25e540be72034ce0451 /kernel/bpf/arraymap.c
parentab963beb9f5db303b4fd7e34e422b96270e5b972 (diff)
bpf: introduce BPF_F_LOCK flag
Introduce BPF_F_LOCK flag for map_lookup and map_update syscall commands and for map_update() helper function. In all these cases take a lock of existing element (which was provided in BTF description) before copying (in or out) the rest of map value. Implementation details that are part of uapi: Array: The array map takes the element lock for lookup/update. Hash: hash map also takes the lock for lookup/update and tries to avoid the bucket lock. If old element exists it takes the element lock and updates the element in place. If element doesn't exist it allocates new one and inserts into hash table while holding the bucket lock. In rare case the hashmap has to take both the bucket lock and the element lock to update old value in place. Cgroup local storage: It is similar to array. update in place and lookup are done with lock taken. Signed-off-by: Alexei Starovoitov <ast@kernel.org> Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Diffstat (limited to 'kernel/bpf/arraymap.c')
-rw-r--r--kernel/bpf/arraymap.c24
1 file changed, 16 insertions(+), 8 deletions(-)
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index d6d979910a2a..c72e0d8e1e65 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -253,8 +253,9 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
253{ 253{
254 struct bpf_array *array = container_of(map, struct bpf_array, map); 254 struct bpf_array *array = container_of(map, struct bpf_array, map);
255 u32 index = *(u32 *)key; 255 u32 index = *(u32 *)key;
256 char *val;
256 257
257 if (unlikely(map_flags > BPF_EXIST)) 258 if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
258 /* unknown flags */ 259 /* unknown flags */
259 return -EINVAL; 260 return -EINVAL;
260 261
@@ -262,18 +263,25 @@ static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
262 /* all elements were pre-allocated, cannot insert a new one */ 263 /* all elements were pre-allocated, cannot insert a new one */
263 return -E2BIG; 264 return -E2BIG;
264 265
265 if (unlikely(map_flags == BPF_NOEXIST)) 266 if (unlikely(map_flags & BPF_NOEXIST))
266 /* all elements already exist */ 267 /* all elements already exist */
267 return -EEXIST; 268 return -EEXIST;
268 269
269 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) 270 if (unlikely((map_flags & BPF_F_LOCK) &&
271 !map_value_has_spin_lock(map)))
272 return -EINVAL;
273
274 if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
270 memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]), 275 memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
271 value, map->value_size); 276 value, map->value_size);
272 else 277 } else {
273 copy_map_value(map, 278 val = array->value +
274 array->value + 279 array->elem_size * (index & array->index_mask);
275 array->elem_size * (index & array->index_mask), 280 if (map_flags & BPF_F_LOCK)
276 value); 281 copy_map_value_locked(map, val, value, false);
282 else
283 copy_map_value(map, val, value);
284 }
277 return 0; 285 return 0;
278} 286}
279 287