diff options
author | Martin KaFai Lau <kafai@fb.com> | 2017-03-22 13:00:34 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2017-03-22 18:45:45 -0400 |
commit | bcc6b1b7ebf857a9fe56202e2be3361131588c15 (patch) | |
tree | bc23c28ffe4866226ca8d768f67e74ddd9721076 /kernel/bpf/syscall.c | |
parent | 56f668dfe00dcf086734f1c42ea999398fad6572 (diff) |
bpf: Add hash of maps support
This patch adds hash of maps support (hashmap->bpf_map).
BPF_MAP_TYPE_HASH_OF_MAPS is added.
A map-in-map contains a pointer to another map; let's call
this pointer 'inner_map_ptr'.
Notes on deleting inner_map_ptr from a hash map:
1. For BPF_F_NO_PREALLOC map-in-map, when deleting
an inner_map_ptr, the htab_elem itself will go through
a rcu grace period and the inner_map_ptr resides
in the htab_elem.
2. For pre-allocated htab_elem (!BPF_F_NO_PREALLOC),
when deleting an inner_map_ptr, the htab_elem may
get reused immediately. This situation is similar
to the existing preallocated use cases.
However, the bpf_map_fd_put_ptr() calls bpf_map_put() which calls
inner_map->ops->map_free(inner_map) which will go
through a rcu grace period (i.e. all bpf_map's map_free
currently goes through a rcu grace period). Hence,
the inner_map_ptr is still safe for the rcu reader side.
This patch also includes BPF_MAP_TYPE_HASH_OF_MAPS to the
check_map_prealloc() in the verifier. Preallocation is a
must for BPF_PROG_TYPE_PERF_EVENT. Hence, even though we don't expect
heavy updates to map-in-map, enforcing BPF_F_NO_PREALLOC for map-in-map
is impossible without disallowing BPF_PROG_TYPE_PERF_EVENT from using
map-in-map first.
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel/bpf/syscall.c')
-rw-r--r-- | kernel/bpf/syscall.c | 8 |
1 files changed, 7 insertions, 1 deletions
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index 6e24fdf1f373..c35ebfe6d84d 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c | |||
@@ -352,7 +352,8 @@ static int map_lookup_elem(union bpf_attr *attr) | |||
352 | err = bpf_percpu_array_copy(map, key, value); | 352 | err = bpf_percpu_array_copy(map, key, value); |
353 | } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) { | 353 | } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) { |
354 | err = bpf_stackmap_copy(map, key, value); | 354 | err = bpf_stackmap_copy(map, key, value); |
355 | } else if (map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) { | 355 | } else if (map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS || |
356 | map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { | ||
356 | err = -ENOTSUPP; | 357 | err = -ENOTSUPP; |
357 | } else { | 358 | } else { |
358 | rcu_read_lock(); | 359 | rcu_read_lock(); |
@@ -446,6 +447,11 @@ static int map_update_elem(union bpf_attr *attr) | |||
446 | err = bpf_fd_array_map_update_elem(map, f.file, key, value, | 447 | err = bpf_fd_array_map_update_elem(map, f.file, key, value, |
447 | attr->flags); | 448 | attr->flags); |
448 | rcu_read_unlock(); | 449 | rcu_read_unlock(); |
450 | } else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) { | ||
451 | rcu_read_lock(); | ||
452 | err = bpf_fd_htab_map_update_elem(map, f.file, key, value, | ||
453 | attr->flags); | ||
454 | rcu_read_unlock(); | ||
449 | } else { | 455 | } else { |
450 | rcu_read_lock(); | 456 | rcu_read_lock(); |
451 | err = map->ops->map_update_elem(map, key, value, attr->flags); | 457 | err = map->ops->map_update_elem(map, key, value, attr->flags); |