author     Martin KaFai Lau <kafai@fb.com>         2017-08-18 14:28:00 -0400
committer  David S. Miller <davem@davemloft.net>   2017-08-20 00:35:43 -0400
commit     96eabe7a40aa17e613cf3db2c742ee8b1fc764d0
tree       6364b3b63eeb2707e74c3bd770b01342016d936a /kernel/bpf/stackmap.c
parent     bd76b87962833f6e55264030a227be0f090b1286
bpf: Allow selecting numa node during map creation
The current map creation API does not allow providing a numa-node
preference: the memory usually comes from the node where the
map-creating process is running. Performance is not ideal if the
bpf_prog is known to always run on a numa node different from that of
the map-creating process.
One use case is sharding by CPU across different LRU maps (i.e.
an array of LRU maps). Here is the result of map_perf_test on
the INNER_LRU_HASH_PREALLOC test if we force the LRU map used by
CPU0 to be allocated from a remote numa node:
[ The machine has 20 cores. CPU0-9 at node 0. CPU10-19 at node 1 ]
# taskset -c 10 ./map_perf_test 512 8 1260000 8000000
5:inner_lru_hash_map_perf pre-alloc 1628380 events per sec
4:inner_lru_hash_map_perf pre-alloc 1626396 events per sec
3:inner_lru_hash_map_perf pre-alloc 1626144 events per sec
6:inner_lru_hash_map_perf pre-alloc 1621657 events per sec
2:inner_lru_hash_map_perf pre-alloc 1621534 events per sec
1:inner_lru_hash_map_perf pre-alloc 1620292 events per sec
7:inner_lru_hash_map_perf pre-alloc 1613305 events per sec
0:inner_lru_hash_map_perf pre-alloc 1239150 events per sec #<<<
After specifying numa node:
# taskset -c 10 ./map_perf_test 512 8 1260000 8000000
5:inner_lru_hash_map_perf pre-alloc 1629627 events per sec
3:inner_lru_hash_map_perf pre-alloc 1628057 events per sec
1:inner_lru_hash_map_perf pre-alloc 1623054 events per sec
6:inner_lru_hash_map_perf pre-alloc 1616033 events per sec
2:inner_lru_hash_map_perf pre-alloc 1614630 events per sec
4:inner_lru_hash_map_perf pre-alloc 1612651 events per sec
7:inner_lru_hash_map_perf pre-alloc 1609337 events per sec
0:inner_lru_hash_map_perf pre-alloc 1619340 events per sec #<<<
This patch adds one field, numa_node, to union bpf_attr. Since numa
node 0 is a valid node, a new flag, BPF_F_NUMA_NODE, is also added.
The numa_node field is honored if and only if the BPF_F_NUMA_NODE
flag is set.
Numa node selection is not supported for percpu maps.
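As an illustration of the new attribute, a userspace loader could
request a specific node at map creation time. The following is a
minimal sketch using the raw bpf(2) syscall; the wrapper name and the
key/value sizes are hypothetical, not part of this patch:

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical helper: create an LRU hash map whose memory is placed
 * on the given numa node.  Requires a kernel with this patch; the
 * key/value sizes and max_entries are illustrative only.  Note that
 * numa node selection is not supported for percpu map types.
 */
static int create_lru_map_on_node(int numa_node)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_LRU_HASH;
	attr.key_size = sizeof(__u32);
	attr.value_size = sizeof(__u64);
	attr.max_entries = 4096;
	attr.map_flags = BPF_F_NUMA_NODE;	/* numa_node honored iff set */
	attr.numa_node = numa_node;

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}

A loader sharding by CPU, as in the benchmark above, could call this
once per shard with the node local to that shard's CPUs.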
This patch does not convert every kmalloc(). For example,
'htab = kzalloc()' is left unchanged since the object
is small enough to stay in the cache.
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@fb.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel/bpf/stackmap.c')
kernel/bpf/stackmap.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 31147d730abf..135be433e9a0 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -31,7 +31,8 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
 	u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
 	int err;
 
-	smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries);
+	smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries,
+					 smap->map.numa_node);
 	if (!smap->elems)
 		return -ENOMEM;
 
@@ -59,7 +60,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 	if (!capable(CAP_SYS_ADMIN))
 		return ERR_PTR(-EPERM);
 
-	if (attr->map_flags)
+	if (attr->map_flags & ~BPF_F_NUMA_NODE)
 		return ERR_PTR(-EINVAL);
 
 	/* check sanity of attributes */
@@ -75,7 +76,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 	if (cost >= U32_MAX - PAGE_SIZE)
 		return ERR_PTR(-E2BIG);
 
-	smap = bpf_map_area_alloc(cost);
+	smap = bpf_map_area_alloc(cost, bpf_map_attr_numa_node(attr));
 	if (!smap)
 		return ERR_PTR(-ENOMEM);
 
@@ -91,6 +92,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 	smap->map.map_flags = attr->map_flags;
 	smap->n_buckets = n_buckets;
 	smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+	smap->map.numa_node = bpf_map_attr_numa_node(attr);
 
 	err = bpf_map_precharge_memlock(smap->map.pages);
 	if (err)
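For reference, an allocator like the bpf_map_area_alloc() called above
can honor the requested node by trying a node-local kmalloc first and
falling back to a node-aware vmalloc for larger areas. The following
is a simplified sketch of that pattern, not the kernel's actual
implementation; the function name, size threshold, and GFP flags are
illustrative:

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Simplified sketch of a numa-aware map-area allocator: attempt a
 * node-local kzalloc for small areas, fall back to vzalloc_node()
 * for large ones.  The real bpf_map_area_alloc() differs in its
 * flags and threshold.
 */
static void *map_area_alloc_sketch(size_t size, int numa_node)
{
	void *area;

	if (size <= PAGE_SIZE) {
		/* __GFP_NOWARN: a failure here is fine, we fall back */
		area = kzalloc_node(size, GFP_USER | __GFP_NOWARN, numa_node);
		if (area)
			return area;
	}

	return vzalloc_node(size, numa_node);
}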