author     Martin KaFai Lau <kafai@fb.com>          2017-08-18 14:28:00 -0400
committer  David S. Miller <davem@davemloft.net>    2017-08-20 00:35:43 -0400
commit     96eabe7a40aa17e613cf3db2c742ee8b1fc764d0 (patch)
tree       6364b3b63eeb2707e74c3bd770b01342016d936a /kernel/bpf/syscall.c
parent     bd76b87962833f6e55264030a227be0f090b1286 (diff)
bpf: Allow selecting numa node during map creation
The current map creation API does not allow specifying a numa-node
preference. The memory usually comes from the node where the
map-creating process is running. Performance is not ideal if the
bpf_prog is known to always run on a numa node different from the
map-creating process.
One of the use cases is sharding by CPU into different LRU maps (i.e.
an array of LRU maps); a sketch of this pattern follows the numbers
below. Here is the map_perf_test result for the INNER_LRU_HASH_PREALLOC
test if we force the LRU map used by CPU0 to be allocated from a remote
numa node:
[ The machine has 20 cores. CPU0-9 at node 0. CPU10-19 at node 1 ]
># taskset -c 10 ./map_perf_test 512 8 1260000 8000000
5:inner_lru_hash_map_perf pre-alloc 1628380 events per sec
4:inner_lru_hash_map_perf pre-alloc 1626396 events per sec
3:inner_lru_hash_map_perf pre-alloc 1626144 events per sec
6:inner_lru_hash_map_perf pre-alloc 1621657 events per sec
2:inner_lru_hash_map_perf pre-alloc 1621534 events per sec
1:inner_lru_hash_map_perf pre-alloc 1620292 events per sec
7:inner_lru_hash_map_perf pre-alloc 1613305 events per sec
0:inner_lru_hash_map_perf pre-alloc 1239150 events per sec #<<<
After specifying numa node:
># taskset -c 10 ./map_perf_test 512 8 1260000 8000000
5:inner_lru_hash_map_perf pre-alloc 1629627 events per sec
3:inner_lru_hash_map_perf pre-alloc 1628057 events per sec
1:inner_lru_hash_map_perf pre-alloc 1623054 events per sec
6:inner_lru_hash_map_perf pre-alloc 1616033 events per sec
2:inner_lru_hash_map_perf pre-alloc 1614630 events per sec
4:inner_lru_hash_map_perf pre-alloc 1612651 events per sec
7:inner_lru_hash_map_perf pre-alloc 1609337 events per sec
0:inner_lru_hash_map_perf pre-alloc 1619340 events per sec #<<<
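For reference, a minimal sketch of that sharding pattern is shown below:
each CPU looks up its own inner LRU map in an array-of-maps keyed by CPU
id, so every inner map can be created on the node local to the CPUs that
hash into it. Map names, the kprobe attach point and sizes here are
illustrative, not the exact code from the map_perf_test sample.

/* Illustrative sketch only: per-CPU sharding into inner LRU hash maps.
 * The real benchmark lives in samples/bpf/map_perf_test_kern.c; the
 * names and the attach point below are hypothetical.
 */
#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") array_of_lru_hashs = {
	.type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
	.key_size = sizeof(u32),
	.value_size = sizeof(u32),	/* value is the inner map's fd/id */
	.max_entries = 128,		/* one slot per possible CPU */
};

SEC("kprobe/sys_connect")
int stress_inner_lru(struct pt_regs *ctx)
{
	u32 cpu = bpf_get_smp_processor_id();
	u64 now = bpf_ktime_get_ns();
	void *inner_lru;

	/* pick the LRU shard owned by this CPU */
	inner_lru = bpf_map_lookup_elem(&array_of_lru_hashs, &cpu);
	if (!inner_lru)
		return 0;

	/* touch only our own shard; its memory can live on our local node */
	bpf_map_update_elem(inner_lru, &cpu, &now, BPF_ANY);
	return 0;
}

char _license[] SEC("license") = "GPL";

The inner LRU maps are created by the userspace loader (one per CPU),
which is exactly where the per-node allocation below comes in.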
This patch adds one field, numa_node, to bpf_attr. Since numa node 0
is a valid node, a new flag BPF_F_NUMA_NODE is also added. The numa_node
field is honored if and only if the BPF_F_NUMA_NODE flag is set.
Numa node selection is not supported for percpu maps.
This patch does not change every kmalloc. E.g.
'htab = kzalloc()' is not changed since the object
is small enough to stay in the cache.
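From userspace, creating a map on a specific node then looks roughly
like the sketch below. It assumes uapi headers from a kernel that
carries this patch (i.e. BPF_F_NUMA_NODE and the numa_node field are
visible); the bpf_create_map_node() wrapper name is illustrative and
not part of this patch.

/* Sketch: create a map whose memory is allocated from a given NUMA
 * node.  Assumes <linux/bpf.h> from a kernel with this patch.
 */
#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int bpf_create_map_node(enum bpf_map_type type, int key_size,
			       int value_size, int max_entries, int node)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));		/* unused tail must be zero */
	attr.map_type = type;
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.max_entries = max_entries;
	attr.map_flags = BPF_F_NUMA_NODE;	/* numa_node honored only with this flag */
	attr.numa_node = node;			/* must be an online node, else -EINVAL */

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}

/* e.g. the LRU hash shard used by CPU0, backed by node 0 memory:
 * int fd = bpf_create_map_node(BPF_MAP_TYPE_LRU_HASH, 4, 8, 10000, 0);
 */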
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@fb.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel/bpf/syscall.c')
-rw-r--r--   kernel/bpf/syscall.c   14
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index b8cb1b3c9bfb..9378f3ba2cbf 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -105,7 +105,7 @@ static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
 	return map;
 }
 
-void *bpf_map_area_alloc(size_t size)
+void *bpf_map_area_alloc(size_t size, int numa_node)
 {
 	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
 	 * trigger under memory pressure as we really just want to
@@ -115,12 +115,13 @@ void *bpf_map_area_alloc(size_t size)
 	void *area;
 
 	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
-		area = kmalloc(size, GFP_USER | flags);
+		area = kmalloc_node(size, GFP_USER | flags, numa_node);
 		if (area != NULL)
 			return area;
 	}
 
-	return __vmalloc(size, GFP_KERNEL | flags, PAGE_KERNEL);
+	return __vmalloc_node_flags_caller(size, numa_node, GFP_KERNEL | flags,
+					   __builtin_return_address(0));
 }
 
 void bpf_map_area_free(void *area)
@@ -309,10 +310,11 @@ int bpf_map_new_fd(struct bpf_map *map)
 		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
 		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
 
-#define BPF_MAP_CREATE_LAST_FIELD inner_map_fd
+#define BPF_MAP_CREATE_LAST_FIELD numa_node
 /* called via syscall */
 static int map_create(union bpf_attr *attr)
 {
+	int numa_node = bpf_map_attr_numa_node(attr);
 	struct bpf_map *map;
 	int err;
 
@@ -320,6 +322,10 @@ static int map_create(union bpf_attr *attr)
 	if (err)
 		return -EINVAL;
 
+	if (numa_node != NUMA_NO_NODE &&
+	    (numa_node >= nr_node_ids || !node_online(numa_node)))
+		return -EINVAL;
+
 	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
 	map = find_and_alloc_map(attr);
 	if (IS_ERR(map))