commit     96eabe7a40aa17e613cf3db2c742ee8b1fc764d0
author     Martin KaFai Lau <kafai@fb.com>          2017-08-18 14:28:00 -0400
committer  David S. Miller <davem@davemloft.net>    2017-08-20 00:35:43 -0400
tree       6364b3b63eeb2707e74c3bd770b01342016d936a /kernel/bpf/lpm_trie.c
parent     bd76b87962833f6e55264030a227be0f090b1286
bpf: Allow selecting numa node during map creation
The current map creation API does not allow providing a numa-node
preference.  The memory usually comes from the node where the
map-creation process is running.  Performance is not ideal if the
bpf_prog is known to always run on a numa node different from that
of the map-creation process.

One of the use cases is sharding on CPU to different LRU maps (i.e.
an array of LRU maps).  Here is the test result of map_perf_test on
the INNER_LRU_HASH_PREALLOC test if we force the lru map used by
CPU0 to be allocated from a remote numa node:
[ The machine has 20 cores. CPU0-9 at node 0. CPU10-19 at node 1 ]
># taskset -c 10 ./map_perf_test 512 8 1260000 8000000
5:inner_lru_hash_map_perf pre-alloc 1628380 events per sec
4:inner_lru_hash_map_perf pre-alloc 1626396 events per sec
3:inner_lru_hash_map_perf pre-alloc 1626144 events per sec
6:inner_lru_hash_map_perf pre-alloc 1621657 events per sec
2:inner_lru_hash_map_perf pre-alloc 1621534 events per sec
1:inner_lru_hash_map_perf pre-alloc 1620292 events per sec
7:inner_lru_hash_map_perf pre-alloc 1613305 events per sec
0:inner_lru_hash_map_perf pre-alloc 1239150 events per sec #<<<
After specifying numa node:
># taskset -c 10 ./map_perf_test 512 8 1260000 8000000
5:inner_lru_hash_map_perf pre-alloc 1629627 events per sec
3:inner_lru_hash_map_perf pre-alloc 1628057 events per sec
1:inner_lru_hash_map_perf pre-alloc 1623054 events per sec
6:inner_lru_hash_map_perf pre-alloc 1616033 events per sec
2:inner_lru_hash_map_perf pre-alloc 1614630 events per sec
4:inner_lru_hash_map_perf pre-alloc 1612651 events per sec
7:inner_lru_hash_map_perf pre-alloc 1609337 events per sec
0:inner_lru_hash_map_perf pre-alloc 1619340 events per sec #<<<
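For illustration only (not part of the commit message): the "array of
LRU maps" sharding above boils down to a map-in-map lookup keyed by
the current CPU.  A minimal BPF-side sketch with hypothetical names
(the benchmark's real code is in samples/bpf/):

#include <linux/ptrace.h>
#include <linux/types.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

/* Hypothetical outer map: one BPF_MAP_TYPE_LRU_HASH per CPU slot,
 * populated from userspace before the program runs.
 */
struct bpf_map_def SEC("maps") lru_shards = {
	.type = BPF_MAP_TYPE_ARRAY_OF_MAPS,
	.key_size = sizeof(__u32),
	.value_size = sizeof(__u32),
	.max_entries = 64,	/* assumed upper bound on nr_cpus */
};

SEC("kprobe/sys_getpid")
int shard_lookup(struct pt_regs *ctx)
{
	__u32 cpu = bpf_get_smp_processor_id();
	void *inner;

	/* pick this CPU's LRU shard, then operate on it */
	inner = bpf_map_lookup_elem(&lru_shards, &cpu);
	if (!inner)
		return 0;
	/* ... bpf_map_lookup_elem(inner, &key) etc. on the shard ... */
	return 0;
}

char _license[] SEC("license") = "GPL";

Creating each inner map with memory on the node where its CPU lives
is what removes the remote-node penalty seen for CPU0 above.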
This patch adds one field, numa_node, to the bpf_attr. Since numa node 0
is a valid node, a new flag BPF_F_NUMA_NODE is also added. The numa_node
field is honored if and only if the BPF_F_NUMA_NODE flag is set.
Numa node selection is not supported for percpu maps.

This patch does not convert every kmalloc().  E.g. 'htab = kzalloc()'
is not changed since the object is small enough to stay in the cache.
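For illustration only (not part of the commit): with this patch
applied, a userspace caller can request map memory from a specific
node via the raw bpf(2) syscall.  A minimal sketch, with a
hypothetical helper name and arbitrary key/value sizes:

#include <linux/bpf.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Hypothetical helper: create an LRU hash map whose memory should
 * come from the given numa node.  numa_node is only honored because
 * BPF_F_NUMA_NODE is set in map_flags (both added by this patch).
 */
static int create_lru_map_on_node(int node)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_LRU_HASH;
	attr.key_size    = 4;	/* arbitrary sizes for the sketch */
	attr.value_size  = 8;
	attr.max_entries = 1024;
	attr.map_flags   = BPF_F_NUMA_NODE;
	attr.numa_node   = node;

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}

E.g. create_lru_map_on_node(1) returns an fd for a map backed by
node 1 memory, or -1 with errno set on failure.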
Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@fb.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'kernel/bpf/lpm_trie.c')
-rw-r--r--  kernel/bpf/lpm_trie.c | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index b09185f0f17d..1b767844a76f 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -244,7 +244,8 @@ static struct lpm_trie_node *lpm_trie_node_alloc(const struct lpm_trie *trie,
 	if (value)
 		size += trie->map.value_size;
 
-	node = kmalloc(size, GFP_ATOMIC | __GFP_NOWARN);
+	node = kmalloc_node(size, GFP_ATOMIC | __GFP_NOWARN,
+			    trie->map.numa_node);
 	if (!node)
 		return NULL;
 
@@ -405,6 +406,8 @@ static int trie_delete_elem(struct bpf_map *map, void *key)
 #define LPM_KEY_SIZE_MAX	LPM_KEY_SIZE(LPM_DATA_SIZE_MAX)
 #define LPM_KEY_SIZE_MIN	LPM_KEY_SIZE(LPM_DATA_SIZE_MIN)
 
+#define LPM_CREATE_FLAG_MASK	(BPF_F_NO_PREALLOC | BPF_F_NUMA_NODE)
+
 static struct bpf_map *trie_alloc(union bpf_attr *attr)
 {
 	struct lpm_trie *trie;
@@ -416,7 +419,8 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
 
 	/* check sanity of attributes */
 	if (attr->max_entries == 0 ||
-	    attr->map_flags != BPF_F_NO_PREALLOC ||
+	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
+	    attr->map_flags & ~LPM_CREATE_FLAG_MASK ||
 	    attr->key_size < LPM_KEY_SIZE_MIN ||
 	    attr->key_size > LPM_KEY_SIZE_MAX ||
 	    attr->value_size < LPM_VAL_SIZE_MIN ||
@@ -433,6 +437,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
 	trie->map.value_size = attr->value_size;
 	trie->map.max_entries = attr->max_entries;
 	trie->map.map_flags = attr->map_flags;
+	trie->map.numa_node = bpf_map_attr_numa_node(attr);
 	trie->data_size = attr->key_size -
 			  offsetof(struct bpf_lpm_trie_key, data);
 	trie->max_prefixlen = trie->data_size * 8;
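For context (bpf_map_attr_numa_node() itself lives outside
kernel/bpf/lpm_trie.c, so it is not shown in this diff): given the
rule above that numa_node is honored if and only if BPF_F_NUMA_NODE
is set, the helper plausibly reduces to a sketch like:

/* Sketch under the stated assumption, not copied from the patch:
 * fall back to NUMA_NO_NODE (no preference) when the flag is absent,
 * which keeps numa node 0 selectable.
 */
static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
{
	return (attr->map_flags & BPF_F_NUMA_NODE) ?
		attr->numa_node : NUMA_NO_NODE;
}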