author    Martin KaFai Lau <kafai@fb.com>    2017-08-18 14:28:00 -0400
committer David S. Miller <davem@davemloft.net>    2017-08-20 00:35:43 -0400
commit    96eabe7a40aa17e613cf3db2c742ee8b1fc764d0 (patch)
tree      6364b3b63eeb2707e74c3bd770b01342016d936a /kernel/bpf/hashtab.c
parent    bd76b87962833f6e55264030a227be0f090b1286 (diff)
bpf: Allow selecting numa node during map creation
The current map creation API does not allow providing a numa-node preference.
The memory usually comes from the node where the map-creation process is
running, so performance is not ideal if the bpf_prog is known to always run
on a numa node different from the map-creation process.

One of the use cases is sharding on CPU to different LRU maps (i.e. an array
of LRU maps). Here is the test result of map_perf_test on the
INNER_LRU_HASH_PREALLOC test if we force the lru map used by CPU0 to be
allocated from a remote numa node:

[ The machine has 20 cores. CPU0-9 at node 0. CPU10-19 at node 1 ]

># taskset -c 10 ./map_perf_test 512 8 1260000 8000000
5:inner_lru_hash_map_perf pre-alloc 1628380 events per sec
4:inner_lru_hash_map_perf pre-alloc 1626396 events per sec
3:inner_lru_hash_map_perf pre-alloc 1626144 events per sec
6:inner_lru_hash_map_perf pre-alloc 1621657 events per sec
2:inner_lru_hash_map_perf pre-alloc 1621534 events per sec
1:inner_lru_hash_map_perf pre-alloc 1620292 events per sec
7:inner_lru_hash_map_perf pre-alloc 1613305 events per sec
0:inner_lru_hash_map_perf pre-alloc 1239150 events per sec  #<<<

After specifying the numa node:
># taskset -c 10 ./map_perf_test 512 8 1260000 8000000
5:inner_lru_hash_map_perf pre-alloc 1629627 events per sec
3:inner_lru_hash_map_perf pre-alloc 1628057 events per sec
1:inner_lru_hash_map_perf pre-alloc 1623054 events per sec
6:inner_lru_hash_map_perf pre-alloc 1616033 events per sec
2:inner_lru_hash_map_perf pre-alloc 1614630 events per sec
4:inner_lru_hash_map_perf pre-alloc 1612651 events per sec
7:inner_lru_hash_map_perf pre-alloc 1609337 events per sec
0:inner_lru_hash_map_perf pre-alloc 1619340 events per sec  #<<<

This patch adds one field, numa_node, to the bpf_attr. Since numa node 0 is
a valid node, a new flag BPF_F_NUMA_NODE is also added. The numa_node field
is honored if and only if the BPF_F_NUMA_NODE flag is set.

Numa node selection is not supported for percpu maps.

This patch does not convert every kmalloc. F.e. 'htab = kzalloc()' is not
changed since the object is small enough to stay in the cache.

Signed-off-by: Martin KaFai Lau <kafai@fb.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Alexei Starovoitov <ast@fb.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
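For reference, a minimal userspace sketch (not part of the patch) of how a map
could be created with a numa-node preference through the raw bpf(2) syscall
once this change is in place. The wrapper name create_map_on_node() and the
key/value sizes and map type are illustrative only:

  /* Sketch: create an LRU hash map whose memory should come from a given
   * numa node. numa_node is only honored when BPF_F_NUMA_NODE is set,
   * since node 0 is itself a valid node id.
   */
  #include <errno.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <linux/bpf.h>

  static int create_map_on_node(int numa_node)
  {
  	union bpf_attr attr;

  	memset(&attr, 0, sizeof(attr));
  	attr.map_type    = BPF_MAP_TYPE_LRU_HASH;
  	attr.key_size    = sizeof(__u32);
  	attr.value_size  = sizeof(__u64);
  	attr.max_entries = 4096;
  	attr.map_flags   = BPF_F_NUMA_NODE;
  	attr.numa_node   = numa_node;

  	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
  }

  int main(void)
  {
  	int fd = create_map_on_node(1);	/* prefer memory from node 1 */

  	if (fd < 0)
  		fprintf(stderr, "map create failed: %s\n", strerror(errno));
  	else
  		close(fd);
  	return 0;
  }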
Diffstat (limited to 'kernel/bpf/hashtab.c')
-rw-r--r--  kernel/bpf/hashtab.c | 19
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 4fb463172aa8..47ae748c3a49 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -18,6 +18,9 @@
18#include "bpf_lru_list.h" 18#include "bpf_lru_list.h"
19#include "map_in_map.h" 19#include "map_in_map.h"
20 20
21#define HTAB_CREATE_FLAG_MASK \
22 (BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE)
23
21struct bucket { 24struct bucket {
22 struct hlist_nulls_head head; 25 struct hlist_nulls_head head;
23 raw_spinlock_t lock; 26 raw_spinlock_t lock;
@@ -138,7 +141,8 @@ static int prealloc_init(struct bpf_htab *htab)
 	if (!htab_is_percpu(htab) && !htab_is_lru(htab))
 		num_entries += num_possible_cpus();
 
-	htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries);
+	htab->elems = bpf_map_area_alloc(htab->elem_size * num_entries,
+					 htab->map.numa_node);
 	if (!htab->elems)
 		return -ENOMEM;
 
@@ -233,6 +237,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	 */
 	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
 	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
+	int numa_node = bpf_map_attr_numa_node(attr);
 	struct bpf_htab *htab;
 	int err, i;
 	u64 cost;
@@ -248,7 +253,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 		 */
 		return ERR_PTR(-EPERM);
 
-	if (attr->map_flags & ~(BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU))
+	if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK)
 		/* reserved bits should not be used */
 		return ERR_PTR(-EINVAL);
 
@@ -258,6 +263,9 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	if (lru && !prealloc)
 		return ERR_PTR(-ENOTSUPP);
 
+	if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
+		return ERR_PTR(-EINVAL);
+
 	htab = kzalloc(sizeof(*htab), GFP_USER);
 	if (!htab)
 		return ERR_PTR(-ENOMEM);
@@ -268,6 +276,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	htab->map.value_size = attr->value_size;
 	htab->map.max_entries = attr->max_entries;
 	htab->map.map_flags = attr->map_flags;
+	htab->map.numa_node = numa_node;
 
 	/* check sanity of attributes.
 	 * value_size == 0 may be allowed in the future to use map as a set
@@ -346,7 +355,8 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 
 	err = -ENOMEM;
 	htab->buckets = bpf_map_area_alloc(htab->n_buckets *
-					   sizeof(struct bucket));
+					   sizeof(struct bucket),
+					   htab->map.numa_node);
 	if (!htab->buckets)
 		goto free_htab;
 
@@ -689,7 +699,8 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 			atomic_dec(&htab->count);
 			return ERR_PTR(-E2BIG);
 		}
-		l_new = kmalloc(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN);
+		l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
+				     htab->map.numa_node);
 		if (!l_new)
 			return ERR_PTR(-ENOMEM);
 	}
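The numa_node local picked up in htab_map_alloc() above comes from
bpf_map_attr_numa_node(). Based on the commit message ("honored if and only
if the BPF_F_NUMA_NODE flag is set"), the helper introduced alongside this
change behaves roughly like the sketch below; treat the body as an
approximation rather than a verbatim quote of the patch:

  /* Approximate behaviour of the helper used in htab_map_alloc():
   * fall back to NUMA_NO_NODE unless userspace explicitly opted in via
   * BPF_F_NUMA_NODE, because numa_node == 0 is a valid node id.
   */
  static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
  {
  	return (attr->map_flags & BPF_F_NUMA_NODE) ?
  	       attr->numa_node : NUMA_NO_NODE;
  }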