author     Jakub Kicinski <jakub.kicinski@netronome.com>  2018-01-11 23:29:06 -0500
committer  Daniel Borkmann <daniel@iogearbox.net>  2018-01-14 17:36:29 -0500
commit     bd475643d74e8ed78bfd36d941053b0e45974e8e (patch)
tree       30fdfb7335e61f5d34c864f2abce9bf74f0f89ad /kernel/bpf
parent     9328e0d1bc09e96bd7dc85374f5c2a1e0e04e539 (diff)
bpf: add helper for copying attrs to struct bpf_map
All map types reimplement the field-by-field copy of union bpf_attr members into struct bpf_map. Add a helper to perform this operation.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
Reviewed-by: Quentin Monnet <quentin.monnet@netronome.com>
Acked-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
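As an illustration of the call pattern this introduces (not part of the patch itself), below is a minimal sketch of a map type's allocation path after the change. The helper signature is taken from the kernel/bpf/syscall.c hunk further down; the map type name and surrounding structure are hypothetical.

/* Hypothetical example map type; only bpf_map_init_from_attr() is real. */
struct bpf_example_map {
	struct bpf_map map;	/* generic map header, filled in by the helper */
	/* ... type-specific state ... */
};

static struct bpf_map *example_map_alloc(union bpf_attr *attr)
{
	struct bpf_example_map *emap;

	emap = kzalloc(sizeof(*emap), GFP_USER);
	if (!emap)
		return ERR_PTR(-ENOMEM);

	/* One call replaces the former field-by-field copy of map_type,
	 * key_size, value_size, max_entries, map_flags and numa_node.
	 */
	bpf_map_init_from_attr(&emap->map, attr);

	return &emap->map;
}

Any map type that previously open-coded the copy can be converted the same way, as the hunks below show.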
Diffstat (limited to 'kernel/bpf')
-rw-r--r--  kernel/bpf/cpumap.c   |  8
-rw-r--r--  kernel/bpf/devmap.c   |  8
-rw-r--r--  kernel/bpf/hashtab.c  |  9
-rw-r--r--  kernel/bpf/lpm_trie.c |  7
-rw-r--r--  kernel/bpf/sockmap.c  |  8
-rw-r--r--  kernel/bpf/stackmap.c |  6
-rw-r--r--  kernel/bpf/syscall.c  | 10
7 files changed, 16 insertions(+), 40 deletions(-)
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index ce5b669003b2..192151ec9d12 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -94,13 +94,7 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
 	if (!cmap)
 		return ERR_PTR(-ENOMEM);
 
-	/* mandatory map attributes */
-	cmap->map.map_type = attr->map_type;
-	cmap->map.key_size = attr->key_size;
-	cmap->map.value_size = attr->value_size;
-	cmap->map.max_entries = attr->max_entries;
-	cmap->map.map_flags = attr->map_flags;
-	cmap->map.numa_node = bpf_map_attr_numa_node(attr);
+	bpf_map_init_from_attr(&cmap->map, attr);
 
 	/* Pre-limit array size based on NR_CPUS, not final CPU check */
 	if (cmap->map.max_entries > NR_CPUS) {
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index ebdef54bf7df..565f9ece9115 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -93,13 +93,7 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 	if (!dtab)
 		return ERR_PTR(-ENOMEM);
 
-	/* mandatory map attributes */
-	dtab->map.map_type = attr->map_type;
-	dtab->map.key_size = attr->key_size;
-	dtab->map.value_size = attr->value_size;
-	dtab->map.max_entries = attr->max_entries;
-	dtab->map.map_flags = attr->map_flags;
-	dtab->map.numa_node = bpf_map_attr_numa_node(attr);
+	bpf_map_init_from_attr(&dtab->map, attr);
 
 	/* make sure page count doesn't overflow */
 	cost = (u64) dtab->map.max_entries * sizeof(struct bpf_dtab_netdev *);
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 7fd6519444d3..b76828f23b49 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -304,7 +304,6 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	 */
 	bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
 	bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
-	int numa_node = bpf_map_attr_numa_node(attr);
 	struct bpf_htab *htab;
 	int err, i;
 	u64 cost;
@@ -313,13 +312,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	if (!htab)
 		return ERR_PTR(-ENOMEM);
 
-	/* mandatory map attributes */
-	htab->map.map_type = attr->map_type;
-	htab->map.key_size = attr->key_size;
-	htab->map.value_size = attr->value_size;
-	htab->map.max_entries = attr->max_entries;
-	htab->map.map_flags = attr->map_flags;
-	htab->map.numa_node = numa_node;
+	bpf_map_init_from_attr(&htab->map, attr);
 
 	if (percpu_lru) {
 		/* ensure each CPU's lru list has >=1 elements.
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index 885e45479680..584e02227671 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -522,12 +522,7 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
 		return ERR_PTR(-ENOMEM);
 
 	/* copy mandatory map attributes */
-	trie->map.map_type = attr->map_type;
-	trie->map.key_size = attr->key_size;
-	trie->map.value_size = attr->value_size;
-	trie->map.max_entries = attr->max_entries;
-	trie->map.map_flags = attr->map_flags;
-	trie->map.numa_node = bpf_map_attr_numa_node(attr);
+	bpf_map_init_from_attr(&trie->map, attr);
 	trie->data_size = attr->key_size -
 			  offsetof(struct bpf_lpm_trie_key, data);
 	trie->max_prefixlen = trie->data_size * 8;
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 079968680bc3..0314d1783d77 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -513,13 +513,7 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr)
 	if (!stab)
 		return ERR_PTR(-ENOMEM);
 
-	/* mandatory map attributes */
-	stab->map.map_type = attr->map_type;
-	stab->map.key_size = attr->key_size;
-	stab->map.value_size = attr->value_size;
-	stab->map.max_entries = attr->max_entries;
-	stab->map.map_flags = attr->map_flags;
-	stab->map.numa_node = bpf_map_attr_numa_node(attr);
+	bpf_map_init_from_attr(&stab->map, attr);
 
 	/* make sure page count doesn't overflow */
 	cost = (u64) stab->map.max_entries * sizeof(struct sock *);
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 6c63c2222ea8..b0ecf43f5894 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -88,14 +88,10 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 	if (cost >= U32_MAX - PAGE_SIZE)
 		goto free_smap;
 
-	smap->map.map_type = attr->map_type;
-	smap->map.key_size = attr->key_size;
+	bpf_map_init_from_attr(&smap->map, attr);
 	smap->map.value_size = value_size;
-	smap->map.max_entries = attr->max_entries;
-	smap->map.map_flags = attr->map_flags;
 	smap->n_buckets = n_buckets;
 	smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
-	smap->map.numa_node = bpf_map_attr_numa_node(attr);
 
 	err = bpf_map_precharge_memlock(smap->map.pages);
 	if (err)
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index c0ac03a04880..a3f726bb42ea 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -143,6 +143,16 @@ void bpf_map_area_free(void *area)
 	kvfree(area);
 }
 
+void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr)
+{
+	map->map_type = attr->map_type;
+	map->key_size = attr->key_size;
+	map->value_size = attr->value_size;
+	map->max_entries = attr->max_entries;
+	map->map_flags = attr->map_flags;
+	map->numa_node = bpf_map_attr_numa_node(attr);
+}
+
 int bpf_map_precharge_memlock(u32 pages)
 {
 	struct user_struct *user = get_current_user();