author		Roman Gushchin <guro@fb.com>		2019-05-29 21:03:57 -0400
committer	Alexei Starovoitov <ast@kernel.org>	2019-05-31 19:52:56 -0400
commit		3539b96e041c06e4317082816d90ec09160aeb11 (patch)
tree		7e2ccf8b5526d8f13e8c5996b3b7ef76869a3012 /kernel/bpf
parent		d50836cda698f6966e63c2c7f718d7c2f687ec8a (diff)
bpf: group memory related fields in struct bpf_map_memory
Group "user" and "pages" fields of bpf_map into the bpf_map_memory structure. Later it can be extended with "memcg" and other related information. The main reason for a such change (beside cosmetics) is to pass bpf_map_memory structure to charging functions before the actual allocation of bpf_map. Signed-off-by: Roman Gushchin <guro@fb.com> Acked-by: Song Liu <songliubraving@fb.com> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'kernel/bpf')
-rw-r--r--	kernel/bpf/arraymap.c		 2
-rw-r--r--	kernel/bpf/cpumap.c		 4
-rw-r--r--	kernel/bpf/devmap.c		 4
-rw-r--r--	kernel/bpf/hashtab.c		 4
-rw-r--r--	kernel/bpf/local_storage.c	 2
-rw-r--r--	kernel/bpf/lpm_trie.c		 4
-rw-r--r--	kernel/bpf/queue_stack_maps.c	 2
-rw-r--r--	kernel/bpf/reuseport_array.c	 2
-rw-r--r--	kernel/bpf/stackmap.c		 4
-rw-r--r--	kernel/bpf/syscall.c		19
-rw-r--r--	kernel/bpf/xskmap.c		 4
11 files changed, 26 insertions(+), 25 deletions(-)
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index 584636c9e2eb..8fda24e78193 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -138,7 +138,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
 
 	/* copy mandatory map attributes */
 	bpf_map_init_from_attr(&array->map, attr);
-	array->map.pages = cost;
+	array->map.memory.pages = cost;
 	array->elem_size = elem_size;
 
 	if (percpu && bpf_array_alloc_percpu(array)) {
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index cf727d77c6c6..035268add724 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -108,10 +108,10 @@ static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
 	cost += cpu_map_bitmap_size(attr) * num_possible_cpus();
 	if (cost >= U32_MAX - PAGE_SIZE)
 		goto free_cmap;
-	cmap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+	cmap->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
 	/* Notice returns -EPERM on if map size is larger than memlock limit */
-	ret = bpf_map_precharge_memlock(cmap->map.pages);
+	ret = bpf_map_precharge_memlock(cmap->map.memory.pages);
 	if (ret) {
 		err = ret;
 		goto free_cmap;
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index 1e525d70f833..f6c57efb1d0d 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -111,10 +111,10 @@ static struct bpf_map *dev_map_alloc(union bpf_attr *attr)
 	if (cost >= U32_MAX - PAGE_SIZE)
 		goto free_dtab;
 
-	dtab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+	dtab->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
 	/* if map size is larger than memlock limit, reject it early */
-	err = bpf_map_precharge_memlock(dtab->map.pages);
+	err = bpf_map_precharge_memlock(dtab->map.memory.pages);
 	if (err)
 		goto free_dtab;
 
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 0f2708fde5f7..15bf228d2e98 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -364,10 +364,10 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 		/* make sure page count doesn't overflow */
 		goto free_htab;
 
-	htab->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+	htab->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
 	/* if map size is larger than memlock limit, reject it early */
-	err = bpf_map_precharge_memlock(htab->map.pages);
+	err = bpf_map_precharge_memlock(htab->map.memory.pages);
 	if (err)
 		goto free_htab;
 
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index e48302ecb389..574325276650 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -303,7 +303,7 @@ static struct bpf_map *cgroup_storage_map_alloc(union bpf_attr *attr)
 	if (!map)
 		return ERR_PTR(-ENOMEM);
 
-	map->map.pages = pages;
+	map->map.memory.pages = pages;
 
 	/* copy mandatory map attributes */
 	bpf_map_init_from_attr(&map->map, attr);
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
index e61630c2e50b..8e423a582760 100644
--- a/kernel/bpf/lpm_trie.c
+++ b/kernel/bpf/lpm_trie.c
@@ -578,9 +578,9 @@ static struct bpf_map *trie_alloc(union bpf_attr *attr)
 		goto out_err;
 	}
 
-	trie->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+	trie->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
-	ret = bpf_map_precharge_memlock(trie->map.pages);
+	ret = bpf_map_precharge_memlock(trie->map.memory.pages);
 	if (ret)
 		goto out_err;
 
diff --git a/kernel/bpf/queue_stack_maps.c b/kernel/bpf/queue_stack_maps.c
index 0b140d236889..8a510e71d486 100644
--- a/kernel/bpf/queue_stack_maps.c
+++ b/kernel/bpf/queue_stack_maps.c
@@ -89,7 +89,7 @@ static struct bpf_map *queue_stack_map_alloc(union bpf_attr *attr)
 
 	bpf_map_init_from_attr(&qs->map, attr);
 
-	qs->map.pages = cost;
+	qs->map.memory.pages = cost;
 	qs->size = size;
 
 	raw_spin_lock_init(&qs->lock);
diff --git a/kernel/bpf/reuseport_array.c b/kernel/bpf/reuseport_array.c
index 18e225de80ff..819515242739 100644
--- a/kernel/bpf/reuseport_array.c
+++ b/kernel/bpf/reuseport_array.c
@@ -176,7 +176,7 @@ static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
 
 	/* copy mandatory map attributes */
 	bpf_map_init_from_attr(&array->map, attr);
-	array->map.pages = cost;
+	array->map.memory.pages = cost;
 
 	return &array->map;
 }
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 950ab2f28922..08d4efff73ac 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -131,9 +131,9 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
 	bpf_map_init_from_attr(&smap->map, attr);
 	smap->map.value_size = value_size;
 	smap->n_buckets = n_buckets;
-	smap->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+	smap->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
-	err = bpf_map_precharge_memlock(smap->map.pages);
+	err = bpf_map_precharge_memlock(smap->map.memory.pages);
 	if (err)
 		goto free_smap;
 
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 1539774d78c7..8289a2ce14fc 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -222,19 +222,20 @@ static int bpf_map_init_memlock(struct bpf_map *map)
 	struct user_struct *user = get_current_user();
 	int ret;
 
-	ret = bpf_charge_memlock(user, map->pages);
+	ret = bpf_charge_memlock(user, map->memory.pages);
 	if (ret) {
 		free_uid(user);
 		return ret;
 	}
-	map->user = user;
+	map->memory.user = user;
 	return ret;
 }
 
 static void bpf_map_release_memlock(struct bpf_map *map)
 {
-	struct user_struct *user = map->user;
-	bpf_uncharge_memlock(user, map->pages);
+	struct user_struct *user = map->memory.user;
+
+	bpf_uncharge_memlock(user, map->memory.pages);
 	free_uid(user);
 }
 
@@ -242,17 +243,17 @@ int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
 {
 	int ret;
 
-	ret = bpf_charge_memlock(map->user, pages);
+	ret = bpf_charge_memlock(map->memory.user, pages);
 	if (ret)
 		return ret;
-	map->pages += pages;
+	map->memory.pages += pages;
 	return ret;
 }
 
 void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
 {
-	bpf_uncharge_memlock(map->user, pages);
-	map->pages -= pages;
+	bpf_uncharge_memlock(map->memory.user, pages);
+	map->memory.pages -= pages;
 }
 
 static int bpf_map_alloc_id(struct bpf_map *map)
@@ -395,7 +396,7 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
 		   map->value_size,
 		   map->max_entries,
 		   map->map_flags,
-		   map->pages * 1ULL << PAGE_SHIFT,
+		   map->memory.pages * 1ULL << PAGE_SHIFT,
 		   map->id,
 		   READ_ONCE(map->frozen));
 
diff --git a/kernel/bpf/xskmap.c b/kernel/bpf/xskmap.c
index 686d244e798d..f816ee1a0fa0 100644
--- a/kernel/bpf/xskmap.c
+++ b/kernel/bpf/xskmap.c
@@ -40,10 +40,10 @@ static struct bpf_map *xsk_map_alloc(union bpf_attr *attr)
 	if (cost >= U32_MAX - PAGE_SIZE)
 		goto free_m;
 
-	m->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+	m->map.memory.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
 
 	/* Notice returns -EPERM on if map size is larger than memlock limit */
-	err = bpf_map_precharge_memlock(m->map.pages);
+	err = bpf_map_precharge_memlock(m->map.memory.pages);
 	if (err)
 		goto free_m;
 