summaryrefslogtreecommitdiffstats
path: root/kernel/bpf/syscall.c
diff options
context:
space:
mode:
authorRoman Gushchin <guro@fb.com>2019-05-29 21:03:57 -0400
committerAlexei Starovoitov <ast@kernel.org>2019-05-31 19:52:56 -0400
commit3539b96e041c06e4317082816d90ec09160aeb11 (patch)
tree7e2ccf8b5526d8f13e8c5996b3b7ef76869a3012 /kernel/bpf/syscall.c
parentd50836cda698f6966e63c2c7f718d7c2f687ec8a (diff)
bpf: group memory related fields in struct bpf_map_memory
Group "user" and "pages" fields of bpf_map into the bpf_map_memory structure. Later it can be extended with "memcg" and other related information. The main reason for a such change (beside cosmetics) is to pass bpf_map_memory structure to charging functions before the actual allocation of bpf_map. Signed-off-by: Roman Gushchin <guro@fb.com> Acked-by: Song Liu <songliubraving@fb.com> Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Diffstat (limited to 'kernel/bpf/syscall.c')
-rw-r--r--kernel/bpf/syscall.c19
1 file changed, 10 insertions, 9 deletions
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 1539774d78c7..8289a2ce14fc 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -222,19 +222,20 @@ static int bpf_map_init_memlock(struct bpf_map *map)
222 struct user_struct *user = get_current_user(); 222 struct user_struct *user = get_current_user();
223 int ret; 223 int ret;
224 224
225 ret = bpf_charge_memlock(user, map->pages); 225 ret = bpf_charge_memlock(user, map->memory.pages);
226 if (ret) { 226 if (ret) {
227 free_uid(user); 227 free_uid(user);
228 return ret; 228 return ret;
229 } 229 }
230 map->user = user; 230 map->memory.user = user;
231 return ret; 231 return ret;
232} 232}
233 233
234static void bpf_map_release_memlock(struct bpf_map *map) 234static void bpf_map_release_memlock(struct bpf_map *map)
235{ 235{
236 struct user_struct *user = map->user; 236 struct user_struct *user = map->memory.user;
237 bpf_uncharge_memlock(user, map->pages); 237
238 bpf_uncharge_memlock(user, map->memory.pages);
238 free_uid(user); 239 free_uid(user);
239} 240}
240 241
@@ -242,17 +243,17 @@ int bpf_map_charge_memlock(struct bpf_map *map, u32 pages)
242{ 243{
243 int ret; 244 int ret;
244 245
245 ret = bpf_charge_memlock(map->user, pages); 246 ret = bpf_charge_memlock(map->memory.user, pages);
246 if (ret) 247 if (ret)
247 return ret; 248 return ret;
248 map->pages += pages; 249 map->memory.pages += pages;
249 return ret; 250 return ret;
250} 251}
251 252
252void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages) 253void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages)
253{ 254{
254 bpf_uncharge_memlock(map->user, pages); 255 bpf_uncharge_memlock(map->memory.user, pages);
255 map->pages -= pages; 256 map->memory.pages -= pages;
256} 257}
257 258
258static int bpf_map_alloc_id(struct bpf_map *map) 259static int bpf_map_alloc_id(struct bpf_map *map)
@@ -395,7 +396,7 @@ static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
395 map->value_size, 396 map->value_size,
396 map->max_entries, 397 map->max_entries,
397 map->map_flags, 398 map->map_flags,
398 map->pages * 1ULL << PAGE_SHIFT, 399 map->memory.pages * 1ULL << PAGE_SHIFT,
399 map->id, 400 map->id,
400 READ_ONCE(map->frozen)); 401 READ_ONCE(map->frozen));
401 402