about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--include/linux/bpf-cgroup.h38
-rw-r--r--include/linux/bpf.h11
-rw-r--r--kernel/bpf/cgroup.c74
-rw-r--r--kernel/bpf/helpers.c15
-rw-r--r--kernel/bpf/local_storage.c18
-rw-r--r--kernel/bpf/syscall.c9
-rw-r--r--kernel/bpf/verifier.c8
-rw-r--r--net/bpf/test_run.c20
8 files changed, 136 insertions, 57 deletions
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index f91b0f8ff3a9..e9871b012dac 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -2,6 +2,7 @@
2#ifndef _BPF_CGROUP_H 2#ifndef _BPF_CGROUP_H
3#define _BPF_CGROUP_H 3#define _BPF_CGROUP_H
4 4
5#include <linux/bpf.h>
5#include <linux/errno.h> 6#include <linux/errno.h>
6#include <linux/jump_label.h> 7#include <linux/jump_label.h>
7#include <linux/percpu.h> 8#include <linux/percpu.h>
@@ -22,7 +23,10 @@ struct bpf_cgroup_storage;
22extern struct static_key_false cgroup_bpf_enabled_key; 23extern struct static_key_false cgroup_bpf_enabled_key;
23#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key) 24#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
24 25
25DECLARE_PER_CPU(void*, bpf_cgroup_storage); 26DECLARE_PER_CPU(void*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
27
28#define for_each_cgroup_storage_type(stype) \
29 for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++)
26 30
27struct bpf_cgroup_storage_map; 31struct bpf_cgroup_storage_map;
28 32
@@ -43,7 +47,7 @@ struct bpf_cgroup_storage {
43struct bpf_prog_list { 47struct bpf_prog_list {
44 struct list_head node; 48 struct list_head node;
45 struct bpf_prog *prog; 49 struct bpf_prog *prog;
46 struct bpf_cgroup_storage *storage; 50 struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
47}; 51};
48 52
49struct bpf_prog_array; 53struct bpf_prog_array;
@@ -101,18 +105,29 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
101int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor, 105int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
102 short access, enum bpf_attach_type type); 106 short access, enum bpf_attach_type type);
103 107
104static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage) 108static inline enum bpf_cgroup_storage_type cgroup_storage_type(
109 struct bpf_map *map)
105{ 110{
111 return BPF_CGROUP_STORAGE_SHARED;
112}
113
114static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage
115 *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
116{
117 enum bpf_cgroup_storage_type stype;
106 struct bpf_storage_buffer *buf; 118 struct bpf_storage_buffer *buf;
107 119
108 if (!storage) 120 for_each_cgroup_storage_type(stype) {
109 return; 121 if (!storage[stype])
122 continue;
110 123
111 buf = READ_ONCE(storage->buf); 124 buf = READ_ONCE(storage[stype]->buf);
112 this_cpu_write(bpf_cgroup_storage, &buf->data[0]); 125 this_cpu_write(bpf_cgroup_storage[stype], &buf->data[0]);
126 }
113} 127}
114 128
115struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog); 129struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
130 enum bpf_cgroup_storage_type stype);
116void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage); 131void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
117void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage, 132void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
118 struct cgroup *cgroup, 133 struct cgroup *cgroup,
@@ -265,13 +280,14 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
265 return -EINVAL; 280 return -EINVAL;
266} 281}
267 282
268static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage) {} 283static inline void bpf_cgroup_storage_set(
284 struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
269static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog, 285static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
270 struct bpf_map *map) { return 0; } 286 struct bpf_map *map) { return 0; }
271static inline void bpf_cgroup_storage_release(struct bpf_prog *prog, 287static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
272 struct bpf_map *map) {} 288 struct bpf_map *map) {}
273static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc( 289static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
274 struct bpf_prog *prog) { return 0; } 290 struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return 0; }
275static inline void bpf_cgroup_storage_free( 291static inline void bpf_cgroup_storage_free(
276 struct bpf_cgroup_storage *storage) {} 292 struct bpf_cgroup_storage *storage) {}
277 293
@@ -293,6 +309,8 @@ static inline void bpf_cgroup_storage_free(
293#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; }) 309#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
294#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; }) 310#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type,major,minor,access) ({ 0; })
295 311
312#define for_each_cgroup_storage_type(stype) for (; false; )
313
296#endif /* CONFIG_CGROUP_BPF */ 314#endif /* CONFIG_CGROUP_BPF */
297 315
298#endif /* _BPF_CGROUP_H */ 316#endif /* _BPF_CGROUP_H */
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 988a00797bcd..b457fbe7b70b 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -272,6 +272,13 @@ struct bpf_prog_offload {
272 u32 jited_len; 272 u32 jited_len;
273}; 273};
274 274
275enum bpf_cgroup_storage_type {
276 BPF_CGROUP_STORAGE_SHARED,
277 __BPF_CGROUP_STORAGE_MAX
278};
279
280#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
281
275struct bpf_prog_aux { 282struct bpf_prog_aux {
276 atomic_t refcnt; 283 atomic_t refcnt;
277 u32 used_map_cnt; 284 u32 used_map_cnt;
@@ -289,7 +296,7 @@ struct bpf_prog_aux {
289 struct bpf_prog *prog; 296 struct bpf_prog *prog;
290 struct user_struct *user; 297 struct user_struct *user;
291 u64 load_time; /* ns since boottime */ 298 u64 load_time; /* ns since boottime */
292 struct bpf_map *cgroup_storage; 299 struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
293 char name[BPF_OBJ_NAME_LEN]; 300 char name[BPF_OBJ_NAME_LEN];
294#ifdef CONFIG_SECURITY 301#ifdef CONFIG_SECURITY
295 void *security; 302 void *security;
@@ -358,7 +365,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
358 */ 365 */
359struct bpf_prog_array_item { 366struct bpf_prog_array_item {
360 struct bpf_prog *prog; 367 struct bpf_prog *prog;
361 struct bpf_cgroup_storage *cgroup_storage; 368 struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
362}; 369};
363 370
364struct bpf_prog_array { 371struct bpf_prog_array {
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index 549f6fbcc461..00f6ed2e4f9a 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -25,6 +25,7 @@ EXPORT_SYMBOL(cgroup_bpf_enabled_key);
25 */ 25 */
26void cgroup_bpf_put(struct cgroup *cgrp) 26void cgroup_bpf_put(struct cgroup *cgrp)
27{ 27{
28 enum bpf_cgroup_storage_type stype;
28 unsigned int type; 29 unsigned int type;
29 30
30 for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) { 31 for (type = 0; type < ARRAY_SIZE(cgrp->bpf.progs); type++) {
@@ -34,8 +35,10 @@ void cgroup_bpf_put(struct cgroup *cgrp)
34 list_for_each_entry_safe(pl, tmp, progs, node) { 35 list_for_each_entry_safe(pl, tmp, progs, node) {
35 list_del(&pl->node); 36 list_del(&pl->node);
36 bpf_prog_put(pl->prog); 37 bpf_prog_put(pl->prog);
37 bpf_cgroup_storage_unlink(pl->storage); 38 for_each_cgroup_storage_type(stype) {
38 bpf_cgroup_storage_free(pl->storage); 39 bpf_cgroup_storage_unlink(pl->storage[stype]);
40 bpf_cgroup_storage_free(pl->storage[stype]);
41 }
39 kfree(pl); 42 kfree(pl);
40 static_branch_dec(&cgroup_bpf_enabled_key); 43 static_branch_dec(&cgroup_bpf_enabled_key);
41 } 44 }
@@ -97,6 +100,7 @@ static int compute_effective_progs(struct cgroup *cgrp,
97 enum bpf_attach_type type, 100 enum bpf_attach_type type,
98 struct bpf_prog_array __rcu **array) 101 struct bpf_prog_array __rcu **array)
99{ 102{
103 enum bpf_cgroup_storage_type stype;
100 struct bpf_prog_array *progs; 104 struct bpf_prog_array *progs;
101 struct bpf_prog_list *pl; 105 struct bpf_prog_list *pl;
102 struct cgroup *p = cgrp; 106 struct cgroup *p = cgrp;
@@ -125,7 +129,9 @@ static int compute_effective_progs(struct cgroup *cgrp,
125 continue; 129 continue;
126 130
127 progs->items[cnt].prog = pl->prog; 131 progs->items[cnt].prog = pl->prog;
128 progs->items[cnt].cgroup_storage = pl->storage; 132 for_each_cgroup_storage_type(stype)
133 progs->items[cnt].cgroup_storage[stype] =
134 pl->storage[stype];
129 cnt++; 135 cnt++;
130 } 136 }
131 } while ((p = cgroup_parent(p))); 137 } while ((p = cgroup_parent(p)));
@@ -232,7 +238,9 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
232{ 238{
233 struct list_head *progs = &cgrp->bpf.progs[type]; 239 struct list_head *progs = &cgrp->bpf.progs[type];
234 struct bpf_prog *old_prog = NULL; 240 struct bpf_prog *old_prog = NULL;
235 struct bpf_cgroup_storage *storage, *old_storage = NULL; 241 struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE],
242 *old_storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {NULL};
243 enum bpf_cgroup_storage_type stype;
236 struct bpf_prog_list *pl; 244 struct bpf_prog_list *pl;
237 bool pl_was_allocated; 245 bool pl_was_allocated;
238 int err; 246 int err;
@@ -254,34 +262,44 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
254 if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS) 262 if (prog_list_length(progs) >= BPF_CGROUP_MAX_PROGS)
255 return -E2BIG; 263 return -E2BIG;
256 264
257 storage = bpf_cgroup_storage_alloc(prog); 265 for_each_cgroup_storage_type(stype) {
258 if (IS_ERR(storage)) 266 storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
259 return -ENOMEM; 267 if (IS_ERR(storage[stype])) {
268 storage[stype] = NULL;
269 for_each_cgroup_storage_type(stype)
270 bpf_cgroup_storage_free(storage[stype]);
271 return -ENOMEM;
272 }
273 }
260 274
261 if (flags & BPF_F_ALLOW_MULTI) { 275 if (flags & BPF_F_ALLOW_MULTI) {
262 list_for_each_entry(pl, progs, node) { 276 list_for_each_entry(pl, progs, node) {
263 if (pl->prog == prog) { 277 if (pl->prog == prog) {
264 /* disallow attaching the same prog twice */ 278 /* disallow attaching the same prog twice */
265 bpf_cgroup_storage_free(storage); 279 for_each_cgroup_storage_type(stype)
280 bpf_cgroup_storage_free(storage[stype]);
266 return -EINVAL; 281 return -EINVAL;
267 } 282 }
268 } 283 }
269 284
270 pl = kmalloc(sizeof(*pl), GFP_KERNEL); 285 pl = kmalloc(sizeof(*pl), GFP_KERNEL);
271 if (!pl) { 286 if (!pl) {
272 bpf_cgroup_storage_free(storage); 287 for_each_cgroup_storage_type(stype)
288 bpf_cgroup_storage_free(storage[stype]);
273 return -ENOMEM; 289 return -ENOMEM;
274 } 290 }
275 291
276 pl_was_allocated = true; 292 pl_was_allocated = true;
277 pl->prog = prog; 293 pl->prog = prog;
278 pl->storage = storage; 294 for_each_cgroup_storage_type(stype)
295 pl->storage[stype] = storage[stype];
279 list_add_tail(&pl->node, progs); 296 list_add_tail(&pl->node, progs);
280 } else { 297 } else {
281 if (list_empty(progs)) { 298 if (list_empty(progs)) {
282 pl = kmalloc(sizeof(*pl), GFP_KERNEL); 299 pl = kmalloc(sizeof(*pl), GFP_KERNEL);
283 if (!pl) { 300 if (!pl) {
284 bpf_cgroup_storage_free(storage); 301 for_each_cgroup_storage_type(stype)
302 bpf_cgroup_storage_free(storage[stype]);
285 return -ENOMEM; 303 return -ENOMEM;
286 } 304 }
287 pl_was_allocated = true; 305 pl_was_allocated = true;
@@ -289,12 +307,15 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
289 } else { 307 } else {
290 pl = list_first_entry(progs, typeof(*pl), node); 308 pl = list_first_entry(progs, typeof(*pl), node);
291 old_prog = pl->prog; 309 old_prog = pl->prog;
292 old_storage = pl->storage; 310 for_each_cgroup_storage_type(stype) {
293 bpf_cgroup_storage_unlink(old_storage); 311 old_storage[stype] = pl->storage[stype];
312 bpf_cgroup_storage_unlink(old_storage[stype]);
313 }
294 pl_was_allocated = false; 314 pl_was_allocated = false;
295 } 315 }
296 pl->prog = prog; 316 pl->prog = prog;
297 pl->storage = storage; 317 for_each_cgroup_storage_type(stype)
318 pl->storage[stype] = storage[stype];
298 } 319 }
299 320
300 cgrp->bpf.flags[type] = flags; 321 cgrp->bpf.flags[type] = flags;
@@ -304,21 +325,27 @@ int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
304 goto cleanup; 325 goto cleanup;
305 326
306 static_branch_inc(&cgroup_bpf_enabled_key); 327 static_branch_inc(&cgroup_bpf_enabled_key);
307 if (old_storage) 328 for_each_cgroup_storage_type(stype) {
308 bpf_cgroup_storage_free(old_storage); 329 if (!old_storage[stype])
330 continue;
331 bpf_cgroup_storage_free(old_storage[stype]);
332 }
309 if (old_prog) { 333 if (old_prog) {
310 bpf_prog_put(old_prog); 334 bpf_prog_put(old_prog);
311 static_branch_dec(&cgroup_bpf_enabled_key); 335 static_branch_dec(&cgroup_bpf_enabled_key);
312 } 336 }
313 bpf_cgroup_storage_link(storage, cgrp, type); 337 for_each_cgroup_storage_type(stype)
338 bpf_cgroup_storage_link(storage[stype], cgrp, type);
314 return 0; 339 return 0;
315 340
316cleanup: 341cleanup:
317 /* and cleanup the prog list */ 342 /* and cleanup the prog list */
318 pl->prog = old_prog; 343 pl->prog = old_prog;
319 bpf_cgroup_storage_free(pl->storage); 344 for_each_cgroup_storage_type(stype) {
320 pl->storage = old_storage; 345 bpf_cgroup_storage_free(pl->storage[stype]);
321 bpf_cgroup_storage_link(old_storage, cgrp, type); 346 pl->storage[stype] = old_storage[stype];
347 bpf_cgroup_storage_link(old_storage[stype], cgrp, type);
348 }
322 if (pl_was_allocated) { 349 if (pl_was_allocated) {
323 list_del(&pl->node); 350 list_del(&pl->node);
324 kfree(pl); 351 kfree(pl);
@@ -339,6 +366,7 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
339 enum bpf_attach_type type, u32 unused_flags) 366 enum bpf_attach_type type, u32 unused_flags)
340{ 367{
341 struct list_head *progs = &cgrp->bpf.progs[type]; 368 struct list_head *progs = &cgrp->bpf.progs[type];
369 enum bpf_cgroup_storage_type stype;
342 u32 flags = cgrp->bpf.flags[type]; 370 u32 flags = cgrp->bpf.flags[type];
343 struct bpf_prog *old_prog = NULL; 371 struct bpf_prog *old_prog = NULL;
344 struct bpf_prog_list *pl; 372 struct bpf_prog_list *pl;
@@ -385,8 +413,10 @@ int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
385 413
386 /* now can actually delete it from this cgroup list */ 414 /* now can actually delete it from this cgroup list */
387 list_del(&pl->node); 415 list_del(&pl->node);
388 bpf_cgroup_storage_unlink(pl->storage); 416 for_each_cgroup_storage_type(stype) {
389 bpf_cgroup_storage_free(pl->storage); 417 bpf_cgroup_storage_unlink(pl->storage[stype]);
418 bpf_cgroup_storage_free(pl->storage[stype]);
419 }
390 kfree(pl); 420 kfree(pl);
391 if (list_empty(progs)) 421 if (list_empty(progs))
392 /* last program was detached, reset flags to zero */ 422 /* last program was detached, reset flags to zero */
diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c
index 1991466b8327..9070b2ace6aa 100644
--- a/kernel/bpf/helpers.c
+++ b/kernel/bpf/helpers.c
@@ -194,16 +194,18 @@ const struct bpf_func_proto bpf_get_current_cgroup_id_proto = {
194 .ret_type = RET_INTEGER, 194 .ret_type = RET_INTEGER,
195}; 195};
196 196
197DECLARE_PER_CPU(void*, bpf_cgroup_storage); 197#ifdef CONFIG_CGROUP_BPF
198DECLARE_PER_CPU(void*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
198 199
199BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags) 200BPF_CALL_2(bpf_get_local_storage, struct bpf_map *, map, u64, flags)
200{ 201{
201 /* map and flags arguments are not used now, 202 /* flags argument is not used now,
202 * but provide an ability to extend the API 203 * but provides an ability to extend the API.
203 * for other types of local storages. 204 * verifier checks that its value is correct.
204 * verifier checks that their values are correct.
205 */ 205 */
206 return (unsigned long) this_cpu_read(bpf_cgroup_storage); 206 enum bpf_cgroup_storage_type stype = cgroup_storage_type(map);
207
208 return (unsigned long) this_cpu_read(bpf_cgroup_storage[stype]);
207} 209}
208 210
209const struct bpf_func_proto bpf_get_local_storage_proto = { 211const struct bpf_func_proto bpf_get_local_storage_proto = {
@@ -214,3 +216,4 @@ const struct bpf_func_proto bpf_get_local_storage_proto = {
214 .arg2_type = ARG_ANYTHING, 216 .arg2_type = ARG_ANYTHING,
215}; 217};
216#endif 218#endif
219#endif
diff --git a/kernel/bpf/local_storage.c b/kernel/bpf/local_storage.c
index 22ad967d1e5f..0bd9f19fc557 100644
--- a/kernel/bpf/local_storage.c
+++ b/kernel/bpf/local_storage.c
@@ -7,7 +7,7 @@
7#include <linux/rbtree.h> 7#include <linux/rbtree.h>
8#include <linux/slab.h> 8#include <linux/slab.h>
9 9
10DEFINE_PER_CPU(void*, bpf_cgroup_storage); 10DEFINE_PER_CPU(void*, bpf_cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]);
11 11
12#ifdef CONFIG_CGROUP_BPF 12#ifdef CONFIG_CGROUP_BPF
13 13
@@ -251,6 +251,7 @@ const struct bpf_map_ops cgroup_storage_map_ops = {
251 251
252int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map) 252int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map)
253{ 253{
254 enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);
254 struct bpf_cgroup_storage_map *map = map_to_storage(_map); 255 struct bpf_cgroup_storage_map *map = map_to_storage(_map);
255 int ret = -EBUSY; 256 int ret = -EBUSY;
256 257
@@ -258,11 +259,12 @@ int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *_map)
258 259
259 if (map->prog && map->prog != prog) 260 if (map->prog && map->prog != prog)
260 goto unlock; 261 goto unlock;
261 if (prog->aux->cgroup_storage && prog->aux->cgroup_storage != _map) 262 if (prog->aux->cgroup_storage[stype] &&
263 prog->aux->cgroup_storage[stype] != _map)
262 goto unlock; 264 goto unlock;
263 265
264 map->prog = prog; 266 map->prog = prog;
265 prog->aux->cgroup_storage = _map; 267 prog->aux->cgroup_storage[stype] = _map;
266 ret = 0; 268 ret = 0;
267unlock: 269unlock:
268 spin_unlock_bh(&map->lock); 270 spin_unlock_bh(&map->lock);
@@ -272,24 +274,26 @@ unlock:
272 274
273void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *_map) 275void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *_map)
274{ 276{
277 enum bpf_cgroup_storage_type stype = cgroup_storage_type(_map);
275 struct bpf_cgroup_storage_map *map = map_to_storage(_map); 278 struct bpf_cgroup_storage_map *map = map_to_storage(_map);
276 279
277 spin_lock_bh(&map->lock); 280 spin_lock_bh(&map->lock);
278 if (map->prog == prog) { 281 if (map->prog == prog) {
279 WARN_ON(prog->aux->cgroup_storage != _map); 282 WARN_ON(prog->aux->cgroup_storage[stype] != _map);
280 map->prog = NULL; 283 map->prog = NULL;
281 prog->aux->cgroup_storage = NULL; 284 prog->aux->cgroup_storage[stype] = NULL;
282 } 285 }
283 spin_unlock_bh(&map->lock); 286 spin_unlock_bh(&map->lock);
284} 287}
285 288
286struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog) 289struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
290 enum bpf_cgroup_storage_type stype)
287{ 291{
288 struct bpf_cgroup_storage *storage; 292 struct bpf_cgroup_storage *storage;
289 struct bpf_map *map; 293 struct bpf_map *map;
290 u32 pages; 294 u32 pages;
291 295
292 map = prog->aux->cgroup_storage; 296 map = prog->aux->cgroup_storage[stype];
293 if (!map) 297 if (!map)
294 return NULL; 298 return NULL;
295 299
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index b3c2d09bcf7a..8c91d2b41b1e 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -988,10 +988,15 @@ static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
988/* drop refcnt on maps used by eBPF program and free auxilary data */ 988/* drop refcnt on maps used by eBPF program and free auxilary data */
989static void free_used_maps(struct bpf_prog_aux *aux) 989static void free_used_maps(struct bpf_prog_aux *aux)
990{ 990{
991 enum bpf_cgroup_storage_type stype;
991 int i; 992 int i;
992 993
993 if (aux->cgroup_storage) 994 for_each_cgroup_storage_type(stype) {
994 bpf_cgroup_storage_release(aux->prog, aux->cgroup_storage); 995 if (!aux->cgroup_storage[stype])
996 continue;
997 bpf_cgroup_storage_release(aux->prog,
998 aux->cgroup_storage[stype]);
999 }
995 1000
996 for (i = 0; i < aux->used_map_cnt; i++) 1001 for (i = 0; i < aux->used_map_cnt; i++)
997 bpf_map_put(aux->used_maps[i]); 1002 bpf_map_put(aux->used_maps[i]);
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index e986518d7bc3..e90899df585d 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5171,11 +5171,15 @@ next_insn:
5171/* drop refcnt of maps used by the rejected program */ 5171/* drop refcnt of maps used by the rejected program */
5172static void release_maps(struct bpf_verifier_env *env) 5172static void release_maps(struct bpf_verifier_env *env)
5173{ 5173{
5174 enum bpf_cgroup_storage_type stype;
5174 int i; 5175 int i;
5175 5176
5176 if (env->prog->aux->cgroup_storage) 5177 for_each_cgroup_storage_type(stype) {
5178 if (!env->prog->aux->cgroup_storage[stype])
5179 continue;
5177 bpf_cgroup_storage_release(env->prog, 5180 bpf_cgroup_storage_release(env->prog,
5178 env->prog->aux->cgroup_storage); 5181 env->prog->aux->cgroup_storage[stype]);
5182 }
5179 5183
5180 for (i = 0; i < env->used_map_cnt; i++) 5184 for (i = 0; i < env->used_map_cnt; i++)
5181 bpf_map_put(env->used_maps[i]); 5185 bpf_map_put(env->used_maps[i]);
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index f4078830ea50..0c423b8cd75c 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -12,7 +12,7 @@
12#include <linux/sched/signal.h> 12#include <linux/sched/signal.h>
13 13
14static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx, 14static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
15 struct bpf_cgroup_storage *storage) 15 struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
16{ 16{
17 u32 ret; 17 u32 ret;
18 18
@@ -28,13 +28,20 @@ static __always_inline u32 bpf_test_run_one(struct bpf_prog *prog, void *ctx,
28 28
29static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time) 29static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
30{ 30{
31 struct bpf_cgroup_storage *storage = NULL; 31 struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { 0 };
32 enum bpf_cgroup_storage_type stype;
32 u64 time_start, time_spent = 0; 33 u64 time_start, time_spent = 0;
33 u32 ret = 0, i; 34 u32 ret = 0, i;
34 35
35 storage = bpf_cgroup_storage_alloc(prog); 36 for_each_cgroup_storage_type(stype) {
36 if (IS_ERR(storage)) 37 storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
37 return PTR_ERR(storage); 38 if (IS_ERR(storage[stype])) {
39 storage[stype] = NULL;
40 for_each_cgroup_storage_type(stype)
41 bpf_cgroup_storage_free(storage[stype]);
42 return -ENOMEM;
43 }
44 }
38 45
39 if (!repeat) 46 if (!repeat)
40 repeat = 1; 47 repeat = 1;
@@ -53,7 +60,8 @@ static u32 bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *time)
53 do_div(time_spent, repeat); 60 do_div(time_spent, repeat);
54 *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent; 61 *time = time_spent > U32_MAX ? U32_MAX : (u32)time_spent;
55 62
56 bpf_cgroup_storage_free(storage); 63 for_each_cgroup_storage_type(stype)
64 bpf_cgroup_storage_free(storage[stype]);
57 65
58 return ret; 66 return ret;
59} 67}