Diffstat (limited to 'include/linux/bpf.h'):
 include/linux/bpf.h | 79 +++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 67 insertions(+), 12 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 8827e797ff97..523481a3471b 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -23,7 +23,7 @@ struct bpf_prog;
 struct bpf_map;
 struct sock;
 struct seq_file;
-struct btf;
+struct btf_type;
 
 /* map is generic key/value storage optionally accessible by eBPF programs */
 struct bpf_map_ops {
@@ -48,8 +48,9 @@ struct bpf_map_ops {
 	u32 (*map_fd_sys_lookup_elem)(void *ptr);
 	void (*map_seq_show_elem)(struct bpf_map *map, void *key,
 				  struct seq_file *m);
-	int (*map_check_btf)(const struct bpf_map *map, const struct btf *btf,
-			     u32 key_type_id, u32 value_type_id);
+	int (*map_check_btf)(const struct bpf_map *map,
+			     const struct btf_type *key_type,
+			     const struct btf_type *value_type);
 };
 
 struct bpf_map {
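The callback now receives resolved BTF types instead of raw type IDs, so a map implementation can inspect the key/value layout directly. A minimal sketch of what an implementation might look like under the new signature; the function name and the particular size check are illustrative, not taken from this patch:

    /* Illustrative only: accept nothing but a plain 32-bit integer key.
     * BTF_INFO_KIND()/BTF_KIND_INT come from <linux/btf.h>; for an INT
     * kind, btf_type::size holds the integer's byte size.
     */
    static int example_map_check_btf(const struct bpf_map *map,
				     const struct btf_type *key_type,
				     const struct btf_type *value_type)
    {
	    if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT ||
		key_type->size != sizeof(u32))
		    return -EINVAL;
	    return 0;
    }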
@@ -85,6 +86,7 @@ struct bpf_map {
 	char name[BPF_OBJ_NAME_LEN];
 };
 
+struct bpf_offload_dev;
 struct bpf_offloaded_map;
 
 struct bpf_map_dev_ops {
@@ -117,9 +119,13 @@ static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
 
 static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
 {
-	return map->ops->map_seq_show_elem && map->ops->map_check_btf;
+	return map->btf && map->ops->map_seq_show_elem;
 }
 
+int map_check_no_btf(const struct bpf_map *map,
+		     const struct btf_type *key_type,
+		     const struct btf_type *value_type);
+
 extern const struct bpf_map_ops bpf_map_offload_ops;
 
 /* function argument constraints */
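Two related changes here: bpf_map_support_seq_show() now keys off whether BTF was actually attached (map->btf) rather than the mere presence of a map_check_btf callback, and map_check_no_btf() gives map types that cannot describe their layout via BTF a common callback that rejects BTF attachment. A hedged sketch of how a map type might wire it up; the ops table and the alloc/free names are illustrative:

    /* Illustrative ops table: this map type opts out of BTF, so creating
     * it with BTF key/value type IDs is expected to fail. example_map_alloc
     * and example_map_free are hypothetical placeholders.
     */
    const struct bpf_map_ops example_map_ops = {
	    .map_alloc	   = example_map_alloc,
	    .map_free	   = example_map_free,
	    .map_check_btf = map_check_no_btf,
    };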
@@ -154,6 +160,7 @@ enum bpf_arg_type {
 enum bpf_return_type {
 	RET_INTEGER,			/* function returns integer */
 	RET_VOID,			/* function doesn't return anything */
+	RET_PTR_TO_MAP_VALUE,		/* returns a pointer to map elem value */
 	RET_PTR_TO_MAP_VALUE_OR_NULL,	/* returns a pointer to map elem value or NULL */
 };
 
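RET_PTR_TO_MAP_VALUE lets the verifier treat a helper's return value as a pointer into a map value that is known to be non-NULL, so programs can dereference it without a NULL check. A sketch of a helper prototype that could use it; the helper name is hypothetical, the field layout follows struct bpf_func_proto:

    /* Illustrative prototype: the verifier types the return value as
     * PTR_TO_MAP_VALUE. example_get_storage is a hypothetical helper body.
     */
    static const struct bpf_func_proto example_get_storage_proto = {
	    .func		= example_get_storage,
	    .gpl_only	= false,
	    .ret_type	= RET_PTR_TO_MAP_VALUE,
	    .arg1_type	= ARG_CONST_MAP_PTR,
	    .arg2_type	= ARG_ANYTHING,
    };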
@@ -281,6 +288,7 @@ struct bpf_prog_aux {
 	struct bpf_prog *prog;
 	struct user_struct *user;
 	u64 load_time; /* ns since boottime */
+	struct bpf_map *cgroup_storage;
 	char name[BPF_OBJ_NAME_LEN];
 #ifdef CONFIG_SECURITY
 	void *security;
@@ -347,12 +355,17 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
  * The 'struct bpf_prog_array *' should only be replaced with xchg()
  * since other cpus are walking the array of pointers in parallel.
  */
+struct bpf_prog_array_item {
+	struct bpf_prog *prog;
+	struct bpf_cgroup_storage *cgroup_storage;
+};
+
 struct bpf_prog_array {
 	struct rcu_head rcu;
-	struct bpf_prog *progs[0];
+	struct bpf_prog_array_item items[0];
 };
 
-struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
+struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
 void bpf_prog_array_free(struct bpf_prog_array __rcu *progs);
 int bpf_prog_array_length(struct bpf_prog_array __rcu *progs);
 int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
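With the element type widened from a bare prog pointer to a two-field item, code that sizes or walks the array goes through items[] instead of progs[]. A sketch of how an allocation might be sized for cnt programs plus the terminator; this assumes the allocator keeps a kzalloc-based scheme, which is not shown in this hunk:

    /* Illustrative sizing: one extra, zeroed item serves as the NULL
     * sentinel that the run-array macro's while loop stops on.
     */
    static struct bpf_prog_array *example_prog_array_alloc(u32 cnt, gfp_t flags)
    {
	    return kzalloc(offsetof(struct bpf_prog_array, items[cnt + 1]),
			   flags);
    }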
@@ -370,7 +383,8 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
 
 #define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null)	\
 	({						\
-		struct bpf_prog **_prog, *__prog;	\
+		struct bpf_prog_array_item *_item;	\
+		struct bpf_prog *_prog;			\
 		struct bpf_prog_array *_array;		\
 		u32 _ret = 1;				\
 		preempt_disable();			\
@@ -378,10 +392,11 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
 		_array = rcu_dereference(array);	\
 		if (unlikely(check_non_null && !_array))\
 			goto _out;			\
-		_prog = _array->progs;			\
-		while ((__prog = READ_ONCE(*_prog))) {	\
-			_ret &= func(__prog, ctx);	\
-			_prog++;			\
+		_item = &_array->items[0];		\
+		while ((_prog = READ_ONCE(_item->prog))) {		\
+			bpf_cgroup_storage_set(_item->cgroup_storage);	\
+			_ret &= func(_prog, ctx);	\
+			_item++;			\
 		}					\
 _out:							\
 		rcu_read_unlock();		\
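Callers are unchanged: the wrapper macros defined alongside this one in the header still expand to it, but the loop now also publishes each program's cgroup storage pointer via bpf_cgroup_storage_set() before the program runs, so the bpf_get_local_storage() helper can find it. A hedged usage sketch; the cgroup array expression is illustrative of a typical cgroup-BPF call site, not quoted from this patch:

    /* Illustrative call site: run every attached program against skb and
     * AND their return codes together; _ret == 1 means all programs passed.
     */
    ret = BPF_PROG_RUN_ARRAY(cgrp->bpf.effective[type], skb, BPF_PROG_RUN);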
@@ -434,6 +449,8 @@ struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
 void bpf_map_put_with_uref(struct bpf_map *map);
 void bpf_map_put(struct bpf_map *map);
 int bpf_map_precharge_memlock(u32 pages);
+int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
+void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
 void *bpf_map_area_alloc(size_t size, int numa_node);
 void bpf_map_area_free(void *base);
 void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
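bpf_map_precharge_memlock() only checks the RLIMIT_MEMLOCK budget up front; the new charge/uncharge pair adjusts the accounting on a live map, which matters for maps that allocate memory after creation. A hedged sketch of a grow path; the function and the allocation helper are hypothetical:

    /* Illustrative grow path: charge the extra pages against the map
     * owner's RLIMIT_MEMLOCK first, and back the charge out on failure.
     */
    static int example_map_grow(struct bpf_map *map, u32 extra_pages)
    {
	    int err;

	    err = bpf_map_charge_memlock(map, extra_pages);
	    if (err)
		    return err;

	    if (!example_alloc_more(map, extra_pages)) {	/* hypothetical */
		    bpf_map_uncharge_memlock(map, extra_pages);
		    return -ENOMEM;
	    }
	    return 0;
    }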
@@ -512,6 +529,7 @@ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
 }
 
 struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
+int array_map_alloc_check(union bpf_attr *attr);
 
 #else /* !CONFIG_BPF_SYSCALL */
 static inline struct bpf_prog *bpf_prog_get(u32 ufd)
@@ -648,7 +666,15 @@ int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
 int bpf_map_offload_get_next_key(struct bpf_map *map,
 				 void *key, void *next_key);
 
-bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map);
+bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
+
+struct bpf_offload_dev *bpf_offload_dev_create(void);
+void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
+int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
+				    struct net_device *netdev);
+void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
+				       struct net_device *netdev);
+bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
 
 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
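The old prog-vs-map check keeps its semantics under the new name bpf_offload_prog_map_match(), while bpf_offload_dev_match() is repurposed for the new bpf_offload_dev object, which lets one offload device span several netdevs. A hedged sketch of how a driver might use the lifecycle calls; the driver structure is hypothetical, and the sketch assumes the constructor follows the usual ERR_PTR convention:

    /* Illustrative driver glue: one offload device per ASIC, with each
     * of its netdevs attached to it.
     */
    static int example_probe(struct example_dev *dev)
    {
	    dev->bpf_dev = bpf_offload_dev_create();
	    if (IS_ERR(dev->bpf_dev))
		    return PTR_ERR(dev->bpf_dev);

	    return bpf_offload_dev_netdev_register(dev->bpf_dev, dev->netdev);
    }

    static void example_remove(struct example_dev *dev)
    {
	    bpf_offload_dev_netdev_unregister(dev->bpf_dev, dev->netdev);
	    bpf_offload_dev_destroy(dev->bpf_dev);
    }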
@@ -749,6 +775,33 @@ static inline void __xsk_map_flush(struct bpf_map *map)
 }
 #endif
 
+#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
+void bpf_sk_reuseport_detach(struct sock *sk);
+int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
+				       void *value);
+int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
+				       void *value, u64 map_flags);
+#else
+static inline void bpf_sk_reuseport_detach(struct sock *sk)
+{
+}
+
+#ifdef CONFIG_BPF_SYSCALL
+static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
+						     void *key, void *value)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
+						     void *key, void *value,
+						     u64 map_flags)
+{
+	return -EOPNOTSUPP;
+}
+#endif /* CONFIG_BPF_SYSCALL */
+#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */
+
 /* verifier prototypes for helper functions called from eBPF programs */
 extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
 extern const struct bpf_func_proto bpf_map_update_elem_proto;
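These hooks back a socket-array map for SO_REUSEPORT groups: the *_fd_* helpers let the syscall path translate a socket fd passed in *value into a sock reference, and bpf_sk_reuseport_detach() drops that reference when the socket goes away. Note the nested #ifdef: with CONFIG_BPF_SYSCALL but without CONFIG_INET, the fd helpers still exist and fail with -EOPNOTSUPP. A hedged sketch of the user-space side; it assumes the BPF_MAP_TYPE_REUSEPORT_SOCKARRAY map type from the same series and libbpf's bpf_map_update_elem() wrapper:

    /* Illustrative user-space update: store a listening SO_REUSEPORT
     * socket into slot 0 of a reuseport sockarray map by its fd.
     */
    __u32 key = 0;
    __u64 value = sk_fd;		/* fd of the SO_REUSEPORT socket */

    if (bpf_map_update_elem(map_fd, &key, &value, BPF_ANY))
	    perror("bpf_map_update_elem");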
@@ -768,6 +821,8 @@ extern const struct bpf_func_proto bpf_sock_map_update_proto;
 extern const struct bpf_func_proto bpf_sock_hash_update_proto;
 extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
 
+extern const struct bpf_func_proto bpf_get_local_storage_proto;
+
 /* Shared helpers among cBPF and eBPF. */
 void bpf_user_rnd_init_once(void);
 u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
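bpf_get_local_storage() is the program-facing end of the cgroup_storage plumbing above: the run-array macro sets the per-program storage pointer, and the helper hands it to the program. A hedged sketch of a cgroup BPF program using it; it assumes the BPF_MAP_TYPE_CGROUP_STORAGE map type from the same series and the old-style libbpf map/helper declarations of that era:

    /* Illustrative BPF program: count packets in per-cgroup storage.
     * No NULL check is needed on the helper's return value.
     */
    static void *(*bpf_get_local_storage)(void *map, __u64 flags) =
	    (void *) BPF_FUNC_get_local_storage;

    struct bpf_map_def SEC("maps") counter = {
	    .type	= BPF_MAP_TYPE_CGROUP_STORAGE,
	    .key_size	= sizeof(struct bpf_cgroup_storage_key),
	    .value_size	= sizeof(__u64),
    };

    SEC("cgroup/skb")
    int count_pkts(struct __sk_buff *skb)
    {
	    __u64 *cnt = bpf_get_local_storage(&counter, 0);

	    __sync_fetch_and_add(cnt, 1);
	    return 1;			/* allow the packet */
    }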