Diffstat (limited to 'include/linux/bpf.h')
-rw-r--r--  include/linux/bpf.h | 94
1 file changed, 88 insertions(+), 6 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 0b25cf87b6d6..66df387106de 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -17,6 +17,7 @@
 #include <linux/numa.h>
 #include <linux/wait.h>
 
+struct bpf_verifier_env;
 struct perf_event;
 struct bpf_prog;
 struct bpf_map;
@@ -24,6 +25,7 @@ struct bpf_map;
 /* map is generic key/value storage optionally accesible by eBPF programs */
 struct bpf_map_ops {
 	/* funcs callable from userspace (via syscall) */
+	int (*map_alloc_check)(union bpf_attr *attr);
 	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
 	void (*map_release)(struct bpf_map *map, struct file *map_file);
 	void (*map_free)(struct bpf_map *map);
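The new map_alloc_check callback gives a map type a place to validate the requested attributes before any allocation work is done. As a rough sketch only (the example_* names are hypothetical, not part of this patch, and assume this patched <linux/bpf.h>):

/* Hypothetical sketch: reject unsupported attributes up front. */
static int example_map_alloc_check(union bpf_attr *attr)
{
	if (attr->max_entries == 0 || attr->key_size != sizeof(u32))
		return -EINVAL;
	return 0;
}

static const struct bpf_map_ops example_map_ops = {
	.map_alloc_check	= example_map_alloc_check,
	/* .map_alloc, .map_free, ... as for any other map type */
};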
@@ -72,6 +74,33 @@ struct bpf_map {
 	char name[BPF_OBJ_NAME_LEN];
 };
 
+struct bpf_offloaded_map;
+
+struct bpf_map_dev_ops {
+	int (*map_get_next_key)(struct bpf_offloaded_map *map,
+				void *key, void *next_key);
+	int (*map_lookup_elem)(struct bpf_offloaded_map *map,
+			       void *key, void *value);
+	int (*map_update_elem)(struct bpf_offloaded_map *map,
+			       void *key, void *value, u64 flags);
+	int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key);
+};
+
+struct bpf_offloaded_map {
+	struct bpf_map map;
+	struct net_device *netdev;
+	const struct bpf_map_dev_ops *dev_ops;
+	void *dev_priv;
+	struct list_head offloads;
+};
+
+static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map)
+{
+	return container_of(map, struct bpf_offloaded_map, map);
+}
+
+extern const struct bpf_map_ops bpf_map_offload_ops;
+
 /* function argument constraints */
 enum bpf_arg_type {
 	ARG_DONTCARE = 0,	/* unused argument in helper function */
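struct bpf_offloaded_map embeds a generic struct bpf_map, so core code holding a struct bpf_map pointer can recover the driver's view with map_to_offmap() and dispatch through the driver-supplied bpf_map_dev_ops. A rough driver-side sketch under that assumption (all example_* names are hypothetical):

/* Hypothetical driver callbacks for an offloaded map. */
static int example_dev_map_lookup(struct bpf_offloaded_map *offmap,
				  void *key, void *value)
{
	/* offmap->dev_priv would hold the device-specific state */
	return -EOPNOTSUPP;
}

static const struct bpf_map_dev_ops example_dev_map_ops = {
	.map_lookup_elem	= example_dev_map_lookup,
	/* .map_get_next_key, .map_update_elem, .map_delete_elem ... */
};

/* Illustrative dispatch from a generic map pointer: */
static int example_offload_lookup(struct bpf_map *map, void *key, void *value)
{
	struct bpf_offloaded_map *offmap = map_to_offmap(map);

	return offmap->dev_ops->map_lookup_elem(offmap, key, value);
}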
@@ -193,14 +222,20 @@ struct bpf_verifier_ops {
 				  struct bpf_prog *prog, u32 *target_size);
 };
 
-struct bpf_dev_offload {
+struct bpf_prog_offload_ops {
+	int (*insn_hook)(struct bpf_verifier_env *env,
+			 int insn_idx, int prev_insn_idx);
+};
+
+struct bpf_prog_offload {
 	struct bpf_prog *prog;
 	struct net_device *netdev;
 	void *dev_priv;
 	struct list_head offloads;
 	bool dev_state;
-	bool verifier_running;
-	wait_queue_head_t verifier_done;
+	const struct bpf_prog_offload_ops *dev_ops;
+	void *jited_image;
+	u32 jited_len;
 };
 
 struct bpf_prog_aux {
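The renamed struct bpf_prog_offload drops the verifier_running/verifier_done wait-queue handshake in favour of a callback table; insn_hook lets the offloading driver see each instruction as the verifier processes it. A minimal hypothetical sketch (example_* names are illustrative only):

/* Hypothetical per-instruction verifier callback. */
static int example_insn_hook(struct bpf_verifier_env *env,
			     int insn_idx, int prev_insn_idx)
{
	/* e.g. record or translate the instruction at insn_idx */
	return 0;	/* returning an error fails verification */
}

static const struct bpf_prog_offload_ops example_prog_offload_ops = {
	.insn_hook	= example_insn_hook,
};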
@@ -209,6 +244,10 @@ struct bpf_prog_aux {
 	u32 max_ctx_offset;
 	u32 stack_depth;
 	u32 id;
+	u32 func_cnt;
+	bool offload_requested;
+	struct bpf_prog **func;
+	void *jit_data; /* JIT specific data. arch dependent */
 	struct latch_tree_node ksym_tnode;
 	struct list_head ksym_lnode;
 	const struct bpf_prog_ops *ops;
@@ -220,7 +259,7 @@ struct bpf_prog_aux {
 #ifdef CONFIG_SECURITY
 	void *security;
 #endif
-	struct bpf_dev_offload *offload;
+	struct bpf_prog_offload *offload;
 	union {
 		struct work_struct work;
 		struct rcu_head rcu;
@@ -295,6 +334,9 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
 
 void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs,
 				struct bpf_prog *old_prog);
+int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array,
+			     __u32 __user *prog_ids, u32 request_cnt,
+			     __u32 __user *prog_cnt);
 int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
 			struct bpf_prog *exclude_prog,
 			struct bpf_prog *include_prog,
@@ -355,6 +397,9 @@ void bpf_prog_put(struct bpf_prog *prog);
 int __bpf_prog_charge(struct user_struct *user, u32 pages);
 void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
 
+void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
+void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
+
 struct bpf_map *bpf_map_get_with_uref(u32 ufd);
 struct bpf_map *__bpf_map_get(struct fd f);
 struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
@@ -363,6 +408,7 @@ void bpf_map_put(struct bpf_map *map);
 int bpf_map_precharge_memlock(u32 pages);
 void *bpf_map_area_alloc(size_t size, int numa_node);
 void bpf_map_area_free(void *base);
+void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
 
 extern int sysctl_unprivileged_bpf_disabled;
 
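A helper like bpf_map_init_from_attr() lets a map_alloc implementation copy the common attributes (key size, value size, max_entries, flags, NUMA node) in one place instead of open-coding it. A rough sketch under that assumption, with a hypothetical example_map type:

struct example_map {
	struct bpf_map map;	/* embedded generic map */
	/* type-specific state would follow */
};

static struct bpf_map *example_map_alloc(union bpf_attr *attr)
{
	struct example_map *emap;

	emap = kzalloc(sizeof(*emap), GFP_USER);
	if (!emap)
		return ERR_PTR(-ENOMEM);

	/* copy key_size, value_size, max_entries, ... from the syscall attrs */
	bpf_map_init_from_attr(&emap->map, attr);
	return &emap->map;
}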
@@ -409,6 +455,7 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
 
 /* verify correctness of eBPF program */
 int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
+void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
 
 /* Map specifics */
 struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
@@ -536,14 +583,35 @@ bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool);
 
 int bpf_prog_offload_compile(struct bpf_prog *prog);
 void bpf_prog_offload_destroy(struct bpf_prog *prog);
+int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
+			       struct bpf_prog *prog);
+
+int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map);
+
+int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value);
+int bpf_map_offload_update_elem(struct bpf_map *map,
+				void *key, void *value, u64 flags);
+int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
+int bpf_map_offload_get_next_key(struct bpf_map *map,
+				 void *key, void *next_key);
+
+bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map);
 
 #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
 int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
 
 static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
 {
-	return aux->offload;
+	return aux->offload_requested;
 }
+
+static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
+{
+	return unlikely(map->ops == &bpf_map_offload_ops);
+}
+
+struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr);
+void bpf_map_offload_map_free(struct bpf_map *map);
 #else
 static inline int bpf_prog_offload_init(struct bpf_prog *prog,
 					union bpf_attr *attr)
@@ -555,9 +623,23 @@ static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux)
 {
 	return false;
 }
+
+static inline bool bpf_map_is_dev_bound(struct bpf_map *map)
+{
+	return false;
+}
+
+static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr)
+{
+	return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void bpf_map_offload_map_free(struct bpf_map *map)
+{
+}
 #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
 
-#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL)
+#if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET)
 struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
 int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
 #else
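With bpf_map_is_dev_bound() and the stubs above, generic code can route element accesses to the offload helpers without sprinkling #ifdefs. A simplified sketch of such a dispatch (not the exact syscall path; example_* is hypothetical and assumes this patched <linux/bpf.h>):

/* Hypothetical dispatch: device-bound maps go through the offload
 * helpers, which compile away to the stubs when offload is not built in.
 */
static int example_lookup_elem(struct bpf_map *map, void *key, void *value)
{
	if (bpf_map_is_dev_bound(map))
		return bpf_map_offload_lookup_elem(map, key, value);

	/* ... otherwise use the normal in-kernel lookup path ... */
	return -ENOENT;
}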