Diffstat (limited to 'include/linux/bpf.h')
 include/linux/bpf.h | 25 +++++++++++++++++++++----
 1 file changed, 21 insertions(+), 4 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 0de4de6dd43e..8411032ac90d 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -11,14 +11,17 @@
 #include <linux/workqueue.h>
 #include <linux/file.h>
 #include <linux/percpu.h>
+#include <linux/err.h>
 
+struct perf_event;
 struct bpf_map;
 
 /* map is generic key/value storage optionally accesible by eBPF programs */
 struct bpf_map_ops {
 	/* funcs callable from userspace (via syscall) */
 	struct bpf_map *(*map_alloc)(union bpf_attr *attr);
-	void (*map_free)(struct bpf_map *);
+	void (*map_release)(struct bpf_map *map, struct file *map_file);
+	void (*map_free)(struct bpf_map *map);
 	int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
 
 	/* funcs callable from userspace and from eBPF programs */
@@ -27,8 +30,9 @@ struct bpf_map_ops {
 	int (*map_delete_elem)(struct bpf_map *map, void *key);
 
 	/* funcs called by prog_array and perf_event_array map */
-	void *(*map_fd_get_ptr) (struct bpf_map *map, int fd);
-	void (*map_fd_put_ptr) (void *ptr);
+	void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
+				int fd);
+	void (*map_fd_put_ptr)(void *ptr);
 };
 
 struct bpf_map {
@@ -189,11 +193,19 @@ struct bpf_array {
 		void __percpu *pptrs[0] __aligned(8);
 	};
 };
+
 #define MAX_TAIL_CALL_CNT 32
 
+struct bpf_event_entry {
+	struct perf_event *event;
+	struct file *perf_file;
+	struct file *map_file;
+	struct rcu_head rcu;
+};
+
 u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
 u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
-void bpf_fd_array_map_clear(struct bpf_map *map);
+
 bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
 
 const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
@@ -231,8 +243,13 @@ int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
 			   u64 flags);
 int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
 			    u64 flags);
+
 int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
 
+int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
+				 void *key, void *value, u64 map_flags);
+void bpf_fd_array_map_clear(struct bpf_map *map);
+
 /* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
  * forced to use 'long' read/writes to try to atomically copy long counters.
  * Best-effort only. No barriers here, since it _will_ race with concurrent
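
For context, a minimal userspace sketch of the path this change serves: placing a
perf event fd into a slot of a BPF_MAP_TYPE_PERF_EVENT_ARRAY map via bpf(2). It is
not part of this patch and the wrapper/function names are hypothetical; on the
kernel side such an update is routed through bpf_fd_array_map_update_elem() and
map_fd_get_ptr(), which now also receive the map's struct file so the resulting
bpf_event_entry can record the map_file it belongs to.

/* Hypothetical userspace sketch, not part of this patch. */
#include <linux/bpf.h>
#include <stdint.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Thin wrapper around the BPF_MAP_UPDATE_ELEM command of bpf(2). */
static int bpf_map_update(int map_fd, const void *key, const void *value,
			  __u64 flags)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_fd = map_fd;
	attr.key    = (uintptr_t)key;
	attr.value  = (uintptr_t)value;
	attr.flags  = flags;

	return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}

/*
 * Store a perf event fd at slot 'cpu' of a perf event array map; the
 * kernel resolves the fd through map_fd_get_ptr() and keeps the
 * resulting entry until the slot is overwritten/deleted or the map
 * file is released.
 */
static int store_perf_event_fd(int map_fd, int cpu, int perf_event_fd)
{
	__u32 key = cpu, value = perf_event_fd;

	return bpf_map_update(map_fd, &key, &value, BPF_ANY);
}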