diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2016-12-23 19:49:12 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-12-23 19:49:12 -0500 |
commit | 00198dab3b825ab264424a052beea5acb859754f (patch) | |
tree | 1afa5fc96a0447bc8049a9992e5d4d047f5f0b38 /tools/include | |
parent | 9004fda59577d439564d44d6d1db52d262fe3f99 (diff) | |
parent | 3705b97505bcbf6440f38119c0e7d6058f585b54 (diff) |
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf fixes from Ingo Molnar:
"On the kernel side there's two x86 PMU driver fixes and a uprobes fix,
plus on the tooling side there's a number of fixes and some late
updates"
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (36 commits)
perf sched timehist: Fix invalid period calculation
perf sched timehist: Remove hardcoded 'comm_width' check at print_summary
perf sched timehist: Enlarge default 'comm_width'
perf sched timehist: Honour 'comm_width' when aligning the headers
perf/x86: Fix overlap counter scheduling bug
perf/x86/pebs: Fix handling of PEBS buffer overflows
samples/bpf: Move open_raw_sock to separate header
samples/bpf: Remove perf_event_open() declaration
samples/bpf: Be consistent with bpf_load_program bpf_insn parameter
tools lib bpf: Add bpf_prog_{attach,detach}
samples/bpf: Switch over to libbpf
perf diff: Do not overwrite valid build id
perf annotate: Don't throw error for zero length symbols
perf bench futex: Fix lock-pi help string
perf trace: Check if MAP_32BIT is defined (again)
samples/bpf: Make perf_event_read() static
uprobes: Fix uprobes on MIPS, allow for a cache flush after ixol breakpoint creation
samples/bpf: Make samples more libbpf-centric
tools lib bpf: Add flags to bpf_create_map()
tools lib bpf: use __u32 from linux/types.h
...
Diffstat (limited to 'tools/include')
-rw-r--r-- | tools/include/uapi/linux/bpf.h | 593 |
1 file changed, 364 insertions, 229 deletions
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 9e5fc168c8a3..0eb0e87dbe9f 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h | |||
@@ -73,6 +73,8 @@ enum bpf_cmd { | |||
73 | BPF_PROG_LOAD, | 73 | BPF_PROG_LOAD, |
74 | BPF_OBJ_PIN, | 74 | BPF_OBJ_PIN, |
75 | BPF_OBJ_GET, | 75 | BPF_OBJ_GET, |
76 | BPF_PROG_ATTACH, | ||
77 | BPF_PROG_DETACH, | ||
76 | }; | 78 | }; |
77 | 79 | ||
78 | enum bpf_map_type { | 80 | enum bpf_map_type { |
@@ -85,6 +87,8 @@ enum bpf_map_type { | |||
85 | BPF_MAP_TYPE_PERCPU_ARRAY, | 87 | BPF_MAP_TYPE_PERCPU_ARRAY, |
86 | BPF_MAP_TYPE_STACK_TRACE, | 88 | BPF_MAP_TYPE_STACK_TRACE, |
87 | BPF_MAP_TYPE_CGROUP_ARRAY, | 89 | BPF_MAP_TYPE_CGROUP_ARRAY, |
90 | BPF_MAP_TYPE_LRU_HASH, | ||
91 | BPF_MAP_TYPE_LRU_PERCPU_HASH, | ||
88 | }; | 92 | }; |
89 | 93 | ||
90 | enum bpf_prog_type { | 94 | enum bpf_prog_type { |
@@ -95,8 +99,23 @@ enum bpf_prog_type { | |||
95 | BPF_PROG_TYPE_SCHED_ACT, | 99 | BPF_PROG_TYPE_SCHED_ACT, |
96 | BPF_PROG_TYPE_TRACEPOINT, | 100 | BPF_PROG_TYPE_TRACEPOINT, |
97 | BPF_PROG_TYPE_XDP, | 101 | BPF_PROG_TYPE_XDP, |
102 | BPF_PROG_TYPE_PERF_EVENT, | ||
103 | BPF_PROG_TYPE_CGROUP_SKB, | ||
104 | BPF_PROG_TYPE_CGROUP_SOCK, | ||
105 | BPF_PROG_TYPE_LWT_IN, | ||
106 | BPF_PROG_TYPE_LWT_OUT, | ||
107 | BPF_PROG_TYPE_LWT_XMIT, | ||
98 | }; | 108 | }; |
99 | 109 | ||
110 | enum bpf_attach_type { | ||
111 | BPF_CGROUP_INET_INGRESS, | ||
112 | BPF_CGROUP_INET_EGRESS, | ||
113 | BPF_CGROUP_INET_SOCK_CREATE, | ||
114 | __MAX_BPF_ATTACH_TYPE | ||
115 | }; | ||
116 | |||
117 | #define MAX_BPF_ATTACH_TYPE __MAX_BPF_ATTACH_TYPE | ||
118 | |||
100 | #define BPF_PSEUDO_MAP_FD 1 | 119 | #define BPF_PSEUDO_MAP_FD 1 |
101 | 120 | ||
102 | /* flags for BPF_MAP_UPDATE_ELEM command */ | 121 | /* flags for BPF_MAP_UPDATE_ELEM command */ |
@@ -105,6 +124,13 @@ enum bpf_prog_type { | |||
105 | #define BPF_EXIST 2 /* update existing element */ | 124 | #define BPF_EXIST 2 /* update existing element */ |
106 | 125 | ||
107 | #define BPF_F_NO_PREALLOC (1U << 0) | 126 | #define BPF_F_NO_PREALLOC (1U << 0) |
127 | /* Instead of having one common LRU list in the | ||
128 | * BPF_MAP_TYPE_LRU_[PERCPU_]HASH map, use a percpu LRU list | ||
129 | * which can scale and perform better. | ||
130 | * Note, the LRU nodes (including free nodes) cannot be moved | ||
131 | * across different LRU lists. | ||
132 | */ | ||
133 | #define BPF_F_NO_COMMON_LRU (1U << 1) | ||
108 | 134 | ||
109 | union bpf_attr { | 135 | union bpf_attr { |
110 | struct { /* anonymous struct used by BPF_MAP_CREATE command */ | 136 | struct { /* anonymous struct used by BPF_MAP_CREATE command */ |
@@ -140,243 +166,327 @@ union bpf_attr { | |||
140 | __aligned_u64 pathname; | 166 | __aligned_u64 pathname; |
141 | __u32 bpf_fd; | 167 | __u32 bpf_fd; |
142 | }; | 168 | }; |
169 | |||
170 | struct { /* anonymous struct used by BPF_PROG_ATTACH/DETACH commands */ | ||
171 | __u32 target_fd; /* container object to attach to */ | ||
172 | __u32 attach_bpf_fd; /* eBPF program to attach */ | ||
173 | __u32 attach_type; | ||
174 | }; | ||
143 | } __attribute__((aligned(8))); | 175 | } __attribute__((aligned(8))); |
144 | 176 | ||
177 | /* BPF helper function descriptions: | ||
178 | * | ||
179 | * void *bpf_map_lookup_elem(&map, &key) | ||
180 | * Return: Map value or NULL | ||
181 | * | ||
182 | * int bpf_map_update_elem(&map, &key, &value, flags) | ||
183 | * Return: 0 on success or negative error | ||
184 | * | ||
185 | * int bpf_map_delete_elem(&map, &key) | ||
186 | * Return: 0 on success or negative error | ||
187 | * | ||
188 | * int bpf_probe_read(void *dst, int size, void *src) | ||
189 | * Return: 0 on success or negative error | ||
190 | * | ||
191 | * u64 bpf_ktime_get_ns(void) | ||
192 | * Return: current ktime | ||
193 | * | ||
194 | * int bpf_trace_printk(const char *fmt, int fmt_size, ...) | ||
195 | * Return: length of buffer written or negative error | ||
196 | * | ||
197 | * u32 bpf_prandom_u32(void) | ||
198 | * Return: random value | ||
199 | * | ||
200 | * u32 bpf_raw_smp_processor_id(void) | ||
201 | * Return: SMP processor ID | ||
202 | * | ||
203 | * int bpf_skb_store_bytes(skb, offset, from, len, flags) | ||
204 | * store bytes into packet | ||
205 | * @skb: pointer to skb | ||
206 | * @offset: offset within packet from skb->mac_header | ||
207 | * @from: pointer where to copy bytes from | ||
208 | * @len: number of bytes to store into packet | ||
209 | * @flags: bit 0 - if true, recompute skb->csum | ||
210 | * other bits - reserved | ||
211 | * Return: 0 on success or negative error | ||
212 | * | ||
213 | * int bpf_l3_csum_replace(skb, offset, from, to, flags) | ||
214 | * recompute IP checksum | ||
215 | * @skb: pointer to skb | ||
216 | * @offset: offset within packet where IP checksum is located | ||
217 | * @from: old value of header field | ||
218 | * @to: new value of header field | ||
219 | * @flags: bits 0-3 - size of header field | ||
220 | * other bits - reserved | ||
221 | * Return: 0 on success or negative error | ||
222 | * | ||
223 | * int bpf_l4_csum_replace(skb, offset, from, to, flags) | ||
224 | * recompute TCP/UDP checksum | ||
225 | * @skb: pointer to skb | ||
226 | * @offset: offset within packet where TCP/UDP checksum is located | ||
227 | * @from: old value of header field | ||
228 | * @to: new value of header field | ||
229 | * @flags: bits 0-3 - size of header field | ||
230 | * bit 4 - is pseudo header | ||
231 | * other bits - reserved | ||
232 | * Return: 0 on success or negative error | ||
233 | * | ||
234 | * int bpf_tail_call(ctx, prog_array_map, index) | ||
235 | * jump into another BPF program | ||
236 | * @ctx: context pointer passed to next program | ||
237 | * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY | ||
238 | * @index: index inside array that selects specific program to run | ||
239 | * Return: 0 on success or negative error | ||
240 | * | ||
241 | * int bpf_clone_redirect(skb, ifindex, flags) | ||
242 | * redirect to another netdev | ||
243 | * @skb: pointer to skb | ||
244 | * @ifindex: ifindex of the net device | ||
245 | * @flags: bit 0 - if set, redirect to ingress instead of egress | ||
246 | * other bits - reserved | ||
247 | * Return: 0 on success or negative error | ||
248 | * | ||
249 | * u64 bpf_get_current_pid_tgid(void) | ||
250 | * Return: current->tgid << 32 | current->pid | ||
251 | * | ||
252 | * u64 bpf_get_current_uid_gid(void) | ||
253 | * Return: current_gid << 32 | current_uid | ||
254 | * | ||
255 | * int bpf_get_current_comm(char *buf, int size_of_buf) | ||
256 | * stores current->comm into buf | ||
257 | * Return: 0 on success or negative error | ||
258 | * | ||
259 | * u32 bpf_get_cgroup_classid(skb) | ||
260 | * retrieve a proc's classid | ||
261 | * @skb: pointer to skb | ||
262 | * Return: classid if != 0 | ||
263 | * | ||
264 | * int bpf_skb_vlan_push(skb, vlan_proto, vlan_tci) | ||
265 | * Return: 0 on success or negative error | ||
266 | * | ||
267 | * int bpf_skb_vlan_pop(skb) | ||
268 | * Return: 0 on success or negative error | ||
269 | * | ||
270 | * int bpf_skb_get_tunnel_key(skb, key, size, flags) | ||
271 | * int bpf_skb_set_tunnel_key(skb, key, size, flags) | ||
272 | * retrieve or populate tunnel metadata | ||
273 | * @skb: pointer to skb | ||
274 | * @key: pointer to 'struct bpf_tunnel_key' | ||
275 | * @size: size of 'struct bpf_tunnel_key' | ||
276 | * @flags: room for future extensions | ||
277 | * Return: 0 on success or negative error | ||
278 | * | ||
279 | * u64 bpf_perf_event_read(&map, index) | ||
280 | * Return: Number events read or error code | ||
281 | * | ||
282 | * int bpf_redirect(ifindex, flags) | ||
283 | * redirect to another netdev | ||
284 | * @ifindex: ifindex of the net device | ||
285 | * @flags: bit 0 - if set, redirect to ingress instead of egress | ||
286 | * other bits - reserved | ||
287 | * Return: TC_ACT_REDIRECT | ||
288 | * | ||
289 | * u32 bpf_get_route_realm(skb) | ||
290 | * retrieve a dst's tclassid | ||
291 | * @skb: pointer to skb | ||
292 | * Return: realm if != 0 | ||
293 | * | ||
294 | * int bpf_perf_event_output(ctx, map, index, data, size) | ||
295 | * output perf raw sample | ||
296 | * @ctx: struct pt_regs* | ||
297 | * @map: pointer to perf_event_array map | ||
298 | * @index: index of event in the map | ||
299 | * @data: data on stack to be output as raw data | ||
300 | * @size: size of data | ||
301 | * Return: 0 on success or negative error | ||
302 | * | ||
303 | * int bpf_get_stackid(ctx, map, flags) | ||
304 | * walk user or kernel stack and return id | ||
305 | * @ctx: struct pt_regs* | ||
306 | * @map: pointer to stack_trace map | ||
307 | * @flags: bits 0-7 - number of stack frames to skip | ||
308 | * bit 8 - collect user stack instead of kernel | ||
309 | * bit 9 - compare stacks by hash only | ||
310 | * bit 10 - if two different stacks hash into the same stackid | ||
311 | * discard old | ||
312 | * other bits - reserved | ||
313 | * Return: >= 0 stackid on success or negative error | ||
314 | * | ||
315 | * s64 bpf_csum_diff(from, from_size, to, to_size, seed) | ||
316 | * calculate csum diff | ||
317 | * @from: raw from buffer | ||
318 | * @from_size: length of from buffer | ||
319 | * @to: raw to buffer | ||
320 | * @to_size: length of to buffer | ||
321 | * @seed: optional seed | ||
322 | * Return: csum result or negative error code | ||
323 | * | ||
324 | * int bpf_skb_get_tunnel_opt(skb, opt, size) | ||
325 | * retrieve tunnel options metadata | ||
326 | * @skb: pointer to skb | ||
327 | * @opt: pointer to raw tunnel option data | ||
328 | * @size: size of @opt | ||
329 | * Return: option size | ||
330 | * | ||
331 | * int bpf_skb_set_tunnel_opt(skb, opt, size) | ||
332 | * populate tunnel options metadata | ||
333 | * @skb: pointer to skb | ||
334 | * @opt: pointer to raw tunnel option data | ||
335 | * @size: size of @opt | ||
336 | * Return: 0 on success or negative error | ||
337 | * | ||
338 | * int bpf_skb_change_proto(skb, proto, flags) | ||
339 | * Change protocol of the skb. Currently supported is v4 -> v6, | ||
340 | * v6 -> v4 transitions. The helper will also resize the skb. eBPF | ||
341 | * program is expected to fill the new headers via skb_store_bytes | ||
342 | * and lX_csum_replace. | ||
343 | * @skb: pointer to skb | ||
344 | * @proto: new skb->protocol type | ||
345 | * @flags: reserved | ||
346 | * Return: 0 on success or negative error | ||
347 | * | ||
348 | * int bpf_skb_change_type(skb, type) | ||
349 | * Change packet type of skb. | ||
350 | * @skb: pointer to skb | ||
351 | * @type: new skb->pkt_type type | ||
352 | * Return: 0 on success or negative error | ||
353 | * | ||
354 | * int bpf_skb_under_cgroup(skb, map, index) | ||
355 | * Check cgroup2 membership of skb | ||
356 | * @skb: pointer to skb | ||
357 | * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type | ||
358 | * @index: index of the cgroup in the bpf_map | ||
359 | * Return: | ||
360 | * == 0 skb failed the cgroup2 descendant test | ||
361 | * == 1 skb succeeded the cgroup2 descendant test | ||
362 | * < 0 error | ||
363 | * | ||
364 | * u32 bpf_get_hash_recalc(skb) | ||
365 | * Retrieve and possibly recalculate skb->hash. | ||
366 | * @skb: pointer to skb | ||
367 | * Return: hash | ||
368 | * | ||
369 | * u64 bpf_get_current_task(void) | ||
370 | * Returns current task_struct | ||
371 | * Return: current | ||
372 | * | ||
373 | * int bpf_probe_write_user(void *dst, void *src, int len) | ||
374 | * safely attempt to write to a location | ||
375 | * @dst: destination address in userspace | ||
376 | * @src: source address on stack | ||
377 | * @len: number of bytes to copy | ||
378 | * Return: 0 on success or negative error | ||
379 | * | ||
380 | * int bpf_current_task_under_cgroup(map, index) | ||
381 | * Check cgroup2 membership of current task | ||
382 | * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type | ||
383 | * @index: index of the cgroup in the bpf_map | ||
384 | * Return: | ||
385 | * == 0 current failed the cgroup2 descendant test | ||
386 | * == 1 current succeeded the cgroup2 descendant test | ||
387 | * < 0 error | ||
388 | * | ||
389 | * int bpf_skb_change_tail(skb, len, flags) | ||
390 | * The helper will resize the skb to the given new size, to be used f.e. | ||
391 | * with control messages. | ||
392 | * @skb: pointer to skb | ||
393 | * @len: new skb length | ||
394 | * @flags: reserved | ||
395 | * Return: 0 on success or negative error | ||
396 | * | ||
397 | * int bpf_skb_pull_data(skb, len) | ||
398 | * The helper will pull in non-linear data in case the skb is non-linear | ||
399 | * and not all of len are part of the linear section. Only needed for | ||
400 | * read/write with direct packet access. | ||
401 | * @skb: pointer to skb | ||
402 | * @len: len to make read/writeable | ||
403 | * Return: 0 on success or negative error | ||
404 | * | ||
405 | * s64 bpf_csum_update(skb, csum) | ||
406 | * Adds csum into skb->csum in case of CHECKSUM_COMPLETE. | ||
407 | * @skb: pointer to skb | ||
408 | * @csum: csum to add | ||
409 | * Return: csum on success or negative error | ||
410 | * | ||
411 | * void bpf_set_hash_invalid(skb) | ||
412 | * Invalidate current skb->hash. | ||
413 | * @skb: pointer to skb | ||
414 | * | ||
415 | * int bpf_get_numa_node_id() | ||
416 | * Return: Id of current NUMA node. | ||
417 | * | ||
418 | * int bpf_skb_change_head() | ||
419 | * Grows headroom of skb and adjusts MAC header offset accordingly. | ||
420 | * Will extend/reallocate as required automatically. | ||
421 | * May change skb data pointer and will thus invalidate any check | ||
422 | * performed for direct packet access. | ||
423 | * @skb: pointer to skb | ||
424 | * @len: length of header to be pushed in front | ||
425 | * @flags: Flags (unused for now) | ||
426 | * Return: 0 on success or negative error | ||
427 | * | ||
428 | * int bpf_xdp_adjust_head(xdp_md, delta) | ||
429 | * Adjust the xdp_md.data by delta | ||
430 | * @xdp_md: pointer to xdp_md | ||
431 | * @delta: A positive/negative integer to be added to xdp_md.data | ||
432 | * Return: 0 on success or negative on error | ||
433 | */ | ||
434 | #define __BPF_FUNC_MAPPER(FN) \ | ||
435 | FN(unspec), \ | ||
436 | FN(map_lookup_elem), \ | ||
437 | FN(map_update_elem), \ | ||
438 | FN(map_delete_elem), \ | ||
439 | FN(probe_read), \ | ||
440 | FN(ktime_get_ns), \ | ||
441 | FN(trace_printk), \ | ||
442 | FN(get_prandom_u32), \ | ||
443 | FN(get_smp_processor_id), \ | ||
444 | FN(skb_store_bytes), \ | ||
445 | FN(l3_csum_replace), \ | ||
446 | FN(l4_csum_replace), \ | ||
447 | FN(tail_call), \ | ||
448 | FN(clone_redirect), \ | ||
449 | FN(get_current_pid_tgid), \ | ||
450 | FN(get_current_uid_gid), \ | ||
451 | FN(get_current_comm), \ | ||
452 | FN(get_cgroup_classid), \ | ||
453 | FN(skb_vlan_push), \ | ||
454 | FN(skb_vlan_pop), \ | ||
455 | FN(skb_get_tunnel_key), \ | ||
456 | FN(skb_set_tunnel_key), \ | ||
457 | FN(perf_event_read), \ | ||
458 | FN(redirect), \ | ||
459 | FN(get_route_realm), \ | ||
460 | FN(perf_event_output), \ | ||
461 | FN(skb_load_bytes), \ | ||
462 | FN(get_stackid), \ | ||
463 | FN(csum_diff), \ | ||
464 | FN(skb_get_tunnel_opt), \ | ||
465 | FN(skb_set_tunnel_opt), \ | ||
466 | FN(skb_change_proto), \ | ||
467 | FN(skb_change_type), \ | ||
468 | FN(skb_under_cgroup), \ | ||
469 | FN(get_hash_recalc), \ | ||
470 | FN(get_current_task), \ | ||
471 | FN(probe_write_user), \ | ||
472 | FN(current_task_under_cgroup), \ | ||
473 | FN(skb_change_tail), \ | ||
474 | FN(skb_pull_data), \ | ||
475 | FN(csum_update), \ | ||
476 | FN(set_hash_invalid), \ | ||
477 | FN(get_numa_node_id), \ | ||
478 | FN(skb_change_head), \ | ||
479 | FN(xdp_adjust_head), | ||
480 | |||
145 | /* integer value in 'imm' field of BPF_CALL instruction selects which helper | 481 | /* integer value in 'imm' field of BPF_CALL instruction selects which helper |
146 | * function eBPF program intends to call | 482 | * function eBPF program intends to call |
147 | */ | 483 | */ |
484 | #define __BPF_ENUM_FN(x) BPF_FUNC_ ## x | ||
148 | enum bpf_func_id { | 485 | enum bpf_func_id { |
149 | BPF_FUNC_unspec, | 486 | __BPF_FUNC_MAPPER(__BPF_ENUM_FN) |
150 | BPF_FUNC_map_lookup_elem, /* void *map_lookup_elem(&map, &key) */ | ||
151 | BPF_FUNC_map_update_elem, /* int map_update_elem(&map, &key, &value, flags) */ | ||
152 | BPF_FUNC_map_delete_elem, /* int map_delete_elem(&map, &key) */ | ||
153 | BPF_FUNC_probe_read, /* int bpf_probe_read(void *dst, int size, void *src) */ | ||
154 | BPF_FUNC_ktime_get_ns, /* u64 bpf_ktime_get_ns(void) */ | ||
155 | BPF_FUNC_trace_printk, /* int bpf_trace_printk(const char *fmt, int fmt_size, ...) */ | ||
156 | BPF_FUNC_get_prandom_u32, /* u32 prandom_u32(void) */ | ||
157 | BPF_FUNC_get_smp_processor_id, /* u32 raw_smp_processor_id(void) */ | ||
158 | |||
159 | /** | ||
160 | * skb_store_bytes(skb, offset, from, len, flags) - store bytes into packet | ||
161 | * @skb: pointer to skb | ||
162 | * @offset: offset within packet from skb->mac_header | ||
163 | * @from: pointer where to copy bytes from | ||
164 | * @len: number of bytes to store into packet | ||
165 | * @flags: bit 0 - if true, recompute skb->csum | ||
166 | * other bits - reserved | ||
167 | * Return: 0 on success | ||
168 | */ | ||
169 | BPF_FUNC_skb_store_bytes, | ||
170 | |||
171 | /** | ||
172 | * l3_csum_replace(skb, offset, from, to, flags) - recompute IP checksum | ||
173 | * @skb: pointer to skb | ||
174 | * @offset: offset within packet where IP checksum is located | ||
175 | * @from: old value of header field | ||
176 | * @to: new value of header field | ||
177 | * @flags: bits 0-3 - size of header field | ||
178 | * other bits - reserved | ||
179 | * Return: 0 on success | ||
180 | */ | ||
181 | BPF_FUNC_l3_csum_replace, | ||
182 | |||
183 | /** | ||
184 | * l4_csum_replace(skb, offset, from, to, flags) - recompute TCP/UDP checksum | ||
185 | * @skb: pointer to skb | ||
186 | * @offset: offset within packet where TCP/UDP checksum is located | ||
187 | * @from: old value of header field | ||
188 | * @to: new value of header field | ||
189 | * @flags: bits 0-3 - size of header field | ||
190 | * bit 4 - is pseudo header | ||
191 | * other bits - reserved | ||
192 | * Return: 0 on success | ||
193 | */ | ||
194 | BPF_FUNC_l4_csum_replace, | ||
195 | |||
196 | /** | ||
197 | * bpf_tail_call(ctx, prog_array_map, index) - jump into another BPF program | ||
198 | * @ctx: context pointer passed to next program | ||
199 | * @prog_array_map: pointer to map which type is BPF_MAP_TYPE_PROG_ARRAY | ||
200 | * @index: index inside array that selects specific program to run | ||
201 | * Return: 0 on success | ||
202 | */ | ||
203 | BPF_FUNC_tail_call, | ||
204 | |||
205 | /** | ||
206 | * bpf_clone_redirect(skb, ifindex, flags) - redirect to another netdev | ||
207 | * @skb: pointer to skb | ||
208 | * @ifindex: ifindex of the net device | ||
209 | * @flags: bit 0 - if set, redirect to ingress instead of egress | ||
210 | * other bits - reserved | ||
211 | * Return: 0 on success | ||
212 | */ | ||
213 | BPF_FUNC_clone_redirect, | ||
214 | |||
215 | /** | ||
216 | * u64 bpf_get_current_pid_tgid(void) | ||
217 | * Return: current->tgid << 32 | current->pid | ||
218 | */ | ||
219 | BPF_FUNC_get_current_pid_tgid, | ||
220 | |||
221 | /** | ||
222 | * u64 bpf_get_current_uid_gid(void) | ||
223 | * Return: current_gid << 32 | current_uid | ||
224 | */ | ||
225 | BPF_FUNC_get_current_uid_gid, | ||
226 | |||
227 | /** | ||
228 | * bpf_get_current_comm(char *buf, int size_of_buf) | ||
229 | * stores current->comm into buf | ||
230 | * Return: 0 on success | ||
231 | */ | ||
232 | BPF_FUNC_get_current_comm, | ||
233 | |||
234 | /** | ||
235 | * bpf_get_cgroup_classid(skb) - retrieve a proc's classid | ||
236 | * @skb: pointer to skb | ||
237 | * Return: classid if != 0 | ||
238 | */ | ||
239 | BPF_FUNC_get_cgroup_classid, | ||
240 | BPF_FUNC_skb_vlan_push, /* bpf_skb_vlan_push(skb, vlan_proto, vlan_tci) */ | ||
241 | BPF_FUNC_skb_vlan_pop, /* bpf_skb_vlan_pop(skb) */ | ||
242 | |||
243 | /** | ||
244 | * bpf_skb_[gs]et_tunnel_key(skb, key, size, flags) | ||
245 | * retrieve or populate tunnel metadata | ||
246 | * @skb: pointer to skb | ||
247 | * @key: pointer to 'struct bpf_tunnel_key' | ||
248 | * @size: size of 'struct bpf_tunnel_key' | ||
249 | * @flags: room for future extensions | ||
250 | * Retrun: 0 on success | ||
251 | */ | ||
252 | BPF_FUNC_skb_get_tunnel_key, | ||
253 | BPF_FUNC_skb_set_tunnel_key, | ||
254 | BPF_FUNC_perf_event_read, /* u64 bpf_perf_event_read(&map, index) */ | ||
255 | /** | ||
256 | * bpf_redirect(ifindex, flags) - redirect to another netdev | ||
257 | * @ifindex: ifindex of the net device | ||
258 | * @flags: bit 0 - if set, redirect to ingress instead of egress | ||
259 | * other bits - reserved | ||
260 | * Return: TC_ACT_REDIRECT | ||
261 | */ | ||
262 | BPF_FUNC_redirect, | ||
263 | |||
264 | /** | ||
265 | * bpf_get_route_realm(skb) - retrieve a dst's tclassid | ||
266 | * @skb: pointer to skb | ||
267 | * Return: realm if != 0 | ||
268 | */ | ||
269 | BPF_FUNC_get_route_realm, | ||
270 | |||
271 | /** | ||
272 | * bpf_perf_event_output(ctx, map, index, data, size) - output perf raw sample | ||
273 | * @ctx: struct pt_regs* | ||
274 | * @map: pointer to perf_event_array map | ||
275 | * @index: index of event in the map | ||
276 | * @data: data on stack to be output as raw data | ||
277 | * @size: size of data | ||
278 | * Return: 0 on success | ||
279 | */ | ||
280 | BPF_FUNC_perf_event_output, | ||
281 | BPF_FUNC_skb_load_bytes, | ||
282 | |||
283 | /** | ||
284 | * bpf_get_stackid(ctx, map, flags) - walk user or kernel stack and return id | ||
285 | * @ctx: struct pt_regs* | ||
286 | * @map: pointer to stack_trace map | ||
287 | * @flags: bits 0-7 - numer of stack frames to skip | ||
288 | * bit 8 - collect user stack instead of kernel | ||
289 | * bit 9 - compare stacks by hash only | ||
290 | * bit 10 - if two different stacks hash into the same stackid | ||
291 | * discard old | ||
292 | * other bits - reserved | ||
293 | * Return: >= 0 stackid on success or negative error | ||
294 | */ | ||
295 | BPF_FUNC_get_stackid, | ||
296 | |||
297 | /** | ||
298 | * bpf_csum_diff(from, from_size, to, to_size, seed) - calculate csum diff | ||
299 | * @from: raw from buffer | ||
300 | * @from_size: length of from buffer | ||
301 | * @to: raw to buffer | ||
302 | * @to_size: length of to buffer | ||
303 | * @seed: optional seed | ||
304 | * Return: csum result | ||
305 | */ | ||
306 | BPF_FUNC_csum_diff, | ||
307 | |||
308 | /** | ||
309 | * bpf_skb_[gs]et_tunnel_opt(skb, opt, size) | ||
310 | * retrieve or populate tunnel options metadata | ||
311 | * @skb: pointer to skb | ||
312 | * @opt: pointer to raw tunnel option data | ||
313 | * @size: size of @opt | ||
314 | * Return: 0 on success for set, option size for get | ||
315 | */ | ||
316 | BPF_FUNC_skb_get_tunnel_opt, | ||
317 | BPF_FUNC_skb_set_tunnel_opt, | ||
318 | |||
319 | /** | ||
320 | * bpf_skb_change_proto(skb, proto, flags) | ||
321 | * Change protocol of the skb. Currently supported is | ||
322 | * v4 -> v6, v6 -> v4 transitions. The helper will also | ||
323 | * resize the skb. eBPF program is expected to fill the | ||
324 | * new headers via skb_store_bytes and lX_csum_replace. | ||
325 | * @skb: pointer to skb | ||
326 | * @proto: new skb->protocol type | ||
327 | * @flags: reserved | ||
328 | * Return: 0 on success or negative error | ||
329 | */ | ||
330 | BPF_FUNC_skb_change_proto, | ||
331 | |||
332 | /** | ||
333 | * bpf_skb_change_type(skb, type) | ||
334 | * Change packet type of skb. | ||
335 | * @skb: pointer to skb | ||
336 | * @type: new skb->pkt_type type | ||
337 | * Return: 0 on success or negative error | ||
338 | */ | ||
339 | BPF_FUNC_skb_change_type, | ||
340 | |||
341 | /** | ||
342 | * bpf_skb_under_cgroup(skb, map, index) - Check cgroup2 membership of skb | ||
343 | * @skb: pointer to skb | ||
344 | * @map: pointer to bpf_map in BPF_MAP_TYPE_CGROUP_ARRAY type | ||
345 | * @index: index of the cgroup in the bpf_map | ||
346 | * Return: | ||
347 | * == 0 skb failed the cgroup2 descendant test | ||
348 | * == 1 skb succeeded the cgroup2 descendant test | ||
349 | * < 0 error | ||
350 | */ | ||
351 | BPF_FUNC_skb_under_cgroup, | ||
352 | |||
353 | /** | ||
354 | * bpf_get_hash_recalc(skb) | ||
355 | * Retrieve and possibly recalculate skb->hash. | ||
356 | * @skb: pointer to skb | ||
357 | * Return: hash | ||
358 | */ | ||
359 | BPF_FUNC_get_hash_recalc, | ||
360 | |||
361 | /** | ||
362 | * u64 bpf_get_current_task(void) | ||
363 | * Returns current task_struct | ||
364 | * Return: current | ||
365 | */ | ||
366 | BPF_FUNC_get_current_task, | ||
367 | |||
368 | /** | ||
369 | * bpf_probe_write_user(void *dst, void *src, int len) | ||
370 | * safely attempt to write to a location | ||
371 | * @dst: destination address in userspace | ||
372 | * @src: source address on stack | ||
373 | * @len: number of bytes to copy | ||
374 | * Return: 0 on success or negative error | ||
375 | */ | ||
376 | BPF_FUNC_probe_write_user, | ||
377 | |||
378 | __BPF_FUNC_MAX_ID, | 487 | __BPF_FUNC_MAX_ID, |
379 | }; | 488 | }; |
489 | #undef __BPF_ENUM_FN | ||
380 | 490 | ||
381 | /* All flags used by eBPF helper functions, placed here. */ | 491 | /* All flags used by eBPF helper functions, placed here. */ |
382 | 492 | ||
@@ -450,6 +560,31 @@ struct bpf_tunnel_key { | |||
450 | __u32 tunnel_label; | 560 | __u32 tunnel_label; |
451 | }; | 561 | }; |
452 | 562 | ||
563 | /* Generic BPF return codes which all BPF program types may support. | ||
564 | * The values are binary compatible with their TC_ACT_* counter-part to | ||
565 | * provide backwards compatibility with existing SCHED_CLS and SCHED_ACT | ||
566 | * programs. | ||
567 | * | ||
568 | * XDP is handled separately, see XDP_*. | ||
569 | */ | ||
570 | enum bpf_ret_code { | ||
571 | BPF_OK = 0, | ||
572 | /* 1 reserved */ | ||
573 | BPF_DROP = 2, | ||
574 | /* 3-6 reserved */ | ||
575 | BPF_REDIRECT = 7, | ||
576 | /* >127 are reserved for prog type specific return codes */ | ||
577 | }; | ||
578 | |||
579 | struct bpf_sock { | ||
580 | __u32 bound_dev_if; | ||
581 | __u32 family; | ||
582 | __u32 type; | ||
583 | __u32 protocol; | ||
584 | }; | ||
585 | |||
586 | #define XDP_PACKET_HEADROOM 256 | ||
587 | |||
453 | /* User return codes for XDP prog type. | 588 | /* User return codes for XDP prog type. |
454 | * A valid XDP program must return one of these defined values. All other | 589 | * A valid XDP program must return one of these defined values. All other |
455 | * return codes are reserved for future use. Unknown return codes will result | 590 | * return codes are reserved for future use. Unknown return codes will result |