diff options
Diffstat (limited to 'include/uapi/linux/bpf.h')
| -rw-r--r-- | include/uapi/linux/bpf.h | 94 |
1 file changed, 82 insertions, 12 deletions
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index da77a9388947..93d5a4eeec2a 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h | |||
| @@ -116,6 +116,7 @@ enum bpf_map_type { | |||
| 116 | BPF_MAP_TYPE_DEVMAP, | 116 | BPF_MAP_TYPE_DEVMAP, |
| 117 | BPF_MAP_TYPE_SOCKMAP, | 117 | BPF_MAP_TYPE_SOCKMAP, |
| 118 | BPF_MAP_TYPE_CPUMAP, | 118 | BPF_MAP_TYPE_CPUMAP, |
| 119 | BPF_MAP_TYPE_XSKMAP, | ||
| 119 | }; | 120 | }; |
| 120 | 121 | ||
| 121 | enum bpf_prog_type { | 122 | enum bpf_prog_type { |
| @@ -828,12 +829,12 @@ union bpf_attr { | |||
| 828 | * | 829 | * |
| 829 | * Also, be aware that the newer helper | 830 | * Also, be aware that the newer helper |
| 830 | * **bpf_perf_event_read_value**\ () is recommended over | 831 | * **bpf_perf_event_read_value**\ () is recommended over |
| 831 | * **bpf_perf_event_read*\ () in general. The latter has some ABI | 832 | * **bpf_perf_event_read**\ () in general. The latter has some ABI |
| 832 | * quirks where error and counter value are used as a return code | 833 | * quirks where error and counter value are used as a return code |
| 833 | * (which is wrong to do since ranges may overlap). This issue is | 834 | * (which is wrong to do since ranges may overlap). This issue is |
| 834 | * fixed with bpf_perf_event_read_value(), which at the same time | 835 | * fixed with **bpf_perf_event_read_value**\ (), which at the same |
| 835 | * provides more features over the **bpf_perf_event_read**\ () | 836 | * time provides more features over the **bpf_perf_event_read**\ |
| 836 | * interface. Please refer to the description of | 837 | * () interface. Please refer to the description of |
| 837 | * **bpf_perf_event_read_value**\ () for details. | 838 | * **bpf_perf_event_read_value**\ () for details. |
| 838 | * Return | 839 | * Return |
| 839 | * The value of the perf event counter read from the map, or a | 840 | * The value of the perf event counter read from the map, or a |
| @@ -1361,7 +1362,7 @@ union bpf_attr { | |||
| 1361 | * Return | 1362 | * Return |
| 1362 | * 0 | 1363 | * 0 |
| 1363 | * | 1364 | * |
| 1364 | * int bpf_setsockopt(struct bpf_sock_ops_kern *bpf_socket, int level, int optname, char *optval, int optlen) | 1365 | * int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen) |
| 1365 | * Description | 1366 | * Description |
| 1366 | * Emulate a call to **setsockopt()** on the socket associated to | 1367 | * Emulate a call to **setsockopt()** on the socket associated to |
| 1367 | * *bpf_socket*, which must be a full socket. The *level* at | 1368 | * *bpf_socket*, which must be a full socket. The *level* at |
| @@ -1435,7 +1436,7 @@ union bpf_attr { | |||
| 1435 | * Return | 1436 | * Return |
| 1436 | * **SK_PASS** on success, or **SK_DROP** on error. | 1437 | * **SK_PASS** on success, or **SK_DROP** on error. |
| 1437 | * | 1438 | * |
| 1438 | * int bpf_sock_map_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags) | 1439 | * int bpf_sock_map_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags) |
| 1439 | * Description | 1440 | * Description |
| 1440 | * Add an entry to, or update a *map* referencing sockets. The | 1441 | * Add an entry to, or update a *map* referencing sockets. The |
| 1441 | * *skops* is used as a new value for the entry associated to | 1442 | * *skops* is used as a new value for the entry associated to |
| @@ -1533,7 +1534,7 @@ union bpf_attr { | |||
| 1533 | * Return | 1534 | * Return |
| 1534 | * 0 on success, or a negative error in case of failure. | 1535 | * 0 on success, or a negative error in case of failure. |
| 1535 | * | 1536 | * |
| 1536 | * int bpf_perf_prog_read_value(struct bpf_perf_event_data_kern *ctx, struct bpf_perf_event_value *buf, u32 buf_size) | 1537 | * int bpf_perf_prog_read_value(struct bpf_perf_event_data *ctx, struct bpf_perf_event_value *buf, u32 buf_size) |
| 1537 | * Description | 1538 | * Description |
| 1538 | * For an eBPF program attached to a perf event, retrieve the | 1539 | * For an eBPF program attached to a perf event, retrieve the |
| 1539 | * value of the event counter associated to *ctx* and store it in | 1540 | * value of the event counter associated to *ctx* and store it in |
| @@ -1544,7 +1545,7 @@ union bpf_attr { | |||
| 1544 | * Return | 1545 | * Return |
| 1545 | * 0 on success, or a negative error in case of failure. | 1546 | * 0 on success, or a negative error in case of failure. |
| 1546 | * | 1547 | * |
| 1547 | * int bpf_getsockopt(struct bpf_sock_ops_kern *bpf_socket, int level, int optname, char *optval, int optlen) | 1548 | * int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen) |
| 1548 | * Description | 1549 | * Description |
| 1549 | * Emulate a call to **getsockopt()** on the socket associated to | 1550 | * Emulate a call to **getsockopt()** on the socket associated to |
| 1550 | * *bpf_socket*, which must be a full socket. The *level* at | 1551 | * *bpf_socket*, which must be a full socket. The *level* at |
| @@ -1588,7 +1589,7 @@ union bpf_attr { | |||
| 1588 | * Return | 1589 | * Return |
| 1589 | * 0 | 1590 | * 0 |
| 1590 | * | 1591 | * |
| 1591 | * int bpf_sock_ops_cb_flags_set(struct bpf_sock_ops_kern *bpf_sock, int argval) | 1592 | * int bpf_sock_ops_cb_flags_set(struct bpf_sock_ops *bpf_sock, int argval) |
| 1592 | * Description | 1593 | * Description |
| 1593 | * Attempt to set the value of the **bpf_sock_ops_cb_flags** field | 1594 | * Attempt to set the value of the **bpf_sock_ops_cb_flags** field |
| 1594 | * for the full TCP socket associated to *bpf_sock_ops* to | 1595 | * for the full TCP socket associated to *bpf_sock_ops* to |
| @@ -1721,7 +1722,7 @@ union bpf_attr { | |||
| 1721 | * Return | 1722 | * Return |
| 1722 | * 0 on success, or a negative error in case of failure. | 1723 | * 0 on success, or a negative error in case of failure. |
| 1723 | * | 1724 | * |
| 1724 | * int bpf_bind(struct bpf_sock_addr_kern *ctx, struct sockaddr *addr, int addr_len) | 1725 | * int bpf_bind(struct bpf_sock_addr *ctx, struct sockaddr *addr, int addr_len) |
| 1725 | * Description | 1726 | * Description |
| 1726 | * Bind the socket associated to *ctx* to the address pointed by | 1727 | * Bind the socket associated to *ctx* to the address pointed by |
| 1727 | * *addr*, of length *addr_len*. This allows for making outgoing | 1728 | * *addr*, of length *addr_len*. This allows for making outgoing |
| @@ -1767,6 +1768,64 @@ union bpf_attr { | |||
| 1767 | * **CONFIG_XFRM** configuration option. | 1768 | * **CONFIG_XFRM** configuration option. |
| 1768 | * Return | 1769 | * Return |
| 1769 | * 0 on success, or a negative error in case of failure. | 1770 | * 0 on success, or a negative error in case of failure. |
| 1771 | * | ||
| 1772 | * int bpf_get_stack(struct pt_regs *regs, void *buf, u32 size, u64 flags) | ||
| 1773 | * Description | ||
| 1774 | * Return a user or a kernel stack in bpf program provided buffer. | ||
| 1775 | * To achieve this, the helper needs *regs*, which is a | ||
| 1776 | * pointer to the context on which the tracing program is executed. | ||
| 1777 | * To store the stacktrace, the bpf program provides *buf* with | ||
| 1778 | * a nonnegative *size*. | ||
| 1779 | * | ||
| 1780 | * The last argument, *flags*, holds the number of stack frames to | ||
| 1781 | * skip (from 0 to 255), masked with | ||
| 1782 | * **BPF_F_SKIP_FIELD_MASK**. The next bits can be used to set | ||
| 1783 | * the following flags: | ||
| 1784 | * | ||
| 1785 | * **BPF_F_USER_STACK** | ||
| 1786 | * Collect a user space stack instead of a kernel stack. | ||
| 1787 | * **BPF_F_USER_BUILD_ID** | ||
| 1788 | * Collect buildid+offset instead of ips for user stack, | ||
| 1789 | * only valid if **BPF_F_USER_STACK** is also specified. | ||
| 1790 | * | ||
| 1791 | * **bpf_get_stack**\ () can collect up to | ||
| 1792 | * **PERF_MAX_STACK_DEPTH** both kernel and user frames, subject | ||
| 1793 | * to sufficient large buffer size. Note that | ||
| 1794 | * this limit can be controlled with the **sysctl** program, and | ||
| 1795 | * that it should be manually increased in order to profile long | ||
| 1796 | * user stacks (such as stacks for Java programs). To do so, use: | ||
| 1797 | * | ||
| 1798 | * :: | ||
| 1799 | * | ||
| 1800 | * # sysctl kernel.perf_event_max_stack=<new value> | ||
| 1801 | * | ||
| 1802 | * Return | ||
| 1803 | * A non-negative value equal to or less than *size* on success, | ||
| 1804 | * or a negative error in case of failure. | ||
| 1805 | * | ||
| 1806 | * int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header) | ||
| 1807 | * Description | ||
| 1808 | * This helper is similar to **bpf_skb_load_bytes**\ () in that | ||
| 1809 | * it provides an easy way to load *len* bytes from *offset* | ||
| 1810 | * from the packet associated to *skb*, into the buffer pointed | ||
| 1811 | * by *to*. The difference to **bpf_skb_load_bytes**\ () is that | ||
| 1812 | * a fifth argument *start_header* exists in order to select a | ||
| 1813 | * base offset to start from. *start_header* can be one of: | ||
| 1814 | * | ||
| 1815 | * **BPF_HDR_START_MAC** | ||
| 1816 | * Base offset to load data from is *skb*'s mac header. | ||
| 1817 | * **BPF_HDR_START_NET** | ||
| 1818 | * Base offset to load data from is *skb*'s network header. | ||
| 1819 | * | ||
| 1820 | * In general, "direct packet access" is the preferred method to | ||
| 1821 | * access packet data, however, this helper is in particular useful | ||
| 1822 | * in socket filters where *skb*\ **->data** does not always point | ||
| 1823 | * to the start of the mac header and where "direct packet access" | ||
| 1824 | * is not available. | ||
| 1825 | * | ||
| 1826 | * Return | ||
| 1827 | * 0 on success, or a negative error in case of failure. | ||
| 1828 | * | ||
| 1770 | */ | 1829 | */ |
| 1771 | #define __BPF_FUNC_MAPPER(FN) \ | 1830 | #define __BPF_FUNC_MAPPER(FN) \ |
| 1772 | FN(unspec), \ | 1831 | FN(unspec), \ |
| @@ -1835,7 +1894,9 @@ union bpf_attr { | |||
| 1835 | FN(msg_pull_data), \ | 1894 | FN(msg_pull_data), \ |
| 1836 | FN(bind), \ | 1895 | FN(bind), \ |
| 1837 | FN(xdp_adjust_tail), \ | 1896 | FN(xdp_adjust_tail), \ |
| 1838 | FN(skb_get_xfrm_state), | 1897 | FN(skb_get_xfrm_state), \ |
| 1898 | FN(get_stack), \ | ||
| 1899 | FN(skb_load_bytes_relative), | ||
| 1839 | 1900 | ||
| 1840 | /* integer value in 'imm' field of BPF_CALL instruction selects which helper | 1901 | /* integer value in 'imm' field of BPF_CALL instruction selects which helper |
| 1841 | * function eBPF program intends to call | 1902 | * function eBPF program intends to call |
| @@ -1869,11 +1930,14 @@ enum bpf_func_id { | |||
| 1869 | /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */ | 1930 | /* BPF_FUNC_skb_set_tunnel_key and BPF_FUNC_skb_get_tunnel_key flags. */ |
| 1870 | #define BPF_F_TUNINFO_IPV6 (1ULL << 0) | 1931 | #define BPF_F_TUNINFO_IPV6 (1ULL << 0) |
| 1871 | 1932 | ||
| 1872 | /* BPF_FUNC_get_stackid flags. */ | 1933 | /* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */ |
| 1873 | #define BPF_F_SKIP_FIELD_MASK 0xffULL | 1934 | #define BPF_F_SKIP_FIELD_MASK 0xffULL |
| 1874 | #define BPF_F_USER_STACK (1ULL << 8) | 1935 | #define BPF_F_USER_STACK (1ULL << 8) |
| 1936 | /* flags used by BPF_FUNC_get_stackid only. */ | ||
| 1875 | #define BPF_F_FAST_STACK_CMP (1ULL << 9) | 1937 | #define BPF_F_FAST_STACK_CMP (1ULL << 9) |
| 1876 | #define BPF_F_REUSE_STACKID (1ULL << 10) | 1938 | #define BPF_F_REUSE_STACKID (1ULL << 10) |
| 1939 | /* flags used by BPF_FUNC_get_stack only. */ | ||
| 1940 | #define BPF_F_USER_BUILD_ID (1ULL << 11) | ||
| 1877 | 1941 | ||
| 1878 | /* BPF_FUNC_skb_set_tunnel_key flags. */ | 1942 | /* BPF_FUNC_skb_set_tunnel_key flags. */ |
| 1879 | #define BPF_F_ZERO_CSUM_TX (1ULL << 1) | 1943 | #define BPF_F_ZERO_CSUM_TX (1ULL << 1) |
| @@ -1893,6 +1957,12 @@ enum bpf_adj_room_mode { | |||
| 1893 | BPF_ADJ_ROOM_NET, | 1957 | BPF_ADJ_ROOM_NET, |
| 1894 | }; | 1958 | }; |
| 1895 | 1959 | ||
| 1960 | /* Mode for BPF_FUNC_skb_load_bytes_relative helper. */ | ||
| 1961 | enum bpf_hdr_start_off { | ||
| 1962 | BPF_HDR_START_MAC, | ||
| 1963 | BPF_HDR_START_NET, | ||
| 1964 | }; | ||
| 1965 | |||
| 1896 | /* user accessible mirror of in-kernel sk_buff. | 1966 | /* user accessible mirror of in-kernel sk_buff. |
| 1897 | * new fields can only be added to the end of this structure | 1967 | * new fields can only be added to the end of this structure |
| 1898 | */ | 1968 | */ |
