Diffstat (limited to 'tools/include/uapi/linux/bpf.h')
 tools/include/uapi/linux/bpf.h | 116 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 111 insertions(+), 5 deletions(-)
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index 4c223ab30293..db6bdc375126 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -17,7 +17,7 @@
 #define BPF_ALU64	0x07	/* alu mode in double word width */
 
 /* ld/ldx fields */
-#define BPF_DW		0x18	/* double word */
+#define BPF_DW		0x18	/* double word (64-bit) */
 #define BPF_XADD	0xc0	/* exclusive add */
 
 /* alu/jmp fields */
@@ -197,8 +197,14 @@ enum bpf_attach_type {
  */
 #define BPF_F_STRICT_ALIGNMENT	(1U << 0)
 
+/* when bpf_ldimm64->src_reg == BPF_PSEUDO_MAP_FD, bpf_ldimm64->imm == fd */
 #define BPF_PSEUDO_MAP_FD	1
 
+/* when bpf_call->src_reg == BPF_PSEUDO_CALL, bpf_call->imm == pc-relative
+ * offset to another bpf function
+ */
+#define BPF_PSEUDO_CALL		1
+
 /* flags for BPF_MAP_UPDATE_ELEM command */
 #define BPF_ANY		0 /* create new element or update existing */
 #define BPF_NOEXIST	1 /* create new element if it didn't exist */
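A minimal sketch of how a BPF_PSEUDO_CALL instruction is encoded, based on the comment above (illustrative, not part of this diff; the bpf_insn layout comes from this header):

#include <linux/bpf.h>

/* Build a call to another BPF function: src_reg = BPF_PSEUDO_CALL and
 * imm = pc-relative offset (in instructions) to the callee.
 */
static struct bpf_insn bpf_pseudo_call(int insn_off)
{
	struct bpf_insn insn = {
		.code    = BPF_JMP | BPF_CALL,
		.dst_reg = 0,
		.src_reg = BPF_PSEUDO_CALL,
		.off     = 0,
		.imm     = insn_off,
	};
	return insn;
}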
@@ -239,6 +245,7 @@ union bpf_attr {
 				 * BPF_F_NUMA_NODE is set).
 				 */
 	char	map_name[BPF_OBJ_NAME_LEN];
+	__u32	map_ifindex;	/* ifindex of netdev to create on */
 	};
 
 	struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
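The new map_ifindex field asks for the map to be created on (offloaded to) a particular netdev. A hedged sketch of a BPF_MAP_CREATE call using it, with error handling omitted and map parameters chosen purely for illustration:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Create a hash map bound to the device identified by ifindex. */
static int create_offloaded_map(int ifindex)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_HASH;
	attr.key_size    = 4;
	attr.value_size  = 8;
	attr.max_entries = 64;
	attr.map_ifindex = ifindex;	/* new field: target netdev */
	strncpy(attr.map_name, "offl_map", sizeof(attr.map_name) - 1);

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}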
@@ -635,6 +642,14 @@ union bpf_attr {
  *     @optlen: length of optval in bytes
  *     Return: 0 or negative error
  *
+ * int bpf_sock_ops_cb_flags_set(bpf_sock_ops, flags)
+ *     Set callback flags for sock_ops
+ *     @bpf_sock_ops: pointer to bpf_sock_ops_kern struct
+ *     @flags: flags value
+ *     Return: 0 for no error
+ *             -EINVAL if there is no full tcp socket
+ *             bits in flags that are not supported by current kernel
+ *
  * int bpf_skb_adjust_room(skb, len_diff, mode, flags)
  *     Grow or shrink room in sk_buff.
  *     @skb: pointer to skb
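A sketch of how a sock_ops program might use the new helper to opt in to callbacks once a connection is established; the helper binding follows the usual selftests convention, and the section name and flag choice are illustrative:

#include <linux/bpf.h>

static int (*bpf_sock_ops_cb_flags_set)(struct bpf_sock_ops *ops, int flags) =
	(void *) BPF_FUNC_sock_ops_cb_flags_set;

__attribute__((section("sockops"), used))
int enable_cb(struct bpf_sock_ops *skops)
{
	/* Ask for RTO and state-change callbacks on established connections. */
	if (skops->op == BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB)
		bpf_sock_ops_cb_flags_set(skops,
					  BPF_SOCK_OPS_RTO_CB_FLAG |
					  BPF_SOCK_OPS_STATE_CB_FLAG);
	skops->reply = -1;
	return 1;
}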
@@ -677,6 +692,10 @@ union bpf_attr {
  *     @buf: buf to fill
  *     @buf_size: size of the buf
  *     Return : 0 on success or negative error code
+ *
+ * int bpf_override_return(pt_regs, rc)
+ *     @pt_regs: pointer to struct pt_regs
+ *     @rc: the return value to set
  */
 #define __BPF_FUNC_MAPPER(FN)		\
 	FN(unspec),			\
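bpf_override_return() lets a kprobe-attached program replace the probed function's return value. A sketch under the assumption that CONFIG_BPF_KPROBE_OVERRIDE is enabled and the probed function is whitelisted for error injection (the target function and section name are illustrative):

#include <linux/ptrace.h>
#include <linux/bpf.h>

static int (*bpf_override_return)(struct pt_regs *regs, unsigned long rc) =
	(void *) BPF_FUNC_override_return;

__attribute__((section("kprobe/open_ctree"), used))
int force_enomem(struct pt_regs *ctx)
{
	/* Make the probed function return -ENOMEM instead of running. */
	bpf_override_return(ctx, -12 /* -ENOMEM */);
	return 0;
}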
@@ -736,7 +755,9 @@ union bpf_attr {
 	FN(xdp_adjust_meta),		\
 	FN(perf_event_read_value),	\
 	FN(perf_prog_read_value),	\
-	FN(getsockopt),
+	FN(getsockopt),			\
+	FN(override_return),		\
+	FN(sock_ops_cb_flags_set),
 
 /* integer value in 'imm' field of BPF_CALL instruction selects which helper
  * function eBPF program intends to call
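For context, immediately after this hunk the (unchanged) header expands the mapper into enum bpf_func_id, which is where the two new helpers receive their BPF_FUNC_* ids used in the 'imm' field:

#define __BPF_ENUM_FN(x) BPF_FUNC_ ## x
enum bpf_func_id {
	__BPF_FUNC_MAPPER(__BPF_ENUM_FN)
	__BPF_FUNC_MAX_ID,
};
#undef __BPF_ENUM_FN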
@@ -888,6 +909,9 @@ struct xdp_md {
 	__u32 data;
 	__u32 data_end;
 	__u32 data_meta;
+	/* Below access go through struct xdp_rxq_info */
+	__u32 ingress_ifindex; /* rxq->dev->ifindex */
+	__u32 rx_queue_index;  /* rxq->queue_index  */
 };
 
 enum sk_action {
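A sketch of an XDP program reading the new rxq-derived fields, for example to keep only traffic arriving on RX queue 0 (the policy is illustrative):

#include <linux/bpf.h>

__attribute__((section("xdp"), used))
int xdp_queue0_only(struct xdp_md *ctx)
{
	/* rx_queue_index and ingress_ifindex are resolved by the verifier
	 * through struct xdp_rxq_info, per the comment above.
	 */
	if (ctx->rx_queue_index != 0)
		return XDP_DROP;
	return XDP_PASS;
}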
@@ -910,6 +934,9 @@ struct bpf_prog_info {
 	__u32 nr_map_ids;
 	__aligned_u64 map_ids;
 	char name[BPF_OBJ_NAME_LEN];
+	__u32 ifindex;
+	__u64 netns_dev;
+	__u64 netns_ino;
 } __attribute__((aligned(8)));
 
 struct bpf_map_info {
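The new ifindex/netns_dev/netns_ino fields describe where an offloaded program lives and can be read back with BPF_OBJ_GET_INFO_BY_FD. A sketch with error handling omitted; the same pattern applies to bpf_map_info in the next hunk:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Return the ifindex the program is offloaded to (0 means not offloaded). */
static int prog_ifindex(int prog_fd, __u32 *ifindex)
{
	struct bpf_prog_info info;
	union bpf_attr attr;
	int err;

	memset(&info, 0, sizeof(info));
	memset(&attr, 0, sizeof(attr));
	attr.info.bpf_fd   = prog_fd;
	attr.info.info_len = sizeof(info);
	attr.info.info     = (__u64)(unsigned long)&info;

	err = syscall(__NR_bpf, BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	if (!err)
		*ifindex = info.ifindex;
	return err;
}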
@@ -920,6 +947,9 @@ struct bpf_map_info {
 	__u32 max_entries;
 	__u32 map_flags;
 	char  name[BPF_OBJ_NAME_LEN];
+	__u32 ifindex;
+	__u64 netns_dev;
+	__u64 netns_ino;
 } __attribute__((aligned(8)));
 
 /* User bpf_sock_ops struct to access socket values and specify request ops
@@ -931,8 +961,9 @@ struct bpf_map_info {
 struct bpf_sock_ops {
 	__u32 op;
 	union {
-		__u32 reply;
-		__u32 replylong[4];
+		__u32 args[4];		/* Optionally passed to bpf program */
+		__u32 reply;		/* Returned by bpf program	    */
+		__u32 replylong[4];	/* Optionally returned by bpf prog  */
 	};
 	__u32 family;
 	__u32 remote_ip4;	/* Stored in network byte order */
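The union now works in both directions: args[] carries per-op arguments into the program, while reply/replylong carry a value back. A sketch in the style of the samples/bpf sock_ops programs, which write skops->reply and return 1 (the value 40 is illustrative):

#include <linux/bpf.h>

__attribute__((section("sockops"), used))
int set_init_rwnd(struct bpf_sock_ops *skops)
{
	int rv = -1;	/* -1: keep the kernel default */

	if (skops->op == BPF_SOCK_OPS_RWND_INIT)
		rv = 40;	/* initial advertized window, in packets */

	skops->reply = rv;
	return 1;
}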
@@ -941,8 +972,45 @@ struct bpf_sock_ops {
 	__u32 local_ip6[4];	/* Stored in network byte order */
 	__u32 remote_port;	/* Stored in network byte order */
 	__u32 local_port;	/* stored in host byte order */
+	__u32 is_fullsock;	/* Some TCP fields are only valid if
+				 * there is a full socket. If not, the
+				 * fields read as zero.
+				 */
+	__u32 snd_cwnd;
+	__u32 srtt_us;		/* Averaged RTT << 3 in usecs */
+	__u32 bpf_sock_ops_cb_flags; /* flags defined in uapi/linux/tcp.h */
+	__u32 state;
+	__u32 rtt_min;
+	__u32 snd_ssthresh;
+	__u32 rcv_nxt;
+	__u32 snd_nxt;
+	__u32 snd_una;
+	__u32 mss_cache;
+	__u32 ecn_flags;
+	__u32 rate_delivered;
+	__u32 rate_interval_us;
+	__u32 packets_out;
+	__u32 retrans_out;
+	__u32 total_retrans;
+	__u32 segs_in;
+	__u32 data_segs_in;
+	__u32 segs_out;
+	__u32 data_segs_out;
+	__u32 lost_out;
+	__u32 sacked_out;
+	__u32 sk_txhash;
+	__u64 bytes_received;
+	__u64 bytes_acked;
 };
 
+/* Definitions for bpf_sock_ops_cb_flags */
+#define BPF_SOCK_OPS_RTO_CB_FLAG	(1<<0)
+#define BPF_SOCK_OPS_RETRANS_CB_FLAG	(1<<1)
+#define BPF_SOCK_OPS_STATE_CB_FLAG	(1<<2)
+#define BPF_SOCK_OPS_ALL_CB_FLAGS	0x7		/* Mask of all currently
+							 * supported cb flags
+							 */
+
 /* List of known BPF sock_ops operators.
  * New entries can only be added at the end
  */
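A sketch of reading the new read-only TCP fields; per the comment on is_fullsock, the TCP-only fields read as zero when there is no full socket. bpf_trace_printk is used purely for illustration:

#include <linux/bpf.h>

static int (*bpf_trace_printk)(const char *fmt, int fmt_size, ...) =
	(void *) BPF_FUNC_trace_printk;

__attribute__((section("sockops"), used))
int dump_cwnd(struct bpf_sock_ops *skops)
{
	char fmt[] = "cwnd %u srtt %u us\n";

	if (skops->is_fullsock)
		/* srtt_us is stored as averaged RTT << 3, so shift it back */
		bpf_trace_printk(fmt, sizeof(fmt),
				 skops->snd_cwnd, skops->srtt_us >> 3);
	skops->reply = -1;
	return 1;
}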
@@ -976,6 +1044,43 @@ enum {
 					 * a congestion threshold. RTTs above
 					 * this indicate congestion
 					 */
+	BPF_SOCK_OPS_RTO_CB,	/* Called when an RTO has triggered.
+				 * Arg1: value of icsk_retransmits
+				 * Arg2: value of icsk_rto
+				 * Arg3: whether RTO has expired
+				 */
+	BPF_SOCK_OPS_RETRANS_CB, /* Called when skb is retransmitted.
+				  * Arg1: sequence number of 1st byte
+				  * Arg2: # segments
+				  * Arg3: return value of
+				  *       tcp_transmit_skb (0 => success)
+				  */
+	BPF_SOCK_OPS_STATE_CB,	/* Called when TCP changes state.
+				 * Arg1: old_state
+				 * Arg2: new_state
+				 */
+};
+
+/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
+ * changes between the TCP and BPF versions. Ideally this should never happen.
+ * If it does, we need to add code to convert them before calling
+ * the BPF sock_ops function.
+ */
+enum {
+	BPF_TCP_ESTABLISHED = 1,
+	BPF_TCP_SYN_SENT,
+	BPF_TCP_SYN_RECV,
+	BPF_TCP_FIN_WAIT1,
+	BPF_TCP_FIN_WAIT2,
+	BPF_TCP_TIME_WAIT,
+	BPF_TCP_CLOSE,
+	BPF_TCP_CLOSE_WAIT,
+	BPF_TCP_LAST_ACK,
+	BPF_TCP_LISTEN,
+	BPF_TCP_CLOSING,	/* Now a valid state */
+	BPF_TCP_NEW_SYN_RECV,
+
+	BPF_TCP_MAX_STATES	/* Leave at the end! */
 };
 
 #define TCP_BPF_IW		1001	/* Set TCP initial congestion window */
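A sketch of handling the new callbacks, under the assumption that Arg1..Arg3 from the comments above arrive in args[0]..args[2]:

#include <linux/bpf.h>

__attribute__((section("sockops"), used))
int track_state(struct bpf_sock_ops *skops)
{
	switch (skops->op) {
	case BPF_SOCK_OPS_RETRANS_CB:
		/* args[0]: seq of 1st retransmitted byte,
		 * args[1]: # segments, args[2]: tcp_transmit_skb() result
		 */
		break;
	case BPF_SOCK_OPS_STATE_CB:
		/* args[0]: old_state, args[1]: new_state */
		if (skops->args[1] == BPF_TCP_CLOSE) {
			/* connection is going away; final counters such as
			 * skops->total_retrans are still readable here
			 */
		}
		break;
	}
	skops->reply = -1;
	return 1;
}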
@@ -995,7 +1100,8 @@ struct bpf_perf_event_value {
 #define BPF_DEVCG_DEV_CHAR	(1ULL << 1)
 
 struct bpf_cgroup_dev_ctx {
-	__u32 access_type; /* (access << 16) | type */
+	/* access_type encoded as (BPF_DEVCG_ACC_* << 16) | BPF_DEVCG_DEV_* */
+	__u32 access_type;
 	__u32 major;
 	__u32 minor;
 };
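A sketch of a device-cgroup program decoding access_type with the layout spelled out above; the deny-writes-to-/dev/mem policy (char device 1:1) is purely illustrative:

#include <linux/bpf.h>

__attribute__((section("cgroup/dev"), used))
int dev_filter(struct bpf_cgroup_dev_ctx *ctx)
{
	unsigned int type   = ctx->access_type & 0xFFFF;	/* BPF_DEVCG_DEV_* */
	unsigned int access = ctx->access_type >> 16;		/* BPF_DEVCG_ACC_* */

	if (type == BPF_DEVCG_DEV_CHAR && ctx->major == 1 && ctx->minor == 1 &&
	    (access & BPF_DEVCG_ACC_WRITE))
		return 0;	/* deny */
	return 1;		/* allow */
}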