Diffstat (limited to 'include/net/tcp.h')
 -rw-r--r--  include/net/tcp.h  |  54
 1 file changed, 43 insertions(+), 11 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 6da880d2f022..58278669cc55 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -387,7 +387,7 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
 void tcp_close(struct sock *sk, long timeout);
 void tcp_init_sock(struct sock *sk);
 void tcp_init_transfer(struct sock *sk, int bpf_op);
-unsigned int tcp_poll(struct file *file, struct socket *sock,
+__poll_t tcp_poll(struct file *file, struct socket *sock,
		      struct poll_table_struct *wait);
 int tcp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen);
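
The poll handler's return type moves from unsigned int to __poll_t, the sparse-checked bitwise type for readiness masks. A minimal in-kernel sketch of a handler on the new signature, assuming the usual socket plumbing; demo_poll(), demo_data_ready() and demo_write_space() are hypothetical names, not part of this patch:

	static __poll_t demo_poll(struct file *file, struct socket *sock,
				  struct poll_table_struct *wait)
	{
		__poll_t mask = 0;

		poll_wait(file, sk_sleep(sock->sk), wait); /* arm wakeups */
		if (demo_data_ready(sock->sk))		/* hypothetical */
			mask |= EPOLLIN | EPOLLRDNORM;
		if (demo_write_space(sock->sk))		/* hypothetical */
			mask |= EPOLLOUT | EPOLLWRNORM;
		return mask;
	}

Building the mask from EPOLL* constants keeps sparse happy, since __poll_t values cannot be mixed with plain integers.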
@@ -953,6 +953,7 @@ struct rate_sample {
	u32  prior_in_flight;	/* in flight before this ACK */
	bool is_app_limited;	/* is sample from packet with bubble in pipe? */
	bool is_retrans;	/* is sample from retransmission? */
+	bool is_ack_delayed;	/* is this (likely) a delayed ACK? */
 };
 
 struct tcp_congestion_ops {
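
struct rate_sample gains is_ack_delayed, letting congestion-control modules discount samples whose RTT was inflated by the receiver's delayed-ACK timer. A hedged sketch of a consumer in a cong_control() callback; struct demo_ca and its field are hypothetical private state:

	struct demo_ca {
		u32 min_rtt_us;		/* hypothetical min-RTT filter state */
	};

	static void demo_cong_control(struct sock *sk,
				      const struct rate_sample *rs)
	{
		struct demo_ca *ca = inet_csk_ca(sk);

		/* refresh the min-RTT estimate only from non-delayed ACKs */
		if (rs->rtt_us >= 0 && !rs->is_ack_delayed &&
		    rs->rtt_us < ca->min_rtt_us)
			ca->min_rtt_us = rs->rtt_us;
	}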
@@ -1507,8 +1508,7 @@ int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
 
 /* From tcp_fastopen.c */
 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
-			    struct tcp_fastopen_cookie *cookie, int *syn_loss,
-			    unsigned long *last_syn_loss);
+			    struct tcp_fastopen_cookie *cookie);
 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost,
			    u16 try_exp);
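
The SYN-loss bookkeeping (syn_loss, last_syn_loss) disappears from the cache-get path, so callers now fetch only the cached MSS and cookie. A hedged caller sketch; demo_fastopen_has_cookie() is an illustrative name:

	static bool demo_fastopen_has_cookie(struct sock *sk)
	{
		struct tcp_fastopen_cookie cookie = { .len = -1 };
		u16 mss = 0;

		tcp_fastopen_cache_get(sk, &mss, &cookie);
		return cookie.len > 0;	/* a cached cookie permits SYN+data */
	}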
@@ -1546,7 +1546,7 @@ extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
 void tcp_fastopen_active_disable(struct sock *sk);
 bool tcp_fastopen_active_should_disable(struct sock *sk);
 void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
-void tcp_fastopen_active_timeout_reset(void);
+void tcp_fastopen_active_detect_blackhole(struct sock *sk, bool expired);
 
 /* Latencies incurred by various limits for a sender. They are
  * chronograph-like stats that are mutually exclusive.
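
The parameterless tcp_fastopen_active_timeout_reset() is replaced by a per-socket detector that takes the socket and whether its retransmission timer has fully expired. A hedged sketch of a call site in a retransmit-timeout path; demo_on_write_timeout() is illustrative:

	static void demo_on_write_timeout(struct sock *sk, bool expired)
	{
		/* let the helper judge whether this Fast Open attempt
		 * looks blackholed and TFO should be disabled for a while
		 */
		tcp_fastopen_active_detect_blackhole(sk, expired);
	}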
@@ -2006,17 +2006,21 @@ void tcp_cleanup_ulp(struct sock *sk);
  * program loaded).
  */
 #ifdef CONFIG_BPF
-static inline int tcp_call_bpf(struct sock *sk, int op)
+static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
 {
	struct bpf_sock_ops_kern sock_ops;
	int ret;
 
-	if (sk_fullsock(sk))
+	memset(&sock_ops, 0, offsetof(struct bpf_sock_ops_kern, temp));
+	if (sk_fullsock(sk)) {
+		sock_ops.is_fullsock = 1;
		sock_owned_by_me(sk);
+	}
 
-	memset(&sock_ops, 0, sizeof(sock_ops));
	sock_ops.sk = sk;
	sock_ops.op = op;
+	if (nargs > 0)
+		memcpy(sock_ops.args, args, nargs * sizeof(*args));
 
	ret = BPF_CGROUP_RUN_PROG_SOCK_OPS(&sock_ops);
	if (ret == 0)
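
Two changes inside tcp_call_bpf(): the context struct is now cleared only up to its temp scratch field, and up to nargs caller-supplied values are copied into sock_ops.args for the BPF program. The offsetof() idiom zeroes just the prefix a program can observe; a self-contained userspace illustration of the same pattern, with hypothetical names:

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>

	struct demo_ctx {
		uint32_t op;		/* fields the callee may read */
		uint32_t args[4];
		uint8_t  temp[64];	/* scratch; written before any read */
	};

	static void demo_ctx_init(struct demo_ctx *ctx)
	{
		/* zero everything before ->temp, skip the scratch area */
		memset(ctx, 0, offsetof(struct demo_ctx, temp));
	}

Skipping the scratch area trims a memset on a hot path while keeping every program-visible field initialized.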
@@ -2025,18 +2029,46 @@ static inline int tcp_call_bpf(struct sock *sk, int op)
		ret = -1;
	return ret;
 }
+
+static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
+{
+	u32 args[2] = {arg1, arg2};
+
+	return tcp_call_bpf(sk, op, 2, args);
+}
+
+static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
+				    u32 arg3)
+{
+	u32 args[3] = {arg1, arg2, arg3};
+
+	return tcp_call_bpf(sk, op, 3, args);
+}
+
 #else
-static inline int tcp_call_bpf(struct sock *sk, int op)
+static inline int tcp_call_bpf(struct sock *sk, int op, u32 nargs, u32 *args)
 {
	return -EPERM;
 }
+
+static inline int tcp_call_bpf_2arg(struct sock *sk, int op, u32 arg1, u32 arg2)
+{
+	return -EPERM;
+}
+
+static inline int tcp_call_bpf_3arg(struct sock *sk, int op, u32 arg1, u32 arg2,
+				    u32 arg3)
+{
+	return -EPERM;
+}
+
 #endif
 
 static inline u32 tcp_timeout_init(struct sock *sk)
 {
	int timeout;
 
-	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT);
+	timeout = tcp_call_bpf(sk, BPF_SOCK_OPS_TIMEOUT_INIT, 0, NULL);
 
	if (timeout <= 0)
		timeout = TCP_TIMEOUT_INIT;
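
With the wrappers in place, argument-less ops pass 0 and NULL (as tcp_timeout_init() above now does), while ops that carry per-call values go through tcp_call_bpf_2arg()/tcp_call_bpf_3arg(). A hedged usage sketch; BPF_SOCK_OPS_DEMO_CB is an illustrative op, not one this patch defines:

	static void demo_report_to_bpf(struct sock *sk, u32 srtt_us, u32 cwnd)
	{
		/* the two values land in sock_ops.args[0] and args[1],
		 * where the attached program can read them
		 */
		tcp_call_bpf_2arg(sk, BPF_SOCK_OPS_DEMO_CB, srtt_us, cwnd);
	}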
@@ -2047,7 +2079,7 @@ static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
 {
	int rwnd;
 
-	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT);
+	rwnd = tcp_call_bpf(sk, BPF_SOCK_OPS_RWND_INIT, 0, NULL);
 
	if (rwnd < 0)
		rwnd = 0;
@@ -2056,7 +2088,7 @@ static inline u32 tcp_rwnd_init_bpf(struct sock *sk)
 
 static inline bool tcp_bpf_ca_needs_ecn(struct sock *sk)
 {
-	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN) == 1);
+	return (tcp_call_bpf(sk, BPF_SOCK_OPS_NEEDS_ECN, 0, NULL) == 1);
 }
 
 #if IS_ENABLED(CONFIG_SMC)