Diffstat (limited to 'include/net/tcp.h')
-rw-r--r--  include/net/tcp.h  22
1 file changed, 21 insertions(+), 1 deletion(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 163d2b467d78..a9fe7bc4f4bb 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -974,7 +974,27 @@ static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
 {
 	return tp->snd_una + tp->snd_wnd;
 }
-bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight);
+
+/* We follow the spirit of RFC2861 to validate cwnd but implement a more
+ * flexible approach. The RFC suggests cwnd should not be raised unless
+ * it was fully used previously. But we allow cwnd to grow as long as the
+ * application has used half the cwnd.
+ * Example :
+ * cwnd is 10 (IW10), but application sends 9 frames.
+ * We allow cwnd to reach 18 when all frames are ACKed.
+ * This check is safe because it's as aggressive as slow start which already
+ * risks 100% overshoot. The advantage is that we discourage application to
+ * either send more filler packets or data to artificially blow up the cwnd
+ * usage, and allow application-limited process to probe bw more aggressively.
+ *
+ * TODO: remove in_flight once we can fix all callers, and their callers...
+ */
+static inline bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
+{
+	const struct tcp_sock *tp = tcp_sk(sk);
+
+	return tp->snd_cwnd < 2 * tp->lsnd_pending;
+}
 
 static inline void tcp_check_probe_timer(struct sock *sk)
 {
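
For illustration only, here is a minimal user-space sketch of the rule this patch adds. The struct mock_tp, the helper is_cwnd_limited() and the chosen values are stand-ins invented for this example, not kernel APIs; they merely mirror the snd_cwnd vs. lsnd_pending comparison from the patch to walk through the IW10 example given in the comment.

/* Minimal user-space sketch (not kernel code): mock_tp and is_cwnd_limited()
 * are hypothetical stand-ins that mirror the fields compared by the patch.
 */
#include <stdbool.h>
#include <stdio.h>

struct mock_tp {
	unsigned int snd_cwnd;      /* congestion window, in packets */
	unsigned int lsnd_pending;  /* packets the application actually had in flight */
};

/* Same comparison as the new tcp_is_cwnd_limited(): the flow counts as
 * cwnd-limited (so congestion control may keep raising cwnd) as long as
 * the application has used at least half of the current cwnd.
 */
static bool is_cwnd_limited(const struct mock_tp *tp)
{
	return tp->snd_cwnd < 2 * tp->lsnd_pending;
}

int main(void)
{
	/* Example from the comment: IW10, but the application sends only 9 frames. */
	struct mock_tp tp = { .snd_cwnd = 10, .lsnd_pending = 9 };

	/* 10 < 18: still considered cwnd-limited, so cwnd may keep growing... */
	printf("cwnd=%u used=%u limited=%d\n",
	       tp.snd_cwnd, tp.lsnd_pending, is_cwnd_limited(&tp));

	/* ...until cwnd reaches 18; 18 < 18 is false, so growth stops here
	 * until the application uses more of the window.
	 */
	tp.snd_cwnd = 18;
	printf("cwnd=%u used=%u limited=%d\n",
	       tp.snd_cwnd, tp.lsnd_pending, is_cwnd_limited(&tp));

	return 0;
}

Under these assumed values the first check reports cwnd-limited and the second does not, which matches the comment's claim that a sender using 9 of an IW10 window is allowed to grow cwnd up to 18 and no further.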