author	Eric Dumazet <edumazet@google.com>	2018-11-11 09:41:30 -0500
committer	David S. Miller <davem@davemloft.net>	2018-11-11 16:54:53 -0500
commit	f1c6ea3827b5e5ec62e297bcf4ccfd065326e8f7
tree	69036d0cdbfb568233a8838bc1ff9f9b29ab624b	/net/ipv4/tcp_output.c
parent	1c09f7d073b1d1ce85765c5552e4b40a6b6ba770
tcp: refine tcp_tso_should_defer() after EDT adoption
The last step of tcp_tso_should_defer() checks whether the probable next ACK is expected in less than half an rtt.

The problem is that head->tstamp might be in the future (under EDT pacing), so we need to use signed arithmetic to avoid overflows.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
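For illustration only (not part of the patch): a minimal userspace sketch of the signed-delta comparison the patch adopts. The helper ack_likely_too_late and its parameters clock_ns, skb_tstamp_ns and half_srtt_us are placeholders standing in for tp->tcp_clock_cache, head->tstamp and tp->srtt_us >> 4; NSEC_PER_USEC is redefined locally.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000ULL	/* local stand-in for the kernel constant */

/* Return 1 if the head skb timestamp is less than half an srtt old,
 * i.e. the next ACK is likely too far away and we should send now
 * instead of deferring.
 */
static int ack_likely_too_late(uint64_t clock_ns, uint64_t skb_tstamp_ns,
			       uint32_t half_srtt_us)
{
	/* skb_tstamp_ns can be ahead of clock_ns under EDT pacing, so the
	 * raw difference may wrap as unsigned; keep it in a signed type.
	 */
	int64_t delta = (int64_t)(clock_ns - skb_tstamp_ns);

	return (int64_t)(delta - (uint64_t)NSEC_PER_USEC * half_srtt_us) < 0;
}

int main(void)
{
	uint64_t now = 1000000000ULL;		/* arbitrary "now", in ns */
	uint64_t future = now + 2000000ULL;	/* head scheduled 2 ms ahead */
	uint64_t past = now - 30000000ULL;	/* head sent 30 ms ago */

	/* half srtt = 5000 us (5 ms) */
	printf("future tstamp -> send now: %d\n",
	       ack_likely_too_late(now, future, 5000));	/* 1: do not defer */
	printf("old tstamp    -> send now: %d\n",
	       ack_likely_too_late(now, past, 5000));	/* 0: ok to defer */
	return 0;
}

With an unsigned delta the future-timestamp case would wrap to a huge value and wrongly look "old enough" to defer; the signed cast keeps it negative, which is exactly the overflow the patch avoids.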
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--	net/ipv4/tcp_output.c	7
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 35feadf48030..78a56cef7e39 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1907,10 +1907,11 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 				 bool *is_cwnd_limited, u32 max_segs)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
-	u32 age, send_win, cong_win, limit, in_flight;
+	u32 send_win, cong_win, limit, in_flight;
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *head;
 	int win_divisor;
+	s64 delta;
 
 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
 		goto send_now;
@@ -1972,9 +1973,9 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 	head = tcp_rtx_queue_head(sk);
 	if (!head)
 		goto send_now;
-	age = tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(head));
+	delta = tp->tcp_clock_cache - head->tstamp;
 	/* If next ACK is likely to come too late (half srtt), do not defer */
-	if (age < (tp->srtt_us >> 4))
+	if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0)
 		goto send_now;
 
 	/* Ok, it looks like it is advisable to defer. */
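A note on units for the new check (editorial gloss, not part of the patch): tp->srtt_us stores 8 times the smoothed RTT in microseconds, so tp->srtt_us >> 4 is half the srtt in microseconds, and the NSEC_PER_USEC multiply brings it to the nanosecond scale of tp->tcp_clock_cache and head->tstamp. For example, with a 40 ms smoothed RTT: srtt_us = 8 * 40000 = 320000, srtt_us >> 4 = 20000 us (half the srtt), and 20000 * 1000 = 20000000 ns is the cutoff compared against delta.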