about summary refs log tree commit diff stats
path: root/net/ipv4/tcp_output.c
diff options
context:
space:
mode:
authorDavid S. Miller <davem@davemloft.net>2018-11-11 16:54:54 -0500
committerDavid S. Miller <davem@davemloft.net>2018-11-11 16:54:54 -0500
commit83afb36a7097bdf6776e261fa3a2bf2dc5a4a8c2 (patch)
tree1ba9698d64af24b2567bcd2d6cc590fb140ec386 /net/ipv4/tcp_output.c
parent5e13a0d3f5c11df7eb297e6583cf874a79a00374 (diff)
parenta682850a114aef947da5d603f7fd2cfe7eabbd72 (diff)
Merge branch 'tcp-tso-defer-improvements'
Eric Dumazet says: ==================== tcp: tso defer improvements This series makes tcp_tso_should_defer() a bit smarter : 1) MSG_EOR gives a hint to TCP to not defer some skbs 2) Second patch takes into account that head tstamp can be in the future. 3) Third patch uses existing high resolution state variables to have a more precise heuristic. ==================== Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--net/ipv4/tcp_output.c18
1 file changed, 13 insertions, 5 deletions
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 9c34b97d365d..75dcf4daca72 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1907,10 +1907,11 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
1907 bool *is_cwnd_limited, u32 max_segs) 1907 bool *is_cwnd_limited, u32 max_segs)
1908{ 1908{
1909 const struct inet_connection_sock *icsk = inet_csk(sk); 1909 const struct inet_connection_sock *icsk = inet_csk(sk);
1910 u32 age, send_win, cong_win, limit, in_flight; 1910 u32 send_win, cong_win, limit, in_flight;
1911 struct tcp_sock *tp = tcp_sk(sk); 1911 struct tcp_sock *tp = tcp_sk(sk);
1912 struct sk_buff *head; 1912 struct sk_buff *head;
1913 int win_divisor; 1913 int win_divisor;
1914 s64 delta;
1914 1915
1915 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) 1916 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1916 goto send_now; 1917 goto send_now;
@@ -1919,9 +1920,12 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
1919 goto send_now; 1920 goto send_now;
1920 1921
1921 /* Avoid bursty behavior by allowing defer 1922 /* Avoid bursty behavior by allowing defer
1922 * only if the last write was recent. 1923 * only if the last write was recent (1 ms).
1924 * Note that tp->tcp_wstamp_ns can be in the future if we have
1925 * packets waiting in a qdisc or device for EDT delivery.
1923 */ 1926 */
1924 if ((s32)(tcp_jiffies32 - tp->lsndtime) > 0) 1927 delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC;
1928 if (delta > 0)
1925 goto send_now; 1929 goto send_now;
1926 1930
1927 in_flight = tcp_packets_in_flight(tp); 1931 in_flight = tcp_packets_in_flight(tp);
@@ -1944,6 +1948,10 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
1944 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) 1948 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
1945 goto send_now; 1949 goto send_now;
1946 1950
1951 /* If this packet won't get more data, do not wait. */
1952 if (TCP_SKB_CB(skb)->eor)
1953 goto send_now;
1954
1947 win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor); 1955 win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
1948 if (win_divisor) { 1956 if (win_divisor) {
1949 u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache); 1957 u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
@@ -1968,9 +1976,9 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
1968 head = tcp_rtx_queue_head(sk); 1976 head = tcp_rtx_queue_head(sk);
1969 if (!head) 1977 if (!head)
1970 goto send_now; 1978 goto send_now;
1971 age = tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(head)); 1979 delta = tp->tcp_clock_cache - head->tstamp;
1972 /* If next ACK is likely to come too late (half srtt), do not defer */ 1980 /* If next ACK is likely to come too late (half srtt), do not defer */
1973 if (age < (tp->srtt_us >> 4)) 1981 if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0)
1974 goto send_now; 1982 goto send_now;
1975 1983
1976 /* Ok, it looks like it is advisable to defer. */ 1984 /* Ok, it looks like it is advisable to defer. */