Diffstat (limited to 'net/ipv4/tcp_output.c')
 net/ipv4/tcp_output.c | 23 ++++++++++++++++-------
 1 file changed, 16 insertions(+), 7 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3f510cad0b3e..c5dc4c4fdadd 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1907,10 +1907,11 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 				 bool *is_cwnd_limited, u32 max_segs)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
-	u32 age, send_win, cong_win, limit, in_flight;
+	u32 send_win, cong_win, limit, in_flight;
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *head;
 	int win_divisor;
+	s64 delta;
 
 	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
 		goto send_now;
@@ -1919,9 +1920,12 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 		goto send_now;
 
 	/* Avoid bursty behavior by allowing defer
-	 * only if the last write was recent.
+	 * only if the last write was recent (1 ms).
+	 * Note that tp->tcp_wstamp_ns can be in the future if we have
+	 * packets waiting in a qdisc or device for EDT delivery.
 	 */
-	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 0)
+	delta = tp->tcp_clock_cache - tp->tcp_wstamp_ns - NSEC_PER_MSEC;
+	if (delta > 0)
 		goto send_now;
 
 	in_flight = tcp_packets_in_flight(tp);
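The replaced test compared tcp_jiffies32 against tp->lsndtime, i.e. tick granularity; the new one compares the cached nanosecond clock with the EDT write stamp, so "recent" now means exactly 1 ms. A minimal standalone sketch of the arithmetic, with hypothetical sample values standing in for tp->tcp_clock_cache and tp->tcp_wstamp_ns:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_MSEC 1000000LL

int main(void)
{
	uint64_t tcp_clock_cache = 5000000000ULL; /* "now", cached ns clock */
	uint64_t tcp_wstamp_ns   = 4998500000ULL; /* last write, 1.5 ms ago */
	int64_t delta;

	/* Positive once more than 1 ms has elapsed since the last write. */
	delta = tcp_clock_cache - tcp_wstamp_ns - NSEC_PER_MSEC;
	printf("%s\n", delta > 0 ? "send now" : "may defer");

	/* EDT case: the write stamp sits 3 ms in the future because
	 * packets are still queued in a qdisc or device; the signed
	 * delta goes negative and deferral stays possible.
	 */
	tcp_wstamp_ns = tcp_clock_cache + 3 * NSEC_PER_MSEC;
	delta = tcp_clock_cache - tcp_wstamp_ns - NSEC_PER_MSEC;
	printf("%s\n", delta > 0 ? "send now" : "may defer");
	return 0;
}

The signed 64-bit delta is what makes the future-wstamp case safe, which is exactly why the comment about EDT delivery was added alongside the code.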
@@ -1944,6 +1948,10 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
 		goto send_now;
 
+	/* If this packet won't get more data, do not wait. */
+	if (TCP_SKB_CB(skb)->eor)
+		goto send_now;
+
 	win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
 	if (win_divisor) {
 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
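For context, TCP_SKB_CB(skb)->eor is set when the application marks a record boundary, for example with MSG_EOR on send(); such an skb cannot be coalesced with later writes, so deferring it in the hope of more data is pointless. A hedged userspace sketch of how the flag gets set (the 127.0.0.1:8080 endpoint is made up; any listening TCP server works):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(8080),	/* hypothetical port */
	};
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);
	if (fd < 0 || connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return 1;

	/* MSG_EOR on a TCP socket sets TCP_SKB_CB(skb)->eor on the last
	 * skb of this write, so later writes cannot be coalesced into
	 * it; that is the condition the new tcp_tso_should_defer()
	 * check short-circuits on.
	 */
	send(fd, "record", 6, MSG_EOR);

	close(fd);
	return 0;
}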
@@ -1968,9 +1976,9 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 	head = tcp_rtx_queue_head(sk);
 	if (!head)
 		goto send_now;
-	age = tcp_stamp_us_delta(tp->tcp_mstamp, tcp_skb_timestamp_us(head));
+	delta = tp->tcp_clock_cache - head->tstamp;
 	/* If next ACK is likely to come too late (half srtt), do not defer */
-	if (age < (tp->srtt_us >> 4))
+	if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0)
 		goto send_now;
 
 	/* Ok, it looks like it is advisable to defer. */
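tp->srtt_us stores the smoothed RTT scaled by 8, in microseconds, so (tp->srtt_us >> 4) is half the RTT; the patch converts that to nanoseconds with NSEC_PER_USEC and compares it against the nanosecond age of the retransmit-queue head rather than a microsecond age. A minimal sketch with hypothetical values:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_USEC 1000LL

int main(void)
{
	/* srtt_us holds the smoothed RTT scaled by 8 (usec), so
	 * srtt_us >> 4 is half the RTT: here a 20 ms RTT gives 10 ms.
	 */
	uint32_t srtt_us = 8 * 20000;

	/* Hypothetical ns stamps: "now" and head->tstamp of the oldest
	 * packet in the retransmit queue, sent 5 ms ago.
	 */
	uint64_t tcp_clock_cache = 7000000000ULL;
	uint64_t head_tstamp     = 6995000000ULL;
	uint64_t delta = tcp_clock_cache - head_tstamp;

	/* The ACK for the head packet arrives roughly one RTT after it
	 * was sent; if the head is younger than srtt/2, that ACK is
	 * more than half an RTT away, too late to wait for.
	 */
	if ((int64_t)(delta - (uint64_t)NSEC_PER_USEC * (srtt_us >> 4)) < 0)
		printf("send now (next ACK too late)\n");
	else
		printf("ok to defer\n");
	return 0;
}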
@@ -2212,8 +2220,9 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
 	limit = max_t(unsigned long,
 		      2 * skb->truesize,
 		      sk->sk_pacing_rate >> sk->sk_pacing_shift);
-	limit = min_t(unsigned long, limit,
-		      sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
+	if (sk->sk_pacing_status == SK_PACING_NONE)
+		limit = min_t(unsigned long, limit,
+			      sock_net(sk)->ipv4.sysctl_tcp_limit_output_bytes);
 	limit <<= factor;
 
 	if (refcount_read(&sk->sk_wmem_alloc) > limit) {
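The effect of this hunk: paced flows keep the limit derived from their pacing rate, and the tcp_limit_output_bytes sysctl only caps flows with SK_PACING_NONE. A small standalone model of the limit computation, with made-up rate and sysctl values (the enum mirrors the kernel's pacing states but is a simplified stand-in):

#include <stdio.h>

enum sk_pacing { SK_PACING_NONE, SK_PACING_NEEDED, SK_PACING_FQ };

static unsigned long tsq_limit(unsigned long truesize,
			       unsigned long pacing_rate,
			       unsigned int pacing_shift,
			       enum sk_pacing status,
			       unsigned long limit_output_bytes,
			       unsigned int factor)
{
	unsigned long limit = 2 * truesize;
	unsigned long paced = pacing_rate >> pacing_shift;

	if (paced > limit)
		limit = paced;
	/* After the patch, the sysctl cap binds only unpaced flows. */
	if (status == SK_PACING_NONE && limit > limit_output_bytes)
		limit = limit_output_bytes;
	return limit << factor;
}

int main(void)
{
	/* Made-up numbers: ~10 Gbit/s pacing rate in bytes/sec, a shift
	 * of 10 (roughly 1 ms worth of data), 256 KB sysctl cap.
	 */
	printf("paced:   %lu\n",
	       tsq_limit(65536, 1250000000UL, 10, SK_PACING_FQ, 262144, 0));
	printf("unpaced: %lu\n",
	       tsq_limit(65536, 1250000000UL, 10, SK_PACING_NONE, 262144, 0));
	return 0;
}

With these numbers the paced flow is allowed roughly 1.2 MB in flight through TSQ while the unpaced flow stays clamped at the 256 KB cap, which is the behavioral change the hunk introduces.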