about summary refs log tree commit diff stats
path: root/net/ipv4/tcp_output.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r-- net/ipv4/tcp_output.c | 35
1 file changed, 24 insertions, 11 deletions
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 5aa600900695..d1676d8a6ed7 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1904,7 +1904,9 @@ static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
1904 * This algorithm is from John Heffner. 1904 * This algorithm is from John Heffner.
1905 */ 1905 */
1906static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb, 1906static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
1907 bool *is_cwnd_limited, u32 max_segs) 1907 bool *is_cwnd_limited,
1908 bool *is_rwnd_limited,
1909 u32 max_segs)
1908{ 1910{
1909 const struct inet_connection_sock *icsk = inet_csk(sk); 1911 const struct inet_connection_sock *icsk = inet_csk(sk);
1910 u32 age, send_win, cong_win, limit, in_flight; 1912 u32 age, send_win, cong_win, limit, in_flight;
@@ -1912,9 +1914,6 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
1912 struct sk_buff *head; 1914 struct sk_buff *head;
1913 int win_divisor; 1915 int win_divisor;
1914 1916
1915 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1916 goto send_now;
1917
1918 if (icsk->icsk_ca_state >= TCP_CA_Recovery) 1917 if (icsk->icsk_ca_state >= TCP_CA_Recovery)
1919 goto send_now; 1918 goto send_now;
1920 1919
@@ -1973,10 +1972,27 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
1973 if (age < (tp->srtt_us >> 4)) 1972 if (age < (tp->srtt_us >> 4))
1974 goto send_now; 1973 goto send_now;
1975 1974
1976 /* Ok, it looks like it is advisable to defer. */ 1975 /* Ok, it looks like it is advisable to defer.
1976 * Three cases are tracked :
1977 * 1) We are cwnd-limited
1978 * 2) We are rwnd-limited
1979 * 3) We are application limited.
1980 */
1981 if (cong_win < send_win) {
1982 if (cong_win <= skb->len) {
1983 *is_cwnd_limited = true;
1984 return true;
1985 }
1986 } else {
1987 if (send_win <= skb->len) {
1988 *is_rwnd_limited = true;
1989 return true;
1990 }
1991 }
1977 1992
1978 if (cong_win < send_win && cong_win <= skb->len) 1993 /* If this packet won't get more data, do not wait. */
1979 *is_cwnd_limited = true; 1994 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
1995 goto send_now;
1980 1996
1981 return true; 1997 return true;
1982 1998
@@ -2356,11 +2372,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
2356 } else { 2372 } else {
2357 if (!push_one && 2373 if (!push_one &&
2358 tcp_tso_should_defer(sk, skb, &is_cwnd_limited, 2374 tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
2359 max_segs)) { 2375 &is_rwnd_limited, max_segs))
2360 if (!is_cwnd_limited)
2361 is_rwnd_limited = true;
2362 break; 2376 break;
2363 }
2364 } 2377 }
2365 2378
2366 limit = mss_now; 2379 limit = mss_now;