author    David S. Miller <davem@davemloft.net>  2018-12-10 00:27:48 -0500
committer David S. Miller <davem@davemloft.net>  2018-12-10 00:43:31 -0500
commit    4cc1feeb6ffc2799f8badb4dea77c637d340cb0d
tree      c41c1e4c05f016298246ad7b3a6034dc1e65c154 /net/ipv4/tcp_output.c
parent    a60956ed72f7b715e9918df93fcf2f63a30fdda1
parent    40e020c129cfc991e8ab4736d2665351ffd1468d
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Several conflicts, seemingly all over the place. I used Stephen Rothwell's sample resolutions for many of these, if not just to double check my own work, so definitely the credit largely goes to him.

The NFP conflict consisted of a bug fix (moving operations past the rhashtable operation) while changing the initial argument in the function call in the moved code.

The net/dsa/master.c conflict had to do with a bug fix intermixing with the change making dsa_master_set_mtu() static and the fix of the tagging attribute location.

cls_flower had a conflict because the dup reject fix from Or overlapped with the addition of port range classification.

__set_phy_supported()'s conflict was relatively easy to resolve because Andrew fixed it in both trees, so it was just a matter of taking the net-next copy. Or at least I think it was :-)

Joe Stringer's fix to the handling of netns id 0 in bpf_sk_lookup() intermixed with changes to how the sdif and caller_net are calculated in these code paths in net-next.

The remaining BPF conflicts were largely about the addition of the __bpf_md_ptr stuff in 'net' overlapping with adjustments and additions to the relevant data structure where the MD pointer macros are used.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--	net/ipv4/tcp_output.c	45
1 file changed, 32 insertions(+), 13 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index d3b691f3a9e8..c31badfee806 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1904,7 +1904,9 @@ static int tso_fragment(struct sock *sk, enum tcp_queue tcp_queue,
  * This algorithm is from John Heffner.
  */
 static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
-				 bool *is_cwnd_limited, u32 max_segs)
+				 bool *is_cwnd_limited,
+				 bool *is_rwnd_limited,
+				 u32 max_segs)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 send_win, cong_win, limit, in_flight;
@@ -1913,9 +1915,6 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 	int win_divisor;
 	s64 delta;
 
-	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
-		goto send_now;
-
 	if (icsk->icsk_ca_state >= TCP_CA_Recovery)
 		goto send_now;
 
@@ -1981,10 +1980,27 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 	if ((s64)(delta - (u64)NSEC_PER_USEC * (tp->srtt_us >> 4)) < 0)
 		goto send_now;
 
-	/* Ok, it looks like it is advisable to defer. */
+	/* Ok, it looks like it is advisable to defer.
+	 * Three cases are tracked :
+	 * 1) We are cwnd-limited
+	 * 2) We are rwnd-limited
+	 * 3) We are application limited.
+	 */
+	if (cong_win < send_win) {
+		if (cong_win <= skb->len) {
+			*is_cwnd_limited = true;
+			return true;
+		}
+	} else {
+		if (send_win <= skb->len) {
+			*is_rwnd_limited = true;
+			return true;
+		}
+	}
 
-	if (cong_win < send_win && cong_win <= skb->len)
-		*is_cwnd_limited = true;
+	/* If this packet won't get more data, do not wait. */
+	if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
+		goto send_now;
 
 	return true;
 
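For readers looking at this hunk in isolation: the new branch both decides whether to defer and records why sending stopped. The stand-alone sketch below mirrors that control flow with simplified types; tso_defer_decision and its scalar parameters are made up for illustration and are not the in-tree interface.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative sketch of the three-case deferral logic above.
 * Return value: true means "defer this skb", false means "send now".
 */
static bool tso_defer_decision(uint32_t cong_win, uint32_t send_win,
			       uint32_t skb_len, bool skb_has_fin,
			       bool *is_cwnd_limited, bool *is_rwnd_limited)
{
	if (cong_win < send_win) {
		/* Case 1: the congestion window is the tighter bound. */
		if (cong_win <= skb_len) {
			*is_cwnd_limited = true;
			return true;
		}
	} else {
		/* Case 2: the peer's receive window is the tighter bound. */
		if (send_win <= skb_len) {
			*is_rwnd_limited = true;
			return true;
		}
	}

	/* Case 3: application limited.  A FIN means no more payload
	 * will be appended to this skb, so waiting cannot help.
	 */
	if (skb_has_fin)
		return false;

	return true;
}

Read this way, the hunk also shows why the TCPHDR_FIN test moved from the top of tcp_tso_should_defer() (removed in the previous hunk) down to this point: it now only short-circuits the application-limited case, after the cwnd/rwnd accounting has had a chance to run.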
@@ -2365,7 +2381,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		} else {
 			if (!push_one &&
 			    tcp_tso_should_defer(sk, skb, &is_cwnd_limited,
-						 max_segs))
+						 &is_rwnd_limited, max_segs))
 				break;
 		}
 
@@ -2503,15 +2519,18 @@ void tcp_send_loss_probe(struct sock *sk)
 		goto rearm_timer;
 	}
 	skb = skb_rb_last(&sk->tcp_rtx_queue);
+	if (unlikely(!skb)) {
+		WARN_ONCE(tp->packets_out,
+			  "invalid inflight: %u state %u cwnd %u mss %d\n",
+			  tp->packets_out, sk->sk_state, tp->snd_cwnd, mss);
+		inet_csk(sk)->icsk_pending = 0;
+		return;
+	}
 
 	/* At most one outstanding TLP retransmission. */
 	if (tp->tlp_high_seq)
 		goto rearm_timer;
 
-	/* Retransmit last segment. */
-	if (WARN_ON(!skb))
-		goto rearm_timer;
-
 	if (skb_still_in_host_queue(sk, skb))
 		goto rearm_timer;
 
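The hunk above replaces a WARN_ON(!skb) that rearmed the probe timer with an early bail-out: it warns at most once, and only if packets are still accounted as in flight, then clears the pending probe instead of rearming it. A rough stand-alone sketch of that guard pattern, using invented names (fake_sock, loss_probe_queue_ok) purely for illustration:

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for the socket state touched by the guard. */
struct fake_sock {
	unsigned int packets_out;	/* segments believed in flight */
	unsigned int state;
	unsigned int snd_cwnd;
	bool probe_pending;		/* models icsk->icsk_pending */
};

/* Returns true if the retransmit queue has a last skb to probe with.
 * On an unexpectedly empty queue: complain (once per boot in the real
 * kernel, via WARN_ONCE), cancel the pending probe instead of
 * rearming it, and let the caller return.
 */
static bool loss_probe_queue_ok(struct fake_sock *sk, const void *last_skb,
				int mss)
{
	if (last_skb)
		return true;

	if (sk->packets_out)
		fprintf(stderr, "invalid inflight: %u state %u cwnd %u mss %d\n",
			sk->packets_out, sk->state, sk->snd_cwnd, mss);
	sk->probe_pending = false;
	return false;
}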
@@ -2929,7 +2948,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
 		TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
 		trace_tcp_retransmit_skb(sk, skb);
 	} else if (err != -EBUSY) {
-		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
+		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL, segs);
 	}
 	return err;
 }
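The last hunk changes the failure accounting from a fixed increment to an increment by segs, so a failed retransmission of a GSO skb covering several segments is counted as that many failures. A toy illustration, with a plain counter standing in for the kernel's per-CPU SNMP mib (MY_MIB_* and the helpers are made up for the example):

#include <errno.h>

/* Toy model: the retransmit-failure counter now grows by the number
 * of segments the failed skb represented, not by one.
 */
enum { MY_MIB_TCPRETRANSFAIL, MY_MIB_MAX };

static unsigned long mib[MY_MIB_MAX];

static void my_add_stats(int field, int amount)
{
	mib[field] += amount;
}

static void account_retransmit_failure(int err, int segs)
{
	/* -EBUSY means the skb is still in the host queue; as in the
	 * hunk above, that is not counted as a failure.
	 */
	if (err != -EBUSY)
		my_add_stats(MY_MIB_TCPRETRANSFAIL, segs);
}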