author     David S. Miller <davem@davemloft.net>  2016-09-23 06:46:57 -0400
committer  David S. Miller <davem@davemloft.net>  2016-09-23 06:46:57 -0400
commit     d6989d4bbe6c4d1c2a76696833a07f044e85694d (patch)
tree       2d9a70d0feee4d4a20568be1b39a961fa0d27d81 /net/ipv4/tcp_output.c
parent     0364a8824c020f12e2d5e9fad963685b58f7574e (diff)
parent     b1f2beb87bb034bb209773807994279f90cace78 (diff)

Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--  net/ipv4/tcp_output.c | 7 +++++--
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 478dfc539178..7c777089a4d6 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2643,7 +2643,8 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
 	 * copying overhead: fragmentation, tunneling, mangling etc.
 	 */
 	if (atomic_read(&sk->sk_wmem_alloc) >
-	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
+	    min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2),
+		  sk->sk_sndbuf))
 		return -EAGAIN;
 
 	if (skb_still_in_host_queue(sk, skb))
@@ -2872,7 +2873,7 @@ begin_fwd:
 		if (tcp_retransmit_skb(sk, skb, segs))
 			return;
 
-		NET_INC_STATS(sock_net(sk), mib_idx);
+		NET_ADD_STATS(sock_net(sk), mib_idx, tcp_skb_pcount(skb));
 
 		if (tcp_in_cwnd_reduction(sk))
 			tp->prr_out += tcp_skb_pcount(skb);
@@ -3609,6 +3610,8 @@ int tcp_rtx_synack(const struct sock *sk, struct request_sock *req)
 	if (!res) {
 		__TCP_INC_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS);
 		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+		if (unlikely(tcp_passive_fastopen(sk)))
+			tcp_sk(sk)->total_retrans++;
 	}
 	return res;
 }
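
The net-tree side of this merge touches three retransmit paths in tcp_output.c: the memory-pressure check in __tcp_retransmit_skb() now performs its comparison through min_t(u32, ...), tcp_xmit_retransmit_queue() bumps the retransmit MIB by tcp_skb_pcount(skb) via NET_ADD_STATS() instead of once per skb, and tcp_rtx_synack() now folds retransmitted SYN-ACKs of passive Fast Open sockets into total_retrans. The following is a minimal user-space sketch (not kernel code) of the first hunk, assuming the motivation is that sk_wmem_queued + (sk_wmem_queued >> 2) can wrap negative as a signed int for a very large write queue, which would make the old min()-based test reject every retransmit; the struct-sock-style variable names are purely illustrative.

/* Standalone sketch of the __tcp_retransmit_skb() bound before and after
 * the change.  min_t() below mirrors the kernel macro of the same name. */
#include <stdio.h>
#include <stdint.h>

#define min_t(type, x, y) ((type)(x) < (type)(y) ? (type)(x) : (type)(y))

int main(void)
{
	int32_t sk_wmem_queued = 0x78000000;	/* ~1.9 GB queued: extreme but illustrative */
	int32_t sk_sndbuf      = 0x00200000;	/* 2 MB send buffer */
	int32_t sk_wmem_alloc  = 0x00100000;	/* 1 MB of skb truesize in flight */

	/* Old check: the 32-bit sum wraps negative, min() picks the bogus
	 * negative bound, and "wmem_alloc > bound" is true for any value,
	 * so every retransmit attempt would return -EAGAIN. */
	int32_t wrapped   = (int32_t)((uint32_t)sk_wmem_queued +
				      ((uint32_t)sk_wmem_queued >> 2));
	int32_t old_bound = wrapped < sk_sndbuf ? wrapped : sk_sndbuf;
	printf("old bound = %d -> retransmit %s\n", old_bound,
	       sk_wmem_alloc > old_bound ? "rejected (-EAGAIN)" : "allowed");

	/* New check: the same comparison done in u32 keeps a sane bound. */
	uint32_t new_bound = min_t(uint32_t,
				   (uint32_t)sk_wmem_queued +
				   ((uint32_t)sk_wmem_queued >> 2),
				   sk_sndbuf);
	printf("new bound = %u -> retransmit %s\n", new_bound,
	       (uint32_t)sk_wmem_alloc > new_bound ? "rejected (-EAGAIN)" : "allowed");
	return 0;
}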