Diffstat (limited to 'net/ipv4/tcp_output.c')
 net/ipv4/tcp_output.c | 37 ++++++++++++++++++++++++++++---------
 1 file changed, 28 insertions(+), 9 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 03d26b85eab8..f0eb4e337ec8 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -698,7 +698,8 @@ static void tcp_tsq_handler(struct sock *sk)
         if ((1 << sk->sk_state) &
             (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
              TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
-                tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC);
+                tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle,
+                               0, GFP_ATOMIC);
 }
 /*
  * One tasklet per cpu tries to send more skbs.
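The hunk above stops hard-coding nonagle to 0 on the TSQ tasklet path, so deferred transmits honour the socket's Nagle state. For context only, the userspace snippet below shows the usual way that state becomes non-zero; it uses the standard sockets API and is not part of the patch.

/* Illustrative only: TCP_NODELAY is what typically makes tp->nonagle
 * non-zero, which the TSQ handler above now forwards to tcp_write_xmit().
 */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int disable_nagle(int fd)
{
        int one = 1;

        return setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
}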
@@ -863,8 +864,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 
         if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
                      fclone->fclone == SKB_FCLONE_CLONE))
-                NET_INC_STATS_BH(sock_net(sk),
-                                 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
+                NET_INC_STATS(sock_net(sk),
+                              LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
 
         if (unlikely(skb_cloned(skb)))
                 skb = pskb_copy(skb, gfp_mask);
@@ -1904,7 +1905,15 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 
                 if (atomic_read(&sk->sk_wmem_alloc) > limit) {
                         set_bit(TSQ_THROTTLED, &tp->tsq_flags);
-                        break;
+                        /* It is possible TX completion already happened
+                         * before we set TSQ_THROTTLED, so we must
+                         * test again the condition.
+                         * We abuse smp_mb__after_clear_bit() because
+                         * there is no smp_mb__after_set_bit() yet
+                         */
+                        smp_mb__after_clear_bit();
+                        if (atomic_read(&sk->sk_wmem_alloc) > limit)
+                                break;
                 }
 
                 limit = mss_now;
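The comment in the hunk above describes a classic lost-wakeup race: TX completion can drain sk_wmem_alloc between the limit test and the flag being set, and without a barrier the re-read could still see a stale value. Below is a minimal userspace sketch of the same "set flag, full barrier, re-check" pattern using C11 atomics; the names (queued_bytes, THROTTLED, should_stop_sending) are illustrative and not taken from the kernel.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_long queued_bytes;        /* analogous to sk->sk_wmem_alloc */
static atomic_int  flags;               /* analogous to tp->tsq_flags */
#define THROTTLED 0x1

static bool should_stop_sending(long limit)
{
        if (atomic_load(&queued_bytes) <= limit)
                return false;

        /* Publish the throttled flag first ... */
        atomic_fetch_or(&flags, THROTTLED);

        /* ... then a full barrier, so the re-read below cannot be served
         * by a value observed before the flag became visible.  This is
         * the role smp_mb__after_clear_bit() plays in the hunk above.
         */
        atomic_thread_fence(memory_order_seq_cst);

        /* The completion path may have drained the queue meanwhile;
         * only stop if the condition still holds.
         */
        return atomic_load(&queued_bytes) > limit;
}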
@@ -1977,7 +1986,7 @@ bool tcp_schedule_loss_probe(struct sock *sk)
         /* Schedule a loss probe in 2*RTT for SACK capable connections
          * in Open state, that are either limited by cwnd or application.
          */
-        if (sysctl_tcp_early_retrans < 3 || !rtt || !tp->packets_out ||
+        if (sysctl_tcp_early_retrans < 3 || !tp->srtt || !tp->packets_out ||
             !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
                 return false;
 
@@ -2328,6 +2337,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
         struct tcp_sock *tp = tcp_sk(sk);
         struct inet_connection_sock *icsk = inet_csk(sk);
         unsigned int cur_mss;
+        int err;
 
         /* Inconslusive MTU probe */
         if (icsk->icsk_mtup.probe_size) {
@@ -2391,11 +2401,15 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
                      skb_headroom(skb) >= 0xFFFF)) {
                 struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
                                                    GFP_ATOMIC);
-                return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
-                              -ENOBUFS;
+                err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
+                             -ENOBUFS;
         } else {
-                return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
+                err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
         }
+
+        if (likely(!err))
+                TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
+        return err;
 }
 
 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
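The change above makes __tcp_retransmit_skb() collect the result of either transmit branch and set TCPCB_EVER_RETRANS only when the send actually succeeded, rather than returning straight from each branch. A minimal sketch of that "mark state only on success" shape, with hypothetical names (struct pkt, do_transmit, ever_retrans) rather than kernel API:

struct pkt {
        int ever_retrans;
};

static int do_transmit(struct pkt *p)
{
        (void)p;
        return 0;               /* stub: pretend the send succeeded */
}

static int retransmit_one(struct pkt *p)
{
        int err = do_transmit(p);

        if (!err)               /* flag the packet only after a successful send */
                p->ever_retrans = 1;
        return err;
}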
@@ -2899,7 +2913,12 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
         space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
                 MAX_TCP_OPTION_SPACE;
 
-        syn_data = skb_copy_expand(syn, skb_headroom(syn), space,
+        space = min_t(size_t, space, fo->size);
+
+        /* limit to order-0 allocations */
+        space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
+
+        syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space,
                                            sk->sk_allocation);
         if (syn_data == NULL)
                 goto fallback;
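The two min_t() clamps above bound the Fast Open SYN payload: never reserve more room than the data actually queued (fo->size), and never grow the linear skb head past what a single page, i.e. an order-0 allocation, can hold (SKB_MAX_HEAD(MAX_TCP_HEADER)). A standalone sketch of that arithmetic, with illustrative parameter names and no kernel constants assumed:

#include <stddef.h>

static size_t clamp_syn_data_space(size_t mss_room, size_t queued,
                                   size_t max_linear_head)
{
        size_t space = mss_room;        /* room left after TCP option space */

        if (space > queued)             /* don't reserve more than the app  */
                space = queued;         /* actually has queued for the SYN  */

        if (space > max_linear_head)    /* keep the skb head within one     */
                space = max_linear_head;/* page, i.e. an order-0 allocation */

        return space;
}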