diff options
| author | Paul Moore <pmoore@redhat.com> | 2014-01-28 14:44:16 -0500 | 
|---|---|---|
| committer | Paul Moore <pmoore@redhat.com> | 2014-02-05 10:39:48 -0500 | 
| commit | 825e587af2e90e9b953849f3347a01d8f383d577 (patch) | |
| tree | e48942a05882da47544e179c6a0c920e00137a6a /net/ipv4/tcp_output.c | |
| parent | 8ed814602876bec9bad2649ca17f34b499357a1c (diff) | |
| parent | d8ec26d7f8287f5788a494f56e8814210f0e64be (diff) | |
Merge tag 'v3.13' into stable-3.14
Linux 3.13
Conflicts:
	security/selinux/hooks.c
Trivial merge issue in selinux_inet_conn_request() likely due to me
including patches that I sent to the stable folks in my next tree
resulting in the patch hitting twice (I think).  Thankfully it was an
easy fix this time, but regardless, lesson learned, I will not do that
again.
Diffstat (limited to 'net/ipv4/tcp_output.c')
| -rw-r--r-- | net/ipv4/tcp_output.c | 40 | 
1 file changed, 14 insertions(+), 26 deletions(-)
| diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index d46f2143305c..7820f3a7dd70 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c | |||
| @@ -850,15 +850,15 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, | |||
| 850 | 850 | ||
| 851 | BUG_ON(!skb || !tcp_skb_pcount(skb)); | 851 | BUG_ON(!skb || !tcp_skb_pcount(skb)); | 
| 852 | 852 | ||
| 853 | /* If congestion control is doing timestamping, we must | 853 | if (clone_it) { | 
| 854 | * take such a timestamp before we potentially clone/copy. | ||
| 855 | */ | ||
| 856 | if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP) | ||
| 857 | __net_timestamp(skb); | ||
| 858 | |||
| 859 | if (likely(clone_it)) { | ||
| 860 | const struct sk_buff *fclone = skb + 1; | 854 | const struct sk_buff *fclone = skb + 1; | 
| 861 | 855 | ||
| 856 | /* If congestion control is doing timestamping, we must | ||
| 857 | * take such a timestamp before we potentially clone/copy. | ||
| 858 | */ | ||
| 859 | if (icsk->icsk_ca_ops->flags & TCP_CONG_RTT_STAMP) | ||
| 860 | __net_timestamp(skb); | ||
| 861 | |||
| 862 | if (unlikely(skb->fclone == SKB_FCLONE_ORIG && | 862 | if (unlikely(skb->fclone == SKB_FCLONE_ORIG && | 
| 863 | fclone->fclone == SKB_FCLONE_CLONE)) | 863 | fclone->fclone == SKB_FCLONE_CLONE)) | 
| 864 | NET_INC_STATS_BH(sock_net(sk), | 864 | NET_INC_STATS_BH(sock_net(sk), | 
| @@ -1875,8 +1875,12 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, | |||
| 1875 | * - better RTT estimation and ACK scheduling | 1875 | * - better RTT estimation and ACK scheduling | 
| 1876 | * - faster recovery | 1876 | * - faster recovery | 
| 1877 | * - high rates | 1877 | * - high rates | 
| 1878 | * Alas, some drivers / subsystems require a fair amount | ||
| 1879 | * of queued bytes to ensure line rate. | ||
| 1880 | * One example is wifi aggregation (802.11 AMPDU) | ||
| 1878 | */ | 1881 | */ | 
| 1879 | limit = max(skb->truesize, sk->sk_pacing_rate >> 10); | 1882 | limit = max_t(unsigned int, sysctl_tcp_limit_output_bytes, | 
| 1883 | sk->sk_pacing_rate >> 10); | ||
| 1880 | 1884 | ||
| 1881 | if (atomic_read(&sk->sk_wmem_alloc) > limit) { | 1885 | if (atomic_read(&sk->sk_wmem_alloc) > limit) { | 
| 1882 | set_bit(TSQ_THROTTLED, &tp->tsq_flags); | 1886 | set_bit(TSQ_THROTTLED, &tp->tsq_flags); | 
| @@ -2353,21 +2357,6 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) | |||
| 2353 | 2357 | ||
| 2354 | tcp_retrans_try_collapse(sk, skb, cur_mss); | 2358 | tcp_retrans_try_collapse(sk, skb, cur_mss); | 
| 2355 | 2359 | ||
| 2356 | /* Some Solaris stacks overoptimize and ignore the FIN on a | ||
| 2357 | * retransmit when old data is attached. So strip it off | ||
| 2358 | * since it is cheap to do so and saves bytes on the network. | ||
| 2359 | */ | ||
| 2360 | if (skb->len > 0 && | ||
| 2361 | (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) && | ||
| 2362 | tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) { | ||
| 2363 | if (!pskb_trim(skb, 0)) { | ||
| 2364 | /* Reuse, even though it does some unnecessary work */ | ||
| 2365 | tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1, | ||
| 2366 | TCP_SKB_CB(skb)->tcp_flags); | ||
| 2367 | skb->ip_summed = CHECKSUM_NONE; | ||
| 2368 | } | ||
| 2369 | } | ||
| 2370 | |||
| 2371 | /* Make a copy, if the first transmission SKB clone we made | 2360 | /* Make a copy, if the first transmission SKB clone we made | 
| 2372 | * is still in somebody's hands, else make a clone. | 2361 | * is still in somebody's hands, else make a clone. | 
| 2373 | */ | 2362 | */ | 
| @@ -2736,8 +2725,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst, | |||
| 2736 | th->syn = 1; | 2725 | th->syn = 1; | 
| 2737 | th->ack = 1; | 2726 | th->ack = 1; | 
| 2738 | TCP_ECN_make_synack(req, th); | 2727 | TCP_ECN_make_synack(req, th); | 
| 2739 | th->source = ireq->loc_port; | 2728 | th->source = htons(ireq->ir_num); | 
| 2740 | th->dest = ireq->rmt_port; | 2729 | th->dest = ireq->ir_rmt_port; | 
| 2741 | /* Setting of flags are superfluous here for callers (and ECE is | 2730 | /* Setting of flags are superfluous here for callers (and ECE is | 
| 2742 | * not even correctly set) | 2731 | * not even correctly set) | 
| 2743 | */ | 2732 | */ | 
| @@ -3108,7 +3097,6 @@ void tcp_send_window_probe(struct sock *sk) | |||
| 3108 | { | 3097 | { | 
| 3109 | if (sk->sk_state == TCP_ESTABLISHED) { | 3098 | if (sk->sk_state == TCP_ESTABLISHED) { | 
| 3110 | tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; | 3099 | tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1; | 
| 3111 | tcp_sk(sk)->snd_nxt = tcp_sk(sk)->write_seq; | ||
| 3112 | tcp_xmit_probe_skb(sk, 0); | 3100 | tcp_xmit_probe_skb(sk, 0); | 
| 3113 | } | 3101 | } | 
| 3114 | } | 3102 | } | 
