Diffstat (limited to 'net/ipv4/tcp_output.c')
 net/ipv4/tcp_output.c | 65 ++++++++++++++++++++---------------------------------------------
 1 file changed, 20 insertions(+), 45 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 1c839c99114c..478909f4694d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -739,8 +739,10 @@ static void tcp_tsq_handler(struct sock *sk)
 		struct tcp_sock *tp = tcp_sk(sk);
 
 		if (tp->lost_out > tp->retrans_out &&
-		    tp->snd_cwnd > tcp_packets_in_flight(tp))
+		    tp->snd_cwnd > tcp_packets_in_flight(tp)) {
+			tcp_mstamp_refresh(tp);
 			tcp_xmit_retransmit_queue(sk);
+		}
 
 		tcp_write_xmit(sk, tcp_current_mss(sk), tp->nonagle,
 			       0, GFP_ATOMIC);
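Note: this hunk relies on tcp_mstamp_refresh() so that segments retransmitted from the TSQ handler are stamped with the current time rather than a stale tp->tcp_mstamp (a later hunk hoists the same refresh to the top of tcp_write_xmit() for the same reason). A minimal sketch of the helper, assuming the 4.13-era include/net/tcp.h; exact details may differ by kernel version:

	/* Sketch only, not part of this diff: refresh the socket's
	 * cached microsecond clock so freshly (re)sent skbs carry
	 * the current time.
	 */
	static inline void tcp_mstamp_refresh(struct tcp_sock *tp)
	{
		u64 val = tcp_clock_us();	/* monotonic clock, in usec */

		/* never let the cached stamp move backwards */
		if (val > tp->tcp_mstamp)
			tp->tcp_mstamp = val;
	}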
@@ -1806,40 +1808,6 @@ static bool tcp_snd_wnd_test(const struct tcp_sock *tp,
 	return !after(end_seq, tcp_wnd_end(tp));
 }
 
-/* This checks if the data bearing packet SKB (usually tcp_send_head(sk))
- * should be put on the wire right now. If so, it returns the number of
- * packets allowed by the congestion window.
- */
-static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb,
-				 unsigned int cur_mss, int nonagle)
-{
-	const struct tcp_sock *tp = tcp_sk(sk);
-	unsigned int cwnd_quota;
-
-	tcp_init_tso_segs(skb, cur_mss);
-
-	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
-		return 0;
-
-	cwnd_quota = tcp_cwnd_test(tp, skb);
-	if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss))
-		cwnd_quota = 0;
-
-	return cwnd_quota;
-}
-
-/* Test if sending is allowed right now. */
-bool tcp_may_send_now(struct sock *sk)
-{
-	const struct tcp_sock *tp = tcp_sk(sk);
-	struct sk_buff *skb = tcp_send_head(sk);
-
-	return skb &&
-	       tcp_snd_test(sk, skb, tcp_current_mss(sk),
-			    (tcp_skb_is_last(sk, skb) ?
-			     tp->nonagle : TCP_NAGLE_PUSH));
-}
-
 /* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
  * which is put after SKB on the list. It is very much like
  * tcp_fragment() except that it may make several kinds of assumptions
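Note: tcp_snd_test() and tcp_may_send_now() are deleted outright with no replacement call site in this file; judging by the clean removal, they presumably had no remaining callers, and the same checks (tcp_init_tso_segs(), tcp_nagle_test(), tcp_cwnd_test(), tcp_snd_wnd_test()) continue to run inline in tcp_write_xmit().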
@@ -2094,6 +2062,7 @@ static int tcp_mtu_probe(struct sock *sk)
 	nskb->ip_summed = skb->ip_summed;
 
 	tcp_insert_write_queue_before(nskb, skb, sk);
+	tcp_highest_sack_replace(sk, skb, nskb);
 
 	len = 0;
 	tcp_for_write_queue_from_safe(skb, next, sk) {
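Note: tcp_mtu_probe() inserts nskb before skb and then migrates payload out of skb; if skb happened to be the cached highest-SACKed skb, tp->highest_sack would be left pointing at an skb that is about to be emptied or freed. tcp_highest_sack_replace() moves the cache to the surviving skb; the tcp_collapse_retrans() hunk further down uses it too, superseding tcp_highest_sack_combine(). A sketch of the helper, assuming it matches the include/net/tcp.h of this era:

	/* Sketch only: old is being unlinked/emptied, so keep the
	 * SACK hint pointing at a valid skb.
	 */
	static inline void tcp_highest_sack_replace(struct sock *sk,
						    struct sk_buff *old,
						    struct sk_buff *new)
	{
		if (old == tcp_highest_sack(sk))
			tcp_sk(sk)->highest_sack = new;
	}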
@@ -2271,6 +2240,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 
 	sent_pkts = 0;
 
+	tcp_mstamp_refresh(tp);
 	if (!push_one) {
 		/* Do MTU probing. */
 		result = tcp_mtu_probe(sk);
@@ -2282,7 +2252,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 	}
 
 	max_segs = tcp_tso_segs(sk, mss_now);
-	tcp_mstamp_refresh(tp);
 	while ((skb = tcp_send_head(sk))) {
 		unsigned int limit;
 
@@ -2697,7 +2666,7 @@ static bool tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 		else if (!skb_shift(skb, next_skb, next_skb_size))
 			return false;
 	}
-	tcp_highest_sack_combine(sk, next_skb, skb);
+	tcp_highest_sack_replace(sk, next_skb, skb);
 
 	tcp_unlink_write_queue(next_skb, sk);
 
@@ -2875,8 +2844,10 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
 		nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
 		err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
 			     -ENOBUFS;
-		if (!err)
+		if (!err) {
 			skb->skb_mstamp = tp->tcp_mstamp;
+			tcp_rate_skb_sent(sk, skb);
+		}
 	} else {
 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
 	}
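Note: the clone path of __tcp_retransmit_skb() previously stamped the skb but never refreshed its rate-sampling state, so delivery-rate samples built from retransmitted segments (and hence BBR's bandwidth estimates) used stale per-skb data. tcp_rate_skb_sent() is the hook that ordinary transmits already run; a condensed sketch from memory of net/ipv4/tcp_rate.c, field names may vary slightly by version:

	/* Sketch only: snapshot the delivery-rate bookkeeping into the
	 * skb so the ACK that (S)ACKs it can compute a bandwidth sample.
	 */
	void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb)
	{
		struct tcp_sock *tp = tcp_sk(sk);

		if (!tp->packets_out) {	/* first packet of a new flight */
			tp->first_tx_mstamp  = skb->skb_mstamp;
			tp->delivered_mstamp = tp->tcp_mstamp;
		}

		TCP_SKB_CB(skb)->tx.first_tx_mstamp  = tp->first_tx_mstamp;
		TCP_SKB_CB(skb)->tx.delivered_mstamp = tp->delivered_mstamp;
		TCP_SKB_CB(skb)->tx.delivered        = tp->delivered;
		TCP_SKB_CB(skb)->tx.is_app_limited   = tp->app_limited ? 1 : 0;
	}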
@@ -3209,13 +3180,8 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 	th->source = htons(ireq->ir_num);
 	th->dest = ireq->ir_rmt_port;
 	skb->mark = ireq->ir_mark;
-	/* Setting of flags are superfluous here for callers (and ECE is
-	 * not even correctly set)
-	 */
-	tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn,
-			     TCPHDR_SYN | TCPHDR_ACK);
-
-	th->seq = htonl(TCP_SKB_CB(skb)->seq);
+	skb->ip_summed = CHECKSUM_PARTIAL;
+	th->seq = htonl(tcp_rsk(req)->snt_isn);
 	/* XXX data is queued and acked as is. No buffer/window check */
 	th->ack_seq = htonl(tcp_rsk(req)->rcv_nxt);
 
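Note: a SYN-ACK skb can be built on behalf of a request socket while the listener is shared, so scribbling on TCP_SKB_CB(skb) is unsafe and, as the deleted comment admitted, unnecessary for callers. The new code writes th->seq straight from tcp_rsk(req)->snt_isn and keeps only the skb->ip_summed assignment the helper used to provide. For reference, a sketch of the dropped helper (tcp_init_nondata_skb() in this same file), assuming the usual implementation:

	static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
	{
		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;

		/* these TCP_SKB_CB() writes are what tcp_make_synack()
		 * must now avoid on a possibly shared skb
		 */
		TCP_SKB_CB(skb)->tcp_flags = flags;
		TCP_SKB_CB(skb)->sacked = 0;

		tcp_skb_pcount_set(skb, 1);

		TCP_SKB_CB(skb)->seq = seq;
		if (flags & (TCPHDR_SYN | TCPHDR_FIN))
			seq++;
		TCP_SKB_CB(skb)->end_seq = seq;
	}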
@@ -3423,6 +3389,10 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
 		goto done;
 	}
 
+	/* data was not sent, this is our new send_head */
+	sk->sk_send_head = syn_data;
+	tp->packets_out -= tcp_skb_pcount(syn_data);
+
 fallback:
 	/* Send a regular SYN with Fast Open cookie request option */
 	if (fo->cookie.len > 0)
@@ -3475,6 +3445,11 @@ int tcp_connect(struct sock *sk)
 	 */
 	tp->snd_nxt = tp->write_seq;
 	tp->pushed_seq = tp->write_seq;
+	buff = tcp_send_head(sk);
+	if (unlikely(buff)) {
+		tp->snd_nxt = TCP_SKB_CB(buff)->seq;
+		tp->pushed_seq = TCP_SKB_CB(buff)->seq;
+	}
 	TCP_INC_STATS(sock_net(sk), TCP_MIB_ACTIVEOPENS);
 
 	/* Timer for repeating the SYN until an answer. */
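Note: the last two hunks pair up. When tcp_send_syn_data() fails to transmit the Fast Open SYN+data, the fallback path now re-registers syn_data as the send head and backs its segments out of tp->packets_out; tcp_connect() in turn checks for an already-queued skb and starts snd_nxt/pushed_seq from that skb's sequence number instead of tp->write_seq, keeping sequence accounting consistent with the requeued data.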