Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--	net/ipv4/tcp_output.c	126
1 file changed, 68 insertions(+), 58 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 5a7c41fbc6d3..becd98ce9a1c 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -318,36 +318,47 @@ static u16 tcp_select_window(struct sock *sk)
 }
 
 /* Packet ECN state for a SYN-ACK */
-static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb)
+static void tcp_ecn_send_synack(struct sock *sk, struct sk_buff *skb)
 {
+	const struct tcp_sock *tp = tcp_sk(sk);
+
 	TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR;
 	if (!(tp->ecn_flags & TCP_ECN_OK))
 		TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE;
+	else if (tcp_ca_needs_ecn(sk))
+		INET_ECN_xmit(sk);
 }
 
 /* Packet ECN state for a SYN. */
-static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
+static void tcp_ecn_send_syn(struct sock *sk, struct sk_buff *skb)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	tp->ecn_flags = 0;
-	if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1) {
+	if (sock_net(sk)->ipv4.sysctl_tcp_ecn == 1 ||
+	    tcp_ca_needs_ecn(sk)) {
 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR;
 		tp->ecn_flags = TCP_ECN_OK;
+		if (tcp_ca_needs_ecn(sk))
+			INET_ECN_xmit(sk);
 	}
 }
 
-static __inline__ void
-TCP_ECN_make_synack(const struct request_sock *req, struct tcphdr *th)
+static void
+tcp_ecn_make_synack(const struct request_sock *req, struct tcphdr *th,
+		    struct sock *sk)
 {
-	if (inet_rsk(req)->ecn_ok)
+	if (inet_rsk(req)->ecn_ok) {
 		th->ece = 1;
+		if (tcp_ca_needs_ecn(sk))
+			INET_ECN_xmit(sk);
+	}
 }
 
 /* Set up ECN state for a packet on a ESTABLISHED socket that is about to
  * be sent.
  */
-static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
-				int tcp_header_len)
+static void tcp_ecn_send(struct sock *sk, struct sk_buff *skb,
+			 int tcp_header_len)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -362,7 +373,7 @@ static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
 				tcp_hdr(skb)->cwr = 1;
 				skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
 			}
-		} else {
+		} else if (!tcp_ca_needs_ecn(sk)) {
 			/* ACK or retransmitted segment: clear ECT|CE */
 			INET_ECN_dontxmit(sk);
 		}
@@ -384,7 +395,7 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
 	TCP_SKB_CB(skb)->tcp_flags = flags;
 	TCP_SKB_CB(skb)->sacked = 0;
 
-	shinfo->gso_segs = 1;
+	tcp_skb_pcount_set(skb, 1);
 	shinfo->gso_size = 0;
 	shinfo->gso_type = 0;
 
@@ -550,7 +561,7 @@ static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb,
 
 	if (likely(sysctl_tcp_timestamps && *md5 == NULL)) {
 		opts->options |= OPTION_TS;
-		opts->tsval = TCP_SKB_CB(skb)->when + tp->tsoffset;
+		opts->tsval = tcp_skb_timestamp(skb) + tp->tsoffset;
 		opts->tsecr = tp->rx_opt.ts_recent;
 		remaining -= TCPOLEN_TSTAMP_ALIGNED;
 	}
@@ -618,7 +629,7 @@ static unsigned int tcp_synack_options(struct sock *sk,
 	}
 	if (likely(ireq->tstamp_ok)) {
 		opts->options |= OPTION_TS;
-		opts->tsval = TCP_SKB_CB(skb)->when;
+		opts->tsval = tcp_skb_timestamp(skb);
 		opts->tsecr = req->ts_recent;
 		remaining -= TCPOLEN_TSTAMP_ALIGNED;
 	}
@@ -647,7 +658,6 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
 					struct tcp_out_options *opts,
 					struct tcp_md5sig_key **md5)
 {
-	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
 	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned int size = 0;
 	unsigned int eff_sacks;
@@ -666,7 +676,7 @@ static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb
 
 	if (likely(tp->rx_opt.tstamp_ok)) {
 		opts->options |= OPTION_TS;
-		opts->tsval = tcb ? tcb->when + tp->tsoffset : 0;
+		opts->tsval = skb ? tcp_skb_timestamp(skb) + tp->tsoffset : 0;
 		opts->tsecr = tp->rx_opt.ts_recent;
 		size += TCPOLEN_TSTAMP_ALIGNED;
 	}
@@ -842,7 +852,7 @@ void tcp_wfree(struct sk_buff *skb)
 
 	/* queue this socket to tasklet queue */
 	local_irq_save(flags);
-	tsq = &__get_cpu_var(tsq_tasklet);
+	tsq = this_cpu_ptr(&tsq_tasklet);
 	list_add(&tp->tsq_node, &tsq->head);
 	tasklet_schedule(&tsq->tasklet);
 	local_irq_restore(flags);
@@ -886,8 +896,6 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 		skb = skb_clone(skb, gfp_mask);
 		if (unlikely(!skb))
 			return -ENOBUFS;
-		/* Our usage of tstamp should remain private */
-		skb->tstamp.tv64 = 0;
 	}
 
 	inet = inet_sk(sk);
@@ -952,7 +960,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 
 	tcp_options_write((__be32 *)(th + 1), tp, &opts);
 	if (likely((tcb->tcp_flags & TCPHDR_SYN) == 0))
-		TCP_ECN_send(sk, skb, tcp_header_size);
+		tcp_ecn_send(sk, skb, tcp_header_size);
 
 #ifdef CONFIG_TCP_MD5SIG
 	/* Calculate the MD5 hash, as we have all we need now */
@@ -975,7 +983,18 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 		TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS,
 			      tcp_skb_pcount(skb));
 
+	/* OK, its time to fill skb_shinfo(skb)->gso_segs */
+	skb_shinfo(skb)->gso_segs = tcp_skb_pcount(skb);
+
+	/* Our usage of tstamp should remain private */
+	skb->tstamp.tv64 = 0;
+
+	/* Cleanup our debris for IP stacks */
+	memset(skb->cb, 0, max(sizeof(struct inet_skb_parm),
+			       sizeof(struct inet6_skb_parm)));
+
 	err = icsk->icsk_af_ops->queue_xmit(sk, skb, &inet->cork.fl);
+
 	if (likely(err <= 0))
 		return err;
 
@@ -995,7 +1014,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
 
 	/* Advance write_seq and place onto the write_queue. */
 	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
-	skb_header_release(skb);
+	__skb_header_release(skb);
 	tcp_add_write_queue_tail(sk, skb);
 	sk->sk_wmem_queued += skb->truesize;
 	sk_mem_charge(sk, skb->truesize);
@@ -1014,11 +1033,11 @@ static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb,
 		/* Avoid the costly divide in the normal
 		 * non-TSO case.
 		 */
-		shinfo->gso_segs = 1;
+		tcp_skb_pcount_set(skb, 1);
 		shinfo->gso_size = 0;
 		shinfo->gso_type = 0;
 	} else {
-		shinfo->gso_segs = DIV_ROUND_UP(skb->len, mss_now);
+		tcp_skb_pcount_set(skb, DIV_ROUND_UP(skb->len, mss_now));
 		shinfo->gso_size = mss_now;
 		shinfo->gso_type = sk->sk_gso_type;
 	}
@@ -1146,10 +1165,6 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
 
 	buff->ip_summed = skb->ip_summed;
 
-	/* Looks stupid, but our code really uses when of
-	 * skbs, which it never sent before. --ANK
-	 */
-	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
 	buff->tstamp = skb->tstamp;
 	tcp_fragment_tstamp(skb, buff);
 
@@ -1171,7 +1186,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
 	}
 
 	/* Link BUFF into the send queue. */
-	skb_header_release(buff);
+	__skb_header_release(buff);
 	tcp_insert_write_queue_after(skb, buff, sk);
 
 	return 0;
@@ -1675,7 +1690,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 	tcp_set_skb_tso_segs(sk, buff, mss_now);
 
 	/* Link BUFF into the send queue. */
-	skb_header_release(buff);
+	__skb_header_release(buff);
 	tcp_insert_write_queue_after(skb, buff, sk);
 
 	return 0;
@@ -1874,8 +1889,8 @@ static int tcp_mtu_probe(struct sock *sk)
 	tcp_init_tso_segs(sk, nskb, nskb->len);
 
 	/* We're ready to send. If this fails, the probe will
-	 * be resegmented into mss-sized pieces by tcp_write_xmit(). */
-	TCP_SKB_CB(nskb)->when = tcp_time_stamp;
+	 * be resegmented into mss-sized pieces by tcp_write_xmit().
+	 */
 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
 		/* Decrement cwnd here because we are sending
 		 * effectively two packets. */
@@ -1935,8 +1950,8 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		BUG_ON(!tso_segs);
 
 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
-			/* "when" is used as a start point for the retransmit timer */
-			TCP_SKB_CB(skb)->when = tcp_time_stamp;
+			/* "skb_mstamp" is used as a start point for the retransmit timer */
+			skb_mstamp_get(&skb->skb_mstamp);
 			goto repair; /* Skip network transmission */
 		}
 
@@ -2000,8 +2015,6 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		    unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
 			break;
 
-		TCP_SKB_CB(skb)->when = tcp_time_stamp;
-
 		if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp)))
 			break;
 
@@ -2097,10 +2110,7 @@ bool tcp_schedule_loss_probe(struct sock *sk)
 static bool skb_still_in_host_queue(const struct sock *sk,
 				    const struct sk_buff *skb)
 {
-	const struct sk_buff *fclone = skb + 1;
-
-	if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
-		     fclone->fclone == SKB_FCLONE_CLONE)) {
+	if (unlikely(skb_fclone_busy(skb))) {
 		NET_INC_STATS_BH(sock_net(sk),
 				 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
 		return true;
@@ -2499,7 +2509,6 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	/* Make a copy, if the first transmission SKB clone we made
 	 * is still in somebody's hands, else make a clone.
 	 */
-	TCP_SKB_CB(skb)->when = tcp_time_stamp;
 
 	/* make sure skb->data is aligned on arches that require it
 	 * and check if ack-trimming & collapsing extended the headroom
@@ -2544,7 +2553,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 
 	/* Save stamp of the first retransmit. */
 	if (!tp->retrans_stamp)
-		tp->retrans_stamp = TCP_SKB_CB(skb)->when;
+		tp->retrans_stamp = tcp_skb_timestamp(skb);
 
 	/* snd_nxt is stored to detect loss of retransmitted segment,
 	 * see tcp_input.c tcp_sacktag_write_queue().
@@ -2752,7 +2761,6 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
 			     TCPHDR_ACK | TCPHDR_RST);
 	/* Send it off. */
-	TCP_SKB_CB(skb)->when = tcp_time_stamp;
 	if (tcp_transmit_skb(sk, skb, 0, priority))
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
 
@@ -2780,7 +2788,7 @@ int tcp_send_synack(struct sock *sk)
 		if (nskb == NULL)
 			return -ENOMEM;
 		tcp_unlink_write_queue(skb, sk);
-		skb_header_release(nskb);
+		__skb_header_release(nskb);
 		__tcp_add_write_queue_head(sk, nskb);
 		sk_wmem_free_skb(sk, skb);
 		sk->sk_wmem_queued += nskb->truesize;
@@ -2789,9 +2797,8 @@ int tcp_send_synack(struct sock *sk)
 		}
 
 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK;
-		TCP_ECN_send_synack(tcp_sk(sk), skb);
+		tcp_ecn_send_synack(sk, skb);
 	}
-	TCP_SKB_CB(skb)->when = tcp_time_stamp;
 	return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
 }
 
@@ -2835,10 +2842,10 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 	memset(&opts, 0, sizeof(opts));
 #ifdef CONFIG_SYN_COOKIES
 	if (unlikely(req->cookie_ts))
-		TCP_SKB_CB(skb)->when = cookie_init_timestamp(req);
+		skb->skb_mstamp.stamp_jiffies = cookie_init_timestamp(req);
 	else
 #endif
-		TCP_SKB_CB(skb)->when = tcp_time_stamp;
+		skb_mstamp_get(&skb->skb_mstamp);
 	tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5,
 					     foc) + sizeof(*th);
 
@@ -2849,7 +2856,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 	memset(th, 0, sizeof(struct tcphdr));
 	th->syn = 1;
 	th->ack = 1;
-	TCP_ECN_make_synack(req, th);
+	tcp_ecn_make_synack(req, th, sk);
 	th->source = htons(ireq->ir_num);
 	th->dest = ireq->ir_rmt_port;
 	/* Setting of flags are superfluous here for callers (and ECE is
@@ -2956,7 +2963,7 @@ static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb)
 	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
 
 	tcb->end_seq += skb->len;
-	skb_header_release(skb);
+	__skb_header_release(skb);
 	__tcp_add_write_queue_tail(sk, skb);
 	sk->sk_wmem_queued += skb->truesize;
 	sk_mem_charge(sk, skb->truesize);
@@ -3086,9 +3093,9 @@ int tcp_connect(struct sock *sk)
 	skb_reserve(buff, MAX_TCP_HEADER);
 
 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
-	tp->retrans_stamp = TCP_SKB_CB(buff)->when = tcp_time_stamp;
+	tp->retrans_stamp = tcp_time_stamp;
 	tcp_connect_queue_skb(sk, buff);
-	TCP_ECN_send_syn(sk, buff);
+	tcp_ecn_send_syn(sk, buff);
 
 	/* Send off SYN; include data in Fast Open. */
 	err = tp->fastopen_req ? tcp_send_syn_data(sk, buff) :
@@ -3120,6 +3127,8 @@ void tcp_send_delayed_ack(struct sock *sk)
 	int ato = icsk->icsk_ack.ato;
 	unsigned long timeout;
 
+	tcp_ca_event(sk, CA_EVENT_DELAYED_ACK);
+
 	if (ato > TCP_DELACK_MIN) {
 		const struct tcp_sock *tp = tcp_sk(sk);
 		int max_ato = HZ / 2;
@@ -3176,6 +3185,8 @@ void tcp_send_ack(struct sock *sk)
 	if (sk->sk_state == TCP_CLOSE)
 		return;
 
+	tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK);
+
 	/* We are not putting this on the write queue, so
 	 * tcp_transmit_skb() will set the ownership to this
 	 * sock.
@@ -3194,9 +3205,10 @@ void tcp_send_ack(struct sock *sk)
 	tcp_init_nondata_skb(buff, tcp_acceptable_seq(sk), TCPHDR_ACK);
 
 	/* Send it off, this clears delayed acks for us. */
-	TCP_SKB_CB(buff)->when = tcp_time_stamp;
+	skb_mstamp_get(&buff->skb_mstamp);
 	tcp_transmit_skb(sk, buff, 0, sk_gfp_atomic(sk, GFP_ATOMIC));
 }
+EXPORT_SYMBOL_GPL(tcp_send_ack);
 
 /* This routine sends a packet with an out of date sequence
  * number. It assumes the other end will try to ack it.
@@ -3226,7 +3238,7 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent)
 	 * send it.
 	 */
 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
-	TCP_SKB_CB(skb)->when = tcp_time_stamp;
+	skb_mstamp_get(&skb->skb_mstamp);
 	return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC);
 }
 
@@ -3270,7 +3282,6 @@ int tcp_write_wakeup(struct sock *sk)
 		tcp_set_skb_tso_segs(sk, skb, mss);
 
 		TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
-		TCP_SKB_CB(skb)->when = tcp_time_stamp;
 		err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
 		if (!err)
 			tcp_event_new_data_sent(sk, skb);
@@ -3289,6 +3300,7 @@ void tcp_send_probe0(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
+	unsigned long probe_max;
 	int err;
 
 	err = tcp_write_wakeup(sk);
@@ -3304,9 +3316,7 @@ void tcp_send_probe0(struct sock *sk)
 		if (icsk->icsk_backoff < sysctl_tcp_retries2)
 			icsk->icsk_backoff++;
 		icsk->icsk_probes_out++;
-		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
-					  min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX),
-					  TCP_RTO_MAX);
+		probe_max = TCP_RTO_MAX;
 	} else {
 		/* If packet was not sent due to local congestion,
 		 * do not backoff and do not remember icsk_probes_out.
@@ -3316,11 +3326,11 @@ void tcp_send_probe0(struct sock *sk)
 		 */
 		if (!icsk->icsk_probes_out)
 			icsk->icsk_probes_out = 1;
-		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
-					  min(icsk->icsk_rto << icsk->icsk_backoff,
-					      TCP_RESOURCE_PROBE_INTERVAL),
-					  TCP_RTO_MAX);
+		probe_max = TCP_RESOURCE_PROBE_INTERVAL;
 	}
+	inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
+				  inet_csk_rto_backoff(icsk, probe_max),
+				  TCP_RTO_MAX);
 }
 
 int tcp_rtx_synack(struct sock *sk, struct request_sock *req)