about summary refs log tree commit diff stats
path: root/net/ipv4/tcp_output.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r-- net/ipv4/tcp_output.c | 42
1 files changed, 39 insertions, 3 deletions
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 179b51e6bda3..5a7c41fbc6d3 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -800,7 +800,7 @@ void tcp_release_cb(struct sock *sk)
800 __sock_put(sk); 800 __sock_put(sk);
801 } 801 }
802 if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) { 802 if (flags & (1UL << TCP_MTU_REDUCED_DEFERRED)) {
803 sk->sk_prot->mtu_reduced(sk); 803 inet_csk(sk)->icsk_af_ops->mtu_reduced(sk);
804 __sock_put(sk); 804 __sock_put(sk);
805 } 805 }
806} 806}
@@ -916,6 +916,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
916 skb_orphan(skb); 916 skb_orphan(skb);
917 skb->sk = sk; 917 skb->sk = sk;
918 skb->destructor = tcp_wfree; 918 skb->destructor = tcp_wfree;
919 skb_set_hash_from_sk(skb, sk);
919 atomic_add(skb->truesize, &sk->sk_wmem_alloc); 920 atomic_add(skb->truesize, &sk->sk_wmem_alloc);
920 921
921 /* Build TCP header and checksum it. */ 922 /* Build TCP header and checksum it. */
@@ -978,7 +979,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
978 if (likely(err <= 0)) 979 if (likely(err <= 0))
979 return err; 980 return err;
980 981
981 tcp_enter_cwr(sk, 1); 982 tcp_enter_cwr(sk);
982 983
983 return net_xmit_eval(err); 984 return net_xmit_eval(err);
984} 985}
@@ -1068,6 +1069,21 @@ static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int de
1068 tcp_verify_left_out(tp); 1069 tcp_verify_left_out(tp);
1069} 1070}
1070 1071
1072static void tcp_fragment_tstamp(struct sk_buff *skb, struct sk_buff *skb2)
1073{
1074 struct skb_shared_info *shinfo = skb_shinfo(skb);
1075
1076 if (unlikely(shinfo->tx_flags & SKBTX_ANY_TSTAMP) &&
1077 !before(shinfo->tskey, TCP_SKB_CB(skb2)->seq)) {
1078 struct skb_shared_info *shinfo2 = skb_shinfo(skb2);
1079 u8 tsflags = shinfo->tx_flags & SKBTX_ANY_TSTAMP;
1080
1081 shinfo->tx_flags &= ~tsflags;
1082 shinfo2->tx_flags |= tsflags;
1083 swap(shinfo->tskey, shinfo2->tskey);
1084 }
1085}
1086
1071/* Function to create two new TCP segments. Shrinks the given segment 1087/* Function to create two new TCP segments. Shrinks the given segment
1072 * to the specified size and appends a new segment with the rest of the 1088 * to the specified size and appends a new segment with the rest of the
1073 * packet to the list. This won't be called frequently, I hope. 1089 * packet to the list. This won't be called frequently, I hope.
@@ -1135,6 +1151,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
1135 */ 1151 */
1136 TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when; 1152 TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
1137 buff->tstamp = skb->tstamp; 1153 buff->tstamp = skb->tstamp;
1154 tcp_fragment_tstamp(skb, buff);
1138 1155
1139 old_factor = tcp_skb_pcount(skb); 1156 old_factor = tcp_skb_pcount(skb);
1140 1157
@@ -1651,6 +1668,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
1651 1668
1652 buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL; 1669 buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL;
1653 skb_split(skb, buff, len); 1670 skb_split(skb, buff, len);
1671 tcp_fragment_tstamp(skb, buff);
1654 1672
1655 /* Fix up tso_factor for both original and new SKB. */ 1673 /* Fix up tso_factor for both original and new SKB. */
1656 tcp_set_skb_tso_segs(sk, skb, mss_now); 1674 tcp_set_skb_tso_segs(sk, skb, mss_now);
@@ -1916,8 +1934,11 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
1916 tso_segs = tcp_init_tso_segs(sk, skb, mss_now); 1934 tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
1917 BUG_ON(!tso_segs); 1935 BUG_ON(!tso_segs);
1918 1936
1919 if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) 1937 if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
1938 /* "when" is used as a start point for the retransmit timer */
1939 TCP_SKB_CB(skb)->when = tcp_time_stamp;
1920 goto repair; /* Skip network transmission */ 1940 goto repair; /* Skip network transmission */
1941 }
1921 1942
1922 cwnd_quota = tcp_cwnd_test(tp, skb); 1943 cwnd_quota = tcp_cwnd_test(tp, skb);
1923 if (!cwnd_quota) { 1944 if (!cwnd_quota) {
@@ -3301,3 +3322,18 @@ void tcp_send_probe0(struct sock *sk)
3301 TCP_RTO_MAX); 3322 TCP_RTO_MAX);
3302 } 3323 }
3303} 3324}
3325
3326int tcp_rtx_synack(struct sock *sk, struct request_sock *req)
3327{
3328 const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
3329 struct flowi fl;
3330 int res;
3331
3332 res = af_ops->send_synack(sk, NULL, &fl, req, 0, NULL);
3333 if (!res) {
3334 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
3335 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
3336 }
3337 return res;
3338}
3339EXPORT_SYMBOL(tcp_rtx_synack);