Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	62
1 file changed, 30 insertions(+), 32 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3e98b57578dc..e43065654930 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -548,10 +548,9 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_
  * To save cycles in the RFC 1323 implementation it was better to break
  * it up into three procedures. -- erics
  */
-static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt, u32 *usrtt)
+static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	const struct inet_connection_sock *icsk = inet_csk(sk);
 	long m = mrtt;	/* RTT */
 
 	/* The following amusing code comes from Jacobson's
@@ -610,9 +609,6 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt, u32 *usrtt)
 		tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
 		tp->rtt_seq = tp->snd_nxt;
 	}
-
-	if (icsk->icsk_ca_ops->rtt_sample)
-		icsk->icsk_ca_ops->rtt_sample(sk, *usrtt);
 }
 
 /* Calculate rto without backoff. This is the second half of Van Jacobson's
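
The two hunks above touch only the entry and exit of tcp_rtt_estimator(); the body between them is the classic Jacobson/Karels smoother, with srtt kept scaled by 8 and mdev by 4. The following standalone C sketch shows the core update that still runs after this patch. It is an illustration only, not the kernel's code: struct rtt_est and rtt_update are invented names, and the mdev_max/rttvar windowing and the small-deviation special case are omitted.

#include <stdint.h>

/* Fixed-point state mirroring tp->srtt (scaled << 3) and tp->mdev (scaled << 2). */
struct rtt_est {
	uint32_t srtt;
	uint32_t mdev;
};

static void rtt_update(struct rtt_est *e, uint32_t mrtt)
{
	long m = mrtt;

	if (e->srtt == 0) {
		/* First measurement: srtt = m, mdev = m/2 (in scaled units). */
		e->srtt = m << 3;
		e->mdev = m << 1;
		return;
	}
	m -= (long)(e->srtt >> 3);	/* error against current estimate */
	e->srtt += m;			/* srtt = 7/8 srtt + 1/8 mrtt */
	if (m < 0)
		m = -m;
	m -= (long)(e->mdev >> 2);
	e->mdev += m;			/* mdev = 3/4 mdev + 1/4 |error| */
}

The patch does not change this arithmetic; it only removes the per-sample rtt_sample callback at the end of the function, which now fires from tcp_clean_rtx_queue() instead.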
@@ -1921,7 +1917,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 /* Read draft-ietf-tcplw-high-performance before mucking
  * with this code. (Superceeds RFC1323)
  */
-static void tcp_ack_saw_tstamp(struct sock *sk, u32 *usrtt, int flag)
+static void tcp_ack_saw_tstamp(struct sock *sk, int flag)
 {
 	/* RTTM Rule: A TSecr value received in a segment is used to
 	 * update the averaged RTT measurement only if the segment
@@ -1940,13 +1936,13 @@ static void tcp_ack_saw_tstamp(struct sock *sk, u32 *usrtt, int flag)
 	 */
 	struct tcp_sock *tp = tcp_sk(sk);
 	const __u32 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
-	tcp_rtt_estimator(sk, seq_rtt, usrtt);
+	tcp_rtt_estimator(sk, seq_rtt);
 	tcp_set_rto(sk);
 	inet_csk(sk)->icsk_backoff = 0;
 	tcp_bound_rto(sk);
 }
 
-static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, u32 *usrtt, int flag)
+static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag)
 {
 	/* We don't have a timestamp. Can only use
 	 * packets that are not retransmitted to determine
@@ -1960,21 +1956,21 @@ static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, u32 *usrtt, int flag
 	if (flag & FLAG_RETRANS_DATA_ACKED)
 		return;
 
-	tcp_rtt_estimator(sk, seq_rtt, usrtt);
+	tcp_rtt_estimator(sk, seq_rtt);
 	tcp_set_rto(sk);
 	inet_csk(sk)->icsk_backoff = 0;
 	tcp_bound_rto(sk);
 }
 
 static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
-				      const s32 seq_rtt, u32 *usrtt)
+				      const s32 seq_rtt)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	/* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */
 	if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
-		tcp_ack_saw_tstamp(sk, usrtt, flag);
+		tcp_ack_saw_tstamp(sk, flag);
 	else if (seq_rtt >= 0)
-		tcp_ack_no_tstamp(sk, seq_rtt, usrtt, flag);
+		tcp_ack_no_tstamp(sk, seq_rtt, flag);
 }
 
 static inline void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
@@ -2054,20 +2050,27 @@ static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb,
 	return acked;
 }
 
+static inline u32 tcp_usrtt(const struct sk_buff *skb)
+{
+	struct timeval tv, now;
+
+	do_gettimeofday(&now);
+	skb_get_timestamp(skb, &tv);
+	return (now.tv_sec - tv.tv_sec) * 1000000 + (now.tv_usec - tv.tv_usec);
+}
 
 /* Remove acknowledged frames from the retransmission queue. */
-static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt)
+static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct sk_buff *skb;
 	__u32 now = tcp_time_stamp;
 	int acked = 0;
 	__s32 seq_rtt = -1;
-	struct timeval usnow;
 	u32 pkts_acked = 0;
-
-	if (seq_usrtt)
-		do_gettimeofday(&usnow);
+	void (*rtt_sample)(struct sock *sk, u32 usrtt)
+		= icsk->icsk_ca_ops->rtt_sample;
 
 	while ((skb = skb_peek(&sk->sk_write_queue)) &&
 	       skb != sk->sk_send_head) {
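
The tcp_usrtt() helper added above reduces to a plain timeval subtraction in microseconds. A userspace sketch of the same arithmetic (hypothetical names; gettimeofday() standing in for do_gettimeofday() and skb_get_timestamp()) shows why no explicit borrow handling is needed: a negative tv_usec difference is compensated by the seconds term.

#include <stdio.h>
#include <sys/time.h>

/* Microsecond delta between two timevals, same formula as tcp_usrtt(). */
static long usec_elapsed(const struct timeval *then, const struct timeval *now)
{
	return (now->tv_sec - then->tv_sec) * 1000000L
	       + (now->tv_usec - then->tv_usec);
}

int main(void)
{
	struct timeval sent, acked;

	gettimeofday(&sent, NULL);	/* stands in for the skb transmit timestamp */
	/* ... segment goes out and its ACK comes back ... */
	gettimeofday(&acked, NULL);
	printf("rtt sample: %ld usec\n", usec_elapsed(&sent, &acked));
	return 0;
}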
@@ -2107,16 +2110,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
 				tp->retrans_out -= tcp_skb_pcount(skb);
 				acked |= FLAG_RETRANS_DATA_ACKED;
 				seq_rtt = -1;
-			} else if (seq_rtt < 0)
+			} else if (seq_rtt < 0) {
 				seq_rtt = now - scb->when;
-			if (seq_usrtt) {
-				struct timeval tv;
-
-				skb_get_timestamp(skb, &tv);
-				*seq_usrtt = (usnow.tv_sec - tv.tv_sec) * 1000000
-					+ (usnow.tv_usec - tv.tv_usec);
+				if (rtt_sample)
+					(*rtt_sample)(sk, tcp_usrtt(skb));
 			}
-
 			if (sacked & TCPCB_SACKED_ACKED)
 				tp->sacked_out -= tcp_skb_pcount(skb);
 			if (sacked & TCPCB_LOST)
@@ -2126,8 +2124,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
 				    !before(scb->end_seq, tp->snd_up))
 					tp->urg_mode = 0;
 			}
-		} else if (seq_rtt < 0)
+		} else if (seq_rtt < 0) {
 			seq_rtt = now - scb->when;
+			if (rtt_sample)
+				(*rtt_sample)(sk, tcp_usrtt(skb));
+		}
 		tcp_dec_pcount_approx(&tp->fackets_out, skb);
 		tcp_packets_out_dec(tp, skb);
 		__skb_unlink(skb, &sk->sk_write_queue);
@@ -2135,8 +2136,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
 	}
 
 	if (acked&FLAG_ACKED) {
-		const struct inet_connection_sock *icsk = inet_csk(sk);
-		tcp_ack_update_rtt(sk, acked, seq_rtt, seq_usrtt);
+		tcp_ack_update_rtt(sk, acked, seq_rtt);
 		tcp_ack_packets_out(sk, tp);
 
 		if (icsk->icsk_ca_ops->pkts_acked)
@@ -2299,7 +2299,6 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	u32 ack = TCP_SKB_CB(skb)->ack_seq;
 	u32 prior_in_flight;
 	s32 seq_rtt;
-	s32 seq_usrtt = 0;
 	int prior_packets;
 
 	/* If the ack is newer than sent or older than previous acks
@@ -2352,8 +2351,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	prior_in_flight = tcp_packets_in_flight(tp);
 
 	/* See if we can take anything off of the retransmit queue. */
-	flag |= tcp_clean_rtx_queue(sk, &seq_rtt,
-				    icsk->icsk_ca_ops->rtt_sample ? &seq_usrtt : NULL);
+	flag |= tcp_clean_rtx_queue(sk, &seq_rtt);
 
 	if (tp->frto_counter)
 		tcp_process_frto(sk, prior_snd_una);
@@ -4242,7 +4240,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 	 */
 	if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
 	    !tp->srtt)
-		tcp_ack_saw_tstamp(sk, 0);
+		tcp_ack_saw_tstamp(sk, 0);
 
 	if (tp->rx_opt.tstamp_ok)
 		tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
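
Taken together, the hunks above move microsecond RTT sampling out of the estimator path: tcp_clean_rtx_queue() now calls the congestion-control module's rtt_sample hook directly with tcp_usrtt(skb), and the usrtt plumbing through tcp_ack(), tcp_ack_update_rtt() and tcp_rtt_estimator() disappears. For reference, here is a minimal sketch of the consumer side, assuming the 2.6.14-era tcp_congestion_ops interface that this patch builds on. The module, its "sample" name, its min-RTT bookkeeping and the function names are invented for illustration; only the ops layout, the reno helpers and the registration calls reflect the real API.

#include <linux/module.h>
#include <net/tcp.h>

/* Private per-connection state, stored in the icsk CA area. */
struct sample_ca {
	u32 rtt_min_us;		/* smallest microsecond RTT seen so far */
};

static void sample_init(struct sock *sk)
{
	struct sample_ca *ca = inet_csk_ca(sk);

	ca->rtt_min_us = ~0U;
}

/* Receives the value computed by tcp_usrtt() for each newly acked skb. */
static void sample_rtt(struct sock *sk, u32 usrtt)
{
	struct sample_ca *ca = inet_csk_ca(sk);

	if (usrtt < ca->rtt_min_us)
		ca->rtt_min_us = usrtt;
}

static struct tcp_congestion_ops tcp_sample = {
	.init		= sample_init,
	.rtt_sample	= sample_rtt,
	.ssthresh	= tcp_reno_ssthresh,
	.cong_avoid	= tcp_reno_cong_avoid,
	.min_cwnd	= tcp_reno_min_cwnd,
	.owner		= THIS_MODULE,
	.name		= "sample",
};

static int __init tcp_sample_register(void)
{
	return tcp_register_congestion_control(&tcp_sample);
}

static void __exit tcp_sample_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_sample);
}

module_init(tcp_sample_register);
module_exit(tcp_sample_unregister);
MODULE_LICENSE("GPL");

In the real tree, RTT-based schemes such as TCP Vegas are the intended consumers of this hook, feeding the microsecond samples into their baseline-RTT estimates.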