Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	219
1 file changed, 28 insertions(+), 191 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b224eb8bce8..055ac49b8b4 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -93,7 +93,6 @@ int sysctl_tcp_rfc1337 __read_mostly;
 int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
 int sysctl_tcp_frto __read_mostly = 2;
 int sysctl_tcp_frto_response __read_mostly;
-int sysctl_tcp_nometrics_save __read_mostly;
 
 int sysctl_tcp_thin_dupack __read_mostly;
 
@@ -701,7 +700,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
 /* Calculate rto without backoff. This is the second half of Van Jacobson's
  * routine referred to above.
  */
-static inline void tcp_set_rto(struct sock *sk)
+void tcp_set_rto(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	/* Old crap is replaced with new one. 8)
@@ -728,109 +727,6 @@ static inline void tcp_set_rto(struct sock *sk)
 	tcp_bound_rto(sk);
 }
 
-/* Save metrics learned by this TCP session.
-   This function is called only, when TCP finishes successfully
-   i.e. when it enters TIME-WAIT or goes from LAST-ACK to CLOSE.
- */
-void tcp_update_metrics(struct sock *sk)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct dst_entry *dst = __sk_dst_get(sk);
-
-	if (sysctl_tcp_nometrics_save)
-		return;
-
-	dst_confirm(dst);
-
-	if (dst && (dst->flags & DST_HOST)) {
-		const struct inet_connection_sock *icsk = inet_csk(sk);
-		int m;
-		unsigned long rtt;
-
-		if (icsk->icsk_backoff || !tp->srtt) {
-			/* This session failed to estimate rtt. Why?
-			 * Probably, no packets returned in time.
-			 * Reset our results.
-			 */
-			if (!(dst_metric_locked(dst, RTAX_RTT)))
-				dst_metric_set(dst, RTAX_RTT, 0);
-			return;
-		}
-
-		rtt = dst_metric_rtt(dst, RTAX_RTT);
-		m = rtt - tp->srtt;
-
-		/* If newly calculated rtt larger than stored one,
-		 * store new one. Otherwise, use EWMA. Remember,
-		 * rtt overestimation is always better than underestimation.
-		 */
-		if (!(dst_metric_locked(dst, RTAX_RTT))) {
-			if (m <= 0)
-				set_dst_metric_rtt(dst, RTAX_RTT, tp->srtt);
-			else
-				set_dst_metric_rtt(dst, RTAX_RTT, rtt - (m >> 3));
-		}
-
-		if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
-			unsigned long var;
-			if (m < 0)
-				m = -m;
-
-			/* Scale deviation to rttvar fixed point */
-			m >>= 1;
-			if (m < tp->mdev)
-				m = tp->mdev;
-
-			var = dst_metric_rtt(dst, RTAX_RTTVAR);
-			if (m >= var)
-				var = m;
-			else
-				var -= (var - m) >> 2;
-
-			set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
-		}
-
-		if (tcp_in_initial_slowstart(tp)) {
-			/* Slow start still did not finish. */
-			if (dst_metric(dst, RTAX_SSTHRESH) &&
-			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
-			    (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
-				dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_cwnd >> 1);
-			if (!dst_metric_locked(dst, RTAX_CWND) &&
-			    tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
-				dst_metric_set(dst, RTAX_CWND, tp->snd_cwnd);
-		} else if (tp->snd_cwnd > tp->snd_ssthresh &&
-			   icsk->icsk_ca_state == TCP_CA_Open) {
-			/* Cong. avoidance phase, cwnd is reliable. */
-			if (!dst_metric_locked(dst, RTAX_SSTHRESH))
-				dst_metric_set(dst, RTAX_SSTHRESH,
-					       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
-			if (!dst_metric_locked(dst, RTAX_CWND))
-				dst_metric_set(dst, RTAX_CWND,
-					       (dst_metric(dst, RTAX_CWND) +
-						tp->snd_cwnd) >> 1);
-		} else {
-			/* Else slow start did not finish, cwnd is non-sense,
-			   ssthresh may be also invalid.
-			 */
-			if (!dst_metric_locked(dst, RTAX_CWND))
-				dst_metric_set(dst, RTAX_CWND,
-					       (dst_metric(dst, RTAX_CWND) +
-						tp->snd_ssthresh) >> 1);
-			if (dst_metric(dst, RTAX_SSTHRESH) &&
-			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
-			    tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH))
-				dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_ssthresh);
-		}
-
-		if (!dst_metric_locked(dst, RTAX_REORDERING)) {
-			if (dst_metric(dst, RTAX_REORDERING) < tp->reordering &&
-			    tp->reordering != sysctl_tcp_reordering)
-				dst_metric_set(dst, RTAX_REORDERING, tp->reordering);
-		}
-	}
-}
-
 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
 {
 	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
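The tcp_update_metrics() body removed above folds each session's final srtt/mdev into the per-route RTAX_RTT/RTAX_RTTVAR metrics with the same conservative smoothing the live estimator uses: a sample larger than the cached RTT replaces it outright (overestimation is preferred to underestimation), a smaller one only pulls the cache down by 1/8 of the difference, and the deviation moves with a gain of 1/4. The following stand-alone C model (not kernel code; names and the values in main() are purely illustrative) reproduces just that arithmetic:

/* User-space model of the removed per-route RTT metric update; not kernel
 * code, units are whatever fixed-point scale tp->srtt uses.
 */
#include <stdio.h>

static unsigned long cached_rtt, cached_rttvar;	/* RTAX_RTT / RTAX_RTTVAR stand-ins */

static void metrics_update(unsigned long srtt, unsigned long mdev)
{
	long m = (long)cached_rtt - (long)srtt;

	/* RTT: larger samples replace the cache, smaller ones decay it by m/8. */
	if (m <= 0)
		cached_rtt = srtt;
	else
		cached_rtt -= (m >> 3);

	/* RTTVAR: scale the deviation, clamp it to mdev, then EWMA with gain 1/4. */
	if (m < 0)
		m = -m;
	m >>= 1;
	if ((unsigned long)m < mdev)
		m = mdev;
	if ((unsigned long)m >= cached_rttvar)
		cached_rttvar = m;
	else
		cached_rttvar -= (cached_rttvar - m) >> 2;
}

int main(void)
{
	cached_rtt = 100;
	cached_rttvar = 20;
	metrics_update(80, 10);		/* session measured a lower RTT */
	printf("rtt=%lu rttvar=%lu\n", cached_rtt, cached_rttvar);
	metrics_update(140, 10);	/* session measured a higher RTT */
	printf("rtt=%lu rttvar=%lu\n", cached_rtt, cached_rttvar);
	return 0;
}

Run as an ordinary user-space program this prints rtt=98 rttvar=18 and then rtt=140 rttvar=21, showing the asymmetry: the cache eases down slowly but jumps up immediately.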
@@ -867,7 +763,7 @@ void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
  * Packet counting of FACK is based on in-order assumptions, therefore TCP
  * disables it when reordering is detected
  */
-static void tcp_disable_fack(struct tcp_sock *tp)
+void tcp_disable_fack(struct tcp_sock *tp)
 {
 	/* RFC3517 uses different metric in lost marker => reset on change */
 	if (tcp_is_fack(tp))
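As with tcp_set_rto() above, tcp_disable_fack() drops its static qualifier here, presumably so that the metrics code being moved out of this file can keep calling both helpers. That would require matching declarations in a shared header; a hedged sketch of what they might look like (the header location is an assumption, this diff does not show it):

/* include/net/tcp.h (assumed location, not part of this diff) */
extern void tcp_disable_fack(struct tcp_sock *tp);
extern void tcp_set_rto(struct sock *sk);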
@@ -881,86 +777,6 @@ static void tcp_dsack_seen(struct tcp_sock *tp)
 	tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
 }
 
-/* Initialize metrics on socket. */
-
-static void tcp_init_metrics(struct sock *sk)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct dst_entry *dst = __sk_dst_get(sk);
-
-	if (dst == NULL)
-		goto reset;
-
-	dst_confirm(dst);
-
-	if (dst_metric_locked(dst, RTAX_CWND))
-		tp->snd_cwnd_clamp = dst_metric(dst, RTAX_CWND);
-	if (dst_metric(dst, RTAX_SSTHRESH)) {
-		tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH);
-		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
-			tp->snd_ssthresh = tp->snd_cwnd_clamp;
-	} else {
-		/* ssthresh may have been reduced unnecessarily during.
-		 * 3WHS. Restore it back to its initial default.
-		 */
-		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
-	}
-	if (dst_metric(dst, RTAX_REORDERING) &&
-	    tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
-		tcp_disable_fack(tp);
-		tcp_disable_early_retrans(tp);
-		tp->reordering = dst_metric(dst, RTAX_REORDERING);
-	}
-
-	if (dst_metric(dst, RTAX_RTT) == 0 || tp->srtt == 0)
-		goto reset;
-
-	/* Initial rtt is determined from SYN,SYN-ACK.
-	 * The segment is small and rtt may appear much
-	 * less than real one. Use per-dst memory
-	 * to make it more realistic.
-	 *
-	 * A bit of theory. RTT is time passed after "normal" sized packet
-	 * is sent until it is ACKed. In normal circumstances sending small
-	 * packets force peer to delay ACKs and calculation is correct too.
-	 * The algorithm is adaptive and, provided we follow specs, it
-	 * NEVER underestimate RTT. BUT! If peer tries to make some clever
-	 * tricks sort of "quick acks" for time long enough to decrease RTT
-	 * to low value, and then abruptly stops to do it and starts to delay
-	 * ACKs, wait for troubles.
-	 */
-	if (dst_metric_rtt(dst, RTAX_RTT) > tp->srtt) {
-		tp->srtt = dst_metric_rtt(dst, RTAX_RTT);
-		tp->rtt_seq = tp->snd_nxt;
-	}
-	if (dst_metric_rtt(dst, RTAX_RTTVAR) > tp->mdev) {
-		tp->mdev = dst_metric_rtt(dst, RTAX_RTTVAR);
-		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
-	}
-	tcp_set_rto(sk);
-reset:
-	if (tp->srtt == 0) {
-		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
-		 * 3WHS. This is most likely due to retransmission,
-		 * including spurious one. Reset the RTO back to 3secs
-		 * from the more aggressive 1sec to avoid more spurious
-		 * retransmission.
-		 */
-		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
-		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
-	}
-	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
-	 * retransmitted. In light of RFC6298 more aggressive 1sec
-	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
-	 * retransmission has occurred.
-	 */
-	if (tp->total_retrans > 1)
-		tp->snd_cwnd = 1;
-	else
-		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
-	tp->snd_cwnd_stamp = tcp_time_stamp;
-}
-
 static void tcp_update_reordering(struct sock *sk, const int metric,
 				  const int ts)
 {
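Two decisions made by the removed tcp_init_metrics() are worth calling out: when the three-way handshake yields no usable RTT sample, the RTO is pushed back from the aggressive 1 s initial value to the conservative 3 s of RFC 6298, and the initial cwnd is cut to 1 per RFC 5681 only when more than one SYN/SYN-ACK retransmission occurred. A user-space sketch of just those two branches follows; the constants and the fixed RTO placeholder are illustrative, not kernel values:

/* Stand-alone model of two tcp_init_metrics() decisions; illustrative only. */
#include <stdio.h>

#define TIMEOUT_FALLBACK_MS	3000	/* RFC 6298 conservative fallback RTO */
#define DEFAULT_INIT_CWND	10	/* assumed route/default initial window */

static void init_metrics_model(unsigned int srtt_ms, unsigned int total_retrans,
			       unsigned int *rto_ms, unsigned int *cwnd)
{
	if (srtt_ms == 0) {
		/* No valid RTT from the 3WHS, most likely a retransmitted
		 * SYN: fall back to the 3 s RTO to avoid further spurious
		 * retransmissions.
		 */
		*rto_ms = TIMEOUT_FALLBACK_MS;
	} else {
		/* Otherwise the RTO comes from tcp_set_rto(); a fixed
		 * placeholder stands in for that computation here.
		 */
		*rto_ms = 1000;
	}

	/* Cut cwnd to 1 per RFC 5681 only if more than one SYN or SYN-ACK
	 * had to be retransmitted.
	 */
	*cwnd = (total_retrans > 1) ? 1 : DEFAULT_INIT_CWND;
}

int main(void)
{
	unsigned int rto, cwnd;

	init_metrics_model(40, 0, &rto, &cwnd);
	printf("clean handshake: rto=%u ms, cwnd=%u\n", rto, cwnd);
	init_metrics_model(0, 2, &rto, &cwnd);
	printf("lossy handshake: rto=%u ms, cwnd=%u\n", rto, cwnd);
	return 0;
}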
@@ -3869,9 +3685,11 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 		tcp_cong_avoid(sk, ack, prior_in_flight);
 	}
 
-	if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP))
-		dst_confirm(__sk_dst_get(sk));
-
+	if ((flag & FLAG_FORWARD_PROGRESS) || !(flag & FLAG_NOT_DUP)) {
+		struct dst_entry *dst = __sk_dst_get(sk);
+		if (dst)
+			dst_confirm(dst);
+	}
 	return 1;
 
 no_queue:
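The open-coded pattern above, fetch the cached route with __sk_dst_get() and call dst_confirm() only when it is non-NULL, reappears in the TCP_FIN_WAIT1 hunk further down; presumably dst_confirm() can no longer be handed a NULL pointer after this series. A hedged sketch of a helper that could capture the pattern (hypothetical, not part of this diff):

/* Hypothetical helper, not part of this diff. */
static inline void tcp_dst_confirm(struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_get(sk);

	if (dst)
		dst_confirm(dst);
}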
@@ -5518,6 +5336,18 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 	struct tcp_sock *tp = tcp_sk(sk);
 	int res;
 
+	if (sk->sk_rx_dst) {
+		struct dst_entry *dst = sk->sk_rx_dst;
+		if (unlikely(dst->obsolete)) {
+			if (dst->ops->check(dst, 0) == NULL) {
+				dst_release(dst);
+				sk->sk_rx_dst = NULL;
+			}
+		}
+	}
+	if (unlikely(sk->sk_rx_dst == NULL))
+		sk->sk_rx_dst = dst_clone(skb_dst(skb));
+
 	/*
 	 *	Header prediction.
 	 *	The code loosely follows the one in the famous
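The block added above caches the inbound route in sk->sk_rx_dst, dropping it once it has gone obsolete and its ->check() callback no longer validates it, and re-cloning the skb's route whenever the cache is empty. A hedged sketch of how a receive-path consumer might reuse that cache instead of performing a full route lookup; the function name and exact call sequence are assumptions, not shown in this diff:

/* Hypothetical consumer of sk->sk_rx_dst; names are illustrative. */
static void tcp_use_cached_rx_dst(struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = sk->sk_rx_dst;

	if (dst)
		dst = dst_check(dst, 0);	/* revalidates an obsolete entry */
	if (dst)
		skb_dst_set_noref(skb, dst);	/* borrow without taking a ref */
}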
@@ -5729,8 +5559,10 @@ void tcp_finish_connect(struct sock *sk, struct sk_buff *skb)
 
 	tcp_set_state(sk, TCP_ESTABLISHED);
 
-	if (skb != NULL)
+	if (skb != NULL) {
+		sk->sk_rx_dst = dst_clone(skb_dst(skb));
 		security_inet_conn_established(sk, skb);
+	}
 
 	/* Make sure socket is routed, for correct metrics. */
 	icsk->icsk_af_ops->rebuild_header(sk);
@@ -6126,9 +5958,14 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
 	case TCP_FIN_WAIT1:
 		if (tp->snd_una == tp->write_seq) {
+			struct dst_entry *dst;
+
 			tcp_set_state(sk, TCP_FIN_WAIT2);
 			sk->sk_shutdown |= SEND_SHUTDOWN;
-			dst_confirm(__sk_dst_get(sk));
+
+			dst = __sk_dst_get(sk);
+			if (dst)
+				dst_confirm(dst);
 
 			if (!sock_flag(sk, SOCK_DEAD))
 				/* Wake up lingering close() */