author     David S. Miller <davem@davemloft.net>  2012-07-09 19:07:30 -0400
committer  David S. Miller <davem@davemloft.net>  2012-07-10 23:31:36 -0400
commit     4aabd8ef8c43677cfee3e1e36c5a79edddb41942 (patch)
tree       a826352ad00ffcc1632edecfe4ebd77db1301e4b /net/ipv4
parent     ad7eee98bef92481581060801bdfd1b25a6106c0 (diff)
tcp: Move dynamic metrics handling into separate file.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')

-rw-r--r--  net/ipv4/Makefile        2
-rw-r--r--  net/ipv4/tcp_input.c   188
-rw-r--r--  net/ipv4/tcp_metrics.c 192

3 files changed, 195 insertions, 187 deletions
diff --git a/net/ipv4/Makefile b/net/ipv4/Makefile
index ff75d3bbcd6..5a23e8b3710 100644
--- a/net/ipv4/Makefile
+++ b/net/ipv4/Makefile
@@ -7,7 +7,7 @@ obj-y     := route.o inetpeer.o protocol.o \
 	     ip_output.o ip_sockglue.o inet_hashtables.o \
 	     inet_timewait_sock.o inet_connection_sock.o \
 	     tcp.o tcp_input.o tcp_output.o tcp_timer.o tcp_ipv4.o \
-	     tcp_minisocks.o tcp_cong.o \
+	     tcp_minisocks.o tcp_cong.o tcp_metrics.o \
 	     datagram.o raw.o udp.o udplite.o \
 	     arp.o icmp.o devinet.o af_inet.o igmp.o \
 	     fib_frontend.o fib_semantics.o fib_trie.o \
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ca0d0e7c977..055ac49b8b4 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -93,7 +93,6 @@ int sysctl_tcp_rfc1337 __read_mostly;
 int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
 int sysctl_tcp_frto __read_mostly = 2;
 int sysctl_tcp_frto_response __read_mostly;
-int sysctl_tcp_nometrics_save __read_mostly;
 
 int sysctl_tcp_thin_dupack __read_mostly;
 
@@ -701,7 +700,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
 /* Calculate rto without backoff.  This is the second half of Van Jacobson's
  * routine referred to above.
  */
-static inline void tcp_set_rto(struct sock *sk)
+void tcp_set_rto(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	/* Old crap is replaced with new one. 8)
@@ -728,109 +727,6 @@ static inline void tcp_set_rto(struct sock *sk)
 	tcp_bound_rto(sk);
 }
 
-/* Save metrics learned by this TCP session.
-   This function is called only, when TCP finishes successfully
-   i.e. when it enters TIME-WAIT or goes from LAST-ACK to CLOSE.
- */
-void tcp_update_metrics(struct sock *sk)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct dst_entry *dst = __sk_dst_get(sk);
-
-	if (sysctl_tcp_nometrics_save)
-		return;
-
-	if (dst && (dst->flags & DST_HOST)) {
-		const struct inet_connection_sock *icsk = inet_csk(sk);
-		int m;
-		unsigned long rtt;
-
-		dst_confirm(dst);
-
-		if (icsk->icsk_backoff || !tp->srtt) {
-			/* This session failed to estimate rtt. Why?
-			 * Probably, no packets returned in time.
-			 * Reset our results.
-			 */
-			if (!(dst_metric_locked(dst, RTAX_RTT)))
-				dst_metric_set(dst, RTAX_RTT, 0);
-			return;
-		}
-
-		rtt = dst_metric_rtt(dst, RTAX_RTT);
-		m = rtt - tp->srtt;
-
-		/* If newly calculated rtt larger than stored one,
-		 * store new one. Otherwise, use EWMA. Remember,
-		 * rtt overestimation is always better than underestimation.
-		 */
-		if (!(dst_metric_locked(dst, RTAX_RTT))) {
-			if (m <= 0)
-				set_dst_metric_rtt(dst, RTAX_RTT, tp->srtt);
-			else
-				set_dst_metric_rtt(dst, RTAX_RTT, rtt - (m >> 3));
-		}
-
-		if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
-			unsigned long var;
-			if (m < 0)
-				m = -m;
-
-			/* Scale deviation to rttvar fixed point */
-			m >>= 1;
-			if (m < tp->mdev)
-				m = tp->mdev;
-
-			var = dst_metric_rtt(dst, RTAX_RTTVAR);
-			if (m >= var)
-				var = m;
-			else
-				var -= (var - m) >> 2;
-
-			set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
-		}
-
-		if (tcp_in_initial_slowstart(tp)) {
-			/* Slow start still did not finish. */
-			if (dst_metric(dst, RTAX_SSTHRESH) &&
-			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
-			    (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
-				dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_cwnd >> 1);
-			if (!dst_metric_locked(dst, RTAX_CWND) &&
-			    tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
-				dst_metric_set(dst, RTAX_CWND, tp->snd_cwnd);
-		} else if (tp->snd_cwnd > tp->snd_ssthresh &&
-			   icsk->icsk_ca_state == TCP_CA_Open) {
-			/* Cong. avoidance phase, cwnd is reliable. */
-			if (!dst_metric_locked(dst, RTAX_SSTHRESH))
-				dst_metric_set(dst, RTAX_SSTHRESH,
-					       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
-			if (!dst_metric_locked(dst, RTAX_CWND))
-				dst_metric_set(dst, RTAX_CWND,
-					       (dst_metric(dst, RTAX_CWND) +
-						tp->snd_cwnd) >> 1);
-		} else {
-			/* Else slow start did not finish, cwnd is non-sense,
-			   ssthresh may be also invalid.
-			 */
-			if (!dst_metric_locked(dst, RTAX_CWND))
-				dst_metric_set(dst, RTAX_CWND,
-					       (dst_metric(dst, RTAX_CWND) +
-						tp->snd_ssthresh) >> 1);
-			if (dst_metric(dst, RTAX_SSTHRESH) &&
-			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
-			    tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH))
-				dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_ssthresh);
-		}
-
-		if (!dst_metric_locked(dst, RTAX_REORDERING)) {
-			if (dst_metric(dst, RTAX_REORDERING) < tp->reordering &&
-			    tp->reordering != sysctl_tcp_reordering)
-				dst_metric_set(dst, RTAX_REORDERING, tp->reordering);
-		}
-	}
-}
-
 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
 {
 	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
@@ -867,7 +763,7 @@ void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
  * Packet counting of FACK is based on in-order assumptions, therefore TCP
  * disables it when reordering is detected
  */
-static void tcp_disable_fack(struct tcp_sock *tp)
+void tcp_disable_fack(struct tcp_sock *tp)
 {
 	/* RFC3517 uses different metric in lost marker => reset on change */
 	if (tcp_is_fack(tp))
@@ -881,86 +777,6 @@ static void tcp_dsack_seen(struct tcp_sock *tp)
 	tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
 }
 
-/* Initialize metrics on socket. */
-
-static void tcp_init_metrics(struct sock *sk)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct dst_entry *dst = __sk_dst_get(sk);
-
-	if (dst == NULL)
-		goto reset;
-
-	dst_confirm(dst);
-
-	if (dst_metric_locked(dst, RTAX_CWND))
-		tp->snd_cwnd_clamp = dst_metric(dst, RTAX_CWND);
-	if (dst_metric(dst, RTAX_SSTHRESH)) {
-		tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH);
-		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
-			tp->snd_ssthresh = tp->snd_cwnd_clamp;
-	} else {
-		/* ssthresh may have been reduced unnecessarily during.
-		 * 3WHS. Restore it back to its initial default.
-		 */
-		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
-	}
-	if (dst_metric(dst, RTAX_REORDERING) &&
-	    tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
-		tcp_disable_fack(tp);
-		tcp_disable_early_retrans(tp);
-		tp->reordering = dst_metric(dst, RTAX_REORDERING);
-	}
-
-	if (dst_metric(dst, RTAX_RTT) == 0 || tp->srtt == 0)
-		goto reset;
-
-	/* Initial rtt is determined from SYN,SYN-ACK.
-	 * The segment is small and rtt may appear much
-	 * less than real one. Use per-dst memory
-	 * to make it more realistic.
-	 *
-	 * A bit of theory. RTT is time passed after "normal" sized packet
-	 * is sent until it is ACKed. In normal circumstances sending small
-	 * packets force peer to delay ACKs and calculation is correct too.
-	 * The algorithm is adaptive and, provided we follow specs, it
-	 * NEVER underestimate RTT. BUT! If peer tries to make some clever
-	 * tricks sort of "quick acks" for time long enough to decrease RTT
-	 * to low value, and then abruptly stops to do it and starts to delay
-	 * ACKs, wait for troubles.
-	 */
-	if (dst_metric_rtt(dst, RTAX_RTT) > tp->srtt) {
-		tp->srtt = dst_metric_rtt(dst, RTAX_RTT);
-		tp->rtt_seq = tp->snd_nxt;
-	}
-	if (dst_metric_rtt(dst, RTAX_RTTVAR) > tp->mdev) {
-		tp->mdev = dst_metric_rtt(dst, RTAX_RTTVAR);
-		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
-	}
-	tcp_set_rto(sk);
-reset:
-	if (tp->srtt == 0) {
-		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
-		 * 3WHS. This is most likely due to retransmission,
-		 * including spurious one. Reset the RTO back to 3secs
-		 * from the more aggressive 1sec to avoid more spurious
-		 * retransmission.
-		 */
-		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
-		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
-	}
-	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
-	 * retransmitted. In light of RFC6298 more aggressive 1sec
-	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
-	 * retransmission has occurred.
-	 */
-	if (tp->total_retrans > 1)
-		tp->snd_cwnd = 1;
-	else
-		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
-	tp->snd_cwnd_stamp = tcp_time_stamp;
-}
-
 static void tcp_update_reordering(struct sock *sk, const int metric,
 				  const int ts)
 {
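
Note: the diffstat above is limited to 'net/ipv4', so the matching declaration changes outside that directory are not shown here. Because tcp_set_rto() and tcp_disable_fack() lose their static/static-inline qualifiers in tcp_input.c, and tcp_init_metrics() becomes non-static as it moves, the commit must make these symbols visible to the new tcp_metrics.c. A sketch of the declarations this implies, presumably in a shared header such as include/net/tcp.h (not verbatim from the commit):

/* Hypothetical reconstruction of the header-side exports; the actual
 * hunk lies outside the net/ipv4 diffstat shown on this page. */
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_disable_fack(struct tcp_sock *tp);
void tcp_set_rto(struct sock *sk);
extern int sysctl_tcp_nometrics_save;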
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
new file mode 100644
index 00000000000..2793ecf928d
--- /dev/null
+++ b/net/ipv4/tcp_metrics.c
@@ -0,0 +1,192 @@
+#include <linux/cache.h>
+#include <linux/tcp.h>
+
+#include <net/inet_connection_sock.h>
+#include <net/sock.h>
+#include <net/dst.h>
+#include <net/tcp.h>
+
+int sysctl_tcp_nometrics_save __read_mostly;
+
+/* Save metrics learned by this TCP session.  This function is called
+ * only, when TCP finishes successfully i.e. when it enters TIME-WAIT
+ * or goes from LAST-ACK to CLOSE.
+ */
+void tcp_update_metrics(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct dst_entry *dst = __sk_dst_get(sk);
+
+	if (sysctl_tcp_nometrics_save)
+		return;
+
+	if (dst && (dst->flags & DST_HOST)) {
+		const struct inet_connection_sock *icsk = inet_csk(sk);
+		int m;
+		unsigned long rtt;
+
+		dst_confirm(dst);
+
+		if (icsk->icsk_backoff || !tp->srtt) {
+			/* This session failed to estimate rtt. Why?
+			 * Probably, no packets returned in time.
+			 * Reset our results.
+			 */
+			if (!(dst_metric_locked(dst, RTAX_RTT)))
+				dst_metric_set(dst, RTAX_RTT, 0);
+			return;
+		}
+
+		rtt = dst_metric_rtt(dst, RTAX_RTT);
+		m = rtt - tp->srtt;
+
+		/* If newly calculated rtt larger than stored one,
+		 * store new one. Otherwise, use EWMA. Remember,
+		 * rtt overestimation is always better than underestimation.
+		 */
+		if (!(dst_metric_locked(dst, RTAX_RTT))) {
+			if (m <= 0)
+				set_dst_metric_rtt(dst, RTAX_RTT, tp->srtt);
+			else
+				set_dst_metric_rtt(dst, RTAX_RTT, rtt - (m >> 3));
+		}
+
+		if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
+			unsigned long var;
+			if (m < 0)
+				m = -m;
+
+			/* Scale deviation to rttvar fixed point */
+			m >>= 1;
+			if (m < tp->mdev)
+				m = tp->mdev;
+
+			var = dst_metric_rtt(dst, RTAX_RTTVAR);
+			if (m >= var)
+				var = m;
+			else
+				var -= (var - m) >> 2;
+
+			set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
+		}
+
+		if (tcp_in_initial_slowstart(tp)) {
+			/* Slow start still did not finish. */
+			if (dst_metric(dst, RTAX_SSTHRESH) &&
+			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
+			    (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
+				dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_cwnd >> 1);
+			if (!dst_metric_locked(dst, RTAX_CWND) &&
+			    tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
+				dst_metric_set(dst, RTAX_CWND, tp->snd_cwnd);
+		} else if (tp->snd_cwnd > tp->snd_ssthresh &&
+			   icsk->icsk_ca_state == TCP_CA_Open) {
+			/* Cong. avoidance phase, cwnd is reliable. */
+			if (!dst_metric_locked(dst, RTAX_SSTHRESH))
+				dst_metric_set(dst, RTAX_SSTHRESH,
+					       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
+			if (!dst_metric_locked(dst, RTAX_CWND))
+				dst_metric_set(dst, RTAX_CWND,
+					       (dst_metric(dst, RTAX_CWND) +
+						tp->snd_cwnd) >> 1);
+		} else {
+			/* Else slow start did not finish, cwnd is non-sense,
+			   ssthresh may be also invalid.
+			 */
+			if (!dst_metric_locked(dst, RTAX_CWND))
+				dst_metric_set(dst, RTAX_CWND,
+					       (dst_metric(dst, RTAX_CWND) +
+						tp->snd_ssthresh) >> 1);
+			if (dst_metric(dst, RTAX_SSTHRESH) &&
+			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
+			    tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH))
+				dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_ssthresh);
+		}
+
+		if (!dst_metric_locked(dst, RTAX_REORDERING)) {
+			if (dst_metric(dst, RTAX_REORDERING) < tp->reordering &&
+			    tp->reordering != sysctl_tcp_reordering)
+				dst_metric_set(dst, RTAX_REORDERING, tp->reordering);
+		}
+	}
+}
+
+/* Initialize metrics on socket. */
+
+void tcp_init_metrics(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	struct dst_entry *dst = __sk_dst_get(sk);
+
+	if (dst == NULL)
+		goto reset;
+
+	dst_confirm(dst);
+
+	if (dst_metric_locked(dst, RTAX_CWND))
+		tp->snd_cwnd_clamp = dst_metric(dst, RTAX_CWND);
+	if (dst_metric(dst, RTAX_SSTHRESH)) {
+		tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH);
+		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
+			tp->snd_ssthresh = tp->snd_cwnd_clamp;
+	} else {
+		/* ssthresh may have been reduced unnecessarily during.
+		 * 3WHS. Restore it back to its initial default.
+		 */
+		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
+	}
+	if (dst_metric(dst, RTAX_REORDERING) &&
+	    tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
+		tcp_disable_fack(tp);
+		tcp_disable_early_retrans(tp);
+		tp->reordering = dst_metric(dst, RTAX_REORDERING);
+	}
+
+	if (dst_metric(dst, RTAX_RTT) == 0 || tp->srtt == 0)
+		goto reset;
+
+	/* Initial rtt is determined from SYN,SYN-ACK.
+	 * The segment is small and rtt may appear much
+	 * less than real one. Use per-dst memory
+	 * to make it more realistic.
+	 *
+	 * A bit of theory. RTT is time passed after "normal" sized packet
+	 * is sent until it is ACKed. In normal circumstances sending small
+	 * packets force peer to delay ACKs and calculation is correct too.
+	 * The algorithm is adaptive and, provided we follow specs, it
+	 * NEVER underestimate RTT. BUT! If peer tries to make some clever
+	 * tricks sort of "quick acks" for time long enough to decrease RTT
+	 * to low value, and then abruptly stops to do it and starts to delay
+	 * ACKs, wait for troubles.
+	 */
+	if (dst_metric_rtt(dst, RTAX_RTT) > tp->srtt) {
+		tp->srtt = dst_metric_rtt(dst, RTAX_RTT);
+		tp->rtt_seq = tp->snd_nxt;
+	}
+	if (dst_metric_rtt(dst, RTAX_RTTVAR) > tp->mdev) {
+		tp->mdev = dst_metric_rtt(dst, RTAX_RTTVAR);
+		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
+	}
+	tcp_set_rto(sk);
+reset:
+	if (tp->srtt == 0) {
+		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
+		 * 3WHS. This is most likely due to retransmission,
+		 * including spurious one. Reset the RTO back to 3secs
+		 * from the more aggressive 1sec to avoid more spurious
+		 * retransmission.
+		 */
+		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
+		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
+	}
+	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
+	 * retransmitted. In light of RFC6298 more aggressive 1sec
+	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
+	 * retransmission has occurred.
+	 */
+	if (tp->total_retrans > 1)
+		tp->snd_cwnd = 1;
+	else
+		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
+	tp->snd_cwnd_stamp = tcp_time_stamp;
+}
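
For readers tracing the arithmetic being moved, the following standalone userspace sketch replays the RTT smoothing from tcp_update_metrics() with plain integers. The cached_rtt/cached_var globals and the sample values are hypothetical stand-ins for the per-destination RTAX_RTT/RTAX_RTTVAR metrics, and the kernel's fixed-point units are simplified away; this is illustrative only, not part of the commit:

/* Userspace mock of the EWMA in tcp_update_metrics():
 *   cached' = sample                         when the sample is larger
 *   cached' = cached - (cached - sample)/8   when the sample is smaller
 * i.e. a larger sample replaces the cache outright, a smaller one only
 * pulls it down with gain 1/8.
 */
#include <stdio.h>

static long cached_rtt = 100;	/* hypothetical cached RTAX_RTT */
static long cached_var = 20;	/* hypothetical cached RTAX_RTTVAR */

static void update_metrics(long srtt, long mdev)
{
	long m = cached_rtt - srtt;

	if (m <= 0)
		cached_rtt = srtt;		/* sample >= cache: take it */
	else
		cached_rtt -= (m >> 3);		/* sample < cache: decay by 1/8 */

	if (m < 0)
		m = -m;
	m >>= 1;			/* scale deviation to rttvar fixed point */
	if (m < mdev)
		m = mdev;
	if (m >= cached_var)
		cached_var = m;			/* growing variance: take it */
	else
		cached_var -= (cached_var - m) >> 2;	/* shrinking: gain 1/4 */
}

int main(void)
{
	/* Hypothetical samples: one fast session, then one slow one. */
	update_metrics(60, 10);
	printf("after fast session: rtt=%ld var=%ld\n", cached_rtt, cached_var);
	update_metrics(180, 30);
	printf("after slow session: rtt=%ld var=%ld\n", cached_rtt, cached_var);
	return 0;
}

The asymmetry (adopt a larger sample verbatim, decay only slowly toward a smaller one) encodes the rule stated in the code comment: overestimating RTT is always safer than underestimating it.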