author | David S. Miller <davem@davemloft.net> | 2012-07-09 19:07:30 -0400
committer | David S. Miller <davem@davemloft.net> | 2012-07-10 23:31:36 -0400
commit | 4aabd8ef8c43677cfee3e1e36c5a79edddb41942 (patch)
tree | a826352ad00ffcc1632edecfe4ebd77db1301e4b /net/ipv4/tcp_input.c
parent | ad7eee98bef92481581060801bdfd1b25a6106c0 (diff)
tcp: Move dynamic metrics handling into separate file.
Signed-off-by: David S. Miller <davem@davemloft.net>
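
Reading aid: the two functions this patch removes from tcp_input.c, tcp_update_metrics() and tcp_init_metrics(), save and restore per-destination TCP state (RTT, RTT variance, ssthresh, cwnd, reordering) through the route's dst metrics; the commit relocates them into the new net/ipv4/tcp_metrics.c. Below is a minimal userspace sketch of the EWMA rule tcp_update_metrics() applies to the cached RTT; the types and the helper name are illustrative stand-ins, not kernel API.

/* Sketch of the RTT EWMA in tcp_update_metrics(): the cached
 * per-destination RTT jumps up immediately when the session measured
 * a larger RTT, but only decays slowly (gain 1/8) toward a smaller
 * one, since overestimating RTT is safer than underestimating it.
 * Plain C stand-in, not kernel code. */
#include <stdio.h>

static unsigned long update_cached_rtt(unsigned long cached_rtt,
				       unsigned long session_srtt)
{
	long m = (long)cached_rtt - (long)session_srtt;

	if (m <= 0)			/* session RTT >= cache: adopt it */
		return session_srtt;
	return cached_rtt - (m >> 3);	/* else close 1/8 of the gap */
}

int main(void)
{
	/* cached 200, session measured 100: cache eases down */
	printf("%lu\n", update_cached_rtt(200, 100));	/* -> 188 */
	/* cached 100, session measured 200: cache jumps up */
	printf("%lu\n", update_cached_rtt(100, 200));	/* -> 200 */
	return 0;
}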
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r-- | net/ipv4/tcp_input.c | 188
1 file changed, 2 insertions(+), 186 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ca0d0e7c977..055ac49b8b4 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -93,7 +93,6 @@ int sysctl_tcp_rfc1337 __read_mostly;
 int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
 int sysctl_tcp_frto __read_mostly = 2;
 int sysctl_tcp_frto_response __read_mostly;
-int sysctl_tcp_nometrics_save __read_mostly;
 
 int sysctl_tcp_thin_dupack __read_mostly;
 
@@ -701,7 +700,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
 /* Calculate rto without backoff. This is the second half of Van Jacobson's
  * routine referred to above.
  */
-static inline void tcp_set_rto(struct sock *sk)
+void tcp_set_rto(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	/* Old crap is replaced with new one. 8)
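
Note: tcp_set_rto() loses its static inline qualifier here so the relocated metrics code can call it. The RTO it derives is Van Jacobson's rto = srtt + 4*mdev; because the kernel stores srtt left-shifted by 3 and the mdev-derived rttvar left-shifted by 2, the body's "(tp->srtt >> 3) + tp->rttvar" yields exactly that sum. A small standalone sketch of the arithmetic, with illustrative values in jiffies:

/* Sketch of the RTO formula behind tcp_set_rto().  The shifts mirror
 * the kernel's fixed-point storage; the numbers are made up. */
#include <stdio.h>

int main(void)
{
	unsigned int srtt_shifted3 = 100 << 3;	/* smoothed RTT 100, stored <<3 */
	unsigned int rttvar = 20 << 2;		/* mean deviation 20, stored <<2 */

	/* equivalent of icsk->icsk_rto = __tcp_set_rto(tp) */
	unsigned int rto = (srtt_shifted3 >> 3) + rttvar;

	printf("rto = %u\n", rto);		/* 100 + 4*20 = 180 */
	return 0;
}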
@@ -728,109 +727,6 @@ static inline void tcp_set_rto(struct sock *sk)
 	tcp_bound_rto(sk);
 }
 
-/* Save metrics learned by this TCP session.
-   This function is called only, when TCP finishes successfully
-   i.e. when it enters TIME-WAIT or goes from LAST-ACK to CLOSE.
- */
-void tcp_update_metrics(struct sock *sk)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct dst_entry *dst = __sk_dst_get(sk);
-
-	if (sysctl_tcp_nometrics_save)
-		return;
-
-	if (dst && (dst->flags & DST_HOST)) {
-		const struct inet_connection_sock *icsk = inet_csk(sk);
-		int m;
-		unsigned long rtt;
-
-		dst_confirm(dst);
-
-		if (icsk->icsk_backoff || !tp->srtt) {
-			/* This session failed to estimate rtt. Why?
-			 * Probably, no packets returned in time.
-			 * Reset our results.
-			 */
-			if (!(dst_metric_locked(dst, RTAX_RTT)))
-				dst_metric_set(dst, RTAX_RTT, 0);
-			return;
-		}
-
-		rtt = dst_metric_rtt(dst, RTAX_RTT);
-		m = rtt - tp->srtt;
-
-		/* If newly calculated rtt larger than stored one,
-		 * store new one. Otherwise, use EWMA. Remember,
-		 * rtt overestimation is always better than underestimation.
-		 */
-		if (!(dst_metric_locked(dst, RTAX_RTT))) {
-			if (m <= 0)
-				set_dst_metric_rtt(dst, RTAX_RTT, tp->srtt);
-			else
-				set_dst_metric_rtt(dst, RTAX_RTT, rtt - (m >> 3));
-		}
-
-		if (!(dst_metric_locked(dst, RTAX_RTTVAR))) {
-			unsigned long var;
-			if (m < 0)
-				m = -m;
-
-			/* Scale deviation to rttvar fixed point */
-			m >>= 1;
-			if (m < tp->mdev)
-				m = tp->mdev;
-
-			var = dst_metric_rtt(dst, RTAX_RTTVAR);
-			if (m >= var)
-				var = m;
-			else
-				var -= (var - m) >> 2;
-
-			set_dst_metric_rtt(dst, RTAX_RTTVAR, var);
-		}
-
-		if (tcp_in_initial_slowstart(tp)) {
-			/* Slow start still did not finish. */
-			if (dst_metric(dst, RTAX_SSTHRESH) &&
-			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
-			    (tp->snd_cwnd >> 1) > dst_metric(dst, RTAX_SSTHRESH))
-				dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_cwnd >> 1);
-			if (!dst_metric_locked(dst, RTAX_CWND) &&
-			    tp->snd_cwnd > dst_metric(dst, RTAX_CWND))
-				dst_metric_set(dst, RTAX_CWND, tp->snd_cwnd);
-		} else if (tp->snd_cwnd > tp->snd_ssthresh &&
-			   icsk->icsk_ca_state == TCP_CA_Open) {
-			/* Cong. avoidance phase, cwnd is reliable. */
-			if (!dst_metric_locked(dst, RTAX_SSTHRESH))
-				dst_metric_set(dst, RTAX_SSTHRESH,
-					       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
-			if (!dst_metric_locked(dst, RTAX_CWND))
-				dst_metric_set(dst, RTAX_CWND,
-					       (dst_metric(dst, RTAX_CWND) +
-						tp->snd_cwnd) >> 1);
-		} else {
-			/* Else slow start did not finish, cwnd is non-sense,
-			   ssthresh may be also invalid.
-			 */
-			if (!dst_metric_locked(dst, RTAX_CWND))
-				dst_metric_set(dst, RTAX_CWND,
-					       (dst_metric(dst, RTAX_CWND) +
-						tp->snd_ssthresh) >> 1);
-			if (dst_metric(dst, RTAX_SSTHRESH) &&
-			    !dst_metric_locked(dst, RTAX_SSTHRESH) &&
-			    tp->snd_ssthresh > dst_metric(dst, RTAX_SSTHRESH))
-				dst_metric_set(dst, RTAX_SSTHRESH, tp->snd_ssthresh);
-		}
-
-		if (!dst_metric_locked(dst, RTAX_REORDERING)) {
-			if (dst_metric(dst, RTAX_REORDERING) < tp->reordering &&
-			    tp->reordering != sysctl_tcp_reordering)
-				dst_metric_set(dst, RTAX_REORDERING, tp->reordering);
-		}
-	}
-}
-
 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
 {
 	__u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0);
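
For orientation: the removed tcp_update_metrics() above writes cwnd/ssthresh hints back into the route cache under three regimes, namely still in initial slow start, in steady congestion avoidance (cwnd trustworthy), or anything else (cwnd meaningless, fall back to ssthresh). A condensed userspace sketch of that policy follows; plain struct fields stand in for the dst metrics, and the metric-lock checks are omitted for brevity.

/* Sketch of the cwnd/ssthresh caching policy from the removed
 * tcp_update_metrics() (now in tcp_metrics.c).  Simplified stand-in
 * types; not kernel API. */
#include <stdbool.h>
#include <stdio.h>

struct cache { unsigned int ssthresh, cwnd; };

static void save_cwnd_metrics(struct cache *c, unsigned int snd_cwnd,
			      unsigned int snd_ssthresh,
			      bool initial_slowstart, bool cong_avoid)
{
	if (initial_slowstart) {
		/* slow start never finished: cwnd/2 is the best ssthresh hint */
		if (c->ssthresh && (snd_cwnd >> 1) > c->ssthresh)
			c->ssthresh = snd_cwnd >> 1;
		if (snd_cwnd > c->cwnd)
			c->cwnd = snd_cwnd;
	} else if (cong_avoid) {
		/* congestion avoidance: cwnd is reliable, average it in */
		c->ssthresh = (snd_cwnd >> 1) > snd_ssthresh ?
			      (snd_cwnd >> 1) : snd_ssthresh;
		c->cwnd = (c->cwnd + snd_cwnd) >> 1;
	} else {
		/* anything else: cwnd is noise, fall back to ssthresh */
		c->cwnd = (c->cwnd + snd_ssthresh) >> 1;
		if (c->ssthresh && snd_ssthresh > c->ssthresh)
			c->ssthresh = snd_ssthresh;
	}
}

int main(void)
{
	struct cache c = { .ssthresh = 10, .cwnd = 20 };

	/* congestion-avoidance path: max(cwnd/2, ssthresh), averaged cwnd */
	save_cwnd_metrics(&c, 40, 30, false, true);
	printf("ssthresh=%u cwnd=%u\n", c.ssthresh, c.cwnd);	/* 30 30 */
	return 0;
}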
@@ -867,7 +763,7 @@ void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
  * Packet counting of FACK is based on in-order assumptions, therefore TCP
  * disables it when reordering is detected
  */
-static void tcp_disable_fack(struct tcp_sock *tp)
+void tcp_disable_fack(struct tcp_sock *tp)
 {
 	/* RFC3517 uses different metric in lost marker => reset on change */
 	if (tcp_is_fack(tp))
@@ -881,86 +777,6 @@ static void tcp_dsack_seen(struct tcp_sock *tp)
 	tp->rx_opt.sack_ok |= TCP_DSACK_SEEN;
 }
 
-/* Initialize metrics on socket. */
-
-static void tcp_init_metrics(struct sock *sk)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct dst_entry *dst = __sk_dst_get(sk);
-
-	if (dst == NULL)
-		goto reset;
-
-	dst_confirm(dst);
-
-	if (dst_metric_locked(dst, RTAX_CWND))
-		tp->snd_cwnd_clamp = dst_metric(dst, RTAX_CWND);
-	if (dst_metric(dst, RTAX_SSTHRESH)) {
-		tp->snd_ssthresh = dst_metric(dst, RTAX_SSTHRESH);
-		if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
-			tp->snd_ssthresh = tp->snd_cwnd_clamp;
-	} else {
-		/* ssthresh may have been reduced unnecessarily during.
-		 * 3WHS. Restore it back to its initial default.
-		 */
-		tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
-	}
-	if (dst_metric(dst, RTAX_REORDERING) &&
-	    tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
-		tcp_disable_fack(tp);
-		tcp_disable_early_retrans(tp);
-		tp->reordering = dst_metric(dst, RTAX_REORDERING);
-	}
-
-	if (dst_metric(dst, RTAX_RTT) == 0 || tp->srtt == 0)
-		goto reset;
-
-	/* Initial rtt is determined from SYN,SYN-ACK.
-	 * The segment is small and rtt may appear much
-	 * less than real one. Use per-dst memory
-	 * to make it more realistic.
-	 *
-	 * A bit of theory. RTT is time passed after "normal" sized packet
-	 * is sent until it is ACKed. In normal circumstances sending small
-	 * packets force peer to delay ACKs and calculation is correct too.
-	 * The algorithm is adaptive and, provided we follow specs, it
-	 * NEVER underestimate RTT. BUT! If peer tries to make some clever
-	 * tricks sort of "quick acks" for time long enough to decrease RTT
-	 * to low value, and then abruptly stops to do it and starts to delay
-	 * ACKs, wait for troubles.
-	 */
-	if (dst_metric_rtt(dst, RTAX_RTT) > tp->srtt) {
-		tp->srtt = dst_metric_rtt(dst, RTAX_RTT);
-		tp->rtt_seq = tp->snd_nxt;
-	}
-	if (dst_metric_rtt(dst, RTAX_RTTVAR) > tp->mdev) {
-		tp->mdev = dst_metric_rtt(dst, RTAX_RTTVAR);
-		tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
-	}
-	tcp_set_rto(sk);
-reset:
-	if (tp->srtt == 0) {
-		/* RFC6298: 5.7 We've failed to get a valid RTT sample from
-		 * 3WHS. This is most likely due to retransmission,
-		 * including spurious one. Reset the RTO back to 3secs
-		 * from the more aggressive 1sec to avoid more spurious
-		 * retransmission.
-		 */
-		tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_FALLBACK;
-		inet_csk(sk)->icsk_rto = TCP_TIMEOUT_FALLBACK;
-	}
-	/* Cut cwnd down to 1 per RFC5681 if SYN or SYN-ACK has been
-	 * retransmitted. In light of RFC6298 more aggressive 1sec
-	 * initRTO, we only reset cwnd when more than 1 SYN/SYN-ACK
-	 * retransmission has occurred.
-	 */
-	if (tp->total_retrans > 1)
-		tp->snd_cwnd = 1;
-	else
-		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
-	tp->snd_cwnd_stamp = tcp_time_stamp;
-}
-
 static void tcp_update_reordering(struct sock *sk, const int metric,
 				  const int ts)
 {
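
Closing note: the tail of the removed tcp_init_metrics() implements two safeguards. With no valid RTT sample from the three-way handshake it backs the RTO off from the aggressive 1 s initial value to the 3 s TCP_TIMEOUT_FALLBACK, and per RFC 5681 it collapses the initial cwnd to 1 only when more than one SYN/SYN-ACK retransmission occurred. A compact userspace sketch of that fallback, where HZ, the function name, and the types are simplified assumptions:

/* Sketch of the handshake-fallback logic at the end of the removed
 * tcp_init_metrics().  Not kernel code; constants are stand-ins. */
#include <stdio.h>

#define HZ			1000U		/* assume 1000 jiffies per second */
#define TCP_TIMEOUT_FALLBACK	(3U * HZ)	/* 3 seconds */

static void init_from_handshake(unsigned int srtt, unsigned int total_retrans,
				unsigned int init_cwnd,
				unsigned int *rto, unsigned int *cwnd)
{
	if (srtt == 0)			/* no usable RTT sample from 3WHS */
		*rto = TCP_TIMEOUT_FALLBACK;
	/* cut cwnd to 1 only after repeated SYN/SYN-ACK retransmission */
	*cwnd = total_retrans > 1 ? 1 : init_cwnd;
}

int main(void)
{
	unsigned int rto = 1 * HZ, cwnd;	/* RFC 6298 1 s initial RTO */

	init_from_handshake(0, 2, 10, &rto, &cwnd);
	printf("rto=%u jiffies, cwnd=%u\n", rto, cwnd);	/* 3000, 1 */
	return 0;
}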