author     Yuchung Cheng <ycheng@google.com>      2012-09-02 13:38:03 -0400
committer  David S. Miller <davem@davemloft.net>  2012-09-03 14:34:02 -0400
commit     fb4d3d1df31907eadd2e2a745e840921888b346a (patch)
tree       d88fd70bfdd91ba8c99763d5cc0871c2ab3dae63
parent     09484d1f6edb3b798c5926a6755f1badb77ece9f (diff)
tcp: move tcp_update_cwnd_in_recovery
To prepare for replacing rate halving with the PRR algorithm in CWR state.
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--   net/ipv4/tcp_input.c   64
1 file changed, 32 insertions(+), 32 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3ab0c7573a0b..38589e464e63 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2700,6 +2700,38 @@ static bool tcp_try_undo_loss(struct sock *sk)
 	return false;
 }
 
+/* This function implements the PRR algorithm, specifically the PRR-SSRB
+ * (proportional rate reduction with slow start reduction bound) as described in
+ * http://www.ietf.org/id/draft-mathis-tcpm-proportional-rate-reduction-01.txt.
+ * It computes the number of packets to send (sndcnt) based on packets newly
+ * delivered:
+ *   1) If the packets in flight is larger than ssthresh, PRR spreads the
+ *      cwnd reductions across a full RTT.
+ *   2) If packets in flight is lower than ssthresh (such as due to excess
+ *      losses and/or application stalls), do not perform any further cwnd
+ *      reductions, but instead slow start up to ssthresh.
+ */
+static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
+					int fast_rexmit, int flag)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	int sndcnt = 0;
+	int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
+
+	if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
+		u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
+			       tp->prior_cwnd - 1;
+		sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
+	} else {
+		sndcnt = min_t(int, delta,
+			       max_t(int, tp->prr_delivered - tp->prr_out,
+				     newly_acked_sacked) + 1);
+	}
+
+	sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
+	tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
+}
+
 static inline void tcp_complete_cwr(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -2854,38 +2886,6 @@ void tcp_simple_retransmit(struct sock *sk)
 }
 EXPORT_SYMBOL(tcp_simple_retransmit);
 
-/* This function implements the PRR algorithm, specifically the PRR-SSRB
- * (proportional rate reduction with slow start reduction bound) as described in
- * http://www.ietf.org/id/draft-mathis-tcpm-proportional-rate-reduction-01.txt.
- * It computes the number of packets to send (sndcnt) based on packets newly
- * delivered:
- *   1) If the packets in flight is larger than ssthresh, PRR spreads the
- *      cwnd reductions across a full RTT.
- *   2) If packets in flight is lower than ssthresh (such as due to excess
- *      losses and/or application stalls), do not perform any further cwnd
- *      reductions, but instead slow start up to ssthresh.
- */
-static void tcp_update_cwnd_in_recovery(struct sock *sk, int newly_acked_sacked,
-					int fast_rexmit, int flag)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-	int sndcnt = 0;
-	int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp);
-
-	if (tcp_packets_in_flight(tp) > tp->snd_ssthresh) {
-		u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered +
-			       tp->prior_cwnd - 1;
-		sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out;
-	} else {
-		sndcnt = min_t(int, delta,
-			       max_t(int, tp->prr_delivered - tp->prr_out,
-				     newly_acked_sacked) + 1);
-	}
-
-	sndcnt = max(sndcnt, (fast_rexmit ? 1 : 0));
-	tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
-}
-
 static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
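
To see the PRR-SSRB arithmetic of the moved function in isolation, here is a minimal user-space C sketch of the computation tcp_update_cwnd_in_recovery() performs, with a worked example. The prr_state struct, the prr_update_cwnd() helper, and all numbers are illustrative assumptions, not kernel API: the kernel reads these fields from struct tcp_sock and uses the div_u64(), min_t() and max_t() helpers instead.

#include <stdio.h>
#include <stdint.h>

/* Illustrative stand-in for the tcp_sock fields PRR reads and writes.
 * Field names mirror the kernel's, but this is not kernel code. */
struct prr_state {
	uint32_t snd_ssthresh;   /* slow-start threshold set at loss */
	uint32_t prior_cwnd;     /* cwnd just before entering recovery */
	uint32_t prr_delivered;  /* packets newly delivered in recovery */
	uint32_t prr_out;        /* packets sent during recovery */
	uint32_t snd_cwnd;       /* congestion window (output) */
};

/* Sketch of the PRR-SSRB decision from the patch: how many packets
 * (sndcnt) may be sent in response to the current ACK. */
static void prr_update_cwnd(struct prr_state *tp, uint32_t in_flight,
			    int newly_acked_sacked, int fast_rexmit)
{
	int sndcnt = 0;
	int delta = (int)tp->snd_ssthresh - (int)in_flight;

	if (in_flight > tp->snd_ssthresh) {
		/* Proportional reduction: pace sending so cwnd converges
		 * to ssthresh over roughly one RTT. The "+ prior_cwnd - 1"
		 * makes the division round up, as in the kernel. */
		uint64_t dividend = (uint64_t)tp->snd_ssthresh *
				    tp->prr_delivered + tp->prior_cwnd - 1;
		sndcnt = (int)(dividend / tp->prior_cwnd) - (int)tp->prr_out;
	} else {
		/* Slow-start reduction bound: grow back toward ssthresh,
		 * but never by more than delta packets. */
		int bound = (int)tp->prr_delivered - (int)tp->prr_out;
		if (bound < newly_acked_sacked)
			bound = newly_acked_sacked;
		sndcnt = (delta < bound + 1) ? delta : bound + 1;
	}

	/* A fast retransmit is always allowed to send one segment. */
	if (sndcnt < (fast_rexmit ? 1 : 0))
		sndcnt = fast_rexmit ? 1 : 0;
	tp->snd_cwnd = in_flight + sndcnt;
}

int main(void)
{
	/* Worked example (made-up numbers): prior_cwnd = 20, so
	 * ssthresh = 10 after a 50% reduction; 4 packets delivered in
	 * recovery so far, 2 sent, 15 still in flight. */
	struct prr_state tp = {
		.snd_ssthresh = 10, .prior_cwnd = 20,
		.prr_delivered = 4, .prr_out = 2,
	};

	prr_update_cwnd(&tp, 15, 2, 1);
	/* dividend = 10*4 + 19 = 59; 59/20 = 2; sndcnt = 2 - 2 = 0,
	 * raised to 1 by fast_rexmit, so snd_cwnd = 15 + 1 = 16. */
	printf("snd_cwnd = %u\n", tp.snd_cwnd);
	return 0;
}

With 15 packets in flight against an ssthresh of 10, the proportional branch grants no new send credit (2 packets earned by deliveries, 2 already sent), so only the fast-retransmitted segment goes out and cwnd settles at in_flight + 1 = 16, spreading the reduction over the RTT instead of releasing a burst.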