author		Yuchung Cheng <ycheng@google.com>	2012-09-02 13:38:02 -0400
committer	David S. Miller <davem@davemloft.net>	2012-09-03 14:34:02 -0400
commit		09484d1f6edb3b798c5926a6755f1badb77ece9f
tree		217059c32218b27c177293a75cbedfeb78e4a6fa /net/ipv4/tcp_input.c
parent		5aee07281b2aa9a1f740a8ccac4781e787fc1192
tcp: move tcp_enter_cwr()
To prepare for replacing rate halving with the PRR algorithm in CWR state.
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
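
[Editor's note: for context, PRR (Proportional Rate Reduction, later published as RFC 6937) paces the congestion window down to ssthresh in proportion to newly delivered data instead of halving the sending rate. Below is a minimal, standalone C sketch of the PRR send-count rule; variable names follow the PRR draft, not any field this patch adds.]

#include <stdint.h>

/* Sketch of the PRR send-count rule (later RFC 6937).
 * Names follow the draft, not fields in struct tcp_sock. */
static uint32_t prr_sndcnt(uint32_t prr_delivered, uint32_t prr_out,
			   uint32_t ssthresh, uint32_t recover_fs,
			   uint32_t pipe)
{
	int64_t sndcnt;

	if (pipe > ssthresh) {
		/* Still above ssthresh: reduce proportionally,
		 * sndcnt = CEIL(prr_delivered * ssthresh / RecoverFS)
		 *          - prr_out. */
		sndcnt = (int64_t)(((uint64_t)prr_delivered * ssthresh +
				    recover_fs - 1) / recover_fs) - prr_out;
	} else {
		/* Below ssthresh: catch back up toward it, bounded by
		 * data delivered during recovery (conservative bound). */
		int64_t limit = (int64_t)prr_delivered - prr_out;

		sndcnt = (int64_t)ssthresh - pipe;
		if (sndcnt > limit)
			sndcnt = limit;
	}
	return sndcnt > 0 ? (uint32_t)sndcnt : 0;
}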
Diffstat (limited to 'net/ipv4/tcp_input.c')
 net/ipv4/tcp_input.c | 46 +++++++++++++++++++++++-----------------------
 1 file changed, 23 insertions(+), 23 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8c304a40079..3ab0c7573a0 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -743,29 +743,6 @@ __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst)
 	return min_t(__u32, cwnd, tp->snd_cwnd_clamp);
 }
 
-/* Set slow start threshold and cwnd not falling to slow start */
-void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-	const struct inet_connection_sock *icsk = inet_csk(sk);
-
-	tp->prior_ssthresh = 0;
-	tp->bytes_acked = 0;
-	if (icsk->icsk_ca_state < TCP_CA_CWR) {
-		tp->undo_marker = 0;
-		if (set_ssthresh)
-			tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
-		tp->snd_cwnd = min(tp->snd_cwnd,
-				   tcp_packets_in_flight(tp) + 1U);
-		tp->snd_cwnd_cnt = 0;
-		tp->high_seq = tp->snd_nxt;
-		tp->snd_cwnd_stamp = tcp_time_stamp;
-		TCP_ECN_queue_cwr(tp);
-
-		tcp_set_ca_state(sk, TCP_CA_CWR);
-	}
-}
-
 /*
  * Packet counting of FACK is based on in-order assumptions, therefore TCP
  * disables it when reordering is detected
@@ -2741,6 +2718,29 @@ static inline void tcp_complete_cwr(struct sock *sk)
 	tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
 }
 
+/* Set slow start threshold and cwnd not falling to slow start */
+void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
+
+	tp->prior_ssthresh = 0;
+	tp->bytes_acked = 0;
+	if (icsk->icsk_ca_state < TCP_CA_CWR) {
+		tp->undo_marker = 0;
+		if (set_ssthresh)
+			tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
+		tp->snd_cwnd = min(tp->snd_cwnd,
+				   tcp_packets_in_flight(tp) + 1U);
+		tp->snd_cwnd_cnt = 0;
+		tp->high_seq = tp->snd_nxt;
+		tp->snd_cwnd_stamp = tcp_time_stamp;
+		TCP_ECN_queue_cwr(tp);
+
+		tcp_set_ca_state(sk, TCP_CA_CWR);
+	}
+}
+
 static void tcp_try_keep_open(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);