author    Christoph Paasch <christoph.paasch@uclouvain.be>    2014-07-14 10:58:32 -0400
committer David S. Miller <davem@davemloft.net>    2014-07-15 19:19:36 -0400
commit    5ee2c941b5969eb1b5592f9731b3ee76a784641f (patch)
tree      c2d5a3df3af7a9043f368cb2549f1dede4da7af4
parent    5517750f058edd111bcabe5e116056cc63b1f39c (diff)
tcp: Remove unnecessary arg from tcp_enter_cwr and tcp_init_cwnd_reduction
Since Yuchung's 9b44190dc11 (tcp: refactor F-RTO), tcp_enter_cwr is always
called with set_ssthresh = 1. Thus, we can remove this argument from
tcp_enter_cwr. Further, as we remove this one, tcp_init_cwnd_reduction is
then always called with set_ssthresh = true, and so we can get rid of this
argument as well.

Cc: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Christoph Paasch <christoph.paasch@uclouvain.be>
Acked-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  include/net/tcp.h      |  2
-rw-r--r--  net/ipv4/tcp_input.c   | 15
-rw-r--r--  net/ipv4/tcp_output.c  |  2
3 files changed, 9 insertions(+), 10 deletions(-)
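For illustration only (not part of the patch): a minimal stand-alone C sketch of the same kind of refactor, dropping a parameter that every caller passes as the same constant so the conditional becomes unconditional. The names below are hypothetical and are not kernel code.

/* illustrative_refactor.c */
#include <stdio.h>

struct state {
	int ssthresh;
	int cwnd;
};

/* Before: callers always passed set_ssthresh = 1, so the branch never varied. */
static void init_reduction_old(struct state *s, int set_ssthresh)
{
	if (set_ssthresh)
		s->ssthresh = s->cwnd / 2;
}

/* After: the argument is removed and the assignment is unconditional. */
static void init_reduction_new(struct state *s)
{
	s->ssthresh = s->cwnd / 2;
}

int main(void)
{
	struct state a = { 0, 10 };
	struct state b = { 0, 10 };

	init_reduction_old(&a, 1);	/* every caller passed the constant 1 */
	init_reduction_new(&b);		/* same behavior, simpler signature */

	printf("old: %d, new: %d\n", a.ssthresh, b.ssthresh);
	return 0;
}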
diff --git a/include/net/tcp.h b/include/net/tcp.h
index c9a75dbba0c7..0aeb2eb749dc 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -928,7 +928,7 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 /* Use define here intentionally to get WARN_ON location shown at the caller */
 #define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)
 
-void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
+void tcp_enter_cwr(struct sock *sk);
 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
 
 /* The maximum number of MSS of available cwnd for which TSO defers
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index bb684967f7a7..2e16afba182c 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2475,7 +2475,7 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
  * losses and/or application stalls), do not perform any further cwnd
  * reductions, but instead slow start up to ssthresh.
  */
-static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh)
+static void tcp_init_cwnd_reduction(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2485,8 +2485,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh)
 	tp->prior_cwnd = tp->snd_cwnd;
 	tp->prr_delivered = 0;
 	tp->prr_out = 0;
-	if (set_ssthresh)
-		tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
+	tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
 	TCP_ECN_queue_cwr(tp);
 }
 
@@ -2528,14 +2527,14 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
 }
 
 /* Enter CWR state. Disable cwnd undo since congestion is proven with ECN */
-void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
+void tcp_enter_cwr(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	tp->prior_ssthresh = 0;
 	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
 		tp->undo_marker = 0;
-		tcp_init_cwnd_reduction(sk, set_ssthresh);
+		tcp_init_cwnd_reduction(sk);
 		tcp_set_ca_state(sk, TCP_CA_CWR);
 	}
 }
@@ -2564,7 +2563,7 @@ static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked)
 	tp->retrans_stamp = 0;
 
 	if (flag & FLAG_ECE)
-		tcp_enter_cwr(sk, 1);
+		tcp_enter_cwr(sk);
 
 	if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
 		tcp_try_keep_open(sk);
@@ -2670,7 +2669,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 	if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
 		if (!ece_ack)
 			tp->prior_ssthresh = tcp_current_ssthresh(sk);
-		tcp_init_cwnd_reduction(sk, true);
+		tcp_init_cwnd_reduction(sk);
 	}
 	tcp_set_ca_state(sk, TCP_CA_Recovery);
 }
@@ -3346,7 +3345,7 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
 		tp->tlp_high_seq = 0;
 		/* Don't reduce cwnd if DSACK arrives for TLP retrans. */
 		if (!(flag & FLAG_DSACKING_ACK)) {
-			tcp_init_cwnd_reduction(sk, true);
+			tcp_init_cwnd_reduction(sk);
 			tcp_set_ca_state(sk, TCP_CA_CWR);
 			tcp_end_cwnd_reduction(sk);
 			tcp_try_keep_open(sk);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index bcee13c4627c..306dd5dead7d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -979,7 +979,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	if (likely(err <= 0))
 		return err;
 
-	tcp_enter_cwr(sk, 1);
+	tcp_enter_cwr(sk);
 
 	return net_xmit_eval(err);
 }