author		Yuchung Cheng <ycheng@google.com>	2015-07-09 16:16:30 -0400
committer	David S. Miller <davem@davemloft.net>	2015-07-09 17:22:52 -0400
commit		76174004a0f19785a328f40388e87e982bbf69b9 (patch)
tree		113a691ab55819f20b367cdeb8ba86440aa62177
parent		071d5080e33d6f24139e4213c2d9f97a2c21b602 (diff)
tcp: do not slow start when cwnd equals ssthresh
In the original design slow start is only used to raise cwnd when cwnd is strictly below ssthresh. It makes little sense to slow start when cwnd == ssthresh: especially when hystart has set ssthresh in the initial ramp, or after recovery when cwnd resets to ssthresh. Avoiding slow start in this case also helps reduce buffer bloat slightly.

Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Nandita Dukkipati <nanditad@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
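For illustration only, a minimal userspace sketch (not kernel code) of the boundary case this patch targets; the struct name tcp_sock_sketch and both helper names are hypothetical stand-ins for the kernel's tcp_sock and tcp_in_slow_start():

/* Userspace sketch: how the slow-start predicate changes at the boundary
 * cwnd == ssthresh (e.g. after hystart sets ssthresh, or after recovery
 * resets cwnd to ssthresh). All names here are illustrative only. */
#include <stdbool.h>
#include <stdio.h>

struct tcp_sock_sketch {
	unsigned int snd_cwnd;
	unsigned int snd_ssthresh;
};

/* Before the patch: cwnd == ssthresh still counts as slow start. */
static bool in_slow_start_old(const struct tcp_sock_sketch *tp)
{
	return tp->snd_cwnd <= tp->snd_ssthresh;
}

/* After the patch: slow start only while cwnd is strictly below ssthresh. */
static bool in_slow_start_new(const struct tcp_sock_sketch *tp)
{
	return tp->snd_cwnd < tp->snd_ssthresh;
}

int main(void)
{
	struct tcp_sock_sketch tp = { .snd_cwnd = 10, .snd_ssthresh = 10 };

	printf("cwnd == ssthresh: old=%d new=%d\n",
	       in_slow_start_old(&tp), in_slow_start_new(&tp));
	/* prints: cwnd == ssthresh: old=1 new=0 */
	return 0;
}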
-rw-r--r--	include/net/tcp.h	2
-rw-r--r--	net/ipv4/tcp_cdg.c	2
-rw-r--r--	net/ipv4/tcp_cong.c	4
-rw-r--r--	net/ipv4/tcp_hybla.c	2
4 files changed, 4 insertions(+), 6 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index dba22fc1b065..364426a2be5a 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -991,7 +991,7 @@ static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
 
 static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
 {
-	return tp->snd_cwnd <= tp->snd_ssthresh;
+	return tp->snd_cwnd < tp->snd_ssthresh;
 }
 
 static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c
index 8c6fd3d5e40f..167b6a3e1b98 100644
--- a/net/ipv4/tcp_cdg.c
+++ b/net/ipv4/tcp_cdg.c
@@ -264,7 +264,7 @@ static void tcp_cdg_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 	u32 prior_snd_cwnd;
 	u32 incr;
 
-	if (tp->snd_cwnd < tp->snd_ssthresh && hystart_detect)
+	if (tcp_in_slow_start(tp) && hystart_detect)
 		tcp_cdg_hystart_update(sk);
 
 	if (after(ack, ca->rtt_seq) && ca->rtt.v64) {
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 654729a8cb23..a2ed23c595cf 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -365,10 +365,8 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
  */
 u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
 {
-	u32 cwnd = tp->snd_cwnd + acked;
+	u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);
 
-	if (cwnd > tp->snd_ssthresh)
-		cwnd = tp->snd_ssthresh + 1;
 	acked -= cwnd - tp->snd_cwnd;
 	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
 
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index f963b274f2b0..083831e359df 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -112,7 +112,7 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 
 	rho_fractions = ca->rho_3ls - (ca->rho << 3);
 
-	if (tp->snd_cwnd < tp->snd_ssthresh) {
+	if (tcp_in_slow_start(tp)) {
 		/*
 		 * slow start
 		 * INC = 2^RHO - 1
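As a rough usage note on the net/ipv4/tcp_cong.c hunk above, here is a hedged userspace re-implementation of both tcp_slow_start() variants; struct tcp_sock_sketch and the local min() macro are stand-ins for the kernel's definitions, and snd_cwnd_clamp is assumed not to be the limiting factor. With snd_cwnd = 8, ssthresh = 10 and 5 newly acked segments, the old code ends slow start at cwnd = 11 and returns 2 leftover acked segments, while the new code stops exactly at ssthresh = 10 and returns 3 for congestion avoidance:

/* Userspace sketch of the before/after tcp_slow_start() arithmetic.
 * Names and the min() macro are local stand-ins, not kernel code;
 * snd_cwnd_clamp is set high so it does not interfere. */
#include <stdio.h>

#define min(a, b) ((a) < (b) ? (a) : (b))

struct tcp_sock_sketch {
	unsigned int snd_cwnd;
	unsigned int snd_ssthresh;
	unsigned int snd_cwnd_clamp;
};

/* Before the patch: cwnd may overshoot ssthresh by one segment. */
static unsigned int slow_start_old(struct tcp_sock_sketch *tp, unsigned int acked)
{
	unsigned int cwnd = tp->snd_cwnd + acked;

	if (cwnd > tp->snd_ssthresh)
		cwnd = tp->snd_ssthresh + 1;
	acked -= cwnd - tp->snd_cwnd;
	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
	return acked;	/* leftover acked segments for congestion avoidance */
}

/* After the patch: cwnd is capped exactly at ssthresh. */
static unsigned int slow_start_new(struct tcp_sock_sketch *tp, unsigned int acked)
{
	unsigned int cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);

	acked -= cwnd - tp->snd_cwnd;
	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
	return acked;
}

int main(void)
{
	struct tcp_sock_sketch a = { 8, 10, 1000 };
	struct tcp_sock_sketch b = { 8, 10, 1000 };
	unsigned int left_a = slow_start_old(&a, 5);
	unsigned int left_b = slow_start_new(&b, 5);

	printf("old: cwnd=%u leftover=%u\n", a.snd_cwnd, left_a);	/* 11, 2 */
	printf("new: cwnd=%u leftover=%u\n", b.snd_cwnd, left_b);	/* 10, 3 */
	return 0;
}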