path: root/net/ipv4/tcp_vegas.c
author	Yuchung Cheng <ycheng@google.com>	2013-10-31 14:07:31 -0400
committer	David S. Miller <davem@davemloft.net>	2013-11-04 19:57:59 -0500
commit	9f9843a751d0a2057f9f3d313886e7e5e6ebaac9 (patch)
tree	a89df5cc0c5f5280b2cfffba7f6933e4db20736f /net/ipv4/tcp_vegas.c
parent	0d41cca490c274352211efac50e9598d39a9dc80 (diff)
tcp: properly handle stretch acks in slow start
Slow start currently increases cwnd by 1 when an ACK acknowledges some packets, regardless of the number of packets acknowledged. Consequently, slow start performance is highly dependent on the degree of the stretch ACKs caused by receiver or network ACK compression mechanisms (e.g., delayed ACK, GRO, etc.). But the slow start algorithm is meant to send twice the number of packets acknowledged, so it should process a stretch ACK of degree N as if it were N ACKs of degree 1, then exit when cwnd exceeds ssthresh. A follow-up patch will use the remainder of N (if greater than 1) to adjust cwnd in the congestion avoidance phase.

In addition, this patch retires the experimental limited slow start (LSS) feature. LSS has multiple drawbacks but questionable benefit. The fractional cwnd increase in LSS requires a loop in slow start even though it's rarely used. Configuring such an increase step via a global sysctl for different BDPs seems hard. Finally, and most importantly, the slow start overshoot concern is now better covered by hybrid slow start (hystart), which is enabled by default.

Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
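For reference, the stretch-ACK-aware slow start described above amounts to roughly the following (a minimal sketch of the reworked tcp_slow_start() helper; the helper itself lives in net/ipv4/tcp_cong.c, outside the file shown in this diffstat, so the details here are illustrative):

/* Grow cwnd by the number of packets actually acked, so a stretch ACK
 * of degree N behaves like N ACKs of degree 1. Growth is capped at
 * ssthresh + 1 so slow start still exits once cwnd exceeds ssthresh;
 * any unused remainder is returned for the follow-up patch to apply
 * in congestion avoidance.
 */
int tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
	u32 cwnd = tp->snd_cwnd + acked;

	if (cwnd > tp->snd_ssthresh)
		cwnd = tp->snd_ssthresh + 1;
	acked -= cwnd - tp->snd_cwnd;
	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);

	return acked;
}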
Diffstat (limited to 'net/ipv4/tcp_vegas.c')
-rw-r--r--	net/ipv4/tcp_vegas.c | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 80fa2bfd7ede..06cae62bf208 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -163,13 +163,14 @@ static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
 	return min(tp->snd_ssthresh, tp->snd_cwnd-1);
 }
 
-static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked,
+				 u32 in_flight)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct vegas *vegas = inet_csk_ca(sk);
 
 	if (!vegas->doing_vegas_now) {
-		tcp_reno_cong_avoid(sk, ack, in_flight);
+		tcp_reno_cong_avoid(sk, ack, acked, in_flight);
 		return;
 	}
 
@@ -194,7 +195,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 		/* We don't have enough RTT samples to do the Vegas
 		 * calculation, so we'll behave like Reno.
 		 */
-		tcp_reno_cong_avoid(sk, ack, in_flight);
+		tcp_reno_cong_avoid(sk, ack, acked, in_flight);
 	} else {
 		u32 rtt, diff;
 		u64 target_cwnd;
@@ -243,7 +244,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 
 	} else if (tp->snd_cwnd <= tp->snd_ssthresh) {
 		/* Slow start. */
-		tcp_slow_start(tp);
+		tcp_slow_start(tp, acked);
 	} else {
 		/* Congestion avoidance. */
 
@@ -283,7 +284,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 	}
 	/* Use normal slow start */
 	else if (tp->snd_cwnd <= tp->snd_ssthresh)
-		tcp_slow_start(tp);
+		tcp_slow_start(tp, acked);
 
 }
 
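As the hunks above show, tcp_vegas_cong_avoid() merely threads the new acked argument through to tcp_reno_cong_avoid() and tcp_slow_start(). A caller of the new signature looks roughly like this (a simplified sketch of Reno's cong_avoid at this point in the series, also in net/ipv4/tcp_cong.c and not part of this diffstat):

void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp, acked);	/* stretch-ACK-aware slow start */
	else
		tcp_cong_avoid_ai(tp, tp->snd_cwnd);	/* additive increase */
}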