Diffstat (limited to 'net/ipv4/tcp_vegas.c')
-rw-r--r--	net/ipv4/tcp_vegas.c | 42
1 file changed, 11 insertions(+), 31 deletions(-)
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 93c5f92070f9..4376814d29fb 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -236,8 +236,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
 			/* We don't have enough RTT samples to do the Vegas
 			 * calculation, so we'll behave like Reno.
 			 */
-			if (tp->snd_cwnd > tp->snd_ssthresh)
-				tp->snd_cwnd++;
+			tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, cnt);
 		} else {
 			u32 rtt, target_cwnd, diff;
 
@@ -275,7 +274,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
 			 */
 			diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd;
 
-			if (tp->snd_cwnd < tp->snd_ssthresh) {
+			if (tp->snd_cwnd <= tp->snd_ssthresh) {
 				/* Slow start. */
 				if (diff > gamma) {
 					/* Going too fast. Time to slow down
@@ -295,6 +294,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
 						    V_PARAM_SHIFT)+1);
 
 				}
+				tcp_slow_start(tp);
 			} else {
 				/* Congestion avoidance. */
 				u32 next_snd_cwnd;
@@ -327,37 +327,17 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
 				else if (next_snd_cwnd < tp->snd_cwnd)
 					tp->snd_cwnd--;
 			}
-		}
 
-		/* Wipe the slate clean for the next RTT. */
-		vegas->cntRTT = 0;
-		vegas->minRTT = 0x7fffffff;
+			if (tp->snd_cwnd < 2)
+				tp->snd_cwnd = 2;
+			else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
+				tp->snd_cwnd = tp->snd_cwnd_clamp;
+		}
 	}
 
-	/* The following code is executed for every ack we receive,
-	 * except for conditions checked in should_advance_cwnd()
-	 * before the call to tcp_cong_avoid(). Mainly this means that
-	 * we only execute this code if the ack actually acked some
-	 * data.
-	 */
-
-	/* If we are in slow start, increase our cwnd in response to this ACK.
-	 * (If we are not in slow start then we are in congestion avoidance,
-	 * and adjust our congestion window only once per RTT. See the code
-	 * above.)
-	 */
-	if (tp->snd_cwnd <= tp->snd_ssthresh)
-		tp->snd_cwnd++;
-
-	/* to keep cwnd from growing without bound */
-	tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp);
-
-	/* Make sure that we are never so timid as to reduce our cwnd below
-	 * 2 MSS.
-	 *
-	 * Going below 2 MSS would risk huge delayed ACKs from our receiver.
-	 */
-	tp->snd_cwnd = max(tp->snd_cwnd, 2U);
+	/* Wipe the slate clean for the next RTT. */
+	vegas->cntRTT = 0;
+	vegas->minRTT = 0x7fffffff;
 }
 
 /* Extract info for Tcp socket info provided via netlink. */
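For orientation, below is a minimal, self-contained C sketch of the per-ACK behaviour that the deleted tail of tcp_vegas_cong_avoid() implemented inline, and that this patch hands off to the shared helpers (tcp_reno_cong_avoid() for the too-few-samples fallback, tcp_slow_start() in the slow-start branch) plus the in-branch clamp. The struct conn and on_ack() names are illustrative stand-ins only, not the kernel's tcp_sock or its helpers.

#include <stdio.h>

/* Illustrative connection state; not the kernel's struct tcp_sock. */
struct conn {
	unsigned int snd_cwnd;       /* congestion window, in segments */
	unsigned int snd_ssthresh;   /* slow-start threshold           */
	unsigned int snd_cwnd_clamp; /* hard upper bound on snd_cwnd   */
};

/* Mirror of the removed per-ACK tail: open the window by one segment
 * while in slow start, then keep cwnd inside [2, snd_cwnd_clamp]
 * (the old min_t()/max() pair; the patch moves this clamping into the
 * Vegas branch as the explicit if/else seen in the last hunk above).
 */
static void on_ack(struct conn *c)
{
	if (c->snd_cwnd <= c->snd_ssthresh)
		c->snd_cwnd++;                   /* slow-start growth       */

	if (c->snd_cwnd > c->snd_cwnd_clamp)
		c->snd_cwnd = c->snd_cwnd_clamp; /* don't grow unbounded    */
	if (c->snd_cwnd < 2)
		c->snd_cwnd = 2;                 /* avoid huge delayed ACKs */
}

int main(void)
{
	struct conn c = { .snd_cwnd = 2, .snd_ssthresh = 8, .snd_cwnd_clamp = 16 };

	for (int i = 0; i < 20; i++)
		on_ack(&c);

	/* Grows by one per ACK while cwnd <= ssthresh, never past the clamp. */
	printf("cwnd=%u\n", c.snd_cwnd);
	return 0;
}

After the patch this duplicated logic disappears from the Vegas module: the Reno fallback and the slow-start increase come from the common congestion-control code, and the Vegas-specific branch bounds snd_cwnd itself, which is where the 11 inserted lines replace the 31 deleted ones.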
