author     Stephen Hemminger <shemminger@osdl.org>    2005-11-10 20:07:24 -0500
committer  David S. Miller <davem@davemloft.net>      2005-11-10 20:07:24 -0500
commit     7faffa1c7fb9b8e8917e3225d4e2638270c0a48b
tree       3c9f998639bc2374a86b1f2251ae421e625c1e9d
parent     2d2abbab63f6726a147ae61ada39bf2c9ee0db9a
[TCP]: add tcp_slow_start helper
Move all the code that does linear TCP slow start into one
inline function, to ease a later patch adding ABC support.
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
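
The refactor replaces the open-coded slow-start branch in each congestion
control module's cong_avoid routine with a call to the new helper. A minimal
sketch of the pattern (the module-specific congestion avoidance work is
elided):

Before, duplicated in each module:

	if (tp->snd_cwnd <= tp->snd_ssthresh) {
		/* In "safe" area, increase. */
		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
	} else {
		/* module-specific congestion avoidance */
	}

After, using the helper added to include/net/tcp.h:

	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp);
	else {
		/* module-specific congestion avoidance */
	}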
 include/net/tcp.h        | 10
 net/ipv4/tcp_bic.c       | 10
 net/ipv4/tcp_cong.c      | 11
 net/ipv4/tcp_highspeed.c |  7
 net/ipv4/tcp_htcp.c      | 11
 net/ipv4/tcp_scalable.c  | 11
 net/ipv4/tcp_vegas.c     | 42
 7 files changed, 43 insertions(+), 59 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 15bdbc6bd571..54c399886275 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -765,6 +765,16 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 		    (tp->snd_cwnd >> 2)));
 }
 
+/*
+ * Linear increase during slow start
+ */
+static inline void tcp_slow_start(struct tcp_sock *tp)
+{
+	if (tp->snd_cwnd < tp->snd_cwnd_clamp)
+		tp->snd_cwnd++;
+}
+
+
 static inline void tcp_sync_left_out(struct tcp_sock *tp)
 {
 	if (tp->rx_opt.sack_ok &&
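
The helper only grows the window while it is below snd_cwnd_clamp; at the
clamp it is a no-op, so callers no longer need a separate min_t() against the
clamp after the increment. A standalone sketch of that behavior (the two-field
struct and the test harness are hypothetical userspace stand-ins, not the
kernel definitions):

	#include <stdio.h>

	/* Userspace mock of the two struct tcp_sock fields the helper touches. */
	struct tcp_sock {
		unsigned int snd_cwnd;		/* congestion window, in packets */
		unsigned int snd_cwnd_clamp;	/* upper bound for snd_cwnd */
	};

	/* Same body as the helper added to include/net/tcp.h above. */
	static inline void tcp_slow_start(struct tcp_sock *tp)
	{
		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
	}

	int main(void)
	{
		struct tcp_sock tp = { .snd_cwnd = 9, .snd_cwnd_clamp = 10 };

		tcp_slow_start(&tp);	/* grows: 9 -> 10 */
		tcp_slow_start(&tp);	/* at the clamp: stays 10 */
		printf("cwnd = %u\n", tp.snd_cwnd);	/* prints "cwnd = 10" */
		return 0;
	}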
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index 5af99b3ef5d7..1d0cd86621b1 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -220,14 +220,12 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack,
 	if (!tcp_is_cwnd_limited(sk, in_flight))
 		return;
 
-	if (tp->snd_cwnd <= tp->snd_ssthresh) {
-		/* In "safe" area, increase. */
-		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-			tp->snd_cwnd++;
-	} else {
+	if (tp->snd_cwnd <= tp->snd_ssthresh)
+		tcp_slow_start(tp);
+	else {
 		bictcp_update(ca, tp->snd_cwnd);
 
 		/* In dangerous area, increase slowly.
 		 * In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd
 		 */
 		if (tp->snd_cwnd_cnt >= ca->cnt) {
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 0705b496c6b3..6d3e883b48f6 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -189,12 +189,11 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight,
 	if (!tcp_is_cwnd_limited(sk, in_flight))
 		return;
 
-	if (tp->snd_cwnd <= tp->snd_ssthresh) {
-		/* In "safe" area, increase. */
-		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-			tp->snd_cwnd++;
-	} else {
-		/* In dangerous area, increase slowly.
+	/* In "safe" area, increase. */
+	if (tp->snd_cwnd <= tp->snd_ssthresh)
+		tcp_slow_start(tp);
+	else {
+		/* In dangerous area, increase slowly.
 		 * In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd
 		 */
 		if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index 5e56ad368dd2..82b3c189bd7d 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -119,10 +119,9 @@ static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt,
 	if (!tcp_is_cwnd_limited(sk, in_flight))
 		return;
 
-	if (tp->snd_cwnd <= tp->snd_ssthresh) {
-		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-			tp->snd_cwnd++;
-	} else {
+	if (tp->snd_cwnd <= tp->snd_ssthresh)
+		tcp_slow_start(tp);
+	else {
 		/* Update AIMD parameters */
 		if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) {
 			while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 404a326ba345..3284cfb993e6 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -210,11 +210,10 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
 	if (!tcp_is_cwnd_limited(sk, in_flight))
 		return;
 
-	if (tp->snd_cwnd <= tp->snd_ssthresh) {
-		/* In "safe" area, increase. */
-		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-			tp->snd_cwnd++;
-	} else {
+	if (tp->snd_cwnd <= tp->snd_ssthresh)
+		tcp_slow_start(tp);
+	else {
+
 		measure_rtt(sk);
 
 		/* keep track of number of round-trip times since last backoff event */
@@ -224,7 +223,7 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
 			htcp_alpha_update(ca);
 		}
 
-		/* In dangerous area, increase slowly. 
+		/* In dangerous area, increase slowly.
 		 * In theory this is tp->snd_cwnd += alpha / tp->snd_cwnd
 		 */
 		if ((tp->snd_cwnd_cnt++ * ca->alpha)>>7 >= tp->snd_cwnd) {
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index a2fd25617d24..26d7486ee501 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -24,17 +24,16 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
 	if (!tcp_is_cwnd_limited(sk, in_flight))
 		return;
 
-	if (tp->snd_cwnd <= tp->snd_ssthresh) {
-		tp->snd_cwnd++;
-	} else {
+	if (tp->snd_cwnd <= tp->snd_ssthresh)
+		tcp_slow_start(tp);
+	else {
 		tp->snd_cwnd_cnt++;
 		if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)){
-			tp->snd_cwnd++;
+			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
+				tp->snd_cwnd++;
 			tp->snd_cwnd_cnt = 0;
 		}
 	}
-	tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp);
-	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
 static u32 tcp_scalable_ssthresh(struct sock *sk)
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 93c5f92070f9..4376814d29fb 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -236,8 +236,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
 		/* We don't have enough RTT samples to do the Vegas
 		 * calculation, so we'll behave like Reno.
 		 */
-		if (tp->snd_cwnd > tp->snd_ssthresh)
-			tp->snd_cwnd++;
+		tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, cnt);
 	} else {
 		u32 rtt, target_cwnd, diff;
 
@@ -275,7 +274,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
 		 */
 		diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd;
 
-		if (tp->snd_cwnd < tp->snd_ssthresh) {
+		if (tp->snd_cwnd <= tp->snd_ssthresh) {
 			/* Slow start.  */
 			if (diff > gamma) {
 				/* Going too fast. Time to slow down
@@ -295,6 +294,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
 					       V_PARAM_SHIFT)+1);
 
 			}
+			tcp_slow_start(tp);
 		} else {
 			/* Congestion avoidance. */
 			u32 next_snd_cwnd;
@@ -327,37 +327,17 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
 			else if (next_snd_cwnd < tp->snd_cwnd)
 				tp->snd_cwnd--;
 		}
-	}
 
-	/* Wipe the slate clean for the next RTT. */
-	vegas->cntRTT = 0;
-	vegas->minRTT = 0x7fffffff;
+		if (tp->snd_cwnd < 2)
+			tp->snd_cwnd = 2;
+		else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
+			tp->snd_cwnd = tp->snd_cwnd_clamp;
+	}
 	}
 
-	/* The following code is executed for every ack we receive,
-	 * except for conditions checked in should_advance_cwnd()
-	 * before the call to tcp_cong_avoid(). Mainly this means that
-	 * we only execute this code if the ack actually acked some
-	 * data.
-	 */
-
-	/* If we are in slow start, increase our cwnd in response to this ACK.
-	 * (If we are not in slow start then we are in congestion avoidance,
-	 * and adjust our congestion window only once per RTT. See the code
-	 * above.)
-	 */
-	if (tp->snd_cwnd <= tp->snd_ssthresh)
-		tp->snd_cwnd++;
-
-	/* to keep cwnd from growing without bound */
-	tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp);
-
-	/* Make sure that we are never so timid as to reduce our cwnd below
-	 * 2 MSS.
-	 *
-	 * Going below 2 MSS would risk huge delayed ACKs from our receiver.
-	 */
-	tp->snd_cwnd = max(tp->snd_cwnd, 2U);
+	/* Wipe the slate clean for the next RTT. */
+	vegas->cntRTT = 0;
+	vegas->minRTT = 0x7fffffff;
 }
 
 /* Extract info for Tcp socket info provided via netlink. */
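
Note the behavioral side of the tcp_vegas.c change: when Vegas lacks enough
RTT samples it now delegates to tcp_reno_cong_avoid() instead of open-coding
a single window increment, and the per-ACK slow-start/clamp boilerplate at
the end of the function is dropped because tcp_slow_start() and the explicit
2..snd_cwnd_clamp bounds check now run inside the per-RTT branch.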