author	Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>	2009-02-27 23:44:37 -0500
committer	David S. Miller <davem@davemloft.net>	2009-03-02 06:00:15 -0500
commit	758ce5c8d11d6fc57fe5f1dbc237aa8ff6386eac (patch)
tree	45b3dff5ca179712d50cdeae45846db8ffd51caa /net
parent	571a5dd8d01f2a7e279c502fa220a69262d73694 (diff)
tcp: add helper for AI algorithm
It seems that the implementation in yeah was inconsistent with what the
others did, as it would increase cwnd one ack earlier than the others do.

Size benefits:

  bictcp_cong_avoid (bic)   |  -36
  tcp_cong_avoid_ai         |  +52
  bictcp_cong_avoid (cubic) |  -34
  tcp_scalable_cong_avoid   |  -36
  tcp_veno_cong_avoid       |  -12
  tcp_yeah_cong_avoid       |  -38
  --------------------------+------
                            = -104 bytes total

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
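The "one ack earlier" point can be seen by comparing the two counter updates
directly. The following standalone userspace C sketch is not part of the patch;
toy_tp, ai_new() and ai_old_yeah() are made-up stand-ins that reproduce only
the counting logic of tcp_cong_avoid_ai() and of the old YeAH "Reno" branch,
with a fixed w instead of the live snd_cwnd:

/* Toy comparison of the two additive-increase counters (illustration only). */
#include <stdio.h>

struct toy_tp {
	unsigned int snd_cwnd;
	unsigned int snd_cwnd_cnt;
	unsigned int snd_cwnd_clamp;
};

/* Logic of the new helper: count w acks first, then grow cwnd by one
 * (roughly cwnd += 1/w per ack), respecting the clamp.
 */
static void ai_new(struct toy_tp *tp, unsigned int w)
{
	if (tp->snd_cwnd_cnt >= w) {
		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
			tp->snd_cwnd++;
		tp->snd_cwnd_cnt = 0;
	} else {
		tp->snd_cwnd_cnt++;
	}
}

/* Old YeAH "Reno" branch: the counter is bumped before the comparison,
 * so cwnd grows on the w-th ack rather than on the (w+1)-th.
 */
static void ai_old_yeah(struct toy_tp *tp, unsigned int w)
{
	if (tp->snd_cwnd_cnt < w)
		tp->snd_cwnd_cnt++;

	if (tp->snd_cwnd_cnt >= w) {
		tp->snd_cwnd++;
		tp->snd_cwnd_cnt = 0;
	}
}

int main(void)
{
	struct toy_tp a = { .snd_cwnd = 10, .snd_cwnd_cnt = 0, .snd_cwnd_clamp = 100 };
	struct toy_tp b = a;
	unsigned int ack;

	for (ack = 1; ack <= 11; ack++) {
		ai_new(&a, 10);
		ai_old_yeah(&b, 10);
		printf("ack %2u: new cwnd=%u, old yeah cwnd=%u\n",
		       ack, a.snd_cwnd, b.snd_cwnd);
	}
	/* Old yeah reaches cwnd=11 on ack 10; the helper reaches it on ack 11. */
	return 0;
}

With w = 10 the old YeAH code bumps cwnd on the 10th ack while the shared
helper bumps it on the 11th; converting yeah to tcp_cong_avoid_ai() removes
that inconsistency.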
Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/tcp_bic.c	11
-rw-r--r--	net/ipv4/tcp_cong.c	21
-rw-r--r--	net/ipv4/tcp_cubic.c	11
-rw-r--r--	net/ipv4/tcp_scalable.c	10
-rw-r--r--	net/ipv4/tcp_veno.c	7
-rw-r--r--	net/ipv4/tcp_yeah.c	9
6 files changed, 20 insertions(+), 49 deletions(-)
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index 7eb7636db0d..3b53fd1af23 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -149,16 +149,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 		tcp_slow_start(tp);
 	else {
 		bictcp_update(ca, tp->snd_cwnd);
-
-		/* In dangerous area, increase slowly.
-		 * In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd
-		 */
-		if (tp->snd_cwnd_cnt >= ca->cnt) {
-			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-				tp->snd_cwnd++;
-			tp->snd_cwnd_cnt = 0;
-		} else
-			tp->snd_cwnd_cnt++;
+		tcp_cong_avoid_ai(tp, ca->cnt);
 	}
 
 }
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 4ec5b4e97c4..e92beb9e55e 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -336,6 +336,19 @@ void tcp_slow_start(struct tcp_sock *tp)
 }
 EXPORT_SYMBOL_GPL(tcp_slow_start);
 
+/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd (or alternative w) */
+void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w)
+{
+	if (tp->snd_cwnd_cnt >= w) {
+		if (tp->snd_cwnd < tp->snd_cwnd_clamp)
+			tp->snd_cwnd++;
+		tp->snd_cwnd_cnt = 0;
+	} else {
+		tp->snd_cwnd_cnt++;
+	}
+}
+EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
+
 /*
  * TCP Reno congestion control
  * This is special case used for fallback as well.
@@ -365,13 +378,7 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 			tp->snd_cwnd++;
 		}
 	} else {
-		/* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd */
-		if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
-			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-				tp->snd_cwnd++;
-			tp->snd_cwnd_cnt = 0;
-		} else
-			tp->snd_cwnd_cnt++;
+		tcp_cong_avoid_ai(tp, tp->snd_cwnd);
 	}
 }
 EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index ee467ec40c4..71d5f2f29fa 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -294,16 +294,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 		tcp_slow_start(tp);
 	} else {
 		bictcp_update(ca, tp->snd_cwnd);
-
-		/* In dangerous area, increase slowly.
-		 * In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd
-		 */
-		if (tp->snd_cwnd_cnt >= ca->cnt) {
-			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-				tp->snd_cwnd++;
-			tp->snd_cwnd_cnt = 0;
-		} else
-			tp->snd_cwnd_cnt++;
+		tcp_cong_avoid_ai(tp, ca->cnt);
 	}
 
 }
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 4660b088a8c..a76513779e2 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -24,14 +24,8 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
 		tcp_slow_start(tp);
-	else {
-		tp->snd_cwnd_cnt++;
-		if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)){
-			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-				tp->snd_cwnd++;
-			tp->snd_cwnd_cnt = 0;
-		}
-	}
+	else
+		tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT));
 }
 
 static u32 tcp_scalable_ssthresh(struct sock *sk)
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index d08b2e855c2..e9bbff74648 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -159,12 +159,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 			/* In the "non-congestive state", increase cwnd
 			 *  every rtt.
 			 */
-			if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
-				if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-					tp->snd_cwnd++;
-				tp->snd_cwnd_cnt = 0;
-			} else
-				tp->snd_cwnd_cnt++;
+			tcp_cong_avoid_ai(tp, tp->snd_cwnd);
 		} else {
 			/* In the "congestive state", increase cwnd
 			 *  every other rtt.
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 9ec843a9bbb..66b6821b984 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -94,14 +94,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 
 	} else {
 		/* Reno */
-
-		if (tp->snd_cwnd_cnt < tp->snd_cwnd)
-			tp->snd_cwnd_cnt++;
-
-		if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
-			tp->snd_cwnd++;
-			tp->snd_cwnd_cnt = 0;
-		}
+		tcp_cong_avoid_ai(tp, tp->snd_cwnd);
 	}
 
 	/* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.