author     Stephen Hemminger <shemminger@osdl.org>    2006-06-05 20:30:08 -0400
committer  David S. Miller <davem@sunset.davemloft.net>    2006-06-18 00:29:29 -0400
commit     72dc5b9225c53310c010b68a70ea97c8c8e24bdf (patch)
tree       ebd23e7cbe9846414b6fa8f8327f37043447e019
parent     a4ed25849532728effaa0665c92e08e029e41407 (diff)
[TCP]: Minimum congestion window consolidation.
Many of the TCP congestion control modules just use ssthresh as the
minimum congestion window on decrease.  Rather than duplicating the
code, make that the default when the min_cwnd handler in the ops
structure is not set.

This is a minor behaviour change to TCP Compound: it probably wants to
use ssthresh as the lower bound, rather than ssthresh/2, because the
latter causes undershoot on loss.

Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
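The core of the change is a NULL-able hook with a core-supplied default: the new tcp_cwnd_min() helper in tcp_input.c (see the hunk below) uses a module's min_cwnd hook when one is registered and falls back to tp->snd_ssthresh otherwise.  What follows is a minimal, self-contained userspace sketch of that pattern; fake_sock, fake_ca_ops and cwnd_min are hypothetical stand-ins for the kernel types, not kernel code.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel's sock / tcp_congestion_ops. */
struct fake_sock {
	unsigned int snd_cwnd;
	unsigned int snd_ssthresh;
};

struct fake_ca_ops {
	/* lower bound for congestion window (optional, may be NULL) */
	unsigned int (*min_cwnd)(const struct fake_sock *sk);
};

/* A module that wants its own bound still registers a hook... */
static unsigned int halving_min_cwnd(const struct fake_sock *sk)
{
	return sk->snd_ssthresh / 2;	/* Reno-style halving */
}

static const struct fake_ca_ops with_hook = { .min_cwnd = halving_min_cwnd };
static const struct fake_ca_ops no_hook   = { .min_cwnd = NULL };

/* ...while the core falls back to ssthresh when the hook is NULL,
 * mirroring the tcp_cwnd_min() helper this patch introduces. */
static unsigned int cwnd_min(const struct fake_ca_ops *ops,
			     const struct fake_sock *sk)
{
	return ops->min_cwnd ? ops->min_cwnd(sk) : sk->snd_ssthresh;
}

int main(void)
{
	struct fake_sock sk = { .snd_cwnd = 20, .snd_ssthresh = 10 };

	printf("explicit hook: %u\n", cwnd_min(&with_hook, &sk));	/* 5 */
	printf("NULL default : %u\n", cwnd_min(&no_hook, &sk));	/* 10 */
	return 0;
}

With snd_ssthresh at 10, the ssthresh/2-style hook bounds the window at 5 while the NULL default bounds it at 10; that gap is exactly the TCP Compound behaviour change noted above, since Compound previously registered tcp_reno_min_cwnd (ssthresh/2) and now inherits the ssthresh default.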
 include/net/tcp.h       |  4
 net/ipv4/tcp_bic.c      |  7
 net/ipv4/tcp_compound.c |  1
 net/ipv4/tcp_cong.c     |  6
 net/ipv4/tcp_cubic.c    |  6
 net/ipv4/tcp_htcp.c     |  9
 net/ipv4/tcp_input.c    | 13
 net/ipv4/tcp_veno.c     |  7
 net/ipv4/tcp_westwood.c | 18
 9 files changed, 23 insertions(+), 48 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index f1f472746e6c..de88c5472bfc 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -632,7 +632,7 @@ struct tcp_congestion_ops {
 	/* return slow start threshold (required) */
 	u32 (*ssthresh)(struct sock *sk);
 	/* lower bound for congestion window (optional) */
-	u32 (*min_cwnd)(struct sock *sk);
+	u32 (*min_cwnd)(const struct sock *sk);
 	/* do new cwnd calculation (required) */
 	void (*cong_avoid)(struct sock *sk, u32 ack,
 			   u32 rtt, u32 in_flight, int good_ack);
@@ -667,7 +667,7 @@ extern struct tcp_congestion_ops tcp_init_congestion_ops;
 extern u32 tcp_reno_ssthresh(struct sock *sk);
 extern void tcp_reno_cong_avoid(struct sock *sk, u32 ack,
 				u32 rtt, u32 in_flight, int flag);
-extern u32 tcp_reno_min_cwnd(struct sock *sk);
+extern u32 tcp_reno_min_cwnd(const struct sock *sk);
 extern struct tcp_congestion_ops tcp_reno;
 
 static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index 035f2092d73a..b2d9021ad22b 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -198,12 +198,6 @@ static u32 bictcp_undo_cwnd(struct sock *sk)
 	return max(tp->snd_cwnd, ca->last_max_cwnd);
 }
 
-static u32 bictcp_min_cwnd(struct sock *sk)
-{
-	const struct tcp_sock *tp = tcp_sk(sk);
-	return tp->snd_ssthresh;
-}
-
 static void bictcp_state(struct sock *sk, u8 new_state)
 {
 	if (new_state == TCP_CA_Loss)
@@ -231,7 +225,6 @@ static struct tcp_congestion_ops bictcp = {
 	.cong_avoid	= bictcp_cong_avoid,
 	.set_state	= bictcp_state,
 	.undo_cwnd	= bictcp_undo_cwnd,
-	.min_cwnd	= bictcp_min_cwnd,
 	.pkts_acked	= bictcp_acked,
 	.owner		= THIS_MODULE,
 	.name		= "bic",
diff --git a/net/ipv4/tcp_compound.c b/net/ipv4/tcp_compound.c
index ec68cb8081c1..bc54f7e9aea9 100644
--- a/net/ipv4/tcp_compound.c
+++ b/net/ipv4/tcp_compound.c
@@ -419,7 +419,6 @@ static struct tcp_congestion_ops tcp_compound = {
 	.init		= tcp_compound_init,
 	.ssthresh	= tcp_reno_ssthresh,
 	.cong_avoid	= tcp_compound_cong_avoid,
-	.min_cwnd	= tcp_reno_min_cwnd,
 	.rtt_sample	= tcp_compound_rtt_calc,
 	.set_state	= tcp_compound_state,
 	.cwnd_event	= tcp_compound_cwnd_event,
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 91c2f41c7f58..857eefc52aab 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -38,7 +38,7 @@ int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
 	int ret = 0;
 
 	/* all algorithms must implement ssthresh and cong_avoid ops */
-	if (!ca->ssthresh || !ca->cong_avoid || !ca->min_cwnd) {
+	if (!ca->ssthresh || !ca->cong_avoid) {
 		printk(KERN_ERR "TCP %s does not implement required ops\n",
 		       ca->name);
 		return -EINVAL;
@@ -251,8 +251,8 @@ u32 tcp_reno_ssthresh(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
 
-/* Lower bound on congestion window. */
-u32 tcp_reno_min_cwnd(struct sock *sk)
+/* Lower bound on congestion window with halving. */
+u32 tcp_reno_min_cwnd(const struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	return tp->snd_ssthresh/2;
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 31a4986dfbf7..78b7a6b9e4de 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -325,11 +325,6 @@ static u32 bictcp_undo_cwnd(struct sock *sk)
 	return max(tcp_sk(sk)->snd_cwnd, ca->last_max_cwnd);
 }
 
-static u32 bictcp_min_cwnd(struct sock *sk)
-{
-	return tcp_sk(sk)->snd_ssthresh;
-}
-
 static void bictcp_state(struct sock *sk, u8 new_state)
 {
 	if (new_state == TCP_CA_Loss)
@@ -357,7 +352,6 @@ static struct tcp_congestion_ops cubictcp = {
 	.cong_avoid	= bictcp_cong_avoid,
 	.set_state	= bictcp_state,
 	.undo_cwnd	= bictcp_undo_cwnd,
-	.min_cwnd	= bictcp_min_cwnd,
 	.pkts_acked	= bictcp_acked,
 	.owner		= THIS_MODULE,
 	.name		= "cubic",
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 1b2ff53f98ed..3d92c1859267 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -246,14 +246,6 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
 	}
 }
 
-/* Lower bound on congestion window. */
-static u32 htcp_min_cwnd(struct sock *sk)
-{
-	const struct tcp_sock *tp = tcp_sk(sk);
-	return tp->snd_ssthresh;
-}
-
-
 static void htcp_init(struct sock *sk)
 {
 	struct htcp *ca = inet_csk_ca(sk);
@@ -285,7 +277,6 @@ static void htcp_state(struct sock *sk, u8 new_state)
 static struct tcp_congestion_ops htcp = {
 	.init		= htcp_init,
 	.ssthresh	= htcp_recalc_ssthresh,
-	.min_cwnd	= htcp_min_cwnd,
 	.cong_avoid	= htcp_cong_avoid,
 	.set_state	= htcp_state,
 	.undo_cwnd	= htcp_cwnd_undo,
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 6d167889a4b0..e08245bdda3a 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1689,17 +1689,26 @@ static inline void tcp_moderate_cwnd(struct tcp_sock *tp)
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
+/* Lower bound on congestion window is slow start threshold
+ * unless congestion avoidance choice decides to override it.
+ */
+static inline u32 tcp_cwnd_min(const struct sock *sk)
+{
+	const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
+
+	return ca_ops->min_cwnd ? ca_ops->min_cwnd(sk) : tcp_sk(sk)->snd_ssthresh;
+}
+
 /* Decrease cwnd each second ack. */
 static void tcp_cwnd_down(struct sock *sk)
 {
-	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 	int decr = tp->snd_cwnd_cnt + 1;
 
 	tp->snd_cwnd_cnt = decr&1;
 	decr >>= 1;
 
-	if (decr && tp->snd_cwnd > icsk->icsk_ca_ops->min_cwnd(sk))
+	if (decr && tp->snd_cwnd > tcp_cwnd_min(sk))
 		tp->snd_cwnd -= decr;
 
 	tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
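For context on the tcp_input.c hunk above: tcp_cwnd_down() decrements the window on every second ACK.  An illustrative trace of the counter arithmetic, assuming snd_cwnd_cnt starts at 0 and snd_cwnd stays above the lower bound:

/*
 * ACK 1: decr = 0 + 1 = 1; snd_cwnd_cnt = 1 & 1 = 1; decr >>= 1 -> 0; no change
 * ACK 2: decr = 1 + 1 = 2; snd_cwnd_cnt = 2 & 1 = 0; decr >>= 1 -> 1; snd_cwnd -= 1
 */

So the window shrinks by one segment per two ACKs, and the new tcp_cwnd_min() fallback is what stops that shrinking at ssthresh for modules that no longer register an explicit min_cwnd hook.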
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index 1091671751c4..11b42a7135c1 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -199,17 +199,10 @@ static u32 tcp_veno_ssthresh(struct sock *sk)
 	return max(tp->snd_cwnd >> 1U, 2U);
 }
 
-static u32 tcp_veno_min_cwnd(struct sock * sk)
-{
-	const struct tcp_sock *tp = tcp_sk(sk);
-	return tp->snd_ssthresh;
-}
-
 static struct tcp_congestion_ops tcp_veno = {
 	.init		= tcp_veno_init,
 	.ssthresh	= tcp_veno_ssthresh,
 	.cong_avoid	= tcp_veno_cong_avoid,
-	.min_cwnd	= tcp_veno_min_cwnd,
 	.rtt_sample	= tcp_veno_rtt_calc,
 	.set_state	= tcp_veno_state,
 	.cwnd_event	= tcp_veno_cwnd_event,
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index 0c340c3756c2..29eb258b6d82 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -162,12 +162,6 @@ static inline u32 westwood_acked_count(struct sock *sk)
 	return w->cumul_ack;
 }
 
-static inline u32 westwood_bw_rttmin(const struct sock *sk)
-{
-	const struct tcp_sock *tp = tcp_sk(sk);
-	const struct westwood *w = inet_csk_ca(sk);
-	return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
-}
 
 /*
  * TCP Westwood
@@ -175,9 +169,11 @@ static inline u32 westwood_bw_rttmin(const struct sock *sk)
  * in packets we use mss_cache). Rttmin is guaranteed to be >= 2
  * so avoids ever returning 0.
  */
-static u32 tcp_westwood_cwnd_min(struct sock *sk)
+static u32 tcp_westwood_bw_rttmin(const struct sock *sk)
 {
-	return westwood_bw_rttmin(sk);
+	const struct tcp_sock *tp = tcp_sk(sk);
+	const struct westwood *w = inet_csk_ca(sk);
+	return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
 }
 
 static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
@@ -191,11 +187,11 @@ static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
 		break;
 
 	case CA_EVENT_COMPLETE_CWR:
-		tp->snd_cwnd = tp->snd_ssthresh = westwood_bw_rttmin(sk);
+		tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
 		break;
 
 	case CA_EVENT_FRTO:
-		tp->snd_ssthresh = westwood_bw_rttmin(sk);
+		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
 		break;
 
 	case CA_EVENT_SLOW_ACK:
@@ -235,7 +231,7 @@ static struct tcp_congestion_ops tcp_westwood = {
 	.init		= tcp_westwood_init,
 	.ssthresh	= tcp_reno_ssthresh,
 	.cong_avoid	= tcp_reno_cong_avoid,
-	.min_cwnd	= tcp_westwood_cwnd_min,
+	.min_cwnd	= tcp_westwood_bw_rttmin,
 	.cwnd_event	= tcp_westwood_event,
 	.get_info	= tcp_westwood_info,
 	.pkts_acked	= tcp_westwood_pkts_acked,
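Westwood is the only module touched here that keeps an explicit min_cwnd hook, because its lower bound is the estimated bandwidth-delay product in packets rather than ssthresh.  A worked example of the formula in the renamed tcp_westwood_bw_rttmin(), with made-up numbers:

/*
 * lower bound = max(bw_est * rtt_min / mss_cache, 2)
 *
 * e.g. if bw_est * rtt_min works out to 14600 bytes and mss_cache is
 * 1460, the bound is 14600 / 1460 = 10 packets; the max_t(..., 2)
 * clamp keeps the result >= 2, so the hook never returns 0.
 */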