author     Florian Westphal <fw@strlen.de>         2016-11-21 08:18:38 -0500
committer  David S. Miller <davem@davemloft.net>   2016-11-21 13:20:17 -0500
commit     e97991832a4ea4a5f47d65f068a4c966a2eb5730 (patch)
tree       870e5c09c37890d286872c9ae22382d6bf336b37
parent     85f7e7508a1d288b513493196ef406c6c06134e1 (diff)
tcp: make undo_cwnd mandatory for congestion modules
The undo_cwnd fallback in the stack doubles cwnd based on ssthresh,
which undoes reno's halving behaviour.

It seems more appropriate to let congestion control algorithms pair
.ssthresh and .undo_cwnd properly. Add a tcp_reno_undo_cwnd() helper and
wire it up for all congestion algorithms that used to rely on the fallback.
Cc: Eric Dumazet <edumazet@google.com>
Cc: Yuchung Cheng <ycheng@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Florian Westphal <fw@strlen.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
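
For context, a minimal sketch (not part of this patch) of what a hypothetical
out-of-tree congestion control module has to provide after this change:
tcp_register_congestion_control() now rejects any tcp_congestion_ops that
leaves .undo_cwnd unset, so a module with no special undo logic can simply
reuse tcp_reno_undo_cwnd(), exactly as the in-tree modules below do. The
"example" name and the module boilerplate are illustrative only.

/*
 * Minimal sketch, not part of this patch: a hypothetical congestion
 * control module that satisfies the stricter registration check by
 * reusing the reno helpers.
 */
#include <linux/module.h>
#include <net/tcp.h>

static struct tcp_congestion_ops tcp_example __read_mostly = {
	.ssthresh	= tcp_reno_ssthresh,	/* halve cwnd on loss */
	.cong_avoid	= tcp_reno_cong_avoid,	/* standard AIMD growth */
	.undo_cwnd	= tcp_reno_undo_cwnd,	/* now mandatory: restore cwnd after a spurious reduction */
	.owner		= THIS_MODULE,
	.name		= "example",
};

static int __init tcp_example_register(void)
{
	/* returns -EINVAL if ssthresh, undo_cwnd or cong_avoid/cong_control are missing */
	return tcp_register_congestion_control(&tcp_example);
}

static void __exit tcp_example_unregister(void)
{
	tcp_unregister_congestion_control(&tcp_example);
}

module_init(tcp_example_register);
module_exit(tcp_example_unregister);
MODULE_LICENSE("GPL");

Reusing tcp_reno_undo_cwnd() keeps the behaviour of the old fallback
(cwnd = max(cwnd, 2 * ssthresh)) while making the pairing of .ssthresh and
.undo_cwnd explicit in each module.
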
-rw-r--r--  include/net/tcp.h       |  1
-rw-r--r--  net/ipv4/tcp_cong.c     | 14
-rw-r--r--  net/ipv4/tcp_dctcp.c    |  1
-rw-r--r--  net/ipv4/tcp_hybla.c    |  1
-rw-r--r--  net/ipv4/tcp_input.c    |  5
-rw-r--r--  net/ipv4/tcp_lp.c       |  1
-rw-r--r--  net/ipv4/tcp_vegas.c    |  1
-rw-r--r--  net/ipv4/tcp_westwood.c |  1
8 files changed, 19 insertions, 6 deletions
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 123979fe12bf..7de80739adab 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -958,6 +958,7 @@ u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
 void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);
 
 u32 tcp_reno_ssthresh(struct sock *sk);
+u32 tcp_reno_undo_cwnd(struct sock *sk);
 void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
 extern struct tcp_congestion_ops tcp_reno;
 
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 1294af4e0127..38905ec5f508 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -68,8 +68,9 @@ int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
 {
 	int ret = 0;
 
-	/* all algorithms must implement ssthresh and cong_avoid ops */
-	if (!ca->ssthresh || !(ca->cong_avoid || ca->cong_control)) {
+	/* all algorithms must implement these */
+	if (!ca->ssthresh || !ca->undo_cwnd ||
+	    !(ca->cong_avoid || ca->cong_control)) {
 		pr_err("%s does not implement required ops\n", ca->name);
 		return -EINVAL;
 	}
@@ -441,10 +442,19 @@ u32 tcp_reno_ssthresh(struct sock *sk)
 }
 EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
 
+u32 tcp_reno_undo_cwnd(struct sock *sk)
+{
+	const struct tcp_sock *tp = tcp_sk(sk);
+
+	return max(tp->snd_cwnd, tp->snd_ssthresh << 1);
+}
+EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd);
+
 struct tcp_congestion_ops tcp_reno = {
 	.flags		= TCP_CONG_NON_RESTRICTED,
 	.name		= "reno",
 	.owner		= THIS_MODULE,
 	.ssthresh	= tcp_reno_ssthresh,
 	.cong_avoid	= tcp_reno_cong_avoid,
+	.undo_cwnd	= tcp_reno_undo_cwnd,
 };
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index 51139175bf61..bde22ebb92a8 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -342,6 +342,7 @@ static struct tcp_congestion_ops dctcp __read_mostly = {
 static struct tcp_congestion_ops dctcp_reno __read_mostly = {
 	.ssthresh	= tcp_reno_ssthresh,
 	.cong_avoid	= tcp_reno_cong_avoid,
+	.undo_cwnd	= tcp_reno_undo_cwnd,
 	.get_info	= dctcp_get_info,
 	.owner		= THIS_MODULE,
 	.name		= "dctcp-reno",
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index 083831e359df..0f7175c3338e 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -166,6 +166,7 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 static struct tcp_congestion_ops tcp_hybla __read_mostly = {
 	.init		= hybla_init,
 	.ssthresh	= tcp_reno_ssthresh,
+	.undo_cwnd	= tcp_reno_undo_cwnd,
 	.cong_avoid	= hybla_cong_avoid,
 	.set_state	= hybla_state,
 
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a70046fea0e8..22e6a2097ff6 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2394,10 +2394,7 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
 	if (tp->prior_ssthresh) {
 		const struct inet_connection_sock *icsk = inet_csk(sk);
 
-		if (icsk->icsk_ca_ops->undo_cwnd)
-			tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
-		else
-			tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1);
+		tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
 
 		if (tp->prior_ssthresh > tp->snd_ssthresh) {
 			tp->snd_ssthresh = tp->prior_ssthresh;
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index c67ece1390c2..046fd3910873 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -316,6 +316,7 @@ static void tcp_lp_pkts_acked(struct sock *sk, const struct ack_sample *sample)
 static struct tcp_congestion_ops tcp_lp __read_mostly = {
 	.init		= tcp_lp_init,
 	.ssthresh	= tcp_reno_ssthresh,
+	.undo_cwnd	= tcp_reno_undo_cwnd,
 	.cong_avoid	= tcp_lp_cong_avoid,
 	.pkts_acked	= tcp_lp_pkts_acked,
 
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 4c4bac1b5eab..218cfcc77650 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -307,6 +307,7 @@ EXPORT_SYMBOL_GPL(tcp_vegas_get_info);
 static struct tcp_congestion_ops tcp_vegas __read_mostly = {
 	.init		= tcp_vegas_init,
 	.ssthresh	= tcp_reno_ssthresh,
+	.undo_cwnd	= tcp_reno_undo_cwnd,
 	.cong_avoid	= tcp_vegas_cong_avoid,
 	.pkts_acked	= tcp_vegas_pkts_acked,
 	.set_state	= tcp_vegas_state,
diff --git a/net/ipv4/tcp_westwood.c b/net/ipv4/tcp_westwood.c
index 4b03a2e2a050..fed66dc0e0f5 100644
--- a/net/ipv4/tcp_westwood.c
+++ b/net/ipv4/tcp_westwood.c
@@ -278,6 +278,7 @@ static struct tcp_congestion_ops tcp_westwood __read_mostly = {
 	.init		= tcp_westwood_init,
 	.ssthresh	= tcp_reno_ssthresh,
 	.cong_avoid	= tcp_reno_cong_avoid,
+	.undo_cwnd	= tcp_reno_undo_cwnd,
 	.cwnd_event	= tcp_westwood_event,
 	.in_ack_event	= tcp_westwood_ack,
 	.get_info	= tcp_westwood_info,