author     Neal Cardwell <ncardwell@google.com>    2012-01-18 12:47:59 -0500
committer  David S. Miller <davem@davemloft.net>   2012-01-20 14:17:26 -0500
commit     5a45f0086a2dcf50db7e6a0bf5be933880f85127 (patch)
tree       f67d6ddb68c276bcf34077e7442b0ca722144c75
parent     fc16dcd8c2e1e9bc91ed765957e1f2bbf334253e (diff)
tcp: fix undo after RTO for CUBIC
This patch fixes CUBIC so that cwnd reductions made during RTOs can be undone (just as they already can be undone when using the default/Reno behavior).

When undoing cwnd reductions, BIC-derived congestion control modules were restoring the cwnd from last_max_cwnd. There were two problems with using last_max_cwnd to restore a cwnd during undo:

(a) last_max_cwnd was set to 0 on state transitions into TCP_CA_Loss (by calling the module's reset() functions), so cwnd reductions from RTOs could not be undone.

(b) when fast_convergence is enabled (which it is by default) last_max_cwnd does not actually hold the value of snd_cwnd before the loss; instead, it holds a scaled-down version of snd_cwnd.

This patch makes the following changes:

(1) upon undo, revert snd_cwnd to ca->loss_cwnd, which is, as the existing comment notes, the "congestion window at last loss"

(2) stop forgetting ca->loss_cwnd on TCP_CA_Loss events

(3) use ca->last_max_cwnd to check if we're in slow start

Signed-off-by: Neal Cardwell <ncardwell@google.com>
Acked-by: Stephen Hemminger <shemminger@vyatta.com>
Acked-by: Sangtae Ha <sangtae.ha@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
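[Editor's note] For context on problem (b): on each loss event CUBIC records both values, and with fast convergence enabled it deliberately scales last_max_cwnd down so the next epoch probes below the previous maximum. A sketch of that logic, paraphrased from bictcp_recalc_ssthresh() in tcp_cubic.c of this era (details may differ slightly from the exact source):

	/*
	 * Sketch of CUBIC's reaction to a loss event (paraphrased, not
	 * verbatim). With fast_convergence enabled, last_max_cwnd is
	 * deliberately set below the pre-loss cwnd, so it cannot serve
	 * as the undo target; loss_cwnd holds the actual "congestion
	 * window at last loss".
	 */
	static u32 bictcp_recalc_ssthresh(struct sock *sk)
	{
		const struct tcp_sock *tp = tcp_sk(sk);
		struct bictcp *ca = inet_csk_ca(sk);

		ca->epoch_start = 0;	/* end of epoch */

		/* Wmax and fast convergence */
		if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
			/* scale down: cwnd * (1 + beta) / 2 */
			ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
				/ (2 * BICTCP_BETA_SCALE);
		else
			ca->last_max_cwnd = tp->snd_cwnd;

		ca->loss_cwnd = tp->snd_cwnd;	/* the value undo must restore */

		return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
	}

With the default beta of 717 and BICTCP_BETA_SCALE of 1024, the scaled last_max_cwnd is roughly 0.85 * snd_cwnd, so restoring it on undo would under-restore the window; loss_cwnd is the exact pre-loss value.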
Diffstat (limited to 'net')
-rw-r--r--  net/ipv4/tcp_cubic.c  |  10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index f376b05cca81..a9077f441cb2 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -107,7 +107,6 @@ static inline void bictcp_reset(struct bictcp *ca)
 {
 	ca->cnt = 0;
 	ca->last_max_cwnd = 0;
-	ca->loss_cwnd = 0;
 	ca->last_cwnd = 0;
 	ca->last_time = 0;
 	ca->bic_origin_point = 0;
@@ -142,7 +141,10 @@ static inline void bictcp_hystart_reset(struct sock *sk)
 
 static void bictcp_init(struct sock *sk)
 {
-	bictcp_reset(inet_csk_ca(sk));
+	struct bictcp *ca = inet_csk_ca(sk);
+
+	bictcp_reset(ca);
+	ca->loss_cwnd = 0;
 
 	if (hystart)
 		bictcp_hystart_reset(sk);
@@ -275,7 +277,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 	 * The initial growth of cubic function may be too conservative
 	 * when the available bandwidth is still unknown.
 	 */
-	if (ca->loss_cwnd == 0 && ca->cnt > 20)
+	if (ca->last_max_cwnd == 0 && ca->cnt > 20)
 		ca->cnt = 20;	/* increase cwnd 5% per RTT */
 
 	/* TCP Friendly */
@@ -342,7 +344,7 @@ static u32 bictcp_undo_cwnd(struct sock *sk)
 {
 	struct bictcp *ca = inet_csk_ca(sk);
 
-	return max(tcp_sk(sk)->snd_cwnd, ca->last_max_cwnd);
+	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
 }
 
 static void bictcp_state(struct sock *sk, u8 new_state)
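
[Editor's note] For readers tracing the undo path: when the sender discovers (e.g., via TCP timestamps or D-SACK) that a cwnd reduction was spurious, the core stack asks the congestion module which window to restore through the undo_cwnd hook in tcp_congestion_ops. A simplified sketch of that call site, paraphrased from tcp_input.c of this era (the function name and signature are from memory and may not match the source exactly):

	/*
	 * Simplified sketch (not verbatim): how the core TCP stack uses
	 * the undo_cwnd hook when a cwnd reduction turns out spurious.
	 */
	static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh)
	{
		struct tcp_sock *tp = tcp_sk(sk);
		const struct inet_connection_sock *icsk = inet_csk(sk);

		if (icsk->icsk_ca_ops->undo_cwnd)
			/* CUBIC's hook now returns max(snd_cwnd, ca->loss_cwnd) */
			tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
		else
			tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1);

		/* ssthresh restoration and related bookkeeping elided */
	}

Because bictcp_undo_cwnd() now returns loss_cwnd (which this patch stops zeroing on TCP_CA_Loss), the restored window after a spurious RTO matches the window at the time of the loss.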