author	Neal Cardwell <ncardwell@google.com>	2012-01-18 12:47:58 -0500
committer	David S. Miller <davem@davemloft.net>	2012-01-20 14:17:26 -0500
commit	fc16dcd8c2e1e9bc91ed765957e1f2bbf334253e (patch)
tree	c7174ce035611b3088e2da38003655cf26e8ce0b /net
parent	b67f231ded332461dd31123c4f659c4681223fb1 (diff)
tcp: fix undo after RTO for BIC
This patch fixes BIC so that cwnd reductions made during RTOs can be
undone (just as they already can be undone when using the default/Reno
behavior).

When undoing cwnd reductions, BIC-derived congestion control modules
were restoring the cwnd from last_max_cwnd. There were two problems
with using last_max_cwnd to restore a cwnd during undo:

(a) last_max_cwnd was set to 0 on state transitions into TCP_CA_Loss
(by calling the module's reset() functions), so cwnd reductions from
RTOs could not be undone.

(b) when fast_convergence is enabled (which it is by default)
last_max_cwnd does not actually hold the value of snd_cwnd before the
loss; instead, it holds a scaled-down version of snd_cwnd.

This patch makes the following changes:

(1) upon undo, revert snd_cwnd to ca->loss_cwnd, which is already, as
the existing comment notes, the "congestion window at last loss"

(2) stop forgetting ca->loss_cwnd on TCP_CA_Loss events

(3) use ca->last_max_cwnd to check if we're in slow start

Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
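[Editor's illustration, not part of the commit] The standalone sketch below contrasts the old and new undo behavior described above. It is a toy model, not kernel code: the struct, helper names, and the example numbers (a cwnd of 100 collapsing to 1 after an RTO) are assumptions invented for this note; only the max(snd_cwnd, ...) expressions mirror the before/after logic of the patch.

/*
 * Toy sketch contrasting the old and new BIC undo behavior.
 * Not kernel code; values and names are illustrative only.
 */
#include <stdio.h>

struct bictcp_toy {
	unsigned int last_max_cwnd;	/* scaled-down cwnd at last loss */
	unsigned int loss_cwnd;		/* exact cwnd at last loss */
};

static unsigned int max_u32(unsigned int a, unsigned int b)
{
	return a > b ? a : b;
}

/* Old behavior: undo from last_max_cwnd, which reset() zeroes on an RTO
 * and which fast convergence scales down on ordinary losses. */
static unsigned int undo_cwnd_old(unsigned int snd_cwnd, const struct bictcp_toy *ca)
{
	return max_u32(snd_cwnd, ca->last_max_cwnd);
}

/* New behavior: undo from loss_cwnd, the cwnd recorded at the loss. */
static unsigned int undo_cwnd_new(unsigned int snd_cwnd, const struct bictcp_toy *ca)
{
	return max_u32(snd_cwnd, ca->loss_cwnd);
}

int main(void)
{
	/* Suppose cwnd was 100 packets when a spurious RTO fired. */
	unsigned int cwnd_before_loss = 100;
	unsigned int snd_cwnd_after_rto = 1;	/* RTO collapses cwnd */

	struct bictcp_toy ca = {
		/* reset() on entering TCP_CA_Loss zeroed this (problem (a));
		 * even without the reset, fast convergence would have stored
		 * only a scaled-down copy of the pre-loss cwnd (problem (b)). */
		.last_max_cwnd = 0,
		.loss_cwnd = cwnd_before_loss,
	};

	printf("old undo: cwnd -> %u\n", undo_cwnd_old(snd_cwnd_after_rto, &ca));	/* 1 */
	printf("new undo: cwnd -> %u\n", undo_cwnd_new(snd_cwnd_after_rto, &ca));	/* 100 */
	return 0;
}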
Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/tcp_bic.c	11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index 6187eb4d1dcf..f45e1c242440 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -63,7 +63,6 @@ static inline void bictcp_reset(struct bictcp *ca)
 {
 	ca->cnt = 0;
 	ca->last_max_cwnd = 0;
-	ca->loss_cwnd = 0;
 	ca->last_cwnd = 0;
 	ca->last_time = 0;
 	ca->epoch_start = 0;
@@ -72,7 +71,11 @@ static inline void bictcp_reset(struct bictcp *ca)
 
 static void bictcp_init(struct sock *sk)
 {
-	bictcp_reset(inet_csk_ca(sk));
+	struct bictcp *ca = inet_csk_ca(sk);
+
+	bictcp_reset(ca);
+	ca->loss_cwnd = 0;
+
 	if (initial_ssthresh)
 		tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
 }
@@ -127,7 +130,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 	}
 
 	/* if in slow start or link utilization is very low */
-	if (ca->loss_cwnd == 0) {
+	if (ca->last_max_cwnd == 0) {
 		if (ca->cnt > 20)	/* increase cwnd 5% per RTT */
 			ca->cnt = 20;
 	}
@@ -185,7 +188,7 @@ static u32 bictcp_undo_cwnd(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 	const struct bictcp *ca = inet_csk_ca(sk);
-	return max(tp->snd_cwnd, ca->last_max_cwnd);
+	return max(tp->snd_cwnd, ca->loss_cwnd);
 }
 
 static void bictcp_state(struct sock *sk, u8 new_state)
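
[Editor's context note, not part of the diff] bictcp_undo_cwnd is the value the stack asks the module for when it decides a cwnd reduction was spurious; it is wired in as the module's undo hook in its tcp_congestion_ops. The sketch below is reconstructed from memory of mainline tcp_bic.c of this era, so the exact set of fields may differ; treat it as an approximation rather than the file's verbatim contents.

static struct tcp_congestion_ops bictcp __read_mostly = {
	.init		= bictcp_init,
	.ssthresh	= bictcp_recalc_ssthresh,
	.cong_avoid	= bictcp_cong_avoid,
	.set_state	= bictcp_state,
	.undo_cwnd	= bictcp_undo_cwnd,	/* hook changed by this patch */
	.pkts_acked	= bictcp_acked,
	.owner		= THIS_MODULE,
	.name		= "bic",
};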