author		Yuchung Cheng <ycheng@google.com>	2017-08-03 23:38:52 -0400
committer	David S. Miller <davem@davemloft.net>	2017-08-07 00:25:10 -0400
commit		f1722a1be19dc38e0a4b282d4e6e6ec5e1b11a67 (patch)
tree		5d8917911405f39aa7f8ba34d26ce9ce4e60afee
parent		4faf783998b8cb88294e9df89032f473f8771b78 (diff)
tcp: consolidate congestion control undo functions
Most TCP congestion controls use identical logic to undo cwnd, except BBR. This patch consolidates these similar functions into the one currently used by Reno and others.

Suggested-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
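For reference, the shared callback all nine modules now register is tcp_reno_undo_cwnd() in net/ipv4/tcp_cong.c. That helper is not part of this diff; the sketch below shows its logic as of the parent commit referenced above (which switched Reno's undo to the core-maintained tp->prior_cwnd snapshot), so treat it as illustrative rather than authoritative:

u32 tcp_reno_undo_cwnd(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* Undo to the cwnd snapshot that the core TCP code saved in
	 * tp->prior_cwnd when the loss/CWR episode began; with this,
	 * no congestion module needs a private loss_cwnd copy.
	 */
	return max(tp->snd_cwnd, tp->prior_cwnd);
}

This is what makes each module's private loss_cwnd field and undo callback below safe to delete wholesale. BBR is the exception noted in the message: it does not follow this multiplicative-decrease undo pattern and keeps its own undo_cwnd implementation.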
-rw-r--r--	net/ipv4/tcp_bic.c	14
-rw-r--r--	net/ipv4/tcp_cdg.c	12
-rw-r--r--	net/ipv4/tcp_cubic.c	13
-rw-r--r--	net/ipv4/tcp_highspeed.c	11
-rw-r--r--	net/ipv4/tcp_illinois.c	11
-rw-r--r--	net/ipv4/tcp_nv.c	13
-rw-r--r--	net/ipv4/tcp_scalable.c	16
-rw-r--r--	net/ipv4/tcp_veno.c	11
-rw-r--r--	net/ipv4/tcp_yeah.c	11
9 files changed, 9 insertions(+), 103 deletions(-)
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index 609965f0e298..fc3614377413 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -49,7 +49,6 @@ MODULE_PARM_DESC(smooth_part, "log(B/(B*Smin))/log(B/(B-1))+B, # of RTT from Wma
 struct bictcp {
 	u32	cnt;		/* increase cwnd by 1 after ACKs */
 	u32	last_max_cwnd;	/* last maximum snd_cwnd */
-	u32	loss_cwnd;	/* congestion window at last loss */
 	u32	last_cwnd;	/* the last snd_cwnd */
 	u32	last_time;	/* time when updated last_cwnd */
 	u32	epoch_start;	/* beginning of an epoch */
@@ -72,7 +71,6 @@ static void bictcp_init(struct sock *sk)
 	struct bictcp *ca = inet_csk_ca(sk);
 
 	bictcp_reset(ca);
-	ca->loss_cwnd = 0;
 
 	if (initial_ssthresh)
 		tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
@@ -172,22 +170,12 @@ static u32 bictcp_recalc_ssthresh(struct sock *sk)
 	else
 		ca->last_max_cwnd = tp->snd_cwnd;
 
-	ca->loss_cwnd = tp->snd_cwnd;
-
 	if (tp->snd_cwnd <= low_window)
 		return max(tp->snd_cwnd >> 1U, 2U);
 	else
 		return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
 }
 
-static u32 bictcp_undo_cwnd(struct sock *sk)
-{
-	const struct tcp_sock *tp = tcp_sk(sk);
-	const struct bictcp *ca = inet_csk_ca(sk);
-
-	return max(tp->snd_cwnd, ca->loss_cwnd);
-}
-
 static void bictcp_state(struct sock *sk, u8 new_state)
 {
 	if (new_state == TCP_CA_Loss)
@@ -214,7 +202,7 @@ static struct tcp_congestion_ops bictcp __read_mostly = {
 	.ssthresh	= bictcp_recalc_ssthresh,
 	.cong_avoid	= bictcp_cong_avoid,
 	.set_state	= bictcp_state,
-	.undo_cwnd	= bictcp_undo_cwnd,
+	.undo_cwnd	= tcp_reno_undo_cwnd,
 	.pkts_acked	= bictcp_acked,
 	.owner		= THIS_MODULE,
 	.name		= "bic",
diff --git a/net/ipv4/tcp_cdg.c b/net/ipv4/tcp_cdg.c
index 50a0f3e51d5b..66ac69f7bd19 100644
--- a/net/ipv4/tcp_cdg.c
+++ b/net/ipv4/tcp_cdg.c
@@ -85,7 +85,6 @@ struct cdg {
 	u8  state;
 	u8  delack;
 	u32 rtt_seq;
-	u32 undo_cwnd;
 	u32 shadow_wnd;
 	u16 backoff_cnt;
 	u16 sample_cnt;
@@ -330,8 +329,6 @@ static u32 tcp_cdg_ssthresh(struct sock *sk)
 	struct cdg *ca = inet_csk_ca(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	ca->undo_cwnd = tp->snd_cwnd;
-
 	if (ca->state == CDG_BACKOFF)
 		return max(2U, (tp->snd_cwnd * min(1024U, backoff_beta)) >> 10);
 
@@ -344,13 +341,6 @@ static u32 tcp_cdg_ssthresh(struct sock *sk)
 		return max(2U, tp->snd_cwnd >> 1);
 }
 
-static u32 tcp_cdg_undo_cwnd(struct sock *sk)
-{
-	struct cdg *ca = inet_csk_ca(sk);
-
-	return max(tcp_sk(sk)->snd_cwnd, ca->undo_cwnd);
-}
-
 static void tcp_cdg_cwnd_event(struct sock *sk, const enum tcp_ca_event ev)
 {
 	struct cdg *ca = inet_csk_ca(sk);
@@ -403,7 +393,7 @@ struct tcp_congestion_ops tcp_cdg __read_mostly = {
 	.cong_avoid = tcp_cdg_cong_avoid,
 	.cwnd_event = tcp_cdg_cwnd_event,
 	.pkts_acked = tcp_cdg_acked,
-	.undo_cwnd = tcp_cdg_undo_cwnd,
+	.undo_cwnd = tcp_reno_undo_cwnd,
 	.ssthresh = tcp_cdg_ssthresh,
 	.release = tcp_cdg_release,
 	.init = tcp_cdg_init,
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 57ae5b5ae643..78bfadfcf342 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -83,7 +83,6 @@ MODULE_PARM_DESC(hystart_ack_delta, "spacing between ack's indicating train (mse
 struct bictcp {
 	u32	cnt;		/* increase cwnd by 1 after ACKs */
 	u32	last_max_cwnd;	/* last maximum snd_cwnd */
-	u32	loss_cwnd;	/* congestion window at last loss */
 	u32	last_cwnd;	/* the last snd_cwnd */
 	u32	last_time;	/* time when updated last_cwnd */
 	u32	bic_origin_point;/* origin point of bic function */
@@ -142,7 +141,6 @@ static void bictcp_init(struct sock *sk)
 	struct bictcp *ca = inet_csk_ca(sk);
 
 	bictcp_reset(ca);
-	ca->loss_cwnd = 0;
 
 	if (hystart)
 		bictcp_hystart_reset(sk);
@@ -366,18 +364,9 @@ static u32 bictcp_recalc_ssthresh(struct sock *sk)
 	else
 		ca->last_max_cwnd = tp->snd_cwnd;
 
-	ca->loss_cwnd = tp->snd_cwnd;
-
 	return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
 }
 
-static u32 bictcp_undo_cwnd(struct sock *sk)
-{
-	struct bictcp *ca = inet_csk_ca(sk);
-
-	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
-}
-
 static void bictcp_state(struct sock *sk, u8 new_state)
 {
 	if (new_state == TCP_CA_Loss) {
@@ -470,7 +459,7 @@ static struct tcp_congestion_ops cubictcp __read_mostly = {
 	.ssthresh	= bictcp_recalc_ssthresh,
 	.cong_avoid	= bictcp_cong_avoid,
 	.set_state	= bictcp_state,
-	.undo_cwnd	= bictcp_undo_cwnd,
+	.undo_cwnd	= tcp_reno_undo_cwnd,
 	.cwnd_event	= bictcp_cwnd_event,
 	.pkts_acked	= bictcp_acked,
 	.owner		= THIS_MODULE,
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index 6d9879e93648..d1c33c91eadc 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -94,7 +94,6 @@ static const struct hstcp_aimd_val {
 
 struct hstcp {
 	u32	ai;
-	u32	loss_cwnd;
 };
 
 static void hstcp_init(struct sock *sk)
@@ -153,22 +152,14 @@ static u32 hstcp_ssthresh(struct sock *sk)
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct hstcp *ca = inet_csk_ca(sk);
 
-	ca->loss_cwnd = tp->snd_cwnd;
 	/* Do multiplicative decrease */
 	return max(tp->snd_cwnd - ((tp->snd_cwnd * hstcp_aimd_vals[ca->ai].md) >> 8), 2U);
 }
 
-static u32 hstcp_cwnd_undo(struct sock *sk)
-{
-	const struct hstcp *ca = inet_csk_ca(sk);
-
-	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
-}
-
 static struct tcp_congestion_ops tcp_highspeed __read_mostly = {
 	.init		= hstcp_init,
 	.ssthresh	= hstcp_ssthresh,
-	.undo_cwnd	= hstcp_cwnd_undo,
+	.undo_cwnd	= tcp_reno_undo_cwnd,
 	.cong_avoid	= hstcp_cong_avoid,
 
 	.owner		= THIS_MODULE,
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index 60352ff4f5a8..7c843578f233 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -48,7 +48,6 @@ struct illinois {
 	u32	end_seq;	/* right edge of current RTT */
 	u32	alpha;		/* Additive increase */
 	u32	beta;		/* Muliplicative decrease */
-	u32	loss_cwnd;	/* cwnd on loss */
 	u16	acked;		/* # packets acked by current ACK */
 	u8	rtt_above;	/* average rtt has gone above threshold */
 	u8	rtt_low;	/* # of rtts measurements below threshold */
@@ -297,18 +296,10 @@ static u32 tcp_illinois_ssthresh(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct illinois *ca = inet_csk_ca(sk);
 
-	ca->loss_cwnd = tp->snd_cwnd;
 	/* Multiplicative decrease */
 	return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U);
 }
 
-static u32 tcp_illinois_cwnd_undo(struct sock *sk)
-{
-	const struct illinois *ca = inet_csk_ca(sk);
-
-	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
-}
-
 /* Extract info for Tcp socket info provided via netlink. */
 static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr,
 				union tcp_cc_info *info)
@@ -336,7 +327,7 @@ static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr,
 static struct tcp_congestion_ops tcp_illinois __read_mostly = {
 	.init		= tcp_illinois_init,
 	.ssthresh	= tcp_illinois_ssthresh,
-	.undo_cwnd	= tcp_illinois_cwnd_undo,
+	.undo_cwnd	= tcp_reno_undo_cwnd,
 	.cong_avoid	= tcp_illinois_cong_avoid,
 	.set_state	= tcp_illinois_state,
 	.get_info	= tcp_illinois_info,
diff --git a/net/ipv4/tcp_nv.c b/net/ipv4/tcp_nv.c
index 6d650ed3cb59..1ff73982e28c 100644
--- a/net/ipv4/tcp_nv.c
+++ b/net/ipv4/tcp_nv.c
@@ -86,7 +86,6 @@ struct tcpnv {
 			    * < 0 => less than 1 packet/RTT */
 	u8  available8;
 	u16 available16;
-	u32 loss_cwnd;	/* cwnd at last loss */
 	u8  nv_allow_cwnd_growth:1, /* whether cwnd can grow */
 	    nv_reset:1,		    /* whether to reset values */
 	    nv_catchup:1;	    /* whether we are growing because
@@ -121,7 +120,6 @@ static inline void tcpnv_reset(struct tcpnv *ca, struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	ca->nv_reset = 0;
-	ca->loss_cwnd = 0;
 	ca->nv_no_cong_cnt = 0;
 	ca->nv_rtt_cnt = 0;
 	ca->nv_last_rtt = 0;
@@ -177,19 +175,10 @@ static void tcpnv_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 static u32 tcpnv_recalc_ssthresh(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
-	struct tcpnv *ca = inet_csk_ca(sk);
 
-	ca->loss_cwnd = tp->snd_cwnd;
 	return max((tp->snd_cwnd * nv_loss_dec_factor) >> 10, 2U);
 }
 
-static u32 tcpnv_undo_cwnd(struct sock *sk)
-{
-	struct tcpnv *ca = inet_csk_ca(sk);
-
-	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
-}
-
 static void tcpnv_state(struct sock *sk, u8 new_state)
 {
 	struct tcpnv *ca = inet_csk_ca(sk);
@@ -446,7 +435,7 @@ static struct tcp_congestion_ops tcpnv __read_mostly = {
 	.ssthresh	= tcpnv_recalc_ssthresh,
 	.cong_avoid	= tcpnv_cong_avoid,
 	.set_state	= tcpnv_state,
-	.undo_cwnd	= tcpnv_undo_cwnd,
+	.undo_cwnd	= tcp_reno_undo_cwnd,
 	.pkts_acked	= tcpnv_acked,
 	.get_info	= tcpnv_get_info,
 
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index f2123075ce6e..addc122f8818 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -15,10 +15,6 @@
 #define TCP_SCALABLE_AI_CNT	50U
 #define TCP_SCALABLE_MD_SCALE	3
 
-struct scalable {
-	u32 loss_cwnd;
-};
-
 static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -36,23 +32,13 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 static u32 tcp_scalable_ssthresh(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
-	struct scalable *ca = inet_csk_ca(sk);
-
-	ca->loss_cwnd = tp->snd_cwnd;
 
 	return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U);
 }
 
-static u32 tcp_scalable_cwnd_undo(struct sock *sk)
-{
-	const struct scalable *ca = inet_csk_ca(sk);
-
-	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
-}
-
 static struct tcp_congestion_ops tcp_scalable __read_mostly = {
 	.ssthresh	= tcp_scalable_ssthresh,
-	.undo_cwnd	= tcp_scalable_cwnd_undo,
+	.undo_cwnd	= tcp_reno_undo_cwnd,
 	.cong_avoid	= tcp_scalable_cong_avoid,
 
 	.owner		= THIS_MODULE,
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index 76005d4b8dfc..6fcf482d611b 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -30,7 +30,6 @@ struct veno {
 	u32 basertt;	/* the min of all Veno rtt measurements seen (in usec) */
 	u32 inc;	/* decide whether to increase cwnd */
 	u32 diff;	/* calculate the diff rate */
-	u32 loss_cwnd;	/* cwnd when loss occured */
 };
 
 /* There are several situations when we must "re-start" Veno:
@@ -194,7 +193,6 @@ static u32 tcp_veno_ssthresh(struct sock *sk)
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct veno *veno = inet_csk_ca(sk);
 
-	veno->loss_cwnd = tp->snd_cwnd;
 	if (veno->diff < beta)
 		/* in "non-congestive state", cut cwnd by 1/5 */
 		return max(tp->snd_cwnd * 4 / 5, 2U);
@@ -203,17 +201,10 @@ static u32 tcp_veno_ssthresh(struct sock *sk)
 		return max(tp->snd_cwnd >> 1U, 2U);
 }
 
-static u32 tcp_veno_cwnd_undo(struct sock *sk)
-{
-	const struct veno *veno = inet_csk_ca(sk);
-
-	return max(tcp_sk(sk)->snd_cwnd, veno->loss_cwnd);
-}
-
 static struct tcp_congestion_ops tcp_veno __read_mostly = {
 	.init		= tcp_veno_init,
 	.ssthresh	= tcp_veno_ssthresh,
-	.undo_cwnd	= tcp_veno_cwnd_undo,
+	.undo_cwnd	= tcp_reno_undo_cwnd,
 	.cong_avoid	= tcp_veno_cong_avoid,
 	.pkts_acked	= tcp_veno_pkts_acked,
 	.set_state	= tcp_veno_state,
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index e6ff99c4bd3b..96e829b2e2fc 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -37,7 +37,6 @@ struct yeah {
 	u32 fast_count;
 
 	u32 pkts_acked;
-	u32 loss_cwnd;
 };
 
 static void tcp_yeah_init(struct sock *sk)
@@ -220,22 +219,14 @@ static u32 tcp_yeah_ssthresh(struct sock *sk)
 
 	yeah->fast_count = 0;
 	yeah->reno_count = max(yeah->reno_count>>1, 2U);
-	yeah->loss_cwnd = tp->snd_cwnd;
 
 	return max_t(int, tp->snd_cwnd - reduction, 2);
 }
 
-static u32 tcp_yeah_cwnd_undo(struct sock *sk)
-{
-	const struct yeah *yeah = inet_csk_ca(sk);
-
-	return max(tcp_sk(sk)->snd_cwnd, yeah->loss_cwnd);
-}
-
 static struct tcp_congestion_ops tcp_yeah __read_mostly = {
 	.init		= tcp_yeah_init,
 	.ssthresh	= tcp_yeah_ssthresh,
-	.undo_cwnd	= tcp_yeah_cwnd_undo,
+	.undo_cwnd	= tcp_reno_undo_cwnd,
 	.cong_avoid	= tcp_yeah_cong_avoid,
 	.set_state	= tcp_vegas_state,
 	.cwnd_event	= tcp_vegas_cwnd_event,