author		Yuchung Cheng <ycheng@google.com>	2013-10-31 14:07:31 -0400
committer	David S. Miller <davem@davemloft.net>	2013-11-04 19:57:59 -0500
commit		9f9843a751d0a2057f9f3d313886e7e5e6ebaac9 (patch)
tree		a89df5cc0c5f5280b2cfffba7f6933e4db20736f
parent		0d41cca490c274352211efac50e9598d39a9dc80 (diff)
tcp: properly handle stretch acks in slow start
Slow start now increases cwnd by 1 if an ACK acknowledges some packets,
regardless of the number of packets. Consequently slow start performance
is highly dependent on the degree of the stretch ACKs caused by
receiver or network ACK compression mechanisms (e.g., delayed-ACK,
GRO, etc). But the slow start algorithm is meant to double the number
of packets sent every RTT, so it should process a stretch ACK of degree
N as if N ACKs of degree 1 were received, then exit when cwnd exceeds
ssthresh. A follow-up patch will use the remainder of the N (if greater
than 1) to adjust cwnd in the congestion avoidance phase.

In addition this patch retires the experimental limited slow start
(LSS) feature. LSS has multiple drawbacks but questionable benefit. The
fractional cwnd increase in LSS requires a loop in slow start even
though it's rarely used. Configuring such an increase step via a global
sysctl for different BDPs seems hard. Finally, and most importantly,
the slow start overshoot concern is now better covered by Hybrid slow
start (hystart), which is enabled by default.

Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
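The arithmetic at the heart of the change is small enough to check by hand. Below is a minimal userspace sketch of the new slow start step; the helper name and the sample values are illustrative, not the kernel API:

/* Minimal userspace sketch of the stretch-ACK slow start arithmetic
 * this patch introduces (assumed helper name, illustrative values).
 * A stretch ACK of degree N grows cwnd as if N ACKs of degree 1
 * arrived, capped at ssthresh + 1; leftover acks are returned.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t u32;

static u32 slow_start(u32 *cwnd, u32 ssthresh, u32 clamp, u32 acked)
{
	u32 next = *cwnd + acked;

	if (next > ssthresh)
		next = ssthresh + 1;
	acked -= next - *cwnd;		/* acks consumed by slow start */
	*cwnd = next < clamp ? next : clamp;
	return acked;			/* leftover for congestion avoidance */
}

int main(void)
{
	u32 cwnd = 10;
	/* A stretch ACK of degree 8 near ssthresh = 16:
	 * cwnd grows 10 -> 17 (ssthresh + 1), 1 ack is left over. */
	u32 left = slow_start(&cwnd, 16, 1000, 8);

	printf("cwnd=%u leftover=%u\n", cwnd, left);	/* cwnd=17 leftover=1 */
	return 0;
}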
-rw-r--r--	Documentation/networking/ip-sysctl.txt	11
-rw-r--r--	include/net/tcp.h			7
-rw-r--r--	net/ipv4/sysctl_net_ipv4.c		7
-rw-r--r--	net/ipv4/tcp_bic.c			5
-rw-r--r--	net/ipv4/tcp_cong.c			47
-rw-r--r--	net/ipv4/tcp_cubic.c			5
-rw-r--r--	net/ipv4/tcp_highspeed.c		4
-rw-r--r--	net/ipv4/tcp_htcp.c			4
-rw-r--r--	net/ipv4/tcp_hybla.c			5
-rw-r--r--	net/ipv4/tcp_illinois.c			5
-rw-r--r--	net/ipv4/tcp_input.c			6
-rw-r--r--	net/ipv4/tcp_lp.c			5
-rw-r--r--	net/ipv4/tcp_scalable.c			5
-rw-r--r--	net/ipv4/tcp_vegas.c			11
-rw-r--r--	net/ipv4/tcp_veno.c			9
-rw-r--r--	net/ipv4/tcp_yeah.c			5
16 files changed, 59 insertions(+), 82 deletions(-)
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 6c0098359ca6..8b8a05787641 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -267,17 +267,6 @@ tcp_max_orphans - INTEGER
 	more aggressively. Let me to remind again: each orphan eats
 	up to ~64K of unswappable memory.
 
-tcp_max_ssthresh - INTEGER
-	Limited Slow-Start for TCP with large congestion windows (cwnd) defined in
-	RFC3742. Limited slow-start is a mechanism to limit growth of the cwnd
-	on the region where cwnd is larger than tcp_max_ssthresh. TCP increases cwnd
-	by at most tcp_max_ssthresh segments, and by at least tcp_max_ssthresh/2
-	segments per RTT when the cwnd is above tcp_max_ssthresh.
-	If TCP connection increased cwnd to thousands (or tens of thousands) segments,
-	and thousands of packets were being dropped during slow-start, you can set
-	tcp_max_ssthresh to improve performance for new TCP connection.
-	Default: 0 (off)
-
 tcp_max_syn_backlog - INTEGER
 	Maximal number of remembered connection requests, which have not
 	received an acknowledgment from connecting client.
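As a worked example of the retired behavior, here is a hedged userspace sketch; the values are illustrative and the helper mirrors the cnt computation removed from net/ipv4/tcp_cong.c further down:

/* Per-ACK cwnd_cnt credit under the retired limited slow start. Each
 * full cwnd of accumulated credit grew cwnd by one segment, so with
 * cwnd well above tcp_max_ssthresh growth converged to roughly
 * max_ssthresh/2 segments per RTT instead of doubling.
 */
#include <stdio.h>

static unsigned int lss_cnt(unsigned int cwnd, unsigned int max_ssthresh)
{
	if (max_ssthresh > 0 && cwnd > max_ssthresh)
		return max_ssthresh >> 1;	/* limited slow start */
	return cwnd;				/* exponential increase */
}

int main(void)
{
	/* illustrative values: cwnd 1000 with tcp_max_ssthresh 100 */
	printf("per-ACK credit: %u\n", lss_cnt(1000, 100));	/* 50 */
	printf("per-ACK credit: %u\n", lss_cnt(1000, 0));	/* 1000 */
	return 0;
}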
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 2d7b4bdc972f..70e55d200610 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -275,7 +275,6 @@ extern int sysctl_tcp_mtu_probing;
 extern int sysctl_tcp_base_mss;
 extern int sysctl_tcp_workaround_signed_windows;
 extern int sysctl_tcp_slow_start_after_idle;
-extern int sysctl_tcp_max_ssthresh;
 extern int sysctl_tcp_thin_linear_timeouts;
 extern int sysctl_tcp_thin_dupack;
 extern int sysctl_tcp_early_retrans;
@@ -797,7 +796,7 @@ struct tcp_congestion_ops {
 	/* lower bound for congestion window (optional) */
 	u32 (*min_cwnd)(const struct sock *sk);
 	/* do new cwnd calculation (required) */
-	void (*cong_avoid)(struct sock *sk, u32 ack, u32 in_flight);
+	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked, u32 in_flight);
 	/* call before changing ca_state (optional) */
 	void (*set_state)(struct sock *sk, u8 new_state);
 	/* call when cwnd event occurs (optional) */
@@ -824,12 +823,12 @@ void tcp_get_available_congestion_control(char *buf, size_t len);
 void tcp_get_allowed_congestion_control(char *buf, size_t len);
 int tcp_set_allowed_congestion_control(char *allowed);
 int tcp_set_congestion_control(struct sock *sk, const char *name);
-void tcp_slow_start(struct tcp_sock *tp);
+int tcp_slow_start(struct tcp_sock *tp, u32 acked);
 void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
 
 extern struct tcp_congestion_ops tcp_init_congestion_ops;
 u32 tcp_reno_ssthresh(struct sock *sk);
-void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight);
+void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight);
 u32 tcp_reno_min_cwnd(const struct sock *sk);
 extern struct tcp_congestion_ops tcp_reno;
 
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index d5b1390eebbe..3d69ec8dac57 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -701,13 +701,6 @@ static struct ctl_table ipv4_table[] = {
 		.proc_handler	= proc_allowed_congestion_control,
 	},
 	{
-		.procname	= "tcp_max_ssthresh",
-		.data		= &sysctl_tcp_max_ssthresh,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-	{
 		.procname	= "tcp_thin_linear_timeouts",
 		.data		= &sysctl_tcp_thin_linear_timeouts,
 		.maxlen		= sizeof(int),
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index f45e1c242440..821846fb0a7e 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -140,7 +140,8 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 		ca->cnt = 1;
 }
 
-static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked,
+			      u32 in_flight)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
@@ -149,7 +150,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 		return;
 
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
-		tcp_slow_start(tp);
+		tcp_slow_start(tp, acked);
 	else {
 		bictcp_update(ca, tp->snd_cwnd);
 		tcp_cong_avoid_ai(tp, ca->cnt);
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 019c2389a341..ad37bf18ae4b 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -15,8 +15,6 @@
 #include <linux/gfp.h>
 #include <net/tcp.h>
 
-int sysctl_tcp_max_ssthresh = 0;
-
 static DEFINE_SPINLOCK(tcp_cong_list_lock);
 static LIST_HEAD(tcp_cong_list);
 
@@ -299,35 +297,24 @@ bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
 }
 EXPORT_SYMBOL_GPL(tcp_is_cwnd_limited);
 
-/*
- * Slow start is used when congestion window is less than slow start
- * threshold. This version implements the basic RFC2581 version
- * and optionally supports:
- *	RFC3742 Limited Slow Start - growth limited to max_ssthresh
- *	RFC3465 Appropriate Byte Counting - growth limited by bytes acknowledged
+/* Slow start is used when congestion window is no greater than the slow start
+ * threshold. We base on RFC2581 and also handle stretch ACKs properly.
+ * We do not implement RFC3465 Appropriate Byte Counting (ABC) per se but
+ * something better;) a packet is only considered (s)acked in its entirety to
+ * defend the ACK attacks described in the RFC. Slow start processes a stretch
+ * ACK of degree N as if N acks of degree 1 are received back to back except
+ * ABC caps N to 2. Slow start exits when cwnd grows over ssthresh and
+ * returns the leftover acks to adjust cwnd in congestion avoidance mode.
  */
-void tcp_slow_start(struct tcp_sock *tp)
+int tcp_slow_start(struct tcp_sock *tp, u32 acked)
 {
-	int cnt; /* increase in packets */
-	unsigned int delta = 0;
-	u32 snd_cwnd = tp->snd_cwnd;
-
-	if (unlikely(!snd_cwnd)) {
-		pr_err_once("snd_cwnd is nul, please report this bug.\n");
-		snd_cwnd = 1U;
-	}
+	u32 cwnd = tp->snd_cwnd + acked;
 
-	if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
-		cnt = sysctl_tcp_max_ssthresh >> 1;	/* limited slow start */
-	else
-		cnt = snd_cwnd;				/* exponential increase */
-
-	tp->snd_cwnd_cnt += cnt;
-	while (tp->snd_cwnd_cnt >= snd_cwnd) {
-		tp->snd_cwnd_cnt -= snd_cwnd;
-		delta++;
-	}
-	tp->snd_cwnd = min(snd_cwnd + delta, tp->snd_cwnd_clamp);
+	if (cwnd > tp->snd_ssthresh)
+		cwnd = tp->snd_ssthresh + 1;
+	acked -= cwnd - tp->snd_cwnd;
+	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
+	return acked;
 }
 EXPORT_SYMBOL_GPL(tcp_slow_start);
 
333 320
@@ -351,7 +338,7 @@ EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
 /* This is Jacobson's slow start and congestion avoidance.
  * SIGCOMM '88, p. 328.
  */
-void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -360,7 +347,7 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 
 	/* In "safe" area, increase. */
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
-		tcp_slow_start(tp);
+		tcp_slow_start(tp, acked);
 	/* In dangerous area, increase slowly. */
 	else
 		tcp_cong_avoid_ai(tp, tp->snd_cwnd);
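Adapting a congestion control module to the widened hook is mechanical. Here is a hedged sketch of a minimal Reno-style cong_avoid against the new signature; "my_cong_avoid" is hypothetical, while the helpers are the ones this patch exports:

#include <net/tcp.h>

/* Hedged sketch, not an in-tree module. As in tcp_reno_cong_avoid(),
 * the leftover acks returned by tcp_slow_start() are dropped here; the
 * follow-up patch mentioned in the changelog is expected to feed them
 * into congestion avoidance.
 */
static void my_cong_avoid(struct sock *sk, u32 ack, u32 acked,
			  u32 in_flight)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tcp_is_cwnd_limited(sk, in_flight))
		return;

	if (tp->snd_cwnd <= tp->snd_ssthresh)
		tcp_slow_start(tp, acked);	/* stretch-ACK aware slow start */
	else
		tcp_cong_avoid_ai(tp, tp->snd_cwnd);	/* additive increase */
}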
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index b6ae92a51f58..828e4c3ffbaf 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -304,7 +304,8 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 		ca->cnt = 1;
 }
 
-static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked,
+			      u32 in_flight)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
@@ -315,7 +316,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 	if (tp->snd_cwnd <= tp->snd_ssthresh) {
 		if (hystart && after(ack, ca->end_seq))
 			bictcp_hystart_reset(sk);
-		tcp_slow_start(tp);
+		tcp_slow_start(tp, acked);
 	} else {
 		bictcp_update(ca, tp->snd_cwnd);
 		tcp_cong_avoid_ai(tp, ca->cnt);
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index 30f27f6b3655..8ed9305dfdf4 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -109,7 +109,7 @@ static void hstcp_init(struct sock *sk)
 	tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
 }
 
-static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 in_flight)
+static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct hstcp *ca = inet_csk_ca(sk);
@@ -118,7 +118,7 @@ static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 in_flight)
 		return;
 
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
-		tcp_slow_start(tp);
+		tcp_slow_start(tp, acked);
 	else {
 		/* Update AIMD parameters.
 		 *
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index c1a8175361e8..4a194acfd923 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -227,7 +227,7 @@ static u32 htcp_recalc_ssthresh(struct sock *sk)
 	return max((tp->snd_cwnd * ca->beta) >> 7, 2U);
 }
 
-static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct htcp *ca = inet_csk_ca(sk);
@@ -236,7 +236,7 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 		return;
 
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
-		tcp_slow_start(tp);
+		tcp_slow_start(tp, acked);
 	else {
 		/* In dangerous area, increase slowly.
 		 * In theory this is tp->snd_cwnd += alpha / tp->snd_cwnd
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index 57bdd17dff4d..478fe82611bf 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -85,7 +85,8 @@ static inline u32 hybla_fraction(u32 odds)
  *     o Give cwnd a new value based on the model proposed
  *     o remember increments <1
  */
-static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked,
+			     u32 in_flight)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct hybla *ca = inet_csk_ca(sk);
@@ -102,7 +103,7 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 		return;
 
 	if (!ca->hybla_en) {
-		tcp_reno_cong_avoid(sk, ack, in_flight);
+		tcp_reno_cong_avoid(sk, ack, acked, in_flight);
 		return;
 	}
 
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index 834857f3c871..8a520996f3d2 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -256,7 +256,8 @@ static void tcp_illinois_state(struct sock *sk, u8 new_state)
 /*
  * Increase window in response to successful acknowledgment.
  */
-static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked,
+				    u32 in_flight)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct illinois *ca = inet_csk_ca(sk);
@@ -270,7 +271,7 @@ static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 
 	/* In slow start */
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
-		tcp_slow_start(tp);
+		tcp_slow_start(tp, acked);
 
 	else {
 		u32 delta;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 63095b218b4a..c53b7f35c51d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2934,10 +2934,10 @@ static void tcp_synack_rtt_meas(struct sock *sk, const u32 synack_stamp)
 	tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt, -1);
 }
 
-static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
-	icsk->icsk_ca_ops->cong_avoid(sk, ack, in_flight);
+	icsk->icsk_ca_ops->cong_avoid(sk, ack, acked, in_flight);
 	tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
 }
 
2943 2943
@@ -3454,7 +3454,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
 	/* Advance cwnd if state allows */
 	if (tcp_may_raise_cwnd(sk, flag))
-		tcp_cong_avoid(sk, ack, prior_in_flight);
+		tcp_cong_avoid(sk, ack, acked, prior_in_flight);
 
 	if (tcp_ack_is_dubious(sk, flag)) {
 		is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index 72f7218b03f5..991d62a2f9bb 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -115,12 +115,13 @@ static void tcp_lp_init(struct sock *sk)
  * Will only call newReno CA when away from inference.
  * From TCP-LP's paper, this will be handled in additive increasement.
  */
-static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 acked,
+			      u32 in_flight)
 {
 	struct lp *lp = inet_csk_ca(sk);
 
 	if (!(lp->flag & LP_WITHIN_INF))
-		tcp_reno_cong_avoid(sk, ack, in_flight);
+		tcp_reno_cong_avoid(sk, ack, acked, in_flight);
 }
 
 /**
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 8ce55b8aaec8..19ea6c2951f3 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -15,7 +15,8 @@
 #define TCP_SCALABLE_AI_CNT	50U
 #define TCP_SCALABLE_MD_SCALE	3
 
-static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked,
+				    u32 in_flight)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -23,7 +24,7 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 		return;
 
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
-		tcp_slow_start(tp);
+		tcp_slow_start(tp, acked);
 	else
 		tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT));
 }
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 80fa2bfd7ede..06cae62bf208 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -163,13 +163,14 @@ static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
 	return  min(tp->snd_ssthresh, tp->snd_cwnd-1);
 }
 
-static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked,
+				 u32 in_flight)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct vegas *vegas = inet_csk_ca(sk);
 
 	if (!vegas->doing_vegas_now) {
-		tcp_reno_cong_avoid(sk, ack, in_flight);
+		tcp_reno_cong_avoid(sk, ack, acked, in_flight);
 		return;
 	}
 
@@ -194,7 +195,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 		/* We don't have enough RTT samples to do the Vegas
 		 * calculation, so we'll behave like Reno.
 		 */
-		tcp_reno_cong_avoid(sk, ack, in_flight);
+		tcp_reno_cong_avoid(sk, ack, acked, in_flight);
 	} else {
 		u32 rtt, diff;
 		u64 target_cwnd;
@@ -243,7 +244,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 
 		} else if (tp->snd_cwnd <= tp->snd_ssthresh) {
 			/* Slow start.  */
-			tcp_slow_start(tp);
+			tcp_slow_start(tp, acked);
 		} else {
 			/* Congestion avoidance. */
 
@@ -283,7 +284,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 	}
 	/* Use normal slow start */
 	else if (tp->snd_cwnd <= tp->snd_ssthresh)
-		tcp_slow_start(tp);
+		tcp_slow_start(tp, acked);
 
 }
 
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index ac43cd747bce..326475a94865 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -114,13 +114,14 @@ static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event)
 		tcp_veno_init(sk);
 }
 
-static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked,
+				u32 in_flight)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct veno *veno = inet_csk_ca(sk);
 
 	if (!veno->doing_veno_now) {
-		tcp_reno_cong_avoid(sk, ack, in_flight);
+		tcp_reno_cong_avoid(sk, ack, acked, in_flight);
 		return;
 	}
 
@@ -133,7 +134,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 		/* We don't have enough rtt samples to do the Veno
 		 * calculation, so we'll behave like Reno.
 		 */
-		tcp_reno_cong_avoid(sk, ack, in_flight);
+		tcp_reno_cong_avoid(sk, ack, acked, in_flight);
 	} else {
 		u64 target_cwnd;
 		u32 rtt;
@@ -152,7 +153,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 
 	if (tp->snd_cwnd <= tp->snd_ssthresh) {
 		/* Slow start.  */
-		tcp_slow_start(tp);
+		tcp_slow_start(tp, acked);
 	} else {
 		/* Congestion avoidance. */
 		if (veno->diff < beta) {
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 05c3b6f0e8e1..a347a078ee07 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -69,7 +69,8 @@ static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us)
 	tcp_vegas_pkts_acked(sk, pkts_acked, rtt_us);
 }
 
-static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
+static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked,
+				u32 in_flight)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct yeah *yeah = inet_csk_ca(sk);
@@ -78,7 +79,7 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 in_flight)
 		return;
 
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
-		tcp_slow_start(tp);
+		tcp_slow_start(tp, acked);
 
 	else if (!yeah->doing_reno_now) {
 		/* Scalable */