 include/net/tcp.h        | 8
 net/ipv4/tcp_bic.c       | 5
 net/ipv4/tcp_cong.c      | 4
 net/ipv4/tcp_cubic.c     | 5
 net/ipv4/tcp_highspeed.c | 4
 net/ipv4/tcp_htcp.c      | 4
 net/ipv4/tcp_hybla.c     | 7
 net/ipv4/tcp_illinois.c  | 5
 net/ipv4/tcp_input.c     | 9
 net/ipv4/tcp_lp.c        | 5
 net/ipv4/tcp_output.c    | 2
 net/ipv4/tcp_scalable.c  | 5
 net/ipv4/tcp_vegas.c     | 7
 net/ipv4/tcp_veno.c      | 9
 net/ipv4/tcp_yeah.c      | 5
 15 files changed, 36 insertions(+), 48 deletions(-)
diff --git a/include/net/tcp.h b/include/net/tcp.h
index a9fe7bc4f4bb..3c9418456640 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -796,7 +796,7 @@ struct tcp_congestion_ops {
 	/* return slow start threshold (required) */
 	u32 (*ssthresh)(struct sock *sk);
 	/* do new cwnd calculation (required) */
-	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked, u32 in_flight);
+	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
 	/* call before changing ca_state (optional) */
 	void (*set_state)(struct sock *sk, u8 new_state);
 	/* call when cwnd event occurs (optional) */
@@ -828,7 +828,7 @@ void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);
 
 extern struct tcp_congestion_ops tcp_init_congestion_ops;
 u32 tcp_reno_ssthresh(struct sock *sk);
-void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight);
+void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
 extern struct tcp_congestion_ops tcp_reno;
 
 static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
@@ -986,10 +986,8 @@ static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
  * risks 100% overshoot. The advantage is that we discourage application to
  * either send more filler packets or data to artificially blow up the cwnd
  * usage, and allow application-limited process to probe bw more aggressively.
- *
- * TODO: remove in_flight once we can fix all callers, and their callers...
  */
-static inline bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
+static inline bool tcp_is_cwnd_limited(const struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index 821846fb0a7e..d5de69bc04f5 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -140,13 +140,12 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 	ca->cnt = 1;
 }
 
-static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-			      u32 in_flight)
+static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
 
-	if (!tcp_is_cwnd_limited(sk, in_flight))
+	if (!tcp_is_cwnd_limited(sk))
 		return;
 
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index a93b41ba05ff..7b09d8b49fa5 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -317,11 +317,11 @@ EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
 /* This is Jacobson's slow start and congestion avoidance.
  * SIGCOMM '88, p. 328.
  */
-void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
+void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	if (!tcp_is_cwnd_limited(sk, in_flight))
+	if (!tcp_is_cwnd_limited(sk))
 		return;
 
 	/* In "safe" area, increase. */
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 8bf224516ba2..ba2a4f3a6a1e 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -304,13 +304,12 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 	ca->cnt = 1;
 }
 
-static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-			      u32 in_flight)
+static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
 
-	if (!tcp_is_cwnd_limited(sk, in_flight))
+	if (!tcp_is_cwnd_limited(sk))
 		return;
 
 	if (tp->snd_cwnd <= tp->snd_ssthresh) {
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index 8b9e7bad77c0..1c4908280d92 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -109,12 +109,12 @@ static void hstcp_init(struct sock *sk)
 	tp->snd_cwnd_clamp = min_t(u32, tp->snd_cwnd_clamp, 0xffffffff/128);
 }
 
-static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
+static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct hstcp *ca = inet_csk_ca(sk);
 
-	if (!tcp_is_cwnd_limited(sk, in_flight))
+	if (!tcp_is_cwnd_limited(sk))
 		return;
 
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index 4a194acfd923..031361311a8b 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -227,12 +227,12 @@ static u32 htcp_recalc_ssthresh(struct sock *sk)
 	return max((tp->snd_cwnd * ca->beta) >> 7, 2U);
 }
 
-static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
+static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct htcp *ca = inet_csk_ca(sk);
 
-	if (!tcp_is_cwnd_limited(sk, in_flight))
+	if (!tcp_is_cwnd_limited(sk))
 		return;
 
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index a15a799bf768..d8f8f05a4951 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -87,8 +87,7 @@ static inline u32 hybla_fraction(u32 odds)
  * o Give cwnd a new value based on the model proposed
  * o remember increments <1
  */
-static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-			     u32 in_flight)
+static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct hybla *ca = inet_csk_ca(sk);
@@ -101,11 +100,11 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked,
 		ca->minrtt_us = tp->srtt_us;
 	}
 
-	if (!tcp_is_cwnd_limited(sk, in_flight))
+	if (!tcp_is_cwnd_limited(sk))
 		return;
 
 	if (!ca->hybla_en) {
-		tcp_reno_cong_avoid(sk, ack, acked, in_flight);
+		tcp_reno_cong_avoid(sk, ack, acked);
 		return;
 	}
 
diff --git a/net/ipv4/tcp_illinois.c b/net/ipv4/tcp_illinois.c
index 863d105e3015..5999b3972e64 100644
--- a/net/ipv4/tcp_illinois.c
+++ b/net/ipv4/tcp_illinois.c
@@ -255,8 +255,7 @@ static void tcp_illinois_state(struct sock *sk, u8 new_state)
 /*
  * Increase window in response to successful acknowledgment.
  */
-static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-				    u32 in_flight)
+static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct illinois *ca = inet_csk_ca(sk);
@@ -265,7 +264,7 @@ static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked,
 		update_params(sk);
 
 	/* RFC2861 only increase cwnd if fully utilized */
-	if (!tcp_is_cwnd_limited(sk, in_flight))
+	if (!tcp_is_cwnd_limited(sk))
 		return;
 
 	/* In slow start */
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 6efed134ab63..350b2072f0ab 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2938,10 +2938,11 @@ static void tcp_synack_rtt_meas(struct sock *sk, const u32 synack_stamp)
 	tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, seq_rtt_us, -1L);
 }
 
-static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked, u32 in_flight)
+static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
-	icsk->icsk_ca_ops->cong_avoid(sk, ack, acked, in_flight);
+
+	icsk->icsk_ca_ops->cong_avoid(sk, ack, acked);
 	tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp;
 }
 
@@ -3364,7 +3365,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	u32 ack_seq = TCP_SKB_CB(skb)->seq;
 	u32 ack = TCP_SKB_CB(skb)->ack_seq;
 	bool is_dupack = false;
-	u32 prior_in_flight;
 	u32 prior_fackets;
 	int prior_packets = tp->packets_out;
 	const int prior_unsacked = tp->packets_out - tp->sacked_out;
@@ -3397,7 +3397,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 	flag |= FLAG_SND_UNA_ADVANCED;
 
 	prior_fackets = tp->fackets_out;
-	prior_in_flight = tcp_packets_in_flight(tp);
 
 	/* ts_recent update must be made after we are sure that the packet
 	 * is in window.
@@ -3452,7 +3451,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
 
 	/* Advance cwnd if state allows */
 	if (tcp_may_raise_cwnd(sk, flag))
-		tcp_cong_avoid(sk, ack, acked, prior_in_flight);
+		tcp_cong_avoid(sk, ack, acked);
 
 	if (tcp_ack_is_dubious(sk, flag)) {
 		is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
diff --git a/net/ipv4/tcp_lp.c b/net/ipv4/tcp_lp.c
index c9aecae31327..1e70fa8fa793 100644
--- a/net/ipv4/tcp_lp.c
+++ b/net/ipv4/tcp_lp.c
@@ -115,13 +115,12 @@ static void tcp_lp_init(struct sock *sk)
  * Will only call newReno CA when away from inference.
  * From TCP-LP's paper, this will be handled in additive increasement.
  */
-static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-			      u32 in_flight)
+static void tcp_lp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
 	struct lp *lp = inet_csk_ca(sk);
 
 	if (!(lp->flag & LP_WITHIN_INF))
-		tcp_reno_cong_avoid(sk, ack, acked, in_flight);
+		tcp_reno_cong_avoid(sk, ack, acked);
 }
 
 /**
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index f9181a133462..89277a34f2c9 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1408,7 +1408,7 @@ static void tcp_cwnd_validate(struct sock *sk, u32 unsent_segs)
 
 	tp->lsnd_pending = tp->packets_out + unsent_segs;
 
-	if (tcp_is_cwnd_limited(sk, 0)) {
+	if (tcp_is_cwnd_limited(sk)) {
 		/* Network is feed fully. */
 		tp->snd_cwnd_used = 0;
 		tp->snd_cwnd_stamp = tcp_time_stamp;
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 0ac50836da4d..8250949b8853 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -15,12 +15,11 @@
 #define TCP_SCALABLE_AI_CNT	50U
 #define TCP_SCALABLE_MD_SCALE	3
 
-static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-				    u32 in_flight)
+static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	if (!tcp_is_cwnd_limited(sk, in_flight))
+	if (!tcp_is_cwnd_limited(sk))
 		return;
 
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 48539fff6357..9a5e05f27f4f 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -163,14 +163,13 @@ static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
 	return min(tp->snd_ssthresh, tp->snd_cwnd-1);
 }
 
-static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-				 u32 in_flight)
+static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct vegas *vegas = inet_csk_ca(sk);
 
 	if (!vegas->doing_vegas_now) {
-		tcp_reno_cong_avoid(sk, ack, acked, in_flight);
+		tcp_reno_cong_avoid(sk, ack, acked);
 		return;
 	}
 
@@ -195,7 +194,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked,
 		/* We don't have enough RTT samples to do the Vegas
 		 * calculation, so we'll behave like Reno.
 		 */
-		tcp_reno_cong_avoid(sk, ack, acked, in_flight);
+		tcp_reno_cong_avoid(sk, ack, acked);
 	} else {
 		u32 rtt, diff;
 		u64 target_cwnd;
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index 1b8e28fcd7e1..27b9825753d1 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -114,19 +114,18 @@ static void tcp_veno_cwnd_event(struct sock *sk, enum tcp_ca_event event)
 		tcp_veno_init(sk);
 }
 
-static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-				u32 in_flight)
+static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct veno *veno = inet_csk_ca(sk);
 
 	if (!veno->doing_veno_now) {
-		tcp_reno_cong_avoid(sk, ack, acked, in_flight);
+		tcp_reno_cong_avoid(sk, ack, acked);
 		return;
 	}
 
 	/* limited by applications */
-	if (!tcp_is_cwnd_limited(sk, in_flight))
+	if (!tcp_is_cwnd_limited(sk))
 		return;
 
 	/* We do the Veno calculations only if we got enough rtt samples */
@@ -134,7 +133,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked,
 		/* We don't have enough rtt samples to do the Veno
 		 * calculation, so we'll behave like Reno.
 		 */
-		tcp_reno_cong_avoid(sk, ack, acked, in_flight);
+		tcp_reno_cong_avoid(sk, ack, acked);
 	} else {
 		u64 target_cwnd;
 		u32 rtt;
diff --git a/net/ipv4/tcp_yeah.c b/net/ipv4/tcp_yeah.c
index 5ede0e727945..599b79b8eac0 100644
--- a/net/ipv4/tcp_yeah.c
+++ b/net/ipv4/tcp_yeah.c
@@ -69,13 +69,12 @@ static void tcp_yeah_pkts_acked(struct sock *sk, u32 pkts_acked, s32 rtt_us)
 	tcp_vegas_pkts_acked(sk, pkts_acked, rtt_us);
 }
 
-static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked,
-				u32 in_flight)
+static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct yeah *yeah = inet_csk_ca(sk);
 
-	if (!tcp_is_cwnd_limited(sk, in_flight))
+	if (!tcp_is_cwnd_limited(sk))
 		return;
 
 	if (tp->snd_cwnd <= tp->snd_ssthresh)
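With the parameter gone, tcp_is_cwnd_limited() must answer the question entirely from state the output path records on the socket, which is what the comment block in the tcp.h hunk above alludes to. A sketch of the shape of the post-patch helper, assuming the measurement-based fields (max_packets_out, is_cwnd_limited) introduced by the companion cwnd-validation patch in this series; this body is not part of the diff above:

	static inline bool tcp_is_cwnd_limited(const struct sock *sk)
	{
		const struct tcp_sock *tp = tcp_sk(sk);

		/* In slow start, keep growing until cwnd covers twice the
		 * largest recent flight, tolerating the overshoot described
		 * in the comment above.
		 */
		if (tp->snd_cwnd <= tp->snd_ssthresh)
			return tp->snd_cwnd < 2 * tp->max_packets_out;

		/* Otherwise, trust the flag recorded by tcp_output.c. */
		return tp->is_cwnd_limited;
	}

Callers such as tcp_cwnd_validate() and the congestion-avoidance hooks then ask the question they mean — is this socket cwnd-limited? — instead of threading a prior_in_flight snapshot through the ACK path.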