Diffstat (limited to 'net/ipv4/tcp_cubic.c')
-rw-r--r--	net/ipv4/tcp_cubic.c	46
1 file changed, 18 insertions(+), 28 deletions(-)
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 0c44bb67a671..485d7ea35f75 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -246,38 +246,12 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 		ca->cnt = 1;
 }
 
-
-/* Keep track of minimum rtt */
-static inline void measure_delay(struct sock *sk)
-{
-	const struct tcp_sock *tp = tcp_sk(sk);
-	struct bictcp *ca = inet_csk_ca(sk);
-	u32 delay;
-
-	/* No time stamp */
-	if (!(tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr) ||
-	    /* Discard delay samples right after fast recovery */
-	    (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
-		return;
-
-	delay = (tcp_time_stamp - tp->rx_opt.rcv_tsecr)<<3;
-	if (delay == 0)
-		delay = 1;
-
-	/* first time call or link delay decreases */
-	if (ca->delay_min == 0 || ca->delay_min > delay)
-		ca->delay_min = delay;
-}
-
 static void bictcp_cong_avoid(struct sock *sk, u32 ack,
 			      u32 in_flight, int data_acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bictcp *ca = inet_csk_ca(sk);
 
-	if (data_acked)
-		measure_delay(sk);
-
 	if (!tcp_is_cwnd_limited(sk, in_flight))
 		return;
 
@@ -337,14 +311,30 @@ static void bictcp_state(struct sock *sk, u8 new_state)
 static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us)
 {
 	const struct inet_connection_sock *icsk = inet_csk(sk);
+	struct bictcp *ca = inet_csk_ca(sk);
+	u32 delay;
 
 	if (cnt > 0 && icsk->icsk_ca_state == TCP_CA_Open) {
-		struct bictcp *ca = inet_csk_ca(sk);
 		cnt -= ca->delayed_ack >> ACK_RATIO_SHIFT;
 		ca->delayed_ack += cnt;
 	}
-}
 
+	/* Some calls are for duplicates without timetamps */
+	if (rtt_us < 0)
+		return;
+
+	/* Discard delay samples right after fast recovery */
+	if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ)
+		return;
+
+	delay = usecs_to_jiffies(rtt_us) << 3;
+	if (delay == 0)
+		delay = 1;
+
+	/* first time call or link delay decreases */
+	if (ca->delay_min == 0 || ca->delay_min > delay)
+		ca->delay_min = delay;
+}
 
 static struct tcp_congestion_ops cubictcp = {
 	.init		= bictcp_init,
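
Note on the change: the patch drops the timestamp-based measure_delay() helper and instead derives the minimum-RTT sample in bictcp_acked() from the rtt_us argument supplied by the congestion-control framework. Below is a minimal user-space sketch of that sampling logic, for illustration only. It assumes HZ = 1000 and approximates the kernel's usecs_to_jiffies() with an integer divide; usecs_to_jiffies_sketch(), sample_rtt(), and the bare delay_min variable are hypothetical stand-ins for the kernel code, not its API.

	/* Illustrative sketch (not kernel code): track a minimum RTT the way
	 * the new bictcp_acked() path does. The << 3 keeps the value in
	 * 1/8-jiffy units so that very small RTTs do not collapse to zero.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define HZ 1000
	#define USEC_PER_SEC 1000000

	static uint32_t delay_min;	/* stand-in for ca->delay_min */

	static uint32_t usecs_to_jiffies_sketch(int32_t us)
	{
		/* rough approximation of usecs_to_jiffies() for HZ=1000 */
		return (uint32_t)us * HZ / USEC_PER_SEC;
	}

	static void sample_rtt(int32_t rtt_us)
	{
		uint32_t delay;

		if (rtt_us < 0)		/* duplicate ACKs carry no RTT sample */
			return;

		delay = usecs_to_jiffies_sketch(rtt_us) << 3;
		if (delay == 0)
			delay = 1;	/* never store a zero sample */

		/* first call or link delay decreased */
		if (delay_min == 0 || delay_min > delay)
			delay_min = delay;
	}

	int main(void)
	{
		int32_t samples[] = { 25000, 3000, -1, 1200 };
		unsigned i;

		for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
			sample_rtt(samples[i]);
		printf("delay_min = %u (1/8-jiffy units)\n", delay_min);
		return 0;
	}

With the sample inputs above, the negative entry is skipped and the 1200 us sample wins, so delay_min ends up at 8 (one jiffy in 1/8-jiffy units).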