about summary refs log tree commit diff stats
path: root/net/ipv4/tcp_cubic.c
diff options
context:
space:
mode:
authorEric Dumazet <edumazet@google.com>2014-12-04 19:13:23 -0500
committerDavid S. Miller <davem@davemloft.net>2014-12-09 14:58:23 -0500
commit6e3a8a937c2f86ee0b2d354808fc026a143b4518 (patch)
treef787c8960845386d7950fa04d704224d94c4cc38 /net/ipv4/tcp_cubic.c
parent5cccc702fd54e5c3dc5ee16a129770aae79ae60b (diff)
tcp_cubic: add SNMP counters to track how effective is Hystart
When deploying FQ pacing, one thing we noticed is that CUBIC Hystart triggers too soon. Having SNMP counters to have an idea of how often the various Hystart methods trigger is useful prior to any modifications. This patch adds SNMP counters tracking how many times "ack train" or "Delay" based Hystart triggers, and the cumulative sum of cwnd at the time Hystart decided to end SS (Slow Start). myhost:~# nstat -a | grep Hystart TcpExtTCPHystartTrainDetect 9 0.0 TcpExtTCPHystartTrainCwnd 20650 0.0 TcpExtTCPHystartDelayDetect 10 0.0 TcpExtTCPHystartDelayCwnd 360 0.0 -> Train detection was triggered 9 times, and average cwnd was 20650/9=2294; Delay detection was triggered 10 times and average cwnd was 360/10=36. Signed-off-by: Eric Dumazet <edumazet@google.com> Acked-by: Neal Cardwell <ncardwell@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_cubic.c')
-rw-r--r--net/ipv4/tcp_cubic.c31
1 file changed, 22 insertions, 9 deletions
diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c
index 20de0118c98e..c1d07c7ed03d 100644
--- a/net/ipv4/tcp_cubic.c
+++ b/net/ipv4/tcp_cubic.c
@@ -363,16 +363,28 @@ static void hystart_update(struct sock *sk, u32 delay)
363 struct tcp_sock *tp = tcp_sk(sk); 363 struct tcp_sock *tp = tcp_sk(sk);
364 struct bictcp *ca = inet_csk_ca(sk); 364 struct bictcp *ca = inet_csk_ca(sk);
365 365
366 if (!(ca->found & hystart_detect)) { 366 if (ca->found & hystart_detect)
367 return;
368
369 if (hystart_detect & HYSTART_ACK_TRAIN) {
367 u32 now = bictcp_clock(); 370 u32 now = bictcp_clock();
368 371
369 /* first detection parameter - ack-train detection */ 372 /* first detection parameter - ack-train detection */
370 if ((s32)(now - ca->last_ack) <= hystart_ack_delta) { 373 if ((s32)(now - ca->last_ack) <= hystart_ack_delta) {
371 ca->last_ack = now; 374 ca->last_ack = now;
372 if ((s32)(now - ca->round_start) > ca->delay_min >> 4) 375 if ((s32)(now - ca->round_start) > ca->delay_min >> 4) {
373 ca->found |= HYSTART_ACK_TRAIN; 376 ca->found |= HYSTART_ACK_TRAIN;
377 NET_INC_STATS_BH(sock_net(sk),
378 LINUX_MIB_TCPHYSTARTTRAINDETECT);
379 NET_ADD_STATS_BH(sock_net(sk),
380 LINUX_MIB_TCPHYSTARTTRAINCWND,
381 tp->snd_cwnd);
382 tp->snd_ssthresh = tp->snd_cwnd;
383 }
374 } 384 }
385 }
375 386
387 if (hystart_detect & HYSTART_DELAY) {
376 /* obtain the minimum delay of more than sampling packets */ 388 /* obtain the minimum delay of more than sampling packets */
377 if (ca->sample_cnt < HYSTART_MIN_SAMPLES) { 389 if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
378 if (ca->curr_rtt == 0 || ca->curr_rtt > delay) 390 if (ca->curr_rtt == 0 || ca->curr_rtt > delay)
@@ -381,15 +393,16 @@ static void hystart_update(struct sock *sk, u32 delay)
381 ca->sample_cnt++; 393 ca->sample_cnt++;
382 } else { 394 } else {
383 if (ca->curr_rtt > ca->delay_min + 395 if (ca->curr_rtt > ca->delay_min +
384 HYSTART_DELAY_THRESH(ca->delay_min>>4)) 396 HYSTART_DELAY_THRESH(ca->delay_min>>4)) {
385 ca->found |= HYSTART_DELAY; 397 ca->found |= HYSTART_DELAY;
398 NET_INC_STATS_BH(sock_net(sk),
399 LINUX_MIB_TCPHYSTARTDELAYDETECT);
400 NET_ADD_STATS_BH(sock_net(sk),
401 LINUX_MIB_TCPHYSTARTDELAYCWND,
402 tp->snd_cwnd);
403 tp->snd_ssthresh = tp->snd_cwnd;
404 }
386 } 405 }
387 /*
388 * Either one of two conditions are met,
389 * we exit from slow start immediately.
390 */
391 if (ca->found & hystart_detect)
392 tp->snd_ssthresh = tp->snd_cwnd;
393 } 406 }
394} 407}
395 408