author      Yuchung Cheng <ycheng@google.com>        2017-04-04 17:15:40 -0400
committer   David S. Miller <davem@davemloft.net>    2017-04-05 21:41:27 -0400
commit      2d2517ee314ef1de0517f74d06c2825fbf597ba3
tree        50f26c23c14c62b33bd884da0a6715f310b3c90c /net/ipv4/tcp_input.c
parent      ecde8f36f8a05a023b9d026e9094571aab421d36
tcp: fix reordering SNMP under-counting
Currently the reordering SNMP counters only increase if a connection sees
a higher reordering degree than it has previously seen. They ignore any
reordering event whose degree does not exceed that threshold, which starts
at the system default. This significantly under-counts the number of
reordering events and falsely conveys that reordering is rare on the
network.

This patch properly and faithfully records the number of reordering events
detected by the TCP stack, just like the comment says: "this exciting event
is worth to be remembered". Note that even so, TCP still under-estimates
the actual number of reordering events, because it requires TS options or
certain packet sequences to detect reordering at all (i.e. an ACK of a
never-retransmitted sequence while in recovery or disordered state).

Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
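The four MIB counters touched by this patch are exported to userspace via
/proc/net/netstat, on the "TcpExt:" lines, under the names TCPTSReorder,
TCPRenoReorder, TCPFACKReorder and TCPSACKReorder. As a quick way to watch
the effect of the fix, here is a small standalone sketch (not part of the
commit; it assumes only the usual layout of that file, where each header
line of counter names is followed by a matching line of values):

/*
 * Standalone sketch, not part of the patch: dumps the TcpExt
 * reordering counters this commit affects. Assumes the usual
 * /proc/net/netstat layout, where each "TcpExt:" line of counter
 * names is immediately followed by a "TcpExt:" line of values.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char hdr[8192], val[8192];
	FILE *f = fopen("/proc/net/netstat", "r");

	if (!f) {
		perror("/proc/net/netstat");
		return 1;
	}
	while (fgets(hdr, sizeof(hdr), f) && fgets(val, sizeof(val), f)) {
		if (strncmp(hdr, "TcpExt:", 7))
			continue;
		char *hs, *vs;
		char *h = strtok_r(hdr + 7, " \n", &hs);
		char *v = strtok_r(val + 7, " \n", &vs);

		while (h && v) {
			/* TCPTSReorder, TCPRenoReorder, TCPFACKReorder
			 * and TCPSACKReorder all match on "Reorder".
			 */
			if (strstr(h, "Reorder"))
				printf("%-16s %s\n", h, v);
			h = strtok_r(NULL, " \n", &hs);
			v = strtok_r(NULL, " \n", &vs);
		}
	}
	fclose(f);
	return 0;
}

The iproute2 nstat utility reads the same counters, so "nstat -az"
filtered for "Reorder" gives an equivalent view without any custom code.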
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--  net/ipv4/tcp_input.c  27
1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 97ac6776e47d..2c1f59386a7b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -878,22 +878,11 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 			       const int ts)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
-	if (metric > tp->reordering) {
-		int mib_idx;
+	int mib_idx;
 
+	if (metric > tp->reordering) {
 		tp->reordering = min(sysctl_tcp_max_reordering, metric);
 
-		/* This exciting event is worth to be remembered. 8) */
-		if (ts)
-			mib_idx = LINUX_MIB_TCPTSREORDER;
-		else if (tcp_is_reno(tp))
-			mib_idx = LINUX_MIB_TCPRENOREORDER;
-		else if (tcp_is_fack(tp))
-			mib_idx = LINUX_MIB_TCPFACKREORDER;
-		else
-			mib_idx = LINUX_MIB_TCPSACKREORDER;
-
-		NET_INC_STATS(sock_net(sk), mib_idx);
 #if FASTRETRANS_DEBUG > 1
 		pr_debug("Disorder%d %d %u f%u s%u rr%d\n",
 			 tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state,
@@ -906,6 +895,18 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 	}
 
 	tp->rack.reord = 1;
+
+	/* This exciting event is worth to be remembered. 8) */
+	if (ts)
+		mib_idx = LINUX_MIB_TCPTSREORDER;
+	else if (tcp_is_reno(tp))
+		mib_idx = LINUX_MIB_TCPRENOREORDER;
+	else if (tcp_is_fack(tp))
+		mib_idx = LINUX_MIB_TCPFACKREORDER;
+	else
+		mib_idx = LINUX_MIB_TCPSACKREORDER;
+
+	NET_INC_STATS(sock_net(sk), mib_idx);
 }
 
 /* This must be called before lost_out is incremented */
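For readability, here is roughly how tcp_update_reordering() ends up
looking with the patch applied, reassembled from the two hunks above.
This is a reading aid, not compilable on its own: context lines the diff
elides are summarized in comments rather than guessed at.

static void tcp_update_reordering(struct sock *sk, const int metric,
				  const int ts)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mib_idx;

	/* Raising the connection's reordering degree still happens
	 * only on a new maximum ...
	 */
	if (metric > tp->reordering) {
		tp->reordering = min(sysctl_tcp_max_reordering, metric);
		/* The FASTRETRANS_DEBUG pr_debug() and the remaining
		 * context lines elided by the diff are unchanged and
		 * omitted here.
		 */
	}

	tp->rack.reord = 1;

	/* ... but the SNMP count now runs on every detected
	 * reordering event, which is the point of the fix.
	 */
	/* This exciting event is worth to be remembered. 8) */
	if (ts)
		mib_idx = LINUX_MIB_TCPTSREORDER;
	else if (tcp_is_reno(tp))
		mib_idx = LINUX_MIB_TCPRENOREORDER;
	else if (tcp_is_fack(tp))
		mib_idx = LINUX_MIB_TCPFACKREORDER;
	else
		mib_idx = LINUX_MIB_TCPSACKREORDER;

	NET_INC_STATS(sock_net(sk), mib_idx);
}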