about summary refs log tree commit diff stats
path: root/net/ipv4/tcp_output.c
diff options
context:
space:
mode:
authorFrancis Yan <francisyyan@gmail.com>2016-11-28 02:07:13 -0500
committerDavid S. Miller <davem@davemloft.net>2016-11-30 10:04:24 -0500
commit05b055e89121394058c75dc354e9a46e1e765579 (patch)
treef4f070fbc44650c700ab3b57fe3ff35080e4df08 /net/ipv4/tcp_output.c
parenta090994980a15f8cc14fc188b5929bd61d2ae9c3 (diff)
tcp: instrument tcp sender limits chronographs
This patch implements the skeleton of the TCP chronograph instrumentation on sender side limits: 1) idle (unspec) 2) busy sending data other than 3-4 below 3) rwnd-limited 4) sndbuf-limited The limits are enumerated 'tcp_chrono'. Since a connection in theory can idle forever, we do not track the actual length of this uninteresting idle period. For the rest we track how long the sender spends in each limit. At any point during the life time of a connection, the sender must be in one of the four states. If there are multiple conditions worthy of tracking in a chronograph then the highest priority enum takes precedence over the other conditions. So that if something "more interesting" starts happening, stop the previous chrono and start a new one. The time unit is jiffy(u32) in order to save space in tcp_sock. This implies application must sample the stats no longer than every 49 days of 1ms jiffy. Signed-off-by: Francis Yan <francisyyan@gmail.com> Signed-off-by: Yuchung Cheng <ycheng@google.com> Signed-off-by: Soheil Hassas Yeganeh <soheil@google.com> Acked-by: Neal Cardwell <ncardwell@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--net/ipv4/tcp_output.c30
1 files changed, 30 insertions, 0 deletions
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 19105b46a304..34f751776a01 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2081,6 +2081,36 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
2081 return false;
2082}
2083 2083
2084static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
2085{
2086 const u32 now = tcp_time_stamp;
2087
2088 if (tp->chrono_type > TCP_CHRONO_UNSPEC)
2089 tp->chrono_stat[tp->chrono_type - 1] += now - tp->chrono_start;
2090 tp->chrono_start = now;
2091 tp->chrono_type = new;
2092}
2093
2094void tcp_chrono_start(struct sock *sk, const enum tcp_chrono type)
2095{
2096 struct tcp_sock *tp = tcp_sk(sk);
2097
2098 /* If there are multiple conditions worthy of tracking in a
2099 * chronograph then the highest priority enum takes precedence over
2100 * the other conditions. So that if something "more interesting"
2101 * starts happening, stop the previous chrono and start a new one.
2102 */
2103 if (type > tp->chrono_type)
2104 tcp_chrono_set(tp, type);
2105}
2106
2107void tcp_chrono_stop(struct sock *sk, const enum tcp_chrono type)
2108{
2109 struct tcp_sock *tp = tcp_sk(sk);
2110
2111 tcp_chrono_set(tp, TCP_CHRONO_UNSPEC);
2112}
2113
2084/* This routine writes packets to the network. It advances the 2114/* This routine writes packets to the network. It advances the
2085 * send_head. This happens as incoming acks open up the remote 2115 * send_head. This happens as incoming acks open up the remote
2086 * window for us. 2116 * window for us.