diff options
author | Yuchung Cheng <ycheng@google.com> | 2018-05-16 19:40:14 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2018-05-17 15:41:29 -0400 |
commit | 2ad55f5660158d8a12c06486d32a7c47fa5f116b (patch) | |
tree | 7808086db33bbd720587b803323447da526ae176 /net/ipv4/tcp_input.c | |
parent | d716bfdb10b4250617783c94253e48b0e85adcb1 (diff) |
tcp: new helper tcp_timeout_mark_lost
Refactor using a new helper, tcp_timeout_mark_lost(), that marks packets
lost upon RTO.
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Soheil Hassas Yeganeh <soheil@google.com>
Reviewed-by: Priyaranjan Jha <priyarjha@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r-- | net/ipv4/tcp_input.c | 50 |
1 file changed, 29 insertions, 21 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 6fb0a28977a0..af32accda2a9 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c | |||
@@ -1917,18 +1917,43 @@ static inline void tcp_init_undo(struct tcp_sock *tp) | |||
1917 | tp->undo_retrans = tp->retrans_out ? : -1; | 1917 | tp->undo_retrans = tp->retrans_out ? : -1; |
1918 | } | 1918 | } |
1919 | 1919 | ||
1920 | /* Enter Loss state. If we detect SACK reneging, forget all SACK information | 1920 | /* If we detect SACK reneging, forget all SACK information |
1921 | * and reset tags completely, otherwise preserve SACKs. If receiver | 1921 | * and reset tags completely, otherwise preserve SACKs. If receiver |
1922 | * dropped its ofo queue, we will know this due to reneging detection. | 1922 | * dropped its ofo queue, we will know this due to reneging detection. |
1923 | */ | 1923 | */ |
1924 | static void tcp_timeout_mark_lost(struct sock *sk) | ||
1925 | { | ||
1926 | struct tcp_sock *tp = tcp_sk(sk); | ||
1927 | struct sk_buff *skb; | ||
1928 | bool is_reneg; /* is receiver reneging on SACKs? */ | ||
1929 | |||
1930 | skb = tcp_rtx_queue_head(sk); | ||
1931 | is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED); | ||
1932 | if (is_reneg) { | ||
1933 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING); | ||
1934 | tp->sacked_out = 0; | ||
1935 | /* Mark SACK reneging until we recover from this loss event. */ | ||
1936 | tp->is_sack_reneg = 1; | ||
1937 | } else if (tcp_is_reno(tp)) { | ||
1938 | tcp_reset_reno_sack(tp); | ||
1939 | } | ||
1940 | |||
1941 | skb_rbtree_walk_from(skb) { | ||
1942 | if (is_reneg) | ||
1943 | TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; | ||
1944 | tcp_mark_skb_lost(sk, skb); | ||
1945 | } | ||
1946 | tcp_verify_left_out(tp); | ||
1947 | tcp_clear_all_retrans_hints(tp); | ||
1948 | } | ||
1949 | |||
1950 | /* Enter Loss state. */ | ||
1924 | void tcp_enter_loss(struct sock *sk) | 1951 | void tcp_enter_loss(struct sock *sk) |
1925 | { | 1952 | { |
1926 | const struct inet_connection_sock *icsk = inet_csk(sk); | 1953 | const struct inet_connection_sock *icsk = inet_csk(sk); |
1927 | struct tcp_sock *tp = tcp_sk(sk); | 1954 | struct tcp_sock *tp = tcp_sk(sk); |
1928 | struct net *net = sock_net(sk); | 1955 | struct net *net = sock_net(sk); |
1929 | struct sk_buff *skb; | ||
1930 | bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery; | 1956 | bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery; |
1931 | bool is_reneg; /* is receiver reneging on SACKs? */ | ||
1932 | 1957 | ||
1933 | /* Reduce ssthresh if it has not yet been made inside this window. */ | 1958 | /* Reduce ssthresh if it has not yet been made inside this window. */ |
1934 | if (icsk->icsk_ca_state <= TCP_CA_Disorder || | 1959 | if (icsk->icsk_ca_state <= TCP_CA_Disorder || |
@@ -1944,24 +1969,7 @@ void tcp_enter_loss(struct sock *sk) | |||
1944 | tp->snd_cwnd_cnt = 0; | 1969 | tp->snd_cwnd_cnt = 0; |
1945 | tp->snd_cwnd_stamp = tcp_jiffies32; | 1970 | tp->snd_cwnd_stamp = tcp_jiffies32; |
1946 | 1971 | ||
1947 | if (tcp_is_reno(tp)) | 1972 | tcp_timeout_mark_lost(sk); |
1948 | tcp_reset_reno_sack(tp); | ||
1949 | |||
1950 | skb = tcp_rtx_queue_head(sk); | ||
1951 | is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED); | ||
1952 | if (is_reneg) { | ||
1953 | NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING); | ||
1954 | tp->sacked_out = 0; | ||
1955 | /* Mark SACK reneging until we recover from this loss event. */ | ||
1956 | tp->is_sack_reneg = 1; | ||
1957 | } | ||
1958 | skb_rbtree_walk_from(skb) { | ||
1959 | if (is_reneg) | ||
1960 | TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; | ||
1961 | tcp_mark_skb_lost(sk, skb); | ||
1962 | } | ||
1963 | tcp_verify_left_out(tp); | ||
1964 | tcp_clear_all_retrans_hints(tp); | ||
1965 | 1973 | ||
1966 | /* Timeout in disordered state after receiving substantial DUPACKs | 1974 | /* Timeout in disordered state after receiving substantial DUPACKs |
1967 | * suggests that the degree of reordering is over-estimated. | 1975 | * suggests that the degree of reordering is over-estimated. |