aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/tcp_input.c
diff options
context:
space:
mode:
authorYuchung Cheng <ycheng@google.com>2018-05-16 19:40:12 -0400
committerDavid S. Miller <davem@davemloft.net>2018-05-17 15:41:28 -0400
commit6ac06ecd3a5d1dd1aaea5c2a8f6d6e4c81d5de6a (patch)
tree66759ad3e1deda045d5656bd794ac30e54611ca6 /net/ipv4/tcp_input.c
parentb38a51fec1c1f693f03b1aa19d0622123634d4b7 (diff)
tcp: simpler NewReno implementation
This is a rewrite of NewReno loss recovery implementation that is simpler and standalone for readability and better performance by using less states. Note that NewReno refers to RFC6582 as a modification to the fast recovery algorithm. It is used only if the connection does not support SACK in Linux. It should not be confused with the Reno (AIMD) congestion control. Signed-off-by: Yuchung Cheng <ycheng@google.com> Signed-off-by: Neal Cardwell <ncardwell@google.com> Reviewed-by: Eric Dumazet <edumazet@google.com> Reviewed-by: Soheil Hassas Yeganeh <soheil@google.com> Reviewed-by: Priyaranjan Jha <priyarjha@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--net/ipv4/tcp_input.c19
1 file changed, 11 insertions, 8 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ccbe04f80040..076206873e3e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2223,9 +2223,7 @@ static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
2223{ 2223{
2224 struct tcp_sock *tp = tcp_sk(sk); 2224 struct tcp_sock *tp = tcp_sk(sk);
2225 2225
2226 if (tcp_is_reno(tp)) { 2226 if (tcp_is_sack(tp)) {
2227 tcp_mark_head_lost(sk, 1, 1);
2228 } else {
2229 int sacked_upto = tp->sacked_out - tp->reordering; 2227 int sacked_upto = tp->sacked_out - tp->reordering;
2230 if (sacked_upto >= 0) 2228 if (sacked_upto >= 0)
2231 tcp_mark_head_lost(sk, sacked_upto, 0); 2229 tcp_mark_head_lost(sk, sacked_upto, 0);
@@ -2723,11 +2721,16 @@ static bool tcp_try_undo_partial(struct sock *sk, u32 prior_snd_una)
2723 return false; 2721 return false;
2724} 2722}
2725 2723
2726static void tcp_rack_identify_loss(struct sock *sk, int *ack_flag) 2724static void tcp_identify_packet_loss(struct sock *sk, int *ack_flag)
2727{ 2725{
2728 struct tcp_sock *tp = tcp_sk(sk); 2726 struct tcp_sock *tp = tcp_sk(sk);
2729 2727
2730 if (tcp_is_rack(sk)) { 2728 if (tcp_rtx_queue_empty(sk))
2729 return;
2730
2731 if (unlikely(tcp_is_reno(tp))) {
2732 tcp_newreno_mark_lost(sk, *ack_flag & FLAG_SND_UNA_ADVANCED);
2733 } else if (tcp_is_rack(sk)) {
2731 u32 prior_retrans = tp->retrans_out; 2734 u32 prior_retrans = tp->retrans_out;
2732 2735
2733 tcp_rack_mark_lost(sk); 2736 tcp_rack_mark_lost(sk);
@@ -2823,11 +2826,11 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
2823 tcp_try_keep_open(sk); 2826 tcp_try_keep_open(sk);
2824 return; 2827 return;
2825 } 2828 }
2826 tcp_rack_identify_loss(sk, ack_flag); 2829 tcp_identify_packet_loss(sk, ack_flag);
2827 break; 2830 break;
2828 case TCP_CA_Loss: 2831 case TCP_CA_Loss:
2829 tcp_process_loss(sk, flag, is_dupack, rexmit); 2832 tcp_process_loss(sk, flag, is_dupack, rexmit);
2830 tcp_rack_identify_loss(sk, ack_flag); 2833 tcp_identify_packet_loss(sk, ack_flag);
2831 if (!(icsk->icsk_ca_state == TCP_CA_Open || 2834 if (!(icsk->icsk_ca_state == TCP_CA_Open ||
2832 (*ack_flag & FLAG_LOST_RETRANS))) 2835 (*ack_flag & FLAG_LOST_RETRANS)))
2833 return; 2836 return;
@@ -2844,7 +2847,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
2844 if (icsk->icsk_ca_state <= TCP_CA_Disorder) 2847 if (icsk->icsk_ca_state <= TCP_CA_Disorder)
2845 tcp_try_undo_dsack(sk); 2848 tcp_try_undo_dsack(sk);
2846 2849
2847 tcp_rack_identify_loss(sk, ack_flag); 2850 tcp_identify_packet_loss(sk, ack_flag);
2848 if (!tcp_time_to_recover(sk, flag)) { 2851 if (!tcp_time_to_recover(sk, flag)) {
2849 tcp_try_to_open(sk, flag); 2852 tcp_try_to_open(sk, flag);
2850 return; 2853 return;