path: root/net/ipv4/tcp_input.c
author	Yuchung Cheng <ycheng@google.com>	2015-10-17 00:57:43 -0400
committer	David S. Miller <davem@davemloft.net>	2015-10-21 10:00:44 -0400
commit	af82f4e84866ecd360a53f770d6217637116e6c1 (patch)
tree	a7302a289932f09cb577b8e1611d9de9c86e28b6	/net/ipv4/tcp_input.c
parent	f672258391b42a5c7cc2732c9c063e56a85c8dbe (diff)
tcp: remove tcp_mark_lost_retrans()
Remove the existing lost retransmit detection because RACK subsumes it completely. This also stops overloading the ack_seq field of the skb control block.

Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
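For context, the check being removed is sequence-based: when a segment was retransmitted, the then-current snd_nxt was stashed in the skb control block's ack_seq field, and the retransmission was presumed lost once the peer SACKed data beyond that stashed value (see tcp_mark_lost_retrans() in the diff below). A minimal standalone sketch of that rule follows; it is illustrative only, and the helper names are not kernel code:

/* Illustrative sketch of the sequence-based lost-retransmit rule this patch
 * removes. Not kernel code; names are made up for clarity.
 */
#include <stdbool.h>
#include <stdint.h>

/* Wrap-safe "a is after b" for 32-bit TCP sequence numbers, mirroring the
 * kernel's after() macro.
 */
static inline bool seq_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

/* retrans_snd_nxt: snd_nxt recorded when the segment was retransmitted
 * (the value previously overloaded into TCP_SKB_CB(skb)->ack_seq).
 * highest_sacked: highest sequence covered by any SACK block so far.
 * The retransmission is presumed lost if the receiver has SACKed data that
 * was sent after the retransmission went out.
 */
static bool retrans_presumed_lost(uint32_t retrans_snd_nxt,
				  uint32_t highest_sacked)
{
	return seq_after(highest_sacked, retrans_snd_nxt);
}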
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	65
1 file changed, 0 insertions(+), 65 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index eedb25db3947..5a776897a8c7 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1048,70 +1048,6 @@ static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack,
 	return !before(start_seq, end_seq - tp->max_window);
 }
 
-/* Check for lost retransmit. This superb idea is borrowed from "ratehalving".
- * Event "B". Later note: FACK people cheated me again 8), we have to account
- * for reordering! Ugly, but should help.
- *
- * Search retransmitted skbs from write_queue that were sent when snd_nxt was
- * less than what is now known to be received by the other end (derived from
- * highest SACK block). Also calculate the lowest snd_nxt among the remaining
- * retransmitted skbs to avoid some costly processing per ACKs.
- */
-static void tcp_mark_lost_retrans(struct sock *sk, int *flag)
-{
-	const struct inet_connection_sock *icsk = inet_csk(sk);
-	struct tcp_sock *tp = tcp_sk(sk);
-	struct sk_buff *skb;
-	int cnt = 0;
-	u32 new_low_seq = tp->snd_nxt;
-	u32 received_upto = tcp_highest_sack_seq(tp);
-
-	if (!tcp_is_fack(tp) || !tp->retrans_out ||
-	    !after(received_upto, tp->lost_retrans_low) ||
-	    icsk->icsk_ca_state != TCP_CA_Recovery)
-		return;
-
-	tcp_for_write_queue(skb, sk) {
-		u32 ack_seq = TCP_SKB_CB(skb)->ack_seq;
-
-		if (skb == tcp_send_head(sk))
-			break;
-		if (cnt == tp->retrans_out)
-			break;
-		if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
-			continue;
-
-		if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS))
-			continue;
-
-		/* TODO: We would like to get rid of tcp_is_fack(tp) only
-		 * constraint here (see above) but figuring out that at
-		 * least tp->reordering SACK blocks reside between ack_seq
-		 * and received_upto is not easy task to do cheaply with
-		 * the available datastructures.
-		 *
-		 * Whether FACK should check here for tp->reordering segs
-		 * in-between one could argue for either way (it would be
-		 * rather simple to implement as we could count fack_count
-		 * during the walk and do tp->fackets_out - fack_count).
-		 */
-		if (after(received_upto, ack_seq)) {
-			TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
-			tp->retrans_out -= tcp_skb_pcount(skb);
-			*flag |= FLAG_LOST_RETRANS;
-			tcp_skb_mark_lost_uncond_verify(tp, skb);
-			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
-		} else {
-			if (before(ack_seq, new_low_seq))
-				new_low_seq = ack_seq;
-			cnt += tcp_skb_pcount(skb);
-		}
-	}
-
-	if (tp->retrans_out)
-		tp->lost_retrans_low = new_low_seq;
-}
-
 static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
 			    struct tcp_sack_block_wire *sp, int num_sacks,
 			    u32 prior_snd_una)
@@ -1838,7 +1774,6 @@ advance_sp:
 	    ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker))
 		tcp_update_reordering(sk, tp->fackets_out - state->reord, 0);
 
-	tcp_mark_lost_retrans(sk, &state->flag);
 	tcp_verify_left_out(tp);
 out:
 
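For comparison, the RACK mechanism that the commit message says subsumes this check marks losses by elapsed time rather than by sequence ordering: a segment is presumed lost when a segment that was sent sufficiently later has already been delivered. A rough conceptual sketch is below; it is not the kernel's RACK implementation, and the names and the fixed reordering window are assumptions:

/* Conceptual sketch of time-based (RACK-style) loss marking; not kernel code. */
#include <stdbool.h>
#include <stdint.h>

struct seg {
	uint64_t xmit_time_us;	/* when this segment was last (re)transmitted */
	bool	 delivered;	/* already SACKed or cumulatively ACKed */
};

/* A still-outstanding segment is presumed lost if some delivered segment was
 * (re)sent more than a small reordering window after this one. The decision
 * is based on transmission time, not sequence numbers, so it also catches
 * lost retransmissions without stashing snd_nxt in ack_seq.
 */
static bool rack_style_lost(const struct seg *s,
			    uint64_t latest_delivered_xmit_us,
			    uint64_t reo_wnd_us)
{
	if (s->delivered)
		return false;
	return latest_delivered_xmit_us > s->xmit_time_us + reo_wnd_us;
}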