author	Nandita Dukkipati <nanditad@google.com>	2013-03-11 06:00:44 -0400
committer	David S. Miller <davem@davemloft.net>	2013-03-12 08:30:34 -0400
commit	9b717a8d245075ffb8e95a2dfb4ee97ce4747457 (patch)
tree	08e1ee37c89b11e4c08734c671a2427edb942944 /net/ipv4/tcp_output.c
parent	6ba8a3b19e764b6a65e4030ab0999be50c291e6c (diff)
tcp: TLP loss detection.
This is the second of the TLP patch series; it augments the basic TLP algorithm with a loss detection scheme.

This patch implements a mechanism for loss detection when a Tail loss probe retransmission plugs a hole, thereby masking packet loss from the sender. The loss detection algorithm relies on counting TLP dupacks as outlined in Sec. 3 of:
http://tools.ietf.org/html/draft-dukkipati-tcpm-tcp-loss-probe-01

The basic idea is: the sender keeps track of a TLP "episode" upon retransmission of a TLP packet. An episode ends when the sender receives an ACK above the SND.NXT (tracked by tlp_high_seq) recorded at the time the probe was sent. Before the episode ends, the sender wants to receive a "TLP dupack", indicating that the TLP retransmission was unnecessary, i.e. there was no loss/hole that needed plugging. If the sender gets no TLP dupack before the end of the episode, it reduces ssthresh and the congestion window, because the TLP packet arriving at the receiver probably plugged a hole.

Signed-off-by: Nandita Dukkipati <nanditad@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
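To make the episode bookkeeping concrete, the following is a minimal, self-contained C sketch of the ACK-side logic described above. It is not the kernel implementation: the tlp_state struct, the tlp_dupack_seen flag, the seq_after() helper and the reduce_cwnd callback are illustrative assumptions; only tlp_high_seq mirrors the field this series adds to struct tcp_sock.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative per-connection state; not the real struct tcp_sock. */
struct tlp_state {
	uint32_t tlp_high_seq;    /* SND.NXT when the probe was sent; 0 = no open episode */
	bool     tlp_dupack_seen; /* saw a "TLP dupack" during the episode? */
};

/* Wrap-around-safe "a is after b", the same comparison the kernel's after() macro uses. */
static bool seq_after(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

/* Sketch of what would run for each incoming ACK while a TLP episode may be open. */
static void tlp_process_ack(struct tlp_state *s, uint32_t ack_seq, bool is_dupack,
			    void (*reduce_cwnd)(void))
{
	if (!s->tlp_high_seq)
		return;                      /* no episode in progress */

	if (is_dupack)
		s->tlp_dupack_seen = true;   /* probe was spurious: nothing was lost */

	if (seq_after(ack_seq, s->tlp_high_seq)) {
		/* ACK above the recorded SND.NXT ends the episode. */
		if (!s->tlp_dupack_seen)
			reduce_cwnd();       /* probe most likely plugged a hole */
		s->tlp_high_seq = 0;
		s->tlp_dupack_seen = false;
	}
}

In the patch below, only the sender side of this bookkeeping appears: tcp_send_loss_probe() refuses to send a second probe while an episode is open and records snd_nxt into tlp_high_seq; the ACK-side consumption of that field is handled elsewhere in the series.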
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--	net/ipv4/tcp_output.c	9
1 files changed, 9 insertions, 0 deletions
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index beb63dbc85f5..8e7742f0b5d2 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2132,6 +2132,7 @@ bool tcp_schedule_loss_probe(struct sock *sk)
  */
 void tcp_send_loss_probe(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 	int pcount;
 	int mss = tcp_current_mss(sk);
@@ -2142,6 +2143,10 @@ void tcp_send_loss_probe(struct sock *sk)
 		goto rearm_timer;
 	}
 
+	/* At most one outstanding TLP retransmission. */
+	if (tp->tlp_high_seq)
+		goto rearm_timer;
+
 	/* Retransmit last segment. */
 	skb = tcp_write_queue_tail(sk);
 	if (WARN_ON(!skb))
@@ -2164,6 +2169,10 @@ void tcp_send_loss_probe(struct sock *sk)
 	if (skb->len > 0)
 		err = __tcp_retransmit_skb(sk, skb);
 
+	/* Record snd_nxt for loss detection. */
+	if (likely(!err))
+		tp->tlp_high_seq = tp->snd_nxt;
+
 rearm_timer:
 	inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 				  inet_csk(sk)->icsk_rto,