aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/tcp_output.c
diff options
context:
space:
mode:
authorIlpo Järvinen <ilpo.jarvinen@helsinki.fi>2008-09-21 00:20:20 -0400
committerDavid S. Miller <davem@davemloft.net>2008-09-21 00:20:20 -0400
commit006f582c73f4eda35e06fd323193c3df43fb3459 (patch)
treed82762cfb6fb5e6889a52d316fa263a80c4f9fb3 /net/ipv4/tcp_output.c
parent41ea36e35a0daa75377b3e70680e5c3a3f83fe27 (diff)
tcp: convert retransmit_cnt_hint to seqno
The main benefit of this is that we can then freely point the retransmit_skb_hint anywhere we want, because there is no longer a need to know what count changes would be involved; and since this is really used only as a terminator, the unnecessary work is a one-time walk at most, and if some retransmissions are necessary after that point later on, the walk is not a full waste of time anyway. Since retransmit_high must be kept valid, all lost markers must ensure that. Now I have also learned how those "holes" in the rexmittable skbs can appear: MTU probing creates them. So I removed the misleading comment as well. Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--net/ipv4/tcp_output.c25
1 file changed, 7 insertions, 18 deletions
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 11490958a096..cfae61b40c44 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -1838,7 +1838,7 @@ void tcp_simple_retransmit(struct sock *sk)
1838 struct tcp_sock *tp = tcp_sk(sk); 1838 struct tcp_sock *tp = tcp_sk(sk);
1839 struct sk_buff *skb; 1839 struct sk_buff *skb;
1840 unsigned int mss = tcp_current_mss(sk, 0); 1840 unsigned int mss = tcp_current_mss(sk, 0);
1841 int lost = 0; 1841 u32 prior_lost = tp->lost_out;
1842 1842
1843 tcp_for_write_queue(skb, sk) { 1843 tcp_for_write_queue(skb, sk) {
1844 if (skb == tcp_send_head(sk)) 1844 if (skb == tcp_send_head(sk))
@@ -1849,17 +1849,13 @@ void tcp_simple_retransmit(struct sock *sk)
1849 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; 1849 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
1850 tp->retrans_out -= tcp_skb_pcount(skb); 1850 tp->retrans_out -= tcp_skb_pcount(skb);
1851 } 1851 }
1852 if (!(TCP_SKB_CB(skb)->sacked & TCPCB_LOST)) { 1852 tcp_skb_mark_lost_uncond_verify(tp, skb);
1853 TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
1854 tp->lost_out += tcp_skb_pcount(skb);
1855 lost = 1;
1856 }
1857 } 1853 }
1858 } 1854 }
1859 1855
1860 tcp_clear_all_retrans_hints(tp); 1856 tcp_clear_all_retrans_hints(tp);
1861 1857
1862 if (!lost) 1858 if (prior_lost == tp->lost_out)
1863 return; 1859 return;
1864 1860
1865 if (tcp_is_reno(tp)) 1861 if (tcp_is_reno(tp))
@@ -2009,15 +2005,11 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
2009 const struct inet_connection_sock *icsk = inet_csk(sk); 2005 const struct inet_connection_sock *icsk = inet_csk(sk);
2010 struct tcp_sock *tp = tcp_sk(sk); 2006 struct tcp_sock *tp = tcp_sk(sk);
2011 struct sk_buff *skb; 2007 struct sk_buff *skb;
2012 int packet_cnt;
2013 2008
2014 if (tp->retransmit_skb_hint) { 2009 if (tp->retransmit_skb_hint)
2015 skb = tp->retransmit_skb_hint; 2010 skb = tp->retransmit_skb_hint;
2016 packet_cnt = tp->retransmit_cnt_hint; 2011 else
2017 } else {
2018 skb = tcp_write_queue_head(sk); 2012 skb = tcp_write_queue_head(sk);
2019 packet_cnt = 0;
2020 }
2021 2013
2022 /* First pass: retransmit lost packets. */ 2014 /* First pass: retransmit lost packets. */
2023 if (tp->lost_out) { 2015 if (tp->lost_out) {
@@ -2028,7 +2020,6 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
2028 break; 2020 break;
2029 /* we could do better than to assign each time */ 2021 /* we could do better than to assign each time */
2030 tp->retransmit_skb_hint = skb; 2022 tp->retransmit_skb_hint = skb;
2031 tp->retransmit_cnt_hint = packet_cnt;
2032 2023
2033 /* Assume this retransmit will generate 2024 /* Assume this retransmit will generate
2034 * only one packet for congestion window 2025 * only one packet for congestion window
@@ -2039,6 +2030,8 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
2039 */ 2030 */
2040 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) 2031 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
2041 return; 2032 return;
2033 if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high))
2034 break;
2042 2035
2043 if (sacked & TCPCB_LOST) { 2036 if (sacked & TCPCB_LOST) {
2044 if (!(sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) { 2037 if (!(sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
@@ -2059,10 +2052,6 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
2059 inet_csk(sk)->icsk_rto, 2052 inet_csk(sk)->icsk_rto,
2060 TCP_RTO_MAX); 2053 TCP_RTO_MAX);
2061 } 2054 }
2062
2063 packet_cnt += tcp_skb_pcount(skb);
2064 if (packet_cnt >= tp->lost_out)
2065 break;
2066 } 2055 }
2067 } 2056 }
2068 } 2057 }