author		Paul Mackerras <paulus@samba.org>	2008-04-14 07:11:02 -0400
committer	Paul Mackerras <paulus@samba.org>	2008-04-14 07:11:02 -0400
commit		ac7c5353b189e10cf5dd27399f64f7b013abffc6 (patch)
tree		8222d92b774c256d6ec4399c716d76b3f05ddc4b /net/ipv4/tcp_input.c
parent		a8f75ea70c58546205fb7673be41455b9da5d9a7 (diff)
parent		120dd64cacd4fb796bca0acba3665553f1d9ecaa (diff)

Merge branch 'linux-2.6'
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	65
1 file changed, 48 insertions(+), 17 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 7facdb0f6960..5119856017ab 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1625,13 +1625,11 @@ out:
 	return flag;
 }
 
-/* If we receive more dupacks than we expected counting segments
- * in assumption of absent reordering, interpret this as reordering.
- * The only another reason could be bug in receiver TCP.
+/* Limits sacked_out so that sum with lost_out isn't ever larger than
+ * packets_out. Returns zero if sacked_out adjustement wasn't necessary.
  */
-static void tcp_check_reno_reordering(struct sock *sk, const int addend)
+int tcp_limit_reno_sacked(struct tcp_sock *tp)
 {
-	struct tcp_sock *tp = tcp_sk(sk);
 	u32 holes;
 
 	holes = max(tp->lost_out, 1U);
@@ -1639,8 +1637,20 @@ static void tcp_check_reno_reordering(struct sock *sk, const int addend)
 
 	if ((tp->sacked_out + holes) > tp->packets_out) {
 		tp->sacked_out = tp->packets_out - holes;
-		tcp_update_reordering(sk, tp->packets_out + addend, 0);
+		return 1;
 	}
+	return 0;
+}
+
+/* If we receive more dupacks than we expected counting segments
+ * in assumption of absent reordering, interpret this as reordering.
+ * The only another reason could be bug in receiver TCP.
+ */
+static void tcp_check_reno_reordering(struct sock *sk, const int addend)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	if (tcp_limit_reno_sacked(tp))
+		tcp_update_reordering(sk, tp->packets_out + addend, 0);
 }
 
 /* Emulate SACKs for SACKless connection: account for a new dupack. */
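The two hunks above split the sacked_out clamp out of tcp_check_reno_reordering() so other callers can reuse it; the non-zero return tells the caller the estimate overflowed, which tcp_check_reno_reordering() still interprets as reordering. A minimal standalone sketch of that invariant, using a mock tcp_sock and sample values rather than the kernel's types (field and function names follow the patch; everything else is assumed):

	#include <stdio.h>

	typedef unsigned int u32;

	struct tcp_sock {
		u32 sacked_out;		/* dupack-estimated SACKed segments */
		u32 lost_out;		/* segments presumed lost */
		u32 packets_out;	/* segments currently in flight */
	};

	/* Clamp sacked_out so sacked_out + lost_out never exceeds packets_out. */
	static int tcp_limit_reno_sacked(struct tcp_sock *tp)
	{
		u32 holes = tp->lost_out > 1 ? tp->lost_out : 1;	/* max(lost_out, 1U) */

		if (tp->sacked_out + holes > tp->packets_out) {
			tp->sacked_out = tp->packets_out - holes;
			return 1;	/* clamped: caller may treat this as reordering */
		}
		return 0;
	}

	int main(void)
	{
		struct tcp_sock tp = { .sacked_out = 8, .lost_out = 2, .packets_out = 9 };
		int clamped = tcp_limit_reno_sacked(&tp);

		/* 8 + 2 > 9, so sacked_out is clamped to 9 - 2 = 7. */
		printf("clamped=%d sacked_out=%u\n", clamped, tp.sacked_out);
		return 0;
	}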
@@ -1681,11 +1691,16 @@ static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
 int tcp_use_frto(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
+	const struct inet_connection_sock *icsk = inet_csk(sk);
 	struct sk_buff *skb;
 
 	if (!sysctl_tcp_frto)
 		return 0;
 
+	/* MTU probe and F-RTO won't really play nicely along currently */
+	if (icsk->icsk_mtup.probe_size)
+		return 0;
+
 	if (IsSackFrto())
 		return 1;
 
@@ -2134,11 +2149,13 @@ static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
 /* Mark head of queue up as lost. With RFC3517 SACK, the packets is
  * is against sacked "cnt", otherwise it's against facked "cnt"
  */
-static void tcp_mark_head_lost(struct sock *sk, int packets, int fast_rexmit)
+static void tcp_mark_head_lost(struct sock *sk, int packets)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
-	int cnt;
+	int cnt, oldcnt;
+	int err;
+	unsigned int mss;
 
 	BUG_TRAP(packets <= tp->packets_out);
 	if (tp->lost_skb_hint) {
@@ -2157,13 +2174,25 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int fast_rexmit)
 		tp->lost_skb_hint = skb;
 		tp->lost_cnt_hint = cnt;
 
+		if (after(TCP_SKB_CB(skb)->end_seq, tp->high_seq))
+			break;
+
+		oldcnt = cnt;
 		if (tcp_is_fack(tp) || tcp_is_reno(tp) ||
 		    (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
 			cnt += tcp_skb_pcount(skb);
 
-		if (((!fast_rexmit || (tp->lost_out > 0)) && (cnt > packets)) ||
-		    after(TCP_SKB_CB(skb)->end_seq, tp->high_seq))
-			break;
+		if (cnt > packets) {
+			if (tcp_is_sack(tp) || (oldcnt >= packets))
+				break;
+
+			mss = skb_shinfo(skb)->gso_size;
+			err = tcp_fragment(sk, skb, (packets - oldcnt) * mss, mss);
+			if (err < 0)
+				break;
+			cnt = packets;
+		}
+
 		if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_SACKED_ACKED|TCPCB_LOST))) {
 			TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
 			tp->lost_out += tcp_skb_pcount(skb);
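The hunk above is the heart of the GSO fix: cnt advances by tcp_skb_pcount() per skb, so a single GSOed skb could jump cnt well past the number of segments to mark lost. For a non-SACK flow the new branch fragments the skb at (packets - oldcnt) * mss so only the head fragment gets marked. A standalone simulation of that counting with made-up segment counts and MSS (illustrative values, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		unsigned int pcounts[] = { 2, 10, 4 };	/* per-skb tcp_skb_pcount() */
		unsigned int mss = 1460;
		unsigned int packets = 5;		/* target: segments to mark lost */
		unsigned int cnt = 0, oldcnt;

		for (int i = 0; i < 3; i++) {
			oldcnt = cnt;
			cnt += pcounts[i];
			if (cnt > packets) {
				/* NewReno case: fragment instead of overshooting. */
				unsigned int head = (packets - oldcnt) * mss;
				printf("skb %d: split at %u bytes, mark %u segs lost\n",
				       i, head, packets - oldcnt);
				cnt = packets;	/* as the patch does after tcp_fragment() */
				break;
			}
			printf("skb %d: mark all %u segs lost\n", i, pcounts[i]);
		}
		return 0;
	}

With these numbers the second skb overshoots (2 + 10 > 5), so it is split at 3 * 1460 = 4380 bytes and exactly three more segments are marked lost.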
@@ -2180,17 +2209,17 @@ static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit)
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	if (tcp_is_reno(tp)) {
-		tcp_mark_head_lost(sk, 1, fast_rexmit);
+		tcp_mark_head_lost(sk, 1);
 	} else if (tcp_is_fack(tp)) {
 		int lost = tp->fackets_out - tp->reordering;
 		if (lost <= 0)
 			lost = 1;
-		tcp_mark_head_lost(sk, lost, fast_rexmit);
+		tcp_mark_head_lost(sk, lost);
 	} else {
 		int sacked_upto = tp->sacked_out - tp->reordering;
-		if (sacked_upto < 0)
-			sacked_upto = 0;
-		tcp_mark_head_lost(sk, sacked_upto, fast_rexmit);
+		if (sacked_upto < fast_rexmit)
+			sacked_upto = fast_rexmit;
+		tcp_mark_head_lost(sk, sacked_upto);
 	}
 
 	/* New heuristics: it is possible only after we switched
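With fast_rexmit no longer passed down, the RFC3517 SACK branch instead floors sacked_upto at fast_rexmit rather than 0, so a fast retransmit always asks tcp_mark_head_lost() for at least one segment even before sacked_out exceeds the reordering estimate. A small sketch of the old-versus-new floor, with made-up values:

	#include <stdio.h>

	static int sacked_upto(int sacked_out, int reordering, int fast_rexmit)
	{
		int n = sacked_out - reordering;

		if (n < fast_rexmit)	/* was: if (n < 0) n = 0; */
			n = fast_rexmit;
		return n;
	}

	int main(void)
	{
		/* sacked_out (2) has not yet exceeded reordering (3)... */
		printf("old-style floor: %d\n", sacked_upto(2, 3, 0));
		/* ...yet a fast retransmit still marks one head segment lost. */
		printf("new floor:       %d\n", sacked_upto(2, 3, 1));
		return 0;
	}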
@@ -2524,7 +2553,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 	    before(tp->snd_una, tp->high_seq) &&
 	    icsk->icsk_ca_state != TCP_CA_Open &&
 	    tp->fackets_out > tp->reordering) {
-		tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering, 0);
+		tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering);
 		NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
 	}
 
@@ -2586,6 +2615,8 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 	case TCP_CA_Loss:
 		if (flag & FLAG_DATA_ACKED)
 			icsk->icsk_retransmits = 0;
+		if (tcp_is_reno(tp) && flag & FLAG_SND_UNA_ADVANCED)
+			tcp_reset_reno_sack(tp);
 		if (!tcp_try_undo_loss(sk)) {
 			tcp_moderate_cwnd(tp);
 			tcp_xmit_retransmit_queue(sk);
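On a SACKless (Reno) flow sacked_out is only a dupack counter, so once an ACK advances snd_una while in CA_Loss those dupacks described data that is now cumulatively acked, and the counter must be cleared before the undo logic runs. A standalone sketch of the added check; the flag constants are assumed values for illustration, not taken verbatim from tcp_input.c:

	#include <stdio.h>

	#define FLAG_DATA_ACKED		0x04	/* assumed value */
	#define FLAG_SND_UNA_ADVANCED	0x400	/* assumed value */

	struct tcp_sock {
		unsigned int sacked_out;	/* dupack count on a reno flow */
	};

	static void tcp_reset_reno_sack(struct tcp_sock *tp)
	{
		tp->sacked_out = 0;	/* stale: the dupacked data was just acked */
	}

	int main(void)
	{
		struct tcp_sock tp = { .sacked_out = 3 };	/* three dupacks seen */
		int flag = FLAG_DATA_ACKED | FLAG_SND_UNA_ADVANCED;

		if (flag & FLAG_SND_UNA_ADVANCED)	/* tcp_is_reno() check elided */
			tcp_reset_reno_sack(&tp);
		printf("sacked_out=%u\n", tp.sacked_out);	/* prints 0 */
		return 0;
	}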