Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	145
1 file changed, 86 insertions(+), 59 deletions(-)
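The change is mechanical throughout: helpers in tcp_input.c that used to take both a struct sock * and a struct tcp_sock * now take only the socket and derive the tcp_sock locally via tcp_sk(sk), since the second argument was always redundant with the first. A minimal standalone sketch of the pattern, with made-up types and names standing in for the kernel's (only the shape of the change mirrors the patch):

/*
 * Hypothetical, self-contained illustration of the refactoring applied
 * in this patch; the types and functions below are invented for the
 * example, not the kernel's API.
 */
#include <stdio.h>

struct tcp_state { unsigned int rcv_ssthresh; };
struct conn { struct tcp_state tcp; };

/* stand-in for tcp_sk(sk): a trivial accessor from the socket */
static struct tcp_state *conn_tcp(struct conn *c)
{
	return &c->tcp;
}

/* old style: the caller threads a second, redundant pointer through */
static void grow_old(struct conn *c, struct tcp_state *t, unsigned int incr)
{
	(void)c;		/* the socket is still needed by other helpers */
	t->rcv_ssthresh += incr;
}

/* new style: one argument, state derived where it is used */
static void grow_new(struct conn *c, unsigned int incr)
{
	struct tcp_state *t = conn_tcp(c);

	t->rcv_ssthresh += incr;
}

int main(void)
{
	struct conn c = { { 10 } };

	grow_old(&c, conn_tcp(&c), 2);
	grow_new(&c, 3);
	printf("rcv_ssthresh = %u\n", c.tcp.rcv_ssthresh);	/* 15 */
	return 0;
}

Dropping the extra parameter shortens every call site and removes any chance of the two pointers disagreeing.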
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 2fbfc2e4209c..633389390788 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -235,9 +235,9 @@ static void tcp_fixup_sndbuf(struct sock *sk)
  */
 
 /* Slow part of check#2. */
-static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp,
-			     const struct sk_buff *skb)
+static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	/* Optimize this! */
 	int truesize = tcp_win_from_space(skb->truesize)/2;
 	int window = tcp_win_from_space(sysctl_tcp_rmem[2])/2;
@@ -252,9 +252,11 @@ static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp,
 	return 0;
 }
 
-static void tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
+static void tcp_grow_window(struct sock *sk,
 			    struct sk_buff *skb)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	/* Check #1 */
 	if (tp->rcv_ssthresh < tp->window_clamp &&
 	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
@@ -267,7 +269,7 @@ static void tcp_grow_window(struct sock *sk, struct tcp_sock *tp,
 		if (tcp_win_from_space(skb->truesize) <= skb->len)
 			incr = 2*tp->advmss;
 		else
-			incr = __tcp_grow_window(sk, tp, skb);
+			incr = __tcp_grow_window(sk, skb);
 
 		if (incr) {
 			tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp);
@@ -330,8 +332,9 @@ static void tcp_init_buffer_space(struct sock *sk)
 }
 
 /* 5. Recalculate window clamp after socket hit its memory bounds. */
-static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
+static void tcp_clamp_window(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 
 	icsk->icsk_ack.quick = 0;
@@ -503,8 +506,9 @@ new_measure:
  * each ACK we send, he increments snd_cwnd and transmits more of his
  * queue. -DaveM
  */
-static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
+static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 now;
 
@@ -545,7 +549,7 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_
 	TCP_ECN_check_ce(tp, skb);
 
 	if (skb->len >= 128)
-		tcp_grow_window(sk, tp, skb);
+		tcp_grow_window(sk, skb);
 }
 
 /* Called to compute a smoothed rtt estimate. The data fed to this
@@ -1541,8 +1545,10 @@ static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
 	return (tcp_time_stamp - TCP_SKB_CB(skb)->when > inet_csk(sk)->icsk_rto);
 }
 
-static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp)
+static inline int tcp_head_timedout(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	return tp->packets_out &&
 	       tcp_skb_timedout(sk, tcp_write_queue_head(sk));
 }
@@ -1640,8 +1646,9 @@ static inline int tcp_head_timedout(struct sock *sk, struct tcp_sock *tp)
  * Main question: may we further continue forward transmission
  * with the same cwnd?
  */
-static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp)
+static int tcp_time_to_recover(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	__u32 packets_out;
 
 	/* Do not perform any recovery during FRTO algorithm */
@@ -1659,7 +1666,7 @@ static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp)
 	/* Trick#3 : when we use RFC2988 timer restart, fast
 	 * retransmit can be triggered by timeout of queue head.
 	 */
-	if (tcp_head_timedout(sk, tp))
+	if (tcp_head_timedout(sk))
 		return 1;
 
 	/* Trick#4: It is still not OK... But will it be useful to delay
@@ -1668,7 +1675,7 @@ static int tcp_time_to_recover(struct sock *sk, struct tcp_sock *tp)
 	packets_out = tp->packets_out;
 	if (packets_out <= tp->reordering &&
 	    tp->sacked_out >= max_t(__u32, packets_out/2, sysctl_tcp_reordering) &&
-	    !tcp_may_send_now(sk, tp)) {
+	    !tcp_may_send_now(sk)) {
 		/* We have nothing to send. This connection is limited
 		 * either by receiver window or by application.
 		 */
@@ -1708,8 +1715,10 @@ static void tcp_add_reno_sack(struct sock *sk)
 
 /* Account for ACK, ACKing some data in Reno Recovery phase. */
 
-static void tcp_remove_reno_sacks(struct sock *sk, struct tcp_sock *tp, int acked)
+static void tcp_remove_reno_sacks(struct sock *sk, int acked)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (acked > 0) {
 		/* One ACK acked hole. The rest eat duplicate ACKs. */
 		if (acked-1 >= tp->sacked_out)
@@ -1728,9 +1737,10 @@ static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
 }
 
 /* Mark head of queue up as lost. */
-static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp,
+static void tcp_mark_head_lost(struct sock *sk,
 			       int packets, u32 high_seq)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *skb;
 	int cnt;
 
@@ -1771,15 +1781,17 @@ static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp,
 
 /* Account newly detected lost packet(s) */
 
-static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
+static void tcp_update_scoreboard(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (IsFack(tp)) {
 		int lost = tp->fackets_out - tp->reordering;
 		if (lost <= 0)
 			lost = 1;
-		tcp_mark_head_lost(sk, tp, lost, tp->high_seq);
+		tcp_mark_head_lost(sk, lost, tp->high_seq);
 	} else {
-		tcp_mark_head_lost(sk, tp, 1, tp->high_seq);
+		tcp_mark_head_lost(sk, 1, tp->high_seq);
 	}
 
 	/* New heuristics: it is possible only after we switched
@@ -1787,7 +1799,7 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
 	 * Hence, we can detect timed out packets during fast
 	 * retransmit without falling to slow start.
 	 */
-	if (!IsReno(tp) && tcp_head_timedout(sk, tp)) {
+	if (!IsReno(tp) && tcp_head_timedout(sk)) {
 		struct sk_buff *skb;
 
 		skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
@@ -1867,9 +1879,11 @@ static inline int tcp_packet_delayed(struct tcp_sock *tp)
 /* Undo procedures. */
 
 #if FASTRETRANS_DEBUG > 1
-static void DBGUNDO(struct sock *sk, struct tcp_sock *tp, const char *msg)
+static void DBGUNDO(struct sock *sk, const char *msg)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	struct inet_sock *inet = inet_sk(sk);
+
 	printk(KERN_DEBUG "Undo %s %u.%u.%u.%u/%u c%u l%u ss%u/%u p%u\n",
 	       msg,
 	       NIPQUAD(inet->daddr), ntohs(inet->dport),
@@ -1915,13 +1929,15 @@ static inline int tcp_may_undo(struct tcp_sock *tp)
 }
 
 /* People celebrate: "We love our President!" */
-static int tcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp)
+static int tcp_try_undo_recovery(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (tcp_may_undo(tp)) {
 		/* Happy end! We did not retransmit anything
 		 * or our original transmission succeeded.
 		 */
-		DBGUNDO(sk, tp, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
+		DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
 		tcp_undo_cwr(sk, 1);
 		if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
 			NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
@@ -1941,10 +1957,12 @@ static int tcp_try_undo_recovery(struct sock *sk, struct tcp_sock *tp)
 }
 
 /* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
-static void tcp_try_undo_dsack(struct sock *sk, struct tcp_sock *tp)
+static void tcp_try_undo_dsack(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (tp->undo_marker && !tp->undo_retrans) {
-		DBGUNDO(sk, tp, "D-SACK");
+		DBGUNDO(sk, "D-SACK");
 		tcp_undo_cwr(sk, 1);
 		tp->undo_marker = 0;
 		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKUNDO);
@@ -1953,9 +1971,9 @@ static void tcp_try_undo_dsack(struct sock *sk, struct tcp_sock *tp)
 
 /* Undo during fast recovery after partial ACK. */
 
-static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp,
-				int acked)
+static int tcp_try_undo_partial(struct sock *sk, int acked)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	/* Partial ACK arrived. Force Hoe's retransmit. */
 	int failed = IsReno(tp) || tp->fackets_out>tp->reordering;
 
@@ -1968,7 +1986,7 @@ static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp,
 
 	tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
 
-	DBGUNDO(sk, tp, "Hoe");
+	DBGUNDO(sk, "Hoe");
 	tcp_undo_cwr(sk, 0);
 	NET_INC_STATS_BH(LINUX_MIB_TCPPARTIALUNDO);
 
@@ -1982,8 +2000,10 @@ static int tcp_try_undo_partial(struct sock *sk, struct tcp_sock *tp,
 }
 
 /* Undo during loss recovery after partial ACK. */
-static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp)
+static int tcp_try_undo_loss(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (tcp_may_undo(tp)) {
 		struct sk_buff *skb;
 		tcp_for_write_queue(skb, sk) {
@@ -1994,7 +2014,7 @@ static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp)
 
 		clear_all_retrans_hints(tp);
 
-		DBGUNDO(sk, tp, "partial loss");
+		DBGUNDO(sk, "partial loss");
 		tp->lost_out = 0;
 		tp->left_out = tp->sacked_out;
 		tcp_undo_cwr(sk, 1);
@@ -2016,8 +2036,10 @@ static inline void tcp_complete_cwr(struct sock *sk)
 	tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
 }
 
-static void tcp_try_to_open(struct sock *sk, struct tcp_sock *tp, int flag)
+static void tcp_try_to_open(struct sock *sk, int flag)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	tp->left_out = tp->sacked_out;
 
 	if (tp->retrans_out == 0)
@@ -2111,7 +2133,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 	    before(tp->snd_una, tp->high_seq) &&
 	    icsk->icsk_ca_state != TCP_CA_Open &&
 	    tp->fackets_out > tp->reordering) {
-		tcp_mark_head_lost(sk, tp, tp->fackets_out-tp->reordering, tp->high_seq);
+		tcp_mark_head_lost(sk, tp->fackets_out-tp->reordering, tp->high_seq);
 		NET_INC_STATS_BH(LINUX_MIB_TCPLOSS);
 	}
 
@@ -2127,7 +2149,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 	switch (icsk->icsk_ca_state) {
 	case TCP_CA_Loss:
 		icsk->icsk_retransmits = 0;
-		if (tcp_try_undo_recovery(sk, tp))
+		if (tcp_try_undo_recovery(sk))
 			return;
 		break;
 
@@ -2141,7 +2163,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 		break;
 
 	case TCP_CA_Disorder:
-		tcp_try_undo_dsack(sk, tp);
+		tcp_try_undo_dsack(sk);
 		if (!tp->undo_marker ||
 		    /* For SACK case do not Open to allow to undo
 		     * catching for all duplicate ACKs. */
@@ -2154,7 +2176,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 	case TCP_CA_Recovery:
 		if (IsReno(tp))
 			tcp_reset_reno_sack(tp);
-		if (tcp_try_undo_recovery(sk, tp))
+		if (tcp_try_undo_recovery(sk))
 			return;
 		tcp_complete_cwr(sk);
 		break;
@@ -2170,14 +2192,14 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 		} else {
 			int acked = prior_packets - tp->packets_out;
 			if (IsReno(tp))
-				tcp_remove_reno_sacks(sk, tp, acked);
-			is_dupack = tcp_try_undo_partial(sk, tp, acked);
+				tcp_remove_reno_sacks(sk, acked);
+			is_dupack = tcp_try_undo_partial(sk, acked);
 		}
 		break;
 	case TCP_CA_Loss:
 		if (flag&FLAG_DATA_ACKED)
 			icsk->icsk_retransmits = 0;
-		if (!tcp_try_undo_loss(sk, tp)) {
+		if (!tcp_try_undo_loss(sk)) {
 			tcp_moderate_cwnd(tp);
 			tcp_xmit_retransmit_queue(sk);
 			return;
@@ -2194,10 +2216,10 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 	}
 
 	if (icsk->icsk_ca_state == TCP_CA_Disorder)
-		tcp_try_undo_dsack(sk, tp);
+		tcp_try_undo_dsack(sk);
 
-	if (!tcp_time_to_recover(sk, tp)) {
-		tcp_try_to_open(sk, tp, flag);
+	if (!tcp_time_to_recover(sk)) {
+		tcp_try_to_open(sk, flag);
 		return;
 	}
 
@@ -2236,8 +2258,8 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 		tcp_set_ca_state(sk, TCP_CA_Recovery);
 	}
 
-	if (is_dupack || tcp_head_timedout(sk, tp))
-		tcp_update_scoreboard(sk, tp);
+	if (is_dupack || tcp_head_timedout(sk))
+		tcp_update_scoreboard(sk);
 	tcp_cwnd_down(sk);
 	tcp_xmit_retransmit_queue(sk);
 }
@@ -2313,8 +2335,10 @@ static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
  * RFC2988 recommends to restart timer to now+rto.
  */
 
-static void tcp_ack_packets_out(struct sock *sk, struct tcp_sock *tp)
+static void tcp_ack_packets_out(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	if (!tp->packets_out) {
 		inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS);
 	} else {
@@ -2471,7 +2495,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
 
 	if (acked&FLAG_ACKED) {
 		tcp_ack_update_rtt(sk, acked, seq_rtt);
-		tcp_ack_packets_out(sk, tp);
+		tcp_ack_packets_out(sk);
 		if (rtt_sample && !(acked & FLAG_RETRANS_DATA_ACKED))
 			(*rtt_sample)(sk, tcp_usrtt(&tv));
 
@@ -2556,9 +2580,10 @@ static inline int tcp_may_update_window(const struct tcp_sock *tp, const u32 ack
  * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2
  * and in FreeBSD. NetBSD's one is even worse.) is wrong.
  */
-static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
-				 struct sk_buff *skb, u32 ack, u32 ack_seq)
+static int tcp_ack_update_window(struct sock *sk, struct sk_buff *skb, u32 ack,
+				 u32 ack_seq)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
 	int flag = 0;
 	u32 nwin = ntohs(tcp_hdr(skb)->window);
 
@@ -2576,7 +2601,7 @@ static int tcp_ack_update_window(struct sock *sk, struct tcp_sock *tp,
 		 * fast path is recovered for sending TCP.
 		 */
 		tp->pred_flags = 0;
-		tcp_fast_path_check(sk, tp);
+		tcp_fast_path_check(sk);
 
 		if (nwin > tp->max_window) {
 			tp->max_window = nwin;
@@ -2762,7 +2787,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 	else
 		NET_INC_STATS_BH(LINUX_MIB_TCPPUREACKS);
 
-	flag |= tcp_ack_update_window(sk, tp, skb, ack, ack_seq);
+	flag |= tcp_ack_update_window(sk, skb, ack, ack_seq);
 
 	if (TCP_SKB_CB(skb)->sacked)
 		flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una);
@@ -3426,7 +3451,7 @@ queue_and_out:
 		}
 		tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 		if (skb->len)
-			tcp_event_data_recv(sk, tp, skb);
+			tcp_event_data_recv(sk, skb);
 		if (th->fin)
 			tcp_fin(skb, sk, th);
 
@@ -3443,7 +3468,7 @@ queue_and_out:
 		if (tp->rx_opt.num_sacks)
 			tcp_sack_remove(tp);
 
-		tcp_fast_path_check(sk, tp);
+		tcp_fast_path_check(sk);
 
 		if (eaten > 0)
 			__kfree_skb(skb);
@@ -3734,7 +3759,7 @@ static int tcp_prune_queue(struct sock *sk)
 	NET_INC_STATS_BH(LINUX_MIB_PRUNECALLED);
 
 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
-		tcp_clamp_window(sk, tp);
+		tcp_clamp_window(sk);
 	else if (tcp_memory_pressure)
 		tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
@@ -3803,8 +3828,10 @@ void tcp_cwnd_application_limited(struct sock *sk)
 	tp->snd_cwnd_stamp = tcp_time_stamp;
 }
 
-static int tcp_should_expand_sndbuf(struct sock *sk, struct tcp_sock *tp)
+static int tcp_should_expand_sndbuf(struct sock *sk)
 {
+	struct tcp_sock *tp = tcp_sk(sk);
+
 	/* If the user specified a specific send buffer setting, do
 	 * not modify it.
 	 */
@@ -3836,7 +3863,7 @@ static void tcp_new_space(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	if (tcp_should_expand_sndbuf(sk, tp)) {
+	if (tcp_should_expand_sndbuf(sk)) {
 		int sndmem = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) +
 			MAX_TCP_HEADER + 16 + sizeof(struct sk_buff),
 			demanded = max_t(unsigned int, tp->snd_cwnd,
@@ -3860,9 +3887,9 @@ static void tcp_check_space(struct sock *sk)
 	}
 }
 
-static inline void tcp_data_snd_check(struct sock *sk, struct tcp_sock *tp)
+static inline void tcp_data_snd_check(struct sock *sk)
 {
-	tcp_push_pending_frames(sk, tp);
+	tcp_push_pending_frames(sk);
 	tcp_check_space(sk);
 }
 
@@ -4196,7 +4223,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			 */
 			tcp_ack(sk, skb, 0);
 			__kfree_skb(skb);
-			tcp_data_snd_check(sk, tp);
+			tcp_data_snd_check(sk);
 			return 0;
 		} else { /* Header too small */
 			TCP_INC_STATS_BH(TCP_MIB_INERRS);
@@ -4267,12 +4294,12 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 			tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
 		}
 
-		tcp_event_data_recv(sk, tp, skb);
+		tcp_event_data_recv(sk, skb);
 
 		if (TCP_SKB_CB(skb)->ack_seq != tp->snd_una) {
 			/* Well, only one small jumplet in fast path... */
 			tcp_ack(sk, skb, FLAG_DATA);
-			tcp_data_snd_check(sk, tp);
+			tcp_data_snd_check(sk);
 			if (!inet_csk_ack_scheduled(sk))
 				goto no_ack;
 		}
@@ -4355,7 +4382,7 @@ step5:
 	/* step 7: process the segment text */
 	tcp_data_queue(sk, skb);
 
-	tcp_data_snd_check(sk, tp);
+	tcp_data_snd_check(sk);
 	tcp_ack_snd_check(sk);
 	return 0;
 
@@ -4672,7 +4699,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 		/* Do step6 onward by hand. */
 		tcp_urg(sk, skb, th);
 		__kfree_skb(skb);
-		tcp_data_snd_check(sk, tp);
+		tcp_data_snd_check(sk);
 		return 0;
 	}
 
@@ -4864,7 +4891,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
 
 	/* tcp_data could move socket to TIME-WAIT */
 	if (sk->sk_state != TCP_CLOSE) {
-		tcp_data_snd_check(sk, tp);
+		tcp_data_snd_check(sk);
 		tcp_ack_snd_check(sk);
 	}
 