author     Arjan van de Ven <arjan@linux.intel.com>    2008-10-17 12:20:26 -0400
committer  Arjan van de Ven <arjan@linux.intel.com>    2008-10-17 12:20:26 -0400
commit     651dab4264e4ba0e563f5ff56f748127246e9065 (patch)
tree       016630974bdcb00fe529b673f96d389e0fd6dc94 /net/ipv4/tcp_output.c
parent     40b8606253552109815786e5d4b0de98782d31f5 (diff)
parent     2e532d68a2b3e2aa6b19731501222069735c741c (diff)

Merge commit 'linus/master' into merge-linus

Conflicts:
        arch/x86/kvm/i8254.c

Diffstat (limited to 'net/ipv4/tcp_output.c')

 -rw-r--r--  net/ipv4/tcp_output.c | 222 ++++++++++++++++++++++-----------------
 1 file changed, 113 insertions(+), 109 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8165f5aa8c71..990a58493235 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -345,6 +345,11 @@ static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags)
         TCP_SKB_CB(skb)->end_seq = seq;
 }
 
+static inline int tcp_urg_mode(const struct tcp_sock *tp)
+{
+        return tp->snd_una != tp->snd_up;
+}
+
 #define OPTION_SACK_ADVERTISE   (1 << 0)
 #define OPTION_TS               (1 << 1)
 #define OPTION_MD5              (1 << 2)
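The new helper derives urgent mode from sequence state instead of a stored tp->urg_mode flag: urgent mode is in effect exactly while the urgent pointer (snd_up) has not yet been acknowledged past by snd_una. For illustration, a minimal standalone C sketch of the predicate (toy struct, not the kernel's tcp_sock):

#include <stdio.h>
#include <stdint.h>

/* Toy stand-in for the two tcp_sock fields the predicate reads. */
struct toy_tcp_sock {
        uint32_t snd_una;       /* first unacknowledged sequence number */
        uint32_t snd_up;        /* urgent pointer */
};

/* Mirrors the new tcp_urg_mode(): urgent mode holds exactly while
 * snd_up differs from snd_una, so no flag has to be set or cleared. */
static int toy_urg_mode(const struct toy_tcp_sock *tp)
{
        return tp->snd_una != tp->snd_up;
}

int main(void)
{
        struct toy_tcp_sock tp = { .snd_una = 1000, .snd_up = 1000 };

        printf("fresh:  %d\n", toy_urg_mode(&tp));      /* 0 */
        tp.snd_up = 1500;       /* application sent urgent data */
        printf("urgent: %d\n", toy_urg_mode(&tp));      /* 1 */
        tp.snd_una = 1500;      /* peer ACKed through snd_up */
        printf("acked:  %d\n", toy_urg_mode(&tp));      /* 0 */
        return 0;
}

The flag-free form cannot go stale: once the peer ACKs up to snd_up, the predicate turns itself off.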
@@ -646,7 +651,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
         th->check               = 0;
         th->urg_ptr             = 0;
 
-        if (unlikely(tp->urg_mode &&
+        /* The urg_mode check is necessary during a below snd_una win probe */
+        if (unlikely(tcp_urg_mode(tp) &&
                      between(tp->snd_up, tcb->seq + 1, tcb->seq + 0xFFFF))) {
                 th->urg_ptr             = htons(tp->snd_up - tcb->seq);
                 th->urg                 = 1;
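The TCP urgent pointer field is only 16 bits wide, so URG can be signalled on a segment only when snd_up lies within 0xFFFF bytes of that segment's sequence number; between() performs the range test in modulo-2^32 sequence arithmetic. A hedged sketch, with between() re-derived from its definition in include/net/tcp.h (true iff seq2 <= seq1 <= seq3, circularly):

#include <stdio.h>
#include <stdint.h>

/* Re-derived from the kernel's between() in include/net/tcp.h:
 * true iff seq2 <= seq1 <= seq3, computed modulo 2^32 so the test
 * still works across sequence-number wraparound. */
static int between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
{
        return seq3 - seq2 >= seq1 - seq2;
}

int main(void)
{
        uint32_t seq = 4294966000u;     /* segment sequence, near wraparound */
        uint32_t snd_up = 500;          /* urgent pointer, already wrapped */

        /* Same shape as the tcp_transmit_skb() test: URG is set only if
         * snd_up lies within the 16-bit reach of this segment. */
        if (between(snd_up, seq + 1, seq + 0xFFFF))
                printf("URG representable, urg_ptr = %u\n",
                       (unsigned int)(uint16_t)(snd_up - seq));
        else
                printf("urgent pointer out of 16-bit range\n");
        return 0;
}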
@@ -1012,7 +1018,7 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
 /* Compute the current effective MSS, taking SACKs and IP options,
  * and even PMTU discovery events into account.
  *
- * LARGESEND note: !urg_mode is overkill, only frames up to snd_up
+ * LARGESEND note: !tcp_urg_mode is overkill, only frames up to snd_up
  * cannot be large. However, taking into account rare use of URG, this
  * is not a big flaw.
  */
@@ -1029,7 +1035,7 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
 
         mss_now = tp->mss_cache;
 
-        if (large_allowed && sk_can_gso(sk) && !tp->urg_mode)
+        if (large_allowed && sk_can_gso(sk) && !tcp_urg_mode(tp))
                 doing_tso = 1;
 
         if (dst) {
@@ -1193,7 +1199,7 @@ static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
         /* Don't use the nagle rule for urgent data (or for the final FIN).
          * Nagle can be ignored during F-RTO too (see RFC4138).
          */
-        if (tp->urg_mode || (tp->frto_counter == 2) ||
+        if (tcp_urg_mode(tp) || (tp->frto_counter == 2) ||
             (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
                 return 1;
 
@@ -1824,6 +1830,8 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb,
 
         /* changed transmit queue under us so clear hints */
         tcp_clear_retrans_hints_partial(tp);
+        if (next_skb == tp->retransmit_skb_hint)
+                tp->retransmit_skb_hint = skb;
 
         sk_wmem_free_skb(sk, next_skb);
 }
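tcp_retrans_try_collapse() merges next_skb into skb and then frees next_skb; without the two added lines, a retransmit_skb_hint still pointing at next_skb would dangle. A toy model of the repair (simplified singly linked queue, hypothetical names):

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins: a singly linked write queue and a cached "hint"
 * pointer into it, loosely modelling retransmit_skb_hint. */
struct toy_skb {
        int seq;
        struct toy_skb *next;
};

/* Merge nxt into skb and free nxt. Mirrors the fix: if the cached
 * hint pointed at the buffer being freed, move it to the survivor
 * instead of leaving it dangling. */
static void collapse(struct toy_skb *skb, struct toy_skb **hint)
{
        struct toy_skb *nxt = skb->next;

        skb->next = nxt->next;  /* absorb nxt's place in the queue */
        if (*hint == nxt)
                *hint = skb;    /* repair the hint before freeing */
        free(nxt);
}

int main(void)
{
        struct toy_skb *b = malloc(sizeof(*b));
        struct toy_skb *a = malloc(sizeof(*a));

        b->seq = 2000; b->next = NULL;
        a->seq = 1000; a->next = b;

        struct toy_skb *hint = b;       /* hint happens to be the victim */
        collapse(a, &hint);
        printf("hint now points at seq %d\n", hint->seq);       /* 1000 */
        free(a);
        return 0;
}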
@@ -1838,7 +1846,7 @@ void tcp_simple_retransmit(struct sock *sk)
         struct tcp_sock *tp = tcp_sk(sk);
         struct sk_buff *skb;
         unsigned int mss = tcp_current_mss(sk, 0);
-        int lost = 0;
+        u32 prior_lost = tp->lost_out;
 
         tcp_for_write_queue(skb, sk) {
                 if (skb == tcp_send_head(sk))
@@ -1849,17 +1857,13 @@ void tcp_simple_retransmit(struct sock *sk)
                                 TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
                                 tp->retrans_out -= tcp_skb_pcount(skb);
                         }
-                        if (!(TCP_SKB_CB(skb)->sacked & TCPCB_LOST)) {
-                                TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
-                                tp->lost_out += tcp_skb_pcount(skb);
-                                lost = 1;
-                        }
+                        tcp_skb_mark_lost_uncond_verify(tp, skb);
                 }
         }
 
-        tcp_clear_all_retrans_hints(tp);
+        tcp_clear_retrans_hints_partial(tp);
 
-        if (!lost)
+        if (prior_lost == tp->lost_out)
                 return;
 
         if (tcp_is_reno(tp))
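Rather than threading an int lost flag through the loop, the rewrite snapshots tp->lost_out before the walk and compares afterwards; tcp_skb_mark_lost_uncond_verify() (introduced elsewhere in this merge) centralizes the marking and accounting. A standalone sketch of the snapshot-and-compare pattern (toy types, hypothetical mark_lost()):

#include <stdio.h>
#include <stdint.h>

/* Toy model of the "snapshot and compare" pattern that replaced the
 * int lost flag: mark packets, then see whether the counter moved. */
struct toy_tp {
        uint32_t lost_out;
};

/* Hypothetical marker; the kernel's tcp_skb_mark_lost_uncond_verify()
 * similarly bumps lost_out only for not-yet-marked packets. */
static void mark_lost(struct toy_tp *tp, int already_marked)
{
        if (!already_marked)
                tp->lost_out++;
}

int main(void)
{
        struct toy_tp tp = { .lost_out = 3 };
        uint32_t prior_lost = tp.lost_out;      /* snapshot before the walk */

        mark_lost(&tp, 1);      /* already marked: no change */
        mark_lost(&tp, 0);      /* newly marked: lost_out becomes 4 */

        if (prior_lost == tp.lost_out)
                printf("nothing newly lost, bail out early\n");
        else
                printf("%u packet(s) newly marked lost\n",
                       tp.lost_out - prior_lost);
        return 0;
}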
@@ -1934,8 +1938,8 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
         /* Collapse two adjacent packets if worthwhile and we can. */
         if (!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
             (skb->len < (cur_mss >> 1)) &&
-            (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) &&
             (!tcp_skb_is_last(sk, skb)) &&
+            (tcp_write_queue_next(sk, skb) != tcp_send_head(sk)) &&
             (skb_shinfo(skb)->nr_frags == 0 &&
              skb_shinfo(tcp_write_queue_next(sk, skb))->nr_frags == 0) &&
             (tcp_skb_pcount(skb) == 1 &&
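Swapping the two conditions puts the tcp_skb_is_last() guard before the tcp_write_queue_next() lookup, relying on &&'s left-to-right short-circuit so the next-skb peek only happens once a next skb is known to exist. A small illustration of the guard-first ordering (toy list, not kernel code):

#include <stdio.h>
#include <stddef.h>

struct node { int val; struct node *next; };

static int is_last(const struct node *n) { return n->next == NULL; }
static struct node *next_node(const struct node *n) { return n->next; }

/* Mirrors the reordered condition: guard first, dereference second.
 * && evaluates left to right and stops at the first false operand,
 * so next_node() is never called on the tail. */
static int can_collapse(const struct node *n)
{
        return !is_last(n) && next_node(n)->val > 0;
}

int main(void)
{
        struct node tail = { .val = 7, .next = NULL };
        struct node head = { .val = 3, .next = &tail };

        printf("head: %d\n", can_collapse(&head));      /* 1: peeks at tail */
        printf("tail: %d\n", can_collapse(&tail));      /* 0: guard stops it */
        return 0;
}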
@@ -1996,86 +2000,18 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
         return err;
 }
 
-/* This gets called after a retransmit timeout, and the initially
- * retransmitted data is acknowledged. It tries to continue
- * resending the rest of the retransmit queue, until either
- * we've sent it all or the congestion window limit is reached.
- * If doing SACK, the first ACK which comes back for a timeout
- * based retransmit packet might feed us FACK information again.
- * If so, we use it to avoid unnecessarily retransmissions.
- */
-void tcp_xmit_retransmit_queue(struct sock *sk)
+static int tcp_can_forward_retransmit(struct sock *sk)
 {
         const struct inet_connection_sock *icsk = inet_csk(sk);
         struct tcp_sock *tp = tcp_sk(sk);
-        struct sk_buff *skb;
-        int packet_cnt;
-
-        if (tp->retransmit_skb_hint) {
-                skb = tp->retransmit_skb_hint;
-                packet_cnt = tp->retransmit_cnt_hint;
-        } else {
-                skb = tcp_write_queue_head(sk);
-                packet_cnt = 0;
-        }
-
-        /* First pass: retransmit lost packets. */
-        if (tp->lost_out) {
-                tcp_for_write_queue_from(skb, sk) {
-                        __u8 sacked = TCP_SKB_CB(skb)->sacked;
-
-                        if (skb == tcp_send_head(sk))
-                                break;
-                        /* we could do better than to assign each time */
-                        tp->retransmit_skb_hint = skb;
-                        tp->retransmit_cnt_hint = packet_cnt;
-
-                        /* Assume this retransmit will generate
-                         * only one packet for congestion window
-                         * calculation purposes. This works because
-                         * tcp_retransmit_skb() will chop up the
-                         * packet to be MSS sized and all the
-                         * packet counting works out.
-                         */
-                        if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
-                                return;
-
-                        if (sacked & TCPCB_LOST) {
-                                if (!(sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
-                                        int mib_idx;
-
-                                        if (tcp_retransmit_skb(sk, skb)) {
-                                                tp->retransmit_skb_hint = NULL;
-                                                return;
-                                        }
-                                        if (icsk->icsk_ca_state != TCP_CA_Loss)
-                                                mib_idx = LINUX_MIB_TCPFASTRETRANS;
-                                        else
-                                                mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
-                                        NET_INC_STATS_BH(sock_net(sk), mib_idx);
-
-                                        if (skb == tcp_write_queue_head(sk))
-                                                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
-                                                                          inet_csk(sk)->icsk_rto,
-                                                                          TCP_RTO_MAX);
-                                }
-
-                                packet_cnt += tcp_skb_pcount(skb);
-                                if (packet_cnt >= tp->lost_out)
-                                        break;
-                        }
-                }
-        }
-
-        /* OK, demanded retransmission is finished. */
 
         /* Forward retransmissions are possible only during Recovery. */
         if (icsk->icsk_ca_state != TCP_CA_Recovery)
-                return;
+                return 0;
 
         /* No forward retransmissions in Reno are possible. */
         if (tcp_is_reno(tp))
-                return;
+                return 0;
 
         /* Yeah, we have to make difficult choice between forward transmission
          * and retransmission... Both ways have their merits...
@@ -2086,43 +2022,104 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
          */
 
         if (tcp_may_send_now(sk))
-                return;
+                return 0;
+
+        return 1;
+}
 
-        /* If nothing is SACKed, highest_sack in the loop won't be valid */
-        if (!tp->sacked_out)
-                return;
+/* This gets called after a retransmit timeout, and the initially
+ * retransmitted data is acknowledged. It tries to continue
+ * resending the rest of the retransmit queue, until either
+ * we've sent it all or the congestion window limit is reached.
+ * If doing SACK, the first ACK which comes back for a timeout
+ * based retransmit packet might feed us FACK information again.
+ * If so, we use it to avoid unnecessarily retransmissions.
+ */
+void tcp_xmit_retransmit_queue(struct sock *sk)
+{
+        const struct inet_connection_sock *icsk = inet_csk(sk);
+        struct tcp_sock *tp = tcp_sk(sk);
+        struct sk_buff *skb;
+        struct sk_buff *hole = NULL;
+        u32 last_lost;
+        int mib_idx;
+        int fwd_rexmitting = 0;
 
-        if (tp->forward_skb_hint)
-                skb = tp->forward_skb_hint;
-        else
+        if (!tp->lost_out)
+                tp->retransmit_high = tp->snd_una;
+
+        if (tp->retransmit_skb_hint) {
+                skb = tp->retransmit_skb_hint;
+                last_lost = TCP_SKB_CB(skb)->end_seq;
+                if (after(last_lost, tp->retransmit_high))
+                        last_lost = tp->retransmit_high;
+        } else {
                 skb = tcp_write_queue_head(sk);
+                last_lost = tp->snd_una;
+        }
 
+        /* First pass: retransmit lost packets. */
         tcp_for_write_queue_from(skb, sk) {
-                if (skb == tcp_send_head(sk))
-                        break;
-                tp->forward_skb_hint = skb;
+                __u8 sacked = TCP_SKB_CB(skb)->sacked;
 
-                if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
+                if (skb == tcp_send_head(sk))
                         break;
+                /* we could do better than to assign each time */
+                if (hole == NULL)
+                        tp->retransmit_skb_hint = skb;
 
+                /* Assume this retransmit will generate
+                 * only one packet for congestion window
+                 * calculation purposes. This works because
+                 * tcp_retransmit_skb() will chop up the
+                 * packet to be MSS sized and all the
+                 * packet counting works out.
+                 */
                 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
-                        break;
+                        return;
+
+                if (fwd_rexmitting) {
+begin_fwd:
+                        if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
+                                break;
+                        mib_idx = LINUX_MIB_TCPFORWARDRETRANS;
+
+                } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) {
+                        tp->retransmit_high = last_lost;
+                        if (!tcp_can_forward_retransmit(sk))
+                                break;
+                        /* Backtrack if necessary to non-L'ed skb */
+                        if (hole != NULL) {
+                                skb = hole;
+                                hole = NULL;
+                        }
+                        fwd_rexmitting = 1;
+                        goto begin_fwd;
 
-                if (TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS)
+                } else if (!(sacked & TCPCB_LOST)) {
+                        if (hole == NULL && !(sacked & TCPCB_SACKED_RETRANS))
+                                hole = skb;
                         continue;
 
-                /* Ok, retransmit it. */
-                if (tcp_retransmit_skb(sk, skb)) {
-                        tp->forward_skb_hint = NULL;
-                        break;
+                } else {
+                        last_lost = TCP_SKB_CB(skb)->end_seq;
+                        if (icsk->icsk_ca_state != TCP_CA_Loss)
+                                mib_idx = LINUX_MIB_TCPFASTRETRANS;
+                        else
+                                mib_idx = LINUX_MIB_TCPSLOWSTARTRETRANS;
                 }
 
+                if (sacked & (TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))
+                        continue;
+
+                if (tcp_retransmit_skb(sk, skb))
+                        return;
+                NET_INC_STATS_BH(sock_net(sk), mib_idx);
+
                 if (skb == tcp_write_queue_head(sk))
                         inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                                   inet_csk(sk)->icsk_rto,
                                                   TCP_RTO_MAX);
-
-                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFORWARDRETRANS);
         }
 }
 
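The rewritten tcp_xmit_retransmit_queue() folds the old two-pass walk (lost packets, then forward retransmissions) into a single pass: it retransmits LOST packets up to retransmit_high, remembers the first non-LOST, not-yet-retransmitted skb as a hole, and once past the lost region backtracks to that hole and forward-retransmits up to the highest SACKed sequence. A loose standalone model of just the control flow (array indices standing in for sequence numbers; a sketch, not the kernel logic verbatim):

#include <stdio.h>

/* Per-packet marks, loosely after TCPCB_LOST/TCPCB_SACKED_ACKED/
 * TCPCB_SACKED_RETRANS. */
enum { LOST = 1, SACKED = 2, RETRANS = 4 };

/* One-pass model of the rewritten walk: retransmit LOST packets up
 * to retransmit_high, remembering the first candidate "hole"; then
 * backtrack to that hole and forward-retransmit up to the highest
 * SACKed sequence. */
static void walk(int *sacked, int n, int retransmit_high, int highest_sack)
{
        int hole = -1, fwd = 0, i = 0;

        while (i < n) {
                if (fwd) {
                        if (i >= highest_sack)
                                break;          /* nothing SACKed beyond */
                } else if (i >= retransmit_high) {
                        if (hole >= 0) {        /* backtrack to the hole */
                                i = hole;
                                hole = -1;
                        }
                        fwd = 1;
                        continue;
                } else if (!(sacked[i] & LOST)) {
                        if (hole < 0 && !(sacked[i] & RETRANS))
                                hole = i;       /* first forward candidate */
                        i++;
                        continue;
                }
                if (!(sacked[i] & (SACKED | RETRANS))) {
                        printf("%s retransmit of %d\n",
                               fwd ? "forward" : "lost", i);
                        sacked[i] |= RETRANS;
                }
                i++;
        }
}

int main(void)
{
        /*              0     1    2       3     4  */
        int sacked[] = { LOST, 0, SACKED, LOST, 0 };

        walk(sacked, 5, 4, 4);  /* lost region ends at 4, SACK up to 4 */
        return 0;
}

Running this prints the lost retransmits of 0 and 3 first, then the forward retransmit of the hole at 1, while the SACKed packet 2 is skipped, which is the ordering the new single-pass walk is designed to produce.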
@@ -2241,6 +2238,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
         struct sk_buff *skb;
         struct tcp_md5sig_key *md5;
         __u8 *md5_hash_location;
+        int mss;
 
         skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
         if (skb == NULL)
@@ -2251,13 +2249,17 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 
         skb->dst = dst_clone(dst);
 
+        mss = dst_metric(dst, RTAX_ADVMSS);
+        if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss)
+                mss = tp->rx_opt.user_mss;
+
         if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
                 __u8 rcv_wscale;
                 /* Set this up on the first call only */
                 req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW);
                 /* tcp_full_space because it is guaranteed to be the first packet */
                 tcp_select_initial_window(tcp_full_space(sk),
-                        dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
+                        mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
                         &req->rcv_wnd,
                         &req->window_clamp,
                         ireq->wscale_ok,
@@ -2267,8 +2269,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
 
         memset(&opts, 0, sizeof(opts));
         TCP_SKB_CB(skb)->when = tcp_time_stamp;
-        tcp_header_size = tcp_synack_options(sk, req,
-                                             dst_metric(dst, RTAX_ADVMSS),
+        tcp_header_size = tcp_synack_options(sk, req, mss,
                                              skb, &opts, &md5) +
                           sizeof(struct tcphdr);
 
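Together, the three tcp_make_synack() changes compute the advertised MSS once, as the route's RTAX_ADVMSS metric clamped by a user-configured MSS (rx_opt.user_mss, set via setsockopt(TCP_MAXSEG)), and reuse it for both the initial window and the SYN-ACK options. A sketch of the clamp (assuming user_mss == 0 means unset, as in the kernel):

#include <stdio.h>

/* Sketch of the clamp applied in tcp_make_synack() (and again in
 * tcp_connect_init() further down this diff): start from the route's
 * RTAX_ADVMSS metric, lower it to the user's TCP_MAXSEG value when
 * one is set and smaller. */
static unsigned int advertised_mss(unsigned int route_advmss,
                                   unsigned int user_mss)
{
        unsigned int mss = route_advmss;

        if (user_mss && user_mss < mss)
                mss = user_mss;         /* honour setsockopt(TCP_MAXSEG) */
        return mss;
}

int main(void)
{
        printf("%u\n", advertised_mss(1460, 0));        /* 1460: no user mss */
        printf("%u\n", advertised_mss(1460, 536));      /* 536: clamped */
        printf("%u\n", advertised_mss(1460, 9000));     /* 1460: never raised */
        return 0;
}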
@@ -2280,7 +2281,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
         th->syn = 1;
         th->ack = 1;
         TCP_ECN_make_synack(req, th);
-        th->source = inet_sk(sk)->sport;
+        th->source = ireq->loc_port;
         th->dest = ireq->rmt_port;
         /* Setting of flags are superfluous here for callers (and ECE is
          * not even correctly set)
@@ -2342,6 +2343,9 @@ static void tcp_connect_init(struct sock *sk)
         if (!tp->window_clamp)
                 tp->window_clamp = dst_metric(dst, RTAX_WINDOW);
         tp->advmss = dst_metric(dst, RTAX_ADVMSS);
+        if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < tp->advmss)
+                tp->advmss = tp->rx_opt.user_mss;
+
         tcp_initialize_rcv_mss(sk);
 
         tcp_select_initial_window(tcp_full_space(sk),
@@ -2360,6 +2364,7 @@ static void tcp_connect_init(struct sock *sk)
         tcp_init_wl(tp, tp->write_seq, 0);
         tp->snd_una = tp->write_seq;
         tp->snd_sml = tp->write_seq;
+        tp->snd_up = tp->write_seq;
         tp->rcv_nxt = 0;
         tp->rcv_wup = 0;
         tp->copied_seq = 0;
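Initializing snd_up matters because urgent mode is now the derived predicate snd_una != snd_up (see the tcp_urg_mode() hunk above): a stale snd_up would make a brand-new connection appear to have urgent data pending. A toy demonstration:

#include <stdio.h>
#include <stdint.h>

/* Toy stand-in for the tcp_sock fields touched here. */
struct toy_tcp_sock {
        uint32_t write_seq, snd_una, snd_up;
};

/* Models the added line in tcp_connect_init(): snd_up must start
 * equal to snd_una, or the derived urgent-mode predicate would be
 * true from the very first segment. */
static void toy_connect_init(struct toy_tcp_sock *tp, uint32_t isn)
{
        tp->write_seq = isn;
        tp->snd_una = tp->write_seq;
        tp->snd_up = tp->write_seq;     /* the new initialization */
}

int main(void)
{
        struct toy_tcp_sock tp = { .snd_up = 0xdeadbeef };      /* stale */

        toy_connect_init(&tp, 12345);
        printf("urg mode after connect: %d\n", tp.snd_una != tp.snd_up); /* 0 */
        return 0;
}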
@@ -2569,8 +2574,7 @@ int tcp_write_wakeup(struct sock *sk)
                 tcp_event_new_data_sent(sk, skb);
                 return err;
         } else {
-                if (tp->urg_mode &&
-                    between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
+                if (between(tp->snd_up, tp->snd_una + 1, tp->snd_una + 0xFFFF))
                         tcp_xmit_probe_skb(sk, 1);
                 return tcp_xmit_probe_skb(sk, 0);
         }
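The explicit urg_mode test could be dropped here because the between() range already implies it: snd_up can only lie in (snd_una, snd_una + 0xFFFF] when snd_up != snd_una, which is precisely the new tcp_urg_mode() predicate. A quick standalone check:

#include <stdio.h>
#include <stdint.h>

/* Re-derived from include/net/tcp.h: seq2 <= seq1 <= seq3, mod 2^32. */
static int between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
{
        return seq3 - seq2 >= seq1 - seq2;
}

int main(void)
{
        uint32_t snd_una = 1000;

        /* snd_up == snd_una: the range (snd_una, snd_una + 0xFFFF]
         * misses it, so no urgent probe is sent - the old urg_mode
         * test was implied by the range check all along. */
        printf("%d\n", between(snd_una, snd_una + 1, snd_una + 0xFFFF)); /* 0 */

        /* urgent byte pending at 1200: the probe carries URG */
        printf("%d\n", between(1200, snd_una + 1, snd_una + 0xFFFF));    /* 1 */
        return 0;
}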