path: root/net/ipv4/tcp_output.c
author    Ingo Molnar <mingo@elte.hu>  2009-04-07 05:15:40 -0400
committer Ingo Molnar <mingo@elte.hu>  2009-04-07 05:15:40 -0400
commit    5e34437840d33554f69380584311743b39e8fbeb (patch)
tree      e081135619ee146af5efb9ee883afca950df5757 /net/ipv4/tcp_output.c
parent    77d05632baee21b1cef8730d7c06aa69601e4dca (diff)
parent    d508afb437daee7cf07da085b635c44a4ebf9b38 (diff)
Merge branch 'linus' into core/softlockup
Conflicts:
	kernel/sysctl.c
Diffstat (limited to 'net/ipv4/tcp_output.c')
-rw-r--r--  net/ipv4/tcp_output.c  154
1 file changed, 72 insertions(+), 82 deletions(-)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index dda42f0bd7a3..53300fa2359f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -441,10 +441,7 @@ static void tcp_options_write(__be32 *ptr, struct tcp_sock *tp,
 			*ptr++ = htonl(sp[this_sack].end_seq);
 		}
 
-		if (tp->rx_opt.dsack) {
-			tp->rx_opt.dsack = 0;
-			tp->rx_opt.eff_sacks = tp->rx_opt.num_sacks;
-		}
+		tp->rx_opt.dsack = 0;
 	}
 }
 
@@ -550,6 +547,7 @@ static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
 	struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL;
 	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned size = 0;
+	unsigned int eff_sacks;
 
 #ifdef CONFIG_TCP_MD5SIG
 	*md5 = tp->af_specific->md5_lookup(sk, sk);
@@ -568,10 +566,11 @@ static unsigned tcp_established_options(struct sock *sk, struct sk_buff *skb,
 		size += TCPOLEN_TSTAMP_ALIGNED;
 	}
 
-	if (unlikely(tp->rx_opt.eff_sacks)) {
+	eff_sacks = tp->rx_opt.num_sacks + tp->rx_opt.dsack;
+	if (unlikely(eff_sacks)) {
 		const unsigned remaining = MAX_TCP_OPTION_SPACE - size;
 		opts->num_sack_blocks =
-			min_t(unsigned, tp->rx_opt.eff_sacks,
+			min_t(unsigned, eff_sacks,
 			      (remaining - TCPOLEN_SACK_BASE_ALIGNED) /
 			      TCPOLEN_SACK_PERBLOCK);
 		size += TCPOLEN_SACK_BASE_ALIGNED +
@@ -663,10 +662,14 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	th->urg_ptr = 0;
 
 	/* The urg_mode check is necessary during a below snd_una win probe */
-	if (unlikely(tcp_urg_mode(tp) &&
-		     between(tp->snd_up, tcb->seq + 1, tcb->seq + 0xFFFF))) {
-		th->urg_ptr = htons(tp->snd_up - tcb->seq);
-		th->urg = 1;
+	if (unlikely(tcp_urg_mode(tp) && before(tcb->seq, tp->snd_up))) {
+		if (before(tp->snd_up, tcb->seq + 0x10000)) {
+			th->urg_ptr = htons(tp->snd_up - tcb->seq);
+			th->urg = 1;
+		} else if (after(tcb->seq + 0xFFFF, tp->snd_nxt)) {
+			th->urg_ptr = 0xFFFF;
+			th->urg = 1;
+		}
 	}
 
 	tcp_options_write((__be32 *)(th + 1), tp, &opts, &md5_hash_location);
@@ -751,6 +754,36 @@ static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
 		tp->fackets_out -= decr;
 }
 
+/* Pcount in the middle of the write queue got changed, we need to do various
+ * tweaks to fix counters
+ */
+static void tcp_adjust_pcount(struct sock *sk, struct sk_buff *skb, int decr)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	tp->packets_out -= decr;
+
+	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
+		tp->sacked_out -= decr;
+	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
+		tp->retrans_out -= decr;
+	if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
+		tp->lost_out -= decr;
+
+	/* Reno case is special. Sigh... */
+	if (tcp_is_reno(tp) && decr > 0)
+		tp->sacked_out -= min_t(u32, tp->sacked_out, decr);
+
+	tcp_adjust_fackets_out(sk, skb, decr);
+
+	if (tp->lost_skb_hint &&
+	    before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) &&
+	    (tcp_is_fack(tp) || TCP_SKB_CB(skb)->sacked))
+		tp->lost_cnt_hint -= decr;
+
+	tcp_verify_left_out(tp);
+}
+
 /* Function to create two new TCP segments. Shrinks the given segment
  * to the specified size and appends a new segment with the rest of the
  * packet to the list. This won't be called frequently, I hope.
@@ -763,11 +796,10 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
 	struct sk_buff *buff;
 	int nsize, old_factor;
 	int nlen;
-	u16 flags;
+	u8 flags;
 
 	BUG_ON(len > skb->len);
 
-	tcp_clear_retrans_hints_partial(tp);
 	nsize = skb_headlen(skb) - len;
 	if (nsize < 0)
 		nsize = 0;
@@ -834,22 +866,8 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len,
 		int diff = old_factor - tcp_skb_pcount(skb) -
 			tcp_skb_pcount(buff);
 
-		tp->packets_out -= diff;
-
-		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
-			tp->sacked_out -= diff;
-		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
-			tp->retrans_out -= diff;
-
-		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST)
-			tp->lost_out -= diff;
-
-		/* Adjust Reno SACK estimate. */
-		if (tcp_is_reno(tp) && diff > 0) {
-			tcp_dec_pcount_approx_int(&tp->sacked_out, diff);
-			tcp_verify_left_out(tp);
-		}
-		tcp_adjust_fackets_out(sk, skb, diff);
+		if (diff)
+			tcp_adjust_pcount(sk, skb, diff);
 	}
 
 	/* Link BUFF into the send queue. */
@@ -913,7 +931,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
 	 * factor and mss.
 	 */
 	if (tcp_skb_pcount(skb) > 1)
-		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));
+		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk));
 
 	return 0;
 }
@@ -974,15 +992,6 @@ void tcp_mtup_init(struct sock *sk)
 	icsk->icsk_mtup.probe_size = 0;
 }
 
-/* Bound MSS / TSO packet size with the half of the window */
-static int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
-{
-	if (tp->max_window && pktsize > (tp->max_window >> 1))
-		return max(tp->max_window >> 1, 68U - tp->tcp_header_len);
-	else
-		return pktsize;
-}
-
 /* This function synchronize snd mss to current pmtu/exthdr set.
 
    tp->rx_opt.user_mss is mss set by user by TCP_MAXSEG. It does NOT counts
@@ -1029,22 +1038,17 @@ unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
 /* Compute the current effective MSS, taking SACKs and IP options,
  * and even PMTU discovery events into account.
  */
-unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
+unsigned int tcp_current_mss(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct dst_entry *dst = __sk_dst_get(sk);
 	u32 mss_now;
-	u16 xmit_size_goal;
-	int doing_tso = 0;
 	unsigned header_len;
 	struct tcp_out_options opts;
 	struct tcp_md5sig_key *md5;
 
 	mss_now = tp->mss_cache;
 
-	if (large_allowed && sk_can_gso(sk))
-		doing_tso = 1;
-
 	if (dst) {
 		u32 mtu = dst_mtu(dst);
 		if (mtu != inet_csk(sk)->icsk_pmtu_cookie)
@@ -1062,19 +1066,6 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
 		mss_now -= delta;
 	}
 
-	xmit_size_goal = mss_now;
-
-	if (doing_tso) {
-		xmit_size_goal = ((sk->sk_gso_max_size - 1) -
-				  inet_csk(sk)->icsk_af_ops->net_header_len -
-				  inet_csk(sk)->icsk_ext_hdr_len -
-				  tp->tcp_header_len);
-
-		xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
-		xmit_size_goal -= (xmit_size_goal % mss_now);
-	}
-	tp->xmit_size_goal = xmit_size_goal;
-
 	return mss_now;
 }
 
@@ -1256,7 +1247,7 @@ int tcp_may_send_now(struct sock *sk)
 	struct sk_buff *skb = tcp_send_head(sk);
 
 	return (skb &&
-		tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
+		tcp_snd_test(sk, skb, tcp_current_mss(sk),
 			     (tcp_skb_is_last(sk, skb) ?
 			      tp->nonagle : TCP_NAGLE_PUSH)));
 }
@@ -1273,7 +1264,7 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
 {
 	struct sk_buff *buff;
 	int nlen = skb->len - len;
-	u16 flags;
+	u8 flags;
 
 	/* All of a TSO frame must be composed of paged data. */
 	if (skb->len != skb->data_len)
@@ -1352,6 +1343,10 @@ static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
 	if (limit >= sk->sk_gso_max_size)
 		goto send_now;
 
+	/* Middle in queue won't get any more data, full sendable already? */
+	if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
+		goto send_now;
+
 	if (sysctl_tcp_tso_win_divisor) {
 		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
 
@@ -1405,11 +1400,11 @@ static int tcp_mtu_probe(struct sock *sk)
 	    icsk->icsk_mtup.probe_size ||
 	    inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
 	    tp->snd_cwnd < 11 ||
-	    tp->rx_opt.eff_sacks)
+	    tp->rx_opt.num_sacks || tp->rx_opt.dsack)
 		return -1;
 
 	/* Very simple search strategy: just double the MSS. */
-	mss_now = tcp_current_mss(sk, 0);
+	mss_now = tcp_current_mss(sk);
 	probe_size = 2 * tp->mss_cache;
 	size_needed = probe_size + (tp->reordering + 1) * tp->mss_cache;
 	if (probe_size > tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_high)) {
@@ -1754,11 +1749,9 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct sk_buff *next_skb = tcp_write_queue_next(sk, skb);
 	int skb_size, next_skb_size;
-	u16 flags;
 
 	skb_size = skb->len;
 	next_skb_size = next_skb->len;
-	flags = TCP_SKB_CB(skb)->flags;
 
 	BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1);
 
@@ -1778,30 +1771,21 @@ static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb)
 	/* Update sequence range on original skb. */
 	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
 
-	/* Merge over control information. */
-	flags |= TCP_SKB_CB(next_skb)->flags; /* This moves PSH/FIN etc. over */
-	TCP_SKB_CB(skb)->flags = flags;
+	/* Merge over control information. This moves PSH/FIN etc. over */
+	TCP_SKB_CB(skb)->flags |= TCP_SKB_CB(next_skb)->flags;
 
 	/* All done, get rid of second SKB and account for it so
 	 * packet counting does not break.
 	 */
 	TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS;
-	if (TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_RETRANS)
-		tp->retrans_out -= tcp_skb_pcount(next_skb);
-	if (TCP_SKB_CB(next_skb)->sacked & TCPCB_LOST)
-		tp->lost_out -= tcp_skb_pcount(next_skb);
-	/* Reno case is special. Sigh... */
-	if (tcp_is_reno(tp) && tp->sacked_out)
-		tcp_dec_pcount_approx(&tp->sacked_out, next_skb);
-
-	tcp_adjust_fackets_out(sk, next_skb, tcp_skb_pcount(next_skb));
-	tp->packets_out -= tcp_skb_pcount(next_skb);
 
 	/* changed transmit queue under us so clear hints */
 	tcp_clear_retrans_hints_partial(tp);
 	if (next_skb == tp->retransmit_skb_hint)
 		tp->retransmit_skb_hint = skb;
 
+	tcp_adjust_pcount(sk, next_skb, tcp_skb_pcount(next_skb));
+
 	sk_wmem_free_skb(sk, next_skb);
 }
 
@@ -1894,7 +1878,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
 		return -EHOSTUNREACH; /* Routing failure or similar. */
 
-	cur_mss = tcp_current_mss(sk, 0);
+	cur_mss = tcp_current_mss(sk);
 
 	/* If receiver has shrunk his window, and skb is out of
 	 * new window, do not retransmit it. The exception is the
@@ -1908,6 +1892,13 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	if (skb->len > cur_mss) {
 		if (tcp_fragment(sk, skb, cur_mss, cur_mss))
 			return -ENOMEM; /* We'll try again later. */
+	} else {
+		int oldpcount = tcp_skb_pcount(skb);
+
+		if (unlikely(oldpcount > 1)) {
+			tcp_init_tso_segs(sk, skb, cur_mss);
+			tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb));
+		}
 	}
 
 	tcp_retrans_try_collapse(sk, skb, cur_mss);
@@ -2023,7 +2014,6 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 		last_lost = tp->snd_una;
 	}
 
-	/* First pass: retransmit lost packets. */
 	tcp_for_write_queue_from(skb, sk) {
 		__u8 sacked = TCP_SKB_CB(skb)->sacked;
 
@@ -2062,7 +2052,7 @@ begin_fwd:
 			goto begin_fwd;
 
 		} else if (!(sacked & TCPCB_LOST)) {
-			if (hole == NULL && !(sacked & TCPCB_SACKED_RETRANS))
+			if (hole == NULL && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
 				hole = skb;
 			continue;
 
@@ -2101,7 +2091,7 @@ void tcp_send_fin(struct sock *sk)
 	 * unsent frames. But be careful about outgoing SACKS
 	 * and IP options.
 	 */
-	mss_now = tcp_current_mss(sk, 1);
+	mss_now = tcp_current_mss(sk);
 
 	if (tcp_send_head(sk) != NULL) {
 		TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
@@ -2326,7 +2316,7 @@ static void tcp_connect_init(struct sock *sk)
 	sk->sk_err = 0;
 	sock_reset_flag(sk, SOCK_DONE);
 	tp->snd_wnd = 0;
-	tcp_init_wl(tp, tp->write_seq, 0);
+	tcp_init_wl(tp, 0);
 	tp->snd_una = tp->write_seq;
 	tp->snd_sml = tp->write_seq;
 	tp->snd_up = tp->write_seq;
@@ -2513,7 +2503,7 @@ int tcp_write_wakeup(struct sock *sk)
 	if ((skb = tcp_send_head(sk)) != NULL &&
 	    before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) {
 		int err;
-		unsigned int mss = tcp_current_mss(sk, 0);
+		unsigned int mss = tcp_current_mss(sk);
 		unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
 
 		if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq))