author	David S. Miller <davem@davemloft.net>	2005-09-02 01:47:01 -0400
committer	David S. Miller <davem@davemloft.net>	2005-09-02 01:47:01 -0400
commit	6475be16fd9b3c6746ca4d18959246b13c669ea8 (patch)
tree	03e0da36680ddb227591a4007fa4e6f18d82782c /net/ipv4/tcp_input.c
parent	ef015786152adaff5a6a8bf0c8ea2f70cee8059d (diff)
[TCP]: Keep TSO enabled even during loss events.
All we need to do is resegment the queue so that
we record SACK information accurately. The edges
of the SACK blocks guide our resegmenting decisions.
With help from Herbert Xu.
Signed-off-by: David S. Miller <davem@davemloft.net>
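As an illustrative aside (not part of the commit): a minimal user-space sketch of the edge-guided split decision implemented in the second hunk below. The before()/after() helpers mirror the kernel's wrap-safe sequence comparisons from include/net/tcp.h; sack_split_len() is a hypothetical stand-in that only computes the byte offset at which tcp_fragment() would split a straddling skb.

    #include <stdio.h>
    #include <stdint.h>

    /* Wrap-safe TCP sequence comparisons, as in include/net/tcp.h. */
    static int before(uint32_t seq1, uint32_t seq2)
    {
            return (int32_t)(seq1 - seq2) < 0;
    }

    /* after(a, b) means a is later in sequence space than b. */
    static int after(uint32_t seq2, uint32_t seq1)
    {
            return before(seq1, seq2);
    }

    /*
     * Hypothetical stand-in for the split decision in
     * tcp_sacktag_write_queue(): if a multi-segment (TSO) skb straddles
     * a SACK block edge, compute the byte length (pkt_len) at which
     * tcp_fragment() would split it, so SACK bookkeeping lines up
     * exactly with the block edges.  Returns 0 when no split is needed.
     */
    static uint32_t sack_split_len(uint32_t skb_seq, uint32_t skb_end_seq,
                                   uint32_t start_seq, uint32_t end_seq,
                                   int pcount)
    {
            if (pcount > 1 &&
                (after(start_seq, skb_seq) || before(end_seq, skb_end_seq))) {
                    if (after(start_seq, skb_seq))
                            /* Block starts inside the skb: cut off the head. */
                            return start_seq - skb_seq;
                    /* Block ends inside the skb: cut off the tail. */
                    return end_seq - skb_seq;
            }
            return 0;
    }

    int main(void)
    {
            /* A 3-segment TSO skb covering [1000, 4000); SACK block [2000, 5000). */
            printf("split at %u bytes\n",
                   sack_split_len(1000, 4000, 2000, 5000, 3)); /* -> 1000 */
            /* Same skb; SACK block [0, 3000) ends inside it. */
            printf("split at %u bytes\n",
                   sack_split_len(1000, 4000, 0, 3000, 3));    /* -> 2000 */
            return 0;
    }

The design point is that SACK state is tracked per skb, so a TSO skb only partially covered by a SACK block cannot be marked accurately as a whole; splitting exactly at the block edge preserves per-segment accuracy, which is what lets the first hunk drop the old fallback of disabling TSO outright.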
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--	net/ipv4/tcp_input.c	36
1 file changed, 24 insertions(+), 12 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1afb080bdf0c..29222b964951 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -923,14 +923,6 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 	int flag = 0;
 	int i;
 
-	/* So, SACKs for already sent large segments will be lost.
-	 * Not good, but alternative is to resegment the queue. */
-	if (sk->sk_route_caps & NETIF_F_TSO) {
-		sk->sk_route_caps &= ~NETIF_F_TSO;
-		sock_set_flag(sk, SOCK_NO_LARGESEND);
-		tp->mss_cache = tp->mss_cache;
-	}
-
 	if (!tp->sacked_out)
 		tp->fackets_out = 0;
 	prior_fackets = tp->fackets_out;
@@ -978,20 +970,40 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 			flag |= FLAG_DATA_LOST;
 
 		sk_stream_for_retrans_queue(skb, sk) {
-			u8 sacked = TCP_SKB_CB(skb)->sacked;
-			int in_sack;
+			int in_sack, pcount;
+			u8 sacked;
 
 			/* The retransmission queue is always in order, so
 			 * we can short-circuit the walk early.
 			 */
-			if(!before(TCP_SKB_CB(skb)->seq, end_seq))
+			if (!before(TCP_SKB_CB(skb)->seq, end_seq))
 				break;
 
-			fack_count += tcp_skb_pcount(skb);
+			pcount = tcp_skb_pcount(skb);
+
+			if (pcount > 1 &&
+			    (after(start_seq, TCP_SKB_CB(skb)->seq) ||
+			     before(end_seq, TCP_SKB_CB(skb)->end_seq))) {
+				unsigned int pkt_len;
+
+				if (after(start_seq, TCP_SKB_CB(skb)->seq))
+					pkt_len = (start_seq -
+						   TCP_SKB_CB(skb)->seq);
+				else
+					pkt_len = (end_seq -
+						   TCP_SKB_CB(skb)->seq);
+				if (tcp_fragment(sk, skb, pkt_len, skb_shinfo(skb)->tso_size))
+					break;
+				pcount = tcp_skb_pcount(skb);
+			}
+
+			fack_count += pcount;
 
 			in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) &&
 				!before(end_seq, TCP_SKB_CB(skb)->end_seq);
 
+			sacked = TCP_SKB_CB(skb)->sacked;
+
 			/* Account D-SACK for retransmitted packet. */
 			if ((dup_sack && in_sack) &&
 			    (sacked & TCPCB_RETRANS) &&