author     Baruch Even <baruch@ev-en.org>                2007-02-05 02:36:42 -0500
committer  David S. Miller <davem@sunset.davemloft.net>  2007-02-08 15:38:49 -0500
commit     6f74651ae626ec672028587bc700538076dfbefb (patch)
tree       e00c9cf21c9d917a2392310980f9119ed9529221 /net
parent     fda03fbb56bf88f1fb1c57b2474082e5addaa884 (diff)
[TCP]: Separate DSACK from SACK fast path
Move the DSACK code outside the SACK fast-path checking code. Previously, if the
DSACK check determined that the information was too old, we stayed with a partially
copied cache. Most likely this matters very little, since the next packet will not be
a DSACK and we will find it in the cache, but it is still not good form and there
is little reason to couple the two checks.
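
For reference, the detection rule being hoisted out of the loop follows RFC 2883: the
first SACK block either reports data already covered by the cumulative ACK, or is fully
contained within the second block. Below is a minimal standalone sketch of that rule
(illustration only, not the kernel code); the helper names and host-order inputs are
hypothetical, and seq_before()/seq_after() merely mimic the kernel's wrap-safe
before()/after() comparisons.

#include <stdint.h>

/* Hypothetical stand-ins for the kernel's wrap-safe before()/after()
 * sequence comparisons; not the kernel definitions themselves. */
static inline int seq_before(uint32_t a, uint32_t b) { return (int32_t)(a - b) < 0; }
static inline int seq_after(uint32_t a, uint32_t b)  { return (int32_t)(b - a) < 0; }

/* RFC 2883 D-SACK detection on host-order sequence numbers: the first SACK
 * block reports data already covered by the cumulative ACK, or is fully
 * contained within the second block. */
static int is_dsack(uint32_t ack_seq, int num_sacks,
                    uint32_t sp0_start, uint32_t sp0_end,
                    uint32_t sp1_start, uint32_t sp1_end)
{
        if (seq_before(sp0_start, ack_seq))
                return 1;       /* duplicate of already acked data */
        if (num_sacks > 1 &&
            !seq_after(sp0_end, sp1_end) &&
            !seq_before(sp0_start, sp1_start))
                return 1;       /* first block contained in the second */
        return 0;
}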
Since the SACK receive cache does not need the data to be in host byte order, we also
remove the ntohl() calls in the checking loop.
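
As an aside (illustration only, not part of the patch): equality comparisons are
byte-order independent, which is why the cache can hold the raw __be32 values and the
fast-path loop no longer needs ntohl(); only ordering checks such as before()/after()
require host order. A small standalone sketch, assuming a plain uint32_t stand-in for
__be32 and a hypothetical cache_block_unchanged() helper:

#include <stdint.h>
#include <arpa/inet.h>          /* htonl() used only for the demonstration */

typedef uint32_t be32_t;        /* stand-in for the kernel's __be32 */

/* Equality of two 32-bit words gives the same answer whether they are in
 * network or host byte order, so the SACK receive cache can store the raw
 * on-the-wire values and compare them directly. */
static int cache_block_unchanged(be32_t cached_start, be32_t cached_end,
                                 be32_t start_seq, be32_t end_seq)
{
        return cached_start == start_seq && cached_end == end_seq;
}

int main(void)
{
        be32_t wire = htonl(0x12345678u);       /* as carried in the SACK option */
        /* Exits 0: identical network-order words compare equal without ntohl(). */
        return cache_block_unchanged(wire, wire,
                                     htonl(0x12345678u), htonl(0x12345678u)) ? 0 : 1;
}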
Signed-off-by: Baruch Even <baruch@ev-en.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
 -rw-r--r--  net/ipv4/tcp_input.c  66
 1 file changed, 31 insertions, 35 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 7670ef968dce..870f53afd363 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -951,16 +951,43 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 		tp->fackets_out = 0;
 	prior_fackets = tp->fackets_out;
 
+	/* Check for D-SACK. */
+	if (before(ntohl(sp[0].start_seq), TCP_SKB_CB(ack_skb)->ack_seq)) {
+		dup_sack = 1;
+		tp->rx_opt.sack_ok |= 4;
+		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
+	} else if (num_sacks > 1 &&
+		   !after(ntohl(sp[0].end_seq), ntohl(sp[1].end_seq)) &&
+		   !before(ntohl(sp[0].start_seq), ntohl(sp[1].start_seq))) {
+		dup_sack = 1;
+		tp->rx_opt.sack_ok |= 4;
+		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
+	}
+
+	/* D-SACK for already forgotten data...
+	 * Do dumb counting. */
+	if (dup_sack &&
+	    !after(ntohl(sp[0].end_seq), prior_snd_una) &&
+	    after(ntohl(sp[0].end_seq), tp->undo_marker))
+		tp->undo_retrans--;
+
+	/* Eliminate too old ACKs, but take into
+	 * account more or less fresh ones, they can
+	 * contain valid SACK info.
+	 */
+	if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window))
+		return 0;
+
 	/* SACK fastpath:
 	 * if the only SACK change is the increase of the end_seq of
 	 * the first block then only apply that SACK block
 	 * and use retrans queue hinting otherwise slowpath */
 	flag = 1;
-	for (i = 0; i< num_sacks; i++) {
-		__u32 start_seq = ntohl(sp[i].start_seq);
-		__u32 end_seq = ntohl(sp[i].end_seq);
+	for (i = 0; i < num_sacks; i++) {
+		__be32 start_seq = sp[i].start_seq;
+		__be32 end_seq = sp[i].end_seq;
 
-		if (i == 0){
+		if (i == 0) {
 			if (tp->recv_sack_cache[i].start_seq != start_seq)
 				flag = 0;
 		} else {
@@ -970,37 +997,6 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 		}
 		tp->recv_sack_cache[i].start_seq = start_seq;
 		tp->recv_sack_cache[i].end_seq = end_seq;
-
-		/* Check for D-SACK. */
-		if (i == 0) {
-			u32 ack = TCP_SKB_CB(ack_skb)->ack_seq;
-
-			if (before(start_seq, ack)) {
-				dup_sack = 1;
-				tp->rx_opt.sack_ok |= 4;
-				NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
-			} else if (num_sacks > 1 &&
-				   !after(end_seq, ntohl(sp[1].end_seq)) &&
-				   !before(start_seq, ntohl(sp[1].start_seq))) {
-				dup_sack = 1;
-				tp->rx_opt.sack_ok |= 4;
-				NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
-			}
-
-			/* D-SACK for already forgotten data...
-			 * Do dumb counting. */
-			if (dup_sack &&
-			    !after(end_seq, prior_snd_una) &&
-			    after(end_seq, tp->undo_marker))
-				tp->undo_retrans--;
-
-			/* Eliminate too old ACKs, but take into
-			 * account more or less fresh ones, they can
-			 * contain valid SACK info.
-			 */
-			if (before(ack, prior_snd_una - tp->max_window))
-				return 0;
-		}
 	}
 
 	first_sack_index = 0;