author    David S. Miller <davem@davemloft.net>    2007-06-19 01:43:06 -0400
committer David S. Miller <davem@sunset.davemloft.net>    2007-10-10 19:47:51 -0400
commit    d06e021d71d95aae402340dc3d9f79f9c8ad11d7 (patch)
tree      c0f785af4d4449d70f802a556e36382e5ed5a07f /net/ipv4/tcp_input.c
parent    19b2b486580f5939688d3e225acdc0f4b291ed0d (diff)
[TCP]: Extract DSACK detection code from tcp_sacktag_write_queue().
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--    net/ipv4/tcp_input.c    56
1 file changed, 36 insertions(+), 20 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 0aa17243d369..5187870d0333 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -960,6 +960,39 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
  * Both of these heuristics are not used in Loss state, when we cannot
  * account for retransmits accurately.
  */
+static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
+			   struct tcp_sack_block_wire *sp, int num_sacks,
+			   u32 prior_snd_una)
+{
+	u32 start_seq_0 = ntohl(get_unaligned(&sp[0].start_seq));
+	u32 end_seq_0 = ntohl(get_unaligned(&sp[0].end_seq));
+	int dup_sack = 0;
+
+	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
+		dup_sack = 1;
+		tp->rx_opt.sack_ok |= 4;
+		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
+	} else if (num_sacks > 1) {
+		u32 end_seq_1 = ntohl(get_unaligned(&sp[1].end_seq));
+		u32 start_seq_1 = ntohl(get_unaligned(&sp[1].start_seq));
+
+		if (!after(end_seq_0, end_seq_1) &&
+		    !before(start_seq_0, start_seq_1)) {
+			dup_sack = 1;
+			tp->rx_opt.sack_ok |= 4;
+			NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
+		}
+	}
+
+	/* D-SACK for already forgotten data... Do dumb counting. */
+	if (dup_sack &&
+	    !after(end_seq_0, prior_snd_una) &&
+	    after(end_seq_0, tp->undo_marker))
+		tp->undo_retrans--;
+
+	return dup_sack;
+}
+
 static int
 tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_una)
 {
@@ -985,27 +1018,10 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 	}
 	prior_fackets = tp->fackets_out;
 
-	/* Check for D-SACK. */
-	if (before(ntohl(sp[0].start_seq), TCP_SKB_CB(ack_skb)->ack_seq)) {
-		flag |= FLAG_DSACKING_ACK;
-		found_dup_sack = 1;
-		tp->rx_opt.sack_ok |= 4;
-		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
-	} else if (num_sacks > 1 &&
-			!after(ntohl(sp[0].end_seq), ntohl(sp[1].end_seq)) &&
-			!before(ntohl(sp[0].start_seq), ntohl(sp[1].start_seq))) {
+	found_dup_sack = tcp_check_dsack(tp, ack_skb, sp,
+					 num_sacks, prior_snd_una);
+	if (found_dup_sack)
 		flag |= FLAG_DSACKING_ACK;
-		found_dup_sack = 1;
-		tp->rx_opt.sack_ok |= 4;
-		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
-	}
-
-	/* D-SACK for already forgotten data...
-	 * Do dumb counting. */
-	if (found_dup_sack &&
-	    !after(ntohl(sp[0].end_seq), prior_snd_una) &&
-	    after(ntohl(sp[0].end_seq), tp->undo_marker))
-		tp->undo_retrans--;
-
 	/* Eliminate too old ACKs, but take into
 	 * account more or less fresh ones, they can
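
Note: the helper extracted above applies the two D-SACK detection rules from RFC 2883: the first SACK block reports duplicate data either when it covers sequence space below the cumulative ACK point, or when it is wholly contained inside the second SACK block. The standalone sketch below (not kernel code; is_dsack() and the sample sequence numbers are illustrative, and the before()/after() helpers mirror the kernel's wrap-safe 32-bit sequence comparisons) shows the same predicate outside the kernel:

/*
 * Standalone sketch of the D-SACK checks performed by tcp_check_dsack():
 * block 0 is a D-SACK if it lies below the cumulative ACK, or if it is
 * fully contained in block 1.
 */
#include <stdint.h>
#include <stdio.h>

static int before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;	/* seq1 < seq2, modulo 2^32 */
}
#define after(seq2, seq1)	before(seq1, seq2)

/* Returns 1 if SACK block 0 reports a duplicate segment (D-SACK). */
static int is_dsack(uint32_t start0, uint32_t end0,
		    uint32_t start1, uint32_t end1,
		    int num_sacks, uint32_t ack_seq)
{
	if (before(start0, ack_seq))
		return 1;			/* block 0 below the cumulative ACK */
	if (num_sacks > 1 &&
	    !after(end0, end1) && !before(start0, start1))
		return 1;			/* block 0 contained in block 1 */
	return 0;
}

int main(void)
{
	/* Block 0 [1000,1500) lies below ack_seq 2000 -> D-SACK (prints 1). */
	printf("%d\n", is_dsack(1000, 1500, 0, 0, 1, 2000));
	/* Block 0 [3000,3500) inside block 1 [2500,4000) -> D-SACK (prints 1). */
	printf("%d\n", is_dsack(3000, 3500, 2500, 4000, 2, 2000));
	return 0;
}

The detection logic itself is unchanged by the patch; the refactor only moves it into tcp_check_dsack(), leaving tcp_sacktag_write_queue() to set FLAG_DSACKING_ACK from the return value.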