author	Baruch Even <baruch@ev-en.org>	2007-02-05 02:35:57 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2007-02-08 15:38:48 -0500
commit	fda03fbb56bf88f1fb1c57b2474082e5addaa884 (patch)
tree	b6067b0be2ee1bd65c704a1db893b45cd4920cc9 /net/ipv4
parent	ffbc61117d32dc4e768f999325ecfb2528d6b303 (diff)
[TCP]: Advance fast path pointer for first block only
Only advance the SACK fast-path pointer for the first block; the fast path assumes that only the first block advances next time, so we should not move the cached skb for the remaining SACK blocks.

Signed-off-by: Baruch Even <baruch@ev-en.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/tcp_input.c	34
1 file changed, 24 insertions, 10 deletions
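Before the diff itself, a minimal standalone sketch of the rule the patch enforces: a local cached position (cached_skb/cached_fack_count in the patch) carries the walk across all SACK blocks, but the persistent fast-path hint is written back only while processing the first post-sort block (first_sack_index). The struct names, the sacktag_walk_block helper, and the integer "write queue" below are simplified stand-ins for illustration, not the kernel's structures.

#include <stdio.h>

/* Simplified stand-ins for the kernel's write-queue position and the
 * tp->fastpath_{skb,cnt}_hint pair; not the real structures. */
struct pos {
	int skb;		/* index into an imaginary write queue */
	int fack_count;
};

struct fastpath_hint {
	struct pos p;
	int valid;
};

/* Walk one SACK block: advance the local cached position, and persist
 * the hint only when this is the first block after sorting. */
static void sacktag_walk_block(int i, int first_sack_index,
			       struct pos *cached, struct fastpath_hint *hint)
{
	cached->skb += 2;		/* pretend the block covered two skbs */
	cached->fack_count += 2;

	if (i == first_sack_index) {
		hint->p = *cached;
		hint->valid = 1;
	}
}

int main(void)
{
	struct fastpath_hint hint = { { 0, 0 }, 0 };
	struct pos cached = { 0, 0 };
	int num_sacks = 3, first_sack_index = 0;

	/* Resume from the hint if it is valid, as the patch does with
	 * cached_skb/cached_fack_count before the per-block loop. */
	if (hint.valid)
		cached = hint.p;

	for (int i = 0; i < num_sacks; i++)
		sacktag_walk_block(i, first_sack_index, &cached, &hint);

	/* The hint reflects only the first block's walk, even though the
	 * local position kept advancing through the later blocks. */
	printf("hint skb=%d fack_count=%d, cached skb=%d fack_count=%d\n",
	       hint.p.skb, hint.p.fack_count, cached.skb, cached.fack_count);
	return 0;
}

Run as-is, the sketch prints a hint that stops at the end of the first block while the local cursor has advanced through all three blocks, which is the behaviour the fast path relies on for the next ACK.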
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index c26076fb890e..7670ef968dce 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -936,13 +936,16 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 	struct tcp_sock *tp = tcp_sk(sk);
 	unsigned char *ptr = ack_skb->h.raw + TCP_SKB_CB(ack_skb)->sacked;
 	struct tcp_sack_block_wire *sp = (struct tcp_sack_block_wire *)(ptr+2);
+	struct sk_buff *cached_skb;
 	int num_sacks = (ptr[1] - TCPOLEN_SACK_BASE)>>3;
 	int reord = tp->packets_out;
 	int prior_fackets;
 	u32 lost_retrans = 0;
 	int flag = 0;
 	int dup_sack = 0;
+	int cached_fack_count;
 	int i;
+	int first_sack_index;
 
 	if (!tp->sacked_out)
 		tp->fackets_out = 0;
@@ -1000,6 +1003,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 		}
 	}
 
+	first_sack_index = 0;
 	if (flag)
 		num_sacks = 1;
 	else {
@@ -1016,6 +1020,10 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 					tmp = sp[j];
 					sp[j] = sp[j+1];
 					sp[j+1] = tmp;
+
+					/* Track where the first SACK block goes to */
+					if (j == first_sack_index)
+						first_sack_index = j+1;
 				}
 
 			}
@@ -1025,20 +1033,22 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 	/* clear flag as used for different purpose in following code */
 	flag = 0;
 
+	/* Use SACK fastpath hint if valid */
+	cached_skb = tp->fastpath_skb_hint;
+	cached_fack_count = tp->fastpath_cnt_hint;
+	if (!cached_skb) {
+		cached_skb = sk->sk_write_queue.next;
+		cached_fack_count = 0;
+	}
+
 	for (i=0; i<num_sacks; i++, sp++) {
 		struct sk_buff *skb;
 		__u32 start_seq = ntohl(sp->start_seq);
 		__u32 end_seq = ntohl(sp->end_seq);
 		int fack_count;
 
-		/* Use SACK fastpath hint if valid */
-		if (tp->fastpath_skb_hint) {
-			skb = tp->fastpath_skb_hint;
-			fack_count = tp->fastpath_cnt_hint;
-		} else {
-			skb = sk->sk_write_queue.next;
-			fack_count = 0;
-		}
+		skb = cached_skb;
+		fack_count = cached_fack_count;
 
 		/* Event "B" in the comment above. */
 		if (after(end_seq, tp->high_seq))
@@ -1048,8 +1058,12 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 			int in_sack, pcount;
 			u8 sacked;
 
-			tp->fastpath_skb_hint = skb;
-			tp->fastpath_cnt_hint = fack_count;
+			cached_skb = skb;
+			cached_fack_count = fack_count;
+			if (i == first_sack_index) {
+				tp->fastpath_skb_hint = skb;
+				tp->fastpath_cnt_hint = fack_count;
+			}
 
 			/* The retransmission queue is always in order, so
 			 * we can short-circuit the walk early.