about summary refs log tree commit diff stats
path: root/net/ipv4/tcp_input.c
diff options
context:
space:
mode:
authorIlpo Järvinen <ilpo.jarvinen@helsinki.fi>2007-12-01 17:48:06 -0500
committerDavid S. Miller <davem@davemloft.net>2008-01-28 17:55:46 -0500
commit6859d49475d4f32abe640372117e4b687906e6b6 (patch)
tree2133f2e26af6540f2a212c36f219873d34db2c1e /net/ipv4/tcp_input.c
parent7201883599ac8bff76300117155e299b1a54092f (diff)
[TCP]: Abstract tp->highest_sack accessing & point to next skb
Pointing to the next skb is necessary to avoid referencing already SACKed skbs which will soon be on a separate list.

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--net/ipv4/tcp_input.c27
1 file changed, 15 insertions, 12 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 263c536def5c..bc2d5f70966e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1125,7 +1125,7 @@ static void tcp_mark_lost_retrans(struct sock *sk)
1125 struct sk_buff *skb; 1125 struct sk_buff *skb;
1126 int cnt = 0; 1126 int cnt = 0;
1127 u32 new_low_seq = tp->snd_nxt; 1127 u32 new_low_seq = tp->snd_nxt;
1128 u32 received_upto = TCP_SKB_CB(tp->highest_sack)->end_seq; 1128 u32 received_upto = tcp_highest_sack_seq(tp);
1129 1129
1130 if (!tcp_is_fack(tp) || !tp->retrans_out || 1130 if (!tcp_is_fack(tp) || !tp->retrans_out ||
1131 !after(received_upto, tp->lost_retrans_low) || 1131 !after(received_upto, tp->lost_retrans_low) ||
@@ -1236,9 +1236,10 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
1236 return in_sack; 1236 return in_sack;
1237} 1237}
1238 1238
1239static int tcp_sacktag_one(struct sk_buff *skb, struct tcp_sock *tp, 1239static int tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
1240 int *reord, int dup_sack, int fack_count) 1240 int *reord, int dup_sack, int fack_count)
1241{ 1241{
1242 struct tcp_sock *tp = tcp_sk(sk);
1242 u8 sacked = TCP_SKB_CB(skb)->sacked; 1243 u8 sacked = TCP_SKB_CB(skb)->sacked;
1243 int flag = 0; 1244 int flag = 0;
1244 1245
@@ -1307,8 +1308,8 @@ static int tcp_sacktag_one(struct sk_buff *skb, struct tcp_sock *tp,
1307 if (fack_count > tp->fackets_out) 1308 if (fack_count > tp->fackets_out)
1308 tp->fackets_out = fack_count; 1309 tp->fackets_out = fack_count;
1309 1310
1310 if (after(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) 1311 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
1311 tp->highest_sack = skb; 1312 tcp_advance_highest_sack(sk, skb);
1312 } 1313 }
1313 1314
1314 /* D-SACK. We can detect redundant retransmission in S|R and plain R 1315 /* D-SACK. We can detect redundant retransmission in S|R and plain R
@@ -1330,8 +1331,6 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
1330 int dup_sack_in, int *fack_count, 1331 int dup_sack_in, int *fack_count,
1331 int *reord, int *flag) 1332 int *reord, int *flag)
1332{ 1333{
1333 struct tcp_sock *tp = tcp_sk(sk);
1334
1335 tcp_for_write_queue_from(skb, sk) { 1334 tcp_for_write_queue_from(skb, sk) {
1336 int in_sack = 0; 1335 int in_sack = 0;
1337 int dup_sack = dup_sack_in; 1336 int dup_sack = dup_sack_in;
@@ -1358,7 +1357,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
1358 break; 1357 break;
1359 1358
1360 if (in_sack) 1359 if (in_sack)
1361 *flag |= tcp_sacktag_one(skb, tp, reord, dup_sack, *fack_count); 1360 *flag |= tcp_sacktag_one(skb, sk, reord, dup_sack, *fack_count);
1362 1361
1363 *fack_count += tcp_skb_pcount(skb); 1362 *fack_count += tcp_skb_pcount(skb);
1364 } 1363 }
@@ -1429,7 +1428,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1429 if (!tp->sacked_out) { 1428 if (!tp->sacked_out) {
1430 if (WARN_ON(tp->fackets_out)) 1429 if (WARN_ON(tp->fackets_out))
1431 tp->fackets_out = 0; 1430 tp->fackets_out = 0;
1432 tp->highest_sack = tcp_write_queue_head(sk); 1431 tcp_highest_sack_reset(sk);
1433 } 1432 }
1434 1433
1435 found_dup_sack = tcp_check_dsack(tp, ack_skb, sp_wire, 1434 found_dup_sack = tcp_check_dsack(tp, ack_skb, sp_wire,
@@ -1552,9 +1551,11 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1552 &fack_count, &reord, &flag); 1551 &fack_count, &reord, &flag);
1553 1552
1554 /* ...tail remains todo... */ 1553 /* ...tail remains todo... */
1555 if (TCP_SKB_CB(tp->highest_sack)->end_seq == cache->end_seq) { 1554 if (tcp_highest_sack_seq(tp) == cache->end_seq) {
1556 /* ...but better entrypoint exists! */ 1555 /* ...but better entrypoint exists! */
1557 skb = tcp_write_queue_next(sk, tp->highest_sack); 1556 skb = tcp_highest_sack(sk);
1557 if (skb == NULL)
1558 break;
1558 fack_count = tp->fackets_out; 1559 fack_count = tp->fackets_out;
1559 cache++; 1560 cache++;
1560 goto walk; 1561 goto walk;
@@ -1566,8 +1567,10 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1566 continue; 1567 continue;
1567 } 1568 }
1568 1569
1569 if (tp->sacked_out && !before(start_seq, tcp_highest_sack_seq(tp))) { 1570 if (!before(start_seq, tcp_highest_sack_seq(tp))) {
1570 skb = tcp_write_queue_next(sk, tp->highest_sack); 1571 skb = tcp_highest_sack(sk);
1572 if (skb == NULL)
1573 break;
1571 fack_count = tp->fackets_out; 1574 fack_count = tp->fackets_out;
1572 } 1575 }
1573 skb = tcp_sacktag_skip(skb, sk, start_seq); 1576 skb = tcp_sacktag_skip(skb, sk, start_seq);