about summary refs log tree commit diff stats
path: root/net
diff options
context:
space:
mode:
authorIlpo Järvinen <ilpo.jarvinen@helsinki.fi>2007-12-01 17:48:06 -0500
committerDavid S. Miller <davem@davemloft.net>2008-01-28 17:55:46 -0500
commit6859d49475d4f32abe640372117e4b687906e6b6 (patch)
tree2133f2e26af6540f2a212c36f219873d34db2c1e /net
parent7201883599ac8bff76300117155e299b1a54092f (diff)
[TCP]: Abstract tp->highest_sack accessing & point to next skb
Pointing to the next skb is necessary to avoid referencing already SACKed skbs which will soon be on a separate list.

Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
-rw-r--r--net/ipv4/tcp_input.c27
-rw-r--r--net/ipv4/tcp_output.c11
2 files changed, 18 insertions, 20 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 263c536def5..bc2d5f70966 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1125,7 +1125,7 @@ static void tcp_mark_lost_retrans(struct sock *sk)
1125 struct sk_buff *skb; 1125 struct sk_buff *skb;
1126 int cnt = 0; 1126 int cnt = 0;
1127 u32 new_low_seq = tp->snd_nxt; 1127 u32 new_low_seq = tp->snd_nxt;
1128 u32 received_upto = TCP_SKB_CB(tp->highest_sack)->end_seq; 1128 u32 received_upto = tcp_highest_sack_seq(tp);
1129 1129
1130 if (!tcp_is_fack(tp) || !tp->retrans_out || 1130 if (!tcp_is_fack(tp) || !tp->retrans_out ||
1131 !after(received_upto, tp->lost_retrans_low) || 1131 !after(received_upto, tp->lost_retrans_low) ||
@@ -1236,9 +1236,10 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
1236 return in_sack; 1236 return in_sack;
1237} 1237}
1238 1238
1239static int tcp_sacktag_one(struct sk_buff *skb, struct tcp_sock *tp, 1239static int tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
1240 int *reord, int dup_sack, int fack_count) 1240 int *reord, int dup_sack, int fack_count)
1241{ 1241{
1242 struct tcp_sock *tp = tcp_sk(sk);
1242 u8 sacked = TCP_SKB_CB(skb)->sacked; 1243 u8 sacked = TCP_SKB_CB(skb)->sacked;
1243 int flag = 0; 1244 int flag = 0;
1244 1245
@@ -1307,8 +1308,8 @@ static int tcp_sacktag_one(struct sk_buff *skb, struct tcp_sock *tp,
1307 if (fack_count > tp->fackets_out) 1308 if (fack_count > tp->fackets_out)
1308 tp->fackets_out = fack_count; 1309 tp->fackets_out = fack_count;
1309 1310
1310 if (after(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) 1311 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
1311 tp->highest_sack = skb; 1312 tcp_advance_highest_sack(sk, skb);
1312 } 1313 }
1313 1314
1314 /* D-SACK. We can detect redundant retransmission in S|R and plain R 1315 /* D-SACK. We can detect redundant retransmission in S|R and plain R
@@ -1330,8 +1331,6 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
1330 int dup_sack_in, int *fack_count, 1331 int dup_sack_in, int *fack_count,
1331 int *reord, int *flag) 1332 int *reord, int *flag)
1332{ 1333{
1333 struct tcp_sock *tp = tcp_sk(sk);
1334
1335 tcp_for_write_queue_from(skb, sk) { 1334 tcp_for_write_queue_from(skb, sk) {
1336 int in_sack = 0; 1335 int in_sack = 0;
1337 int dup_sack = dup_sack_in; 1336 int dup_sack = dup_sack_in;
@@ -1358,7 +1357,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
1358 break; 1357 break;
1359 1358
1360 if (in_sack) 1359 if (in_sack)
1361 *flag |= tcp_sacktag_one(skb, tp, reord, dup_sack, *fack_count); 1360 *flag |= tcp_sacktag_one(skb, sk, reord, dup_sack, *fack_count);
1362 1361
1363 *fack_count += tcp_skb_pcount(skb); 1362 *fack_count += tcp_skb_pcount(skb);
1364 } 1363 }
@@ -1429,7 +1428,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1429 if (!tp->sacked_out) { 1428 if (!tp->sacked_out) {
1430 if (WARN_ON(tp->fackets_out)) 1429 if (WARN_ON(tp->fackets_out))
1431 tp->fackets_out = 0; 1430 tp->fackets_out = 0;
1432 tp->highest_sack = tcp_write_queue_head(sk); 1431 tcp_highest_sack_reset(sk);
1433 } 1432 }
1434 1433
1435 found_dup_sack = tcp_check_dsack(tp, ack_skb, sp_wire, 1434 found_dup_sack = tcp_check_dsack(tp, ack_skb, sp_wire,
@@ -1552,9 +1551,11 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1552 &fack_count, &reord, &flag); 1551 &fack_count, &reord, &flag);
1553 1552
1554 /* ...tail remains todo... */ 1553 /* ...tail remains todo... */
1555 if (TCP_SKB_CB(tp->highest_sack)->end_seq == cache->end_seq) { 1554 if (tcp_highest_sack_seq(tp) == cache->end_seq) {
1556 /* ...but better entrypoint exists! */ 1555 /* ...but better entrypoint exists! */
1557 skb = tcp_write_queue_next(sk, tp->highest_sack); 1556 skb = tcp_highest_sack(sk);
1557 if (skb == NULL)
1558 break;
1558 fack_count = tp->fackets_out; 1559 fack_count = tp->fackets_out;
1559 cache++; 1560 cache++;
1560 goto walk; 1561 goto walk;
@@ -1566,8 +1567,10 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1566 continue; 1567 continue;
1567 } 1568 }
1568 1569
1569 if (tp->sacked_out && !before(start_seq, tcp_highest_sack_seq(tp))) { 1570 if (!before(start_seq, tcp_highest_sack_seq(tp))) {
1570 skb = tcp_write_queue_next(sk, tp->highest_sack); 1571 skb = tcp_highest_sack(sk);
1572 if (skb == NULL)
1573 break;
1571 fack_count = tp->fackets_out; 1574 fack_count = tp->fackets_out;
1572 } 1575 }
1573 skb = tcp_sacktag_skip(skb, sk, start_seq); 1576 skb = tcp_sacktag_skip(skb, sk, start_seq);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7d8583a15d0..9a985b55e7d 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -667,7 +667,7 @@ static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
667 if (!tp->sacked_out || tcp_is_reno(tp)) 667 if (!tp->sacked_out || tcp_is_reno(tp))
668 return; 668 return;
669 669
670 if (!before(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq)) 670 if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
671 tp->fackets_out -= decr; 671 tp->fackets_out -= decr;
672} 672}
673 673
@@ -711,9 +711,6 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
711 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; 711 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
712 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; 712 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
713 713
714 if (tcp_is_sack(tp) && tp->sacked_out && (skb == tp->highest_sack))
715 tp->highest_sack = buff;
716
717 /* PSH and FIN should only be set in the second packet. */ 714 /* PSH and FIN should only be set in the second packet. */
718 flags = TCP_SKB_CB(skb)->flags; 715 flags = TCP_SKB_CB(skb)->flags;
719 TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH); 716 TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
@@ -1707,9 +1704,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
1707 BUG_ON(tcp_skb_pcount(skb) != 1 || 1704 BUG_ON(tcp_skb_pcount(skb) != 1 ||
1708 tcp_skb_pcount(next_skb) != 1); 1705 tcp_skb_pcount(next_skb) != 1);
1709 1706
1710 if (WARN_ON(tcp_is_sack(tp) && tp->sacked_out && 1707 tcp_highest_sack_combine(sk, next_skb, skb);
1711 (next_skb == tp->highest_sack)))
1712 return;
1713 1708
1714 /* Ok. We will be able to collapse the packet. */ 1709 /* Ok. We will be able to collapse the packet. */
1715 tcp_unlink_write_queue(next_skb, sk); 1710 tcp_unlink_write_queue(next_skb, sk);
@@ -2019,7 +2014,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
2019 break; 2014 break;
2020 tp->forward_skb_hint = skb; 2015 tp->forward_skb_hint = skb;
2021 2016
2022 if (after(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) 2017 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
2023 break; 2018 break;
2024 2019
2025 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) 2020 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)