about · summary · refs · log · tree · commit · diff · stats
diff options: context / space / mode
-rw-r--r--include/net/tcp.h35
-rw-r--r--net/ipv4/tcp_input.c27
-rw-r--r--net/ipv4/tcp_output.c11
3 files changed, 52 insertions, 21 deletions
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 6e392babda4a..5ec1cacca8a1 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1267,8 +1267,12 @@ static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb
1267 __tcp_add_write_queue_tail(sk, skb); 1267 __tcp_add_write_queue_tail(sk, skb);
1268 1268
1269 /* Queue it, remembering where we must start sending. */ 1269 /* Queue it, remembering where we must start sending. */
1270 if (sk->sk_send_head == NULL) 1270 if (sk->sk_send_head == NULL) {
1271 sk->sk_send_head = skb; 1271 sk->sk_send_head = skb;
1272
1273 if (tcp_sk(sk)->highest_sack == NULL)
1274 tcp_sk(sk)->highest_sack = skb;
1275 }
1272} 1276}
1273 1277
1274static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb) 1278static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
@@ -1318,9 +1322,38 @@ static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
1318{ 1322{
1319 if (!tp->sacked_out) 1323 if (!tp->sacked_out)
1320 return tp->snd_una; 1324 return tp->snd_una;
1325
1326 if (tp->highest_sack == NULL)
1327 return tp->snd_nxt;
1328
1321 return TCP_SKB_CB(tp->highest_sack)->seq; 1329 return TCP_SKB_CB(tp->highest_sack)->seq;
1322} 1330}
1323 1331
1332static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
1333{
1334 tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
1335 tcp_write_queue_next(sk, skb);
1336}
1337
1338static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
1339{
1340 return tcp_sk(sk)->highest_sack;
1341}
1342
1343static inline void tcp_highest_sack_reset(struct sock *sk)
1344{
1345 tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
1346}
1347
1348/* Called when old skb is about to be deleted (to be combined with new skb) */
1349static inline void tcp_highest_sack_combine(struct sock *sk,
1350 struct sk_buff *old,
1351 struct sk_buff *new)
1352{
1353 if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
1354 tcp_sk(sk)->highest_sack = new;
1355}
1356
1324/* /proc */ 1357/* /proc */
1325enum tcp_seq_states { 1358enum tcp_seq_states {
1326 TCP_SEQ_STATE_LISTENING, 1359 TCP_SEQ_STATE_LISTENING,
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 263c536def5c..bc2d5f70966e 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1125,7 +1125,7 @@ static void tcp_mark_lost_retrans(struct sock *sk)
1125 struct sk_buff *skb; 1125 struct sk_buff *skb;
1126 int cnt = 0; 1126 int cnt = 0;
1127 u32 new_low_seq = tp->snd_nxt; 1127 u32 new_low_seq = tp->snd_nxt;
1128 u32 received_upto = TCP_SKB_CB(tp->highest_sack)->end_seq; 1128 u32 received_upto = tcp_highest_sack_seq(tp);
1129 1129
1130 if (!tcp_is_fack(tp) || !tp->retrans_out || 1130 if (!tcp_is_fack(tp) || !tp->retrans_out ||
1131 !after(received_upto, tp->lost_retrans_low) || 1131 !after(received_upto, tp->lost_retrans_low) ||
@@ -1236,9 +1236,10 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
1236 return in_sack; 1236 return in_sack;
1237} 1237}
1238 1238
1239static int tcp_sacktag_one(struct sk_buff *skb, struct tcp_sock *tp, 1239static int tcp_sacktag_one(struct sk_buff *skb, struct sock *sk,
1240 int *reord, int dup_sack, int fack_count) 1240 int *reord, int dup_sack, int fack_count)
1241{ 1241{
1242 struct tcp_sock *tp = tcp_sk(sk);
1242 u8 sacked = TCP_SKB_CB(skb)->sacked; 1243 u8 sacked = TCP_SKB_CB(skb)->sacked;
1243 int flag = 0; 1244 int flag = 0;
1244 1245
@@ -1307,8 +1308,8 @@ static int tcp_sacktag_one(struct sk_buff *skb, struct tcp_sock *tp,
1307 if (fack_count > tp->fackets_out) 1308 if (fack_count > tp->fackets_out)
1308 tp->fackets_out = fack_count; 1309 tp->fackets_out = fack_count;
1309 1310
1310 if (after(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) 1311 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
1311 tp->highest_sack = skb; 1312 tcp_advance_highest_sack(sk, skb);
1312 } 1313 }
1313 1314
1314 /* D-SACK. We can detect redundant retransmission in S|R and plain R 1315 /* D-SACK. We can detect redundant retransmission in S|R and plain R
@@ -1330,8 +1331,6 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
1330 int dup_sack_in, int *fack_count, 1331 int dup_sack_in, int *fack_count,
1331 int *reord, int *flag) 1332 int *reord, int *flag)
1332{ 1333{
1333 struct tcp_sock *tp = tcp_sk(sk);
1334
1335 tcp_for_write_queue_from(skb, sk) { 1334 tcp_for_write_queue_from(skb, sk) {
1336 int in_sack = 0; 1335 int in_sack = 0;
1337 int dup_sack = dup_sack_in; 1336 int dup_sack = dup_sack_in;
@@ -1358,7 +1357,7 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
1358 break; 1357 break;
1359 1358
1360 if (in_sack) 1359 if (in_sack)
1361 *flag |= tcp_sacktag_one(skb, tp, reord, dup_sack, *fack_count); 1360 *flag |= tcp_sacktag_one(skb, sk, reord, dup_sack, *fack_count);
1362 1361
1363 *fack_count += tcp_skb_pcount(skb); 1362 *fack_count += tcp_skb_pcount(skb);
1364 } 1363 }
@@ -1429,7 +1428,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1429 if (!tp->sacked_out) { 1428 if (!tp->sacked_out) {
1430 if (WARN_ON(tp->fackets_out)) 1429 if (WARN_ON(tp->fackets_out))
1431 tp->fackets_out = 0; 1430 tp->fackets_out = 0;
1432 tp->highest_sack = tcp_write_queue_head(sk); 1431 tcp_highest_sack_reset(sk);
1433 } 1432 }
1434 1433
1435 found_dup_sack = tcp_check_dsack(tp, ack_skb, sp_wire, 1434 found_dup_sack = tcp_check_dsack(tp, ack_skb, sp_wire,
@@ -1552,9 +1551,11 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1552 &fack_count, &reord, &flag); 1551 &fack_count, &reord, &flag);
1553 1552
1554 /* ...tail remains todo... */ 1553 /* ...tail remains todo... */
1555 if (TCP_SKB_CB(tp->highest_sack)->end_seq == cache->end_seq) { 1554 if (tcp_highest_sack_seq(tp) == cache->end_seq) {
1556 /* ...but better entrypoint exists! */ 1555 /* ...but better entrypoint exists! */
1557 skb = tcp_write_queue_next(sk, tp->highest_sack); 1556 skb = tcp_highest_sack(sk);
1557 if (skb == NULL)
1558 break;
1558 fack_count = tp->fackets_out; 1559 fack_count = tp->fackets_out;
1559 cache++; 1560 cache++;
1560 goto walk; 1561 goto walk;
@@ -1566,8 +1567,10 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
1566 continue; 1567 continue;
1567 } 1568 }
1568 1569
1569 if (tp->sacked_out && !before(start_seq, tcp_highest_sack_seq(tp))) { 1570 if (!before(start_seq, tcp_highest_sack_seq(tp))) {
1570 skb = tcp_write_queue_next(sk, tp->highest_sack); 1571 skb = tcp_highest_sack(sk);
1572 if (skb == NULL)
1573 break;
1571 fack_count = tp->fackets_out; 1574 fack_count = tp->fackets_out;
1572 } 1575 }
1573 skb = tcp_sacktag_skip(skb, sk, start_seq); 1576 skb = tcp_sacktag_skip(skb, sk, start_seq);
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7d8583a15d02..9a985b55e7d8 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -667,7 +667,7 @@ static void tcp_adjust_fackets_out(struct sock *sk, struct sk_buff *skb,
667 if (!tp->sacked_out || tcp_is_reno(tp)) 667 if (!tp->sacked_out || tcp_is_reno(tp))
668 return; 668 return;
669 669
670 if (!before(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq)) 670 if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq))
671 tp->fackets_out -= decr; 671 tp->fackets_out -= decr;
672} 672}
673 673
@@ -711,9 +711,6 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
711 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; 711 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
712 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; 712 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
713 713
714 if (tcp_is_sack(tp) && tp->sacked_out && (skb == tp->highest_sack))
715 tp->highest_sack = buff;
716
717 /* PSH and FIN should only be set in the second packet. */ 714 /* PSH and FIN should only be set in the second packet. */
718 flags = TCP_SKB_CB(skb)->flags; 715 flags = TCP_SKB_CB(skb)->flags;
719 TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH); 716 TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
@@ -1707,9 +1704,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
1707 BUG_ON(tcp_skb_pcount(skb) != 1 || 1704 BUG_ON(tcp_skb_pcount(skb) != 1 ||
1708 tcp_skb_pcount(next_skb) != 1); 1705 tcp_skb_pcount(next_skb) != 1);
1709 1706
1710 if (WARN_ON(tcp_is_sack(tp) && tp->sacked_out && 1707 tcp_highest_sack_combine(sk, next_skb, skb);
1711 (next_skb == tp->highest_sack)))
1712 return;
1713 1708
1714 /* Ok. We will be able to collapse the packet. */ 1709 /* Ok. We will be able to collapse the packet. */
1715 tcp_unlink_write_queue(next_skb, sk); 1710 tcp_unlink_write_queue(next_skb, sk);
@@ -2019,7 +2014,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
2019 break; 2014 break;
2020 tp->forward_skb_hint = skb; 2015 tp->forward_skb_hint = skb;
2021 2016
2022 if (after(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) 2017 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
2023 break; 2018 break;
2024 2019
2025 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) 2020 if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)