Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r-- | net/ipv4/tcp_input.c | 45
1 file changed, 29 insertions, 16 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 976034f82320..53c8ce4046b2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1307,25 +1307,26 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
         return in_sack;
 }
 
-static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
-                          struct tcp_sacktag_state *state,
+/* Mark the given newly-SACKed range as such, adjusting counters and hints. */
+static u8 tcp_sacktag_one(struct sock *sk,
+                          struct tcp_sacktag_state *state, u8 sacked,
+                          u32 start_seq, u32 end_seq,
                           int dup_sack, int pcount)
 {
         struct tcp_sock *tp = tcp_sk(sk);
-        u8 sacked = TCP_SKB_CB(skb)->sacked;
         int fack_count = state->fack_count;
 
         /* Account D-SACK for retransmitted packet. */
         if (dup_sack && (sacked & TCPCB_RETRANS)) {
                 if (tp->undo_marker && tp->undo_retrans &&
-                    after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker))
+                    after(end_seq, tp->undo_marker))
                         tp->undo_retrans--;
                 if (sacked & TCPCB_SACKED_ACKED)
                         state->reord = min(fack_count, state->reord);
         }
 
         /* Nothing to do; acked frame is about to be dropped (was ACKed). */
-        if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una))
+        if (!after(end_seq, tp->snd_una))
                 return sacked;
 
         if (!(sacked & TCPCB_SACKED_ACKED)) {
@@ -1344,13 +1345,13 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
                         /* New sack for not retransmitted frame,
                          * which was in hole. It is reordering.
                          */
-                        if (before(TCP_SKB_CB(skb)->seq,
+                        if (before(start_seq,
                                    tcp_highest_sack_seq(tp)))
                                 state->reord = min(fack_count,
                                                    state->reord);
 
                         /* SACK enhanced F-RTO (RFC4138; Appendix B) */
-                        if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark))
+                        if (!after(end_seq, tp->frto_highmark))
                                 state->flag |= FLAG_ONLY_ORIG_SACKED;
                 }
 
@@ -1368,8 +1369,7 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
 
         /* Lost marker hint past SACKed? Tweak RFC3517 cnt */
         if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) &&
-            before(TCP_SKB_CB(skb)->seq,
-                   TCP_SKB_CB(tp->lost_skb_hint)->seq))
+            before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq))
                 tp->lost_cnt_hint += pcount;
 
         if (fack_count > tp->fackets_out)
@@ -1388,6 +1388,9 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk,
         return sacked;
 }
 
+/* Shift newly-SACKed bytes from this skb to the immediately previous
+ * already-SACKed sk_buff. Mark the newly-SACKed bytes as such.
+ */
 static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
                            struct tcp_sacktag_state *state,
                            unsigned int pcount, int shifted, int mss,
@@ -1395,10 +1398,13 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 {
         struct tcp_sock *tp = tcp_sk(sk);
         struct sk_buff *prev = tcp_write_queue_prev(sk, skb);
+        u32 start_seq = TCP_SKB_CB(skb)->seq;   /* start of newly-SACKed */
+        u32 end_seq = start_seq + shifted;      /* end of newly-SACKed */
 
         BUG_ON(!pcount);
 
-        if (skb == tp->lost_skb_hint)
+        /* Adjust hint for FACK. Non-FACK is handled in tcp_sacktag_one(). */
+        if (tcp_is_fack(tp) && (skb == tp->lost_skb_hint))
                 tp->lost_cnt_hint += pcount;
 
         TCP_SKB_CB(prev)->end_seq += shifted;
@@ -1424,8 +1430,11 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
                 skb_shinfo(skb)->gso_type = 0;
         }
 
-        /* We discard results */
-        tcp_sacktag_one(skb, sk, state, dup_sack, pcount);
+        /* Adjust counters and hints for the newly sacked sequence range but
+         * discard the return value since prev is already marked.
+         */
+        tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
+                        start_seq, end_seq, dup_sack, pcount);
 
         /* Difference in this won't matter, both ACKed by the same cumul. ACK */
         TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
@@ -1664,10 +1673,14 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk,
                         break;
 
                 if (in_sack) {
-                        TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(skb, sk,
-                                                                  state,
-                                                                  dup_sack,
-                                                                  tcp_skb_pcount(skb));
+                        TCP_SKB_CB(skb)->sacked =
+                                tcp_sacktag_one(sk,
+                                                state,
+                                                TCP_SKB_CB(skb)->sacked,
+                                                TCP_SKB_CB(skb)->seq,
+                                                TCP_SKB_CB(skb)->end_seq,
+                                                dup_sack,
+                                                tcp_skb_pcount(skb));
 
                         if (!before(TCP_SKB_CB(skb)->seq,
                                     tcp_highest_sack_seq(tp)))
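
The patch compares the caller-supplied newly-SACKed range against fields such as tp->snd_una and tp->undo_marker using the kernel's wraparound-safe before()/after() helpers from include/net/tcp.h. The standalone userspace sketch below is not part of the commit; the function and variable names are illustrative only. It mirrors the "nothing to do" test (!after(end_seq, tp->snd_una)) to show why the comparison stays correct across 32-bit sequence-number wraparound.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe sequence comparisons, mirroring the kernel's
 * before()/after() helpers from include/net/tcp.h. */
static bool seq_before(uint32_t seq1, uint32_t seq2)
{
        return (int32_t)(seq1 - seq2) < 0;
}
#define seq_after(seq2, seq1)   seq_before(seq1, seq2)

/* Illustrative only: true when a newly-SACKed range ending at end_seq is
 * already covered by the cumulative ACK point snd_una, i.e. the condition
 * tcp_sacktag_one() tests with !after(end_seq, tp->snd_una). */
static bool range_already_cumulatively_acked(uint32_t end_seq, uint32_t snd_una)
{
        return !seq_after(end_seq, snd_una);
}

int main(void)
{
        /* end_seq has wrapped past zero but is still logically above
         * snd_una, so the range must NOT be treated as stale. */
        uint32_t snd_una = 0xfffffff0u;
        uint32_t end_seq = 0x00000010u;

        printf("stale: %d\n", range_already_cumulatively_acked(end_seq, snd_una));
        return 0;
}

With these inputs the program prints "stale: 0": the signed-difference trick makes 0x00000010 compare as "after" 0xfffffff0, which is the same property the kernel relies on when checking the explicit [start_seq, end_seq) range passed by tcp_shifted_skb().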