author     Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>      2007-05-27 04:52:00 -0400
committer  David S. Miller <davem@sunset.davemloft.net>   2007-10-10 19:47:52 -0400
commit     4ddf66769d2df868071420e2e0106746c6204ea3 (patch)
tree       4a962ac4d1e2d441c89eb3be7bfa799b15177405 /net/ipv4
parent     d06e021d71d95aae402340dc3d9f79f9c8ad11d7 (diff)
[TCP]: Move Reno SACKed_out counter functions earlier
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/tcp_input.c | 98
1 file changed, 47 insertions(+), 51 deletions(-)
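
The helpers being relocated implement NewReno's SACK emulation: on a connection whose peer did not negotiate the SACK option, each duplicate ACK bumps tp->sacked_out as if one segment had been selectively acknowledged, a cumulative ACK later drains that counter, and a clamp reinterprets an implausible dupack count as reordering. The standalone sketch below (illustrative only, not the kernel code; the struct and helper names are invented for the example, and the packets_out/left_out bookkeeping the kernel does elsewhere is omitted) shows the same accounting in miniature.

#include <stdio.h>

/*
 * Illustrative sketch only -- not the kernel's tcp_sock or its helpers.
 * It mimics the dupack accounting performed by the relocated functions
 * for SACKless connections.
 */
struct reno_state {
	unsigned int packets_out;	/* segments currently in flight */
	unsigned int sacked_out;	/* dupacks counted as fake SACKs */
	unsigned int lost_out;		/* segments presumed lost */
};

/* Too many dupacks for the assumed hole(s) => treat it as reordering. */
static void check_reno_reordering(struct reno_state *tp)
{
	unsigned int holes = tp->lost_out ? tp->lost_out : 1;

	if (holes > tp->packets_out)
		holes = tp->packets_out;
	if (tp->sacked_out + holes > tp->packets_out)
		tp->sacked_out = tp->packets_out - holes;	/* clamp */
}

/* A duplicate ACK counts one segment as delivered. */
static void add_reno_sack(struct reno_state *tp)
{
	tp->sacked_out++;
	check_reno_reordering(tp);
}

/* A cumulative ACK advancing by 'acked' segments: one segment fills the
 * hole, the remainder consumes previously counted dupacks. */
static void remove_reno_sacks(struct reno_state *tp, int acked)
{
	if (acked > 0) {
		if ((unsigned int)(acked - 1) >= tp->sacked_out)
			tp->sacked_out = 0;
		else
			tp->sacked_out -= acked - 1;
	}
	check_reno_reordering(tp);
}

int main(void)
{
	struct reno_state tp = { .packets_out = 10, .sacked_out = 0, .lost_out = 1 };

	for (int i = 0; i < 3; i++)
		add_reno_sack(&tp);		/* three dupacks arrive */
	printf("after dupacks: sacked_out=%u\n", tp.sacked_out);	/* prints 3 */

	remove_reno_sacks(&tp, 4);		/* cumulative ACK covering 4 segments */
	printf("after cum-ACK: sacked_out=%u\n", tp.sacked_out);	/* prints 0 */
	return 0;
}

The clamp in check_reno_reordering() is what keeps sacked_out plus the assumed holes from exceeding packets_out when the extra dupacks were caused by reordering rather than loss.
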
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 5187870d0333..2711ef7df7b5 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1314,6 +1314,53 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 /* F-RTO can only be used if TCP has never retransmitted anything other than
  * head (SACK enhanced variant from Appendix B of RFC4138 is more robust here)
  */
+static void tcp_check_reno_reordering(struct sock *sk, const int addend)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	u32 holes;
+
+	holes = max(tp->lost_out, 1U);
+	holes = min(holes, tp->packets_out);
+
+	if ((tp->sacked_out + holes) > tp->packets_out) {
+		tp->sacked_out = tp->packets_out - holes;
+		tcp_update_reordering(sk, tp->packets_out + addend, 0);
+	}
+}
+
+/* Emulate SACKs for SACKless connection: account for a new dupack. */
+
+static void tcp_add_reno_sack(struct sock *sk)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+	tp->sacked_out++;
+	tcp_check_reno_reordering(sk, 0);
+	tcp_sync_left_out(tp);
+}
+
+/* Account for ACK, ACKing some data in Reno Recovery phase. */
+
+static void tcp_remove_reno_sacks(struct sock *sk, int acked)
+{
+	struct tcp_sock *tp = tcp_sk(sk);
+
+	if (acked > 0) {
+		/* One ACK acked hole. The rest eat duplicate ACKs. */
+		if (acked-1 >= tp->sacked_out)
+			tp->sacked_out = 0;
+		else
+			tp->sacked_out -= acked-1;
+	}
+	tcp_check_reno_reordering(sk, acked);
+	tcp_sync_left_out(tp);
+}
+
+static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
+{
+	tp->sacked_out = 0;
+	tp->left_out = tp->lost_out;
+}
+
 int tcp_use_frto(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
@@ -1730,57 +1777,6 @@ static int tcp_time_to_recover(struct sock *sk)
 	return 0;
 }
 
-/* If we receive more dupacks than we expected counting segments
- * in assumption of absent reordering, interpret this as reordering.
- * The only another reason could be bug in receiver TCP.
- */
-static void tcp_check_reno_reordering(struct sock *sk, const int addend)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-	u32 holes;
-
-	holes = max(tp->lost_out, 1U);
-	holes = min(holes, tp->packets_out);
-
-	if ((tp->sacked_out + holes) > tp->packets_out) {
-		tp->sacked_out = tp->packets_out - holes;
-		tcp_update_reordering(sk, tp->packets_out + addend, 0);
-	}
-}
-
-/* Emulate SACKs for SACKless connection: account for a new dupack. */
-
-static void tcp_add_reno_sack(struct sock *sk)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-	tp->sacked_out++;
-	tcp_check_reno_reordering(sk, 0);
-	tcp_sync_left_out(tp);
-}
-
-/* Account for ACK, ACKing some data in Reno Recovery phase. */
-
-static void tcp_remove_reno_sacks(struct sock *sk, int acked)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	if (acked > 0) {
-		/* One ACK acked hole. The rest eat duplicate ACKs. */
-		if (acked-1 >= tp->sacked_out)
-			tp->sacked_out = 0;
-		else
-			tp->sacked_out -= acked-1;
-	}
-	tcp_check_reno_reordering(sk, acked);
-	tcp_sync_left_out(tp);
-}
-
-static inline void tcp_reset_reno_sack(struct tcp_sock *tp)
-{
-	tp->sacked_out = 0;
-	tp->left_out = tp->lost_out;
-}
-
 /* RFC: This is from the original, I doubt that this is necessary at all:
  * clear xmit_retrans hint if seq of this skb is beyond hint. How could we
  * retransmitted past LOST markings in the first place? I'm not fully sure