author     Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>      2007-08-09 08:14:46 -0400
committer  David S. Miller <davem@sunset.davemloft.net>   2007-10-10 19:48:00 -0400
commit     e60402d0a909ca2e6e2fbdf9ed004ef0fae36d33
tree       b40a00b801cf44f81bebd7dadddb19c0086b145c
parent     b9c4595bc4947faa236a849324fe3492e388d949
[TCP]: Move sack_ok access to obviously named funcs & cleanup
Previously the code had IsReno/IsFack defined as macros local to
tcp_input.c, even though the sack_ok field has users elsewhere for
the same purpose. This changes them to static inlines, as preferred
according to the current coding style, and unifies access to sack_ok
across multiple files. The magic bitops on sack_ok for FACK and
DSACK are also abstracted into functions with appropriate names.
Note:
- One sack_ok = 1 assignment remains, but it is self-explanatory:
  it enables SACK
- A couple of !IsReno cases are changed to tcp_is_sack
- There were no users of IsDSack, so it is dropped
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
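For orientation, here is a minimal userspace sketch (not part of the patch)
of the sack_ok encoding that the new helpers wrap: bit 0 records that SACK
was negotiated, bit 1 (value 2) marks FACK, and bit 2 (value 4) records that
a DSACK was seen. The struct and function names below are illustrative
stand-ins, not kernel APIs; only the bit values come from the patch.

    #include <stdio.h>

    /* Illustrative stand-in for tp->rx_opt.sack_ok; bit values
     * taken from the patch: SACK = 1, FACK = 2, DSACK = 4. */
    struct sack_state { int sack_ok; };

    static inline int  is_sack(const struct sack_state *s) { return s->sack_ok; }
    static inline int  is_reno(const struct sack_state *s) { return !is_sack(s); }
    static inline int  is_fack(const struct sack_state *s) { return s->sack_ok & 2; }
    static inline void enable_fack(struct sack_state *s)   { s->sack_ok |= 2; }
    static inline void disable_fack(struct sack_state *s)  { s->sack_ok &= ~2; }
    static inline void dsack_seen(struct sack_state *s)    { s->sack_ok |= 4; }

    int main(void)
    {
        struct sack_state s = { .sack_ok = 1 }; /* SACK negotiated */
        enable_fack(&s);   /* sack_ok == 3 */
        dsack_seen(&s);    /* sack_ok == 7 */
        disable_fack(&s);  /* FACK cleared, DSACK bit kept: 5 */
        printf("sack=%d fack=%d reno=%d sack_ok=%d\n",
               !!is_sack(&s), !!is_fack(&s), is_reno(&s), s.sack_ok);
        return 0;
    }

Compiled standalone, this prints sack=1 fack=0 reno=0 sack_ok=5, showing
that clearing FACK leaves the SACK and DSACK bits intact.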
-rw-r--r--  include/net/tcp.h        | 28
-rw-r--r--  net/ipv4/tcp.c           |  2
-rw-r--r--  net/ipv4/tcp_input.c     | 82
-rw-r--r--  net/ipv4/tcp_minisocks.c |  2
-rw-r--r--  net/ipv4/tcp_output.c    |  6
-rw-r--r--  net/ipv4/tcp_timer.c     |  2
6 files changed, 80 insertions, 42 deletions
diff --git a/include/net/tcp.h b/include/net/tcp.h
index b92bdc7c92a9..0a4ed6e85c6f 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -719,6 +719,34 @@ static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
 		icsk->icsk_ca_ops->cwnd_event(sk, event);
 }
 
+/* These functions determine how the current flow behaves in respect of SACK
+ * handling. SACK is negotiated with the peer, and therefore it can vary
+ * between different flows.
+ *
+ * tcp_is_sack - SACK enabled
+ * tcp_is_reno - No SACK
+ * tcp_is_fack - FACK enabled, implies SACK enabled
+ */
+static inline int tcp_is_sack(const struct tcp_sock *tp)
+{
+	return tp->rx_opt.sack_ok;
+}
+
+static inline int tcp_is_reno(const struct tcp_sock *tp)
+{
+	return !tcp_is_sack(tp);
+}
+
+static inline int tcp_is_fack(const struct tcp_sock *tp)
+{
+	return tp->rx_opt.sack_ok & 2;
+}
+
+static inline void tcp_enable_fack(struct tcp_sock *tp)
+{
+	tp->rx_opt.sack_ok |= 2;
+}
+
 static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
 {
 	return tp->sacked_out + tp->lost_out;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 7e740112b238..aff31427f525 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2014,7 +2014,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 
 	if (tp->rx_opt.tstamp_ok)
 		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
-	if (tp->rx_opt.sack_ok)
+	if (tcp_is_sack(tp))
 		info->tcpi_options |= TCPI_OPT_SACK;
 	if (tp->rx_opt.wscale_ok) {
 		info->tcpi_options |= TCPI_OPT_WSCALE;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index f8af018dd224..faba9beb3613 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -111,10 +111,6 @@ int sysctl_tcp_abc __read_mostly;
 #define FLAG_FORWARD_PROGRESS	(FLAG_ACKED|FLAG_DATA_SACKED)
 #define FLAG_ANY_PROGRESS	(FLAG_FORWARD_PROGRESS|FLAG_SND_UNA_ADVANCED)
 
-#define IsReno(tp) ((tp)->rx_opt.sack_ok == 0)
-#define IsFack(tp) ((tp)->rx_opt.sack_ok & 2)
-#define IsDSack(tp) ((tp)->rx_opt.sack_ok & 4)
-
 #define IsSackFrto() (sysctl_tcp_frto == 0x2)
 
 #define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)
@@ -860,6 +856,21 @@ void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
 	}
 }
 
+/*
+ * Packet counting of FACK is based on in-order assumptions, therefore TCP
+ * disables it when reordering is detected
+ */
+static void tcp_disable_fack(struct tcp_sock *tp)
+{
+	tp->rx_opt.sack_ok &= ~2;
+}
+
+/* Take a notice that peer is sending DSACKs */
+static void tcp_dsack_seen(struct tcp_sock *tp)
+{
+	tp->rx_opt.sack_ok |= 4;
+}
+
 /* Initialize metrics on socket. */
 
 static void tcp_init_metrics(struct sock *sk)
@@ -881,7 +892,7 @@ static void tcp_init_metrics(struct sock *sk)
 	}
 	if (dst_metric(dst, RTAX_REORDERING) &&
 	    tp->reordering != dst_metric(dst, RTAX_REORDERING)) {
-		tp->rx_opt.sack_ok &= ~2;
+		tcp_disable_fack(tp);
 		tp->reordering = dst_metric(dst, RTAX_REORDERING);
 	}
 
@@ -943,9 +954,9 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 		/* This exciting event is worth to be remembered. 8) */
 		if (ts)
 			NET_INC_STATS_BH(LINUX_MIB_TCPTSREORDER);
-		else if (IsReno(tp))
+		else if (tcp_is_reno(tp))
 			NET_INC_STATS_BH(LINUX_MIB_TCPRENOREORDER);
-		else if (IsFack(tp))
+		else if (tcp_is_fack(tp))
 			NET_INC_STATS_BH(LINUX_MIB_TCPFACKREORDER);
 		else
 			NET_INC_STATS_BH(LINUX_MIB_TCPSACKREORDER);
@@ -957,8 +968,7 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
 			   tp->sacked_out,
 			   tp->undo_marker ? tp->undo_retrans : 0);
 #endif
-		/* Disable FACK yet. */
-		tp->rx_opt.sack_ok &= ~2;
+		tcp_disable_fack(tp);
 	}
 }
 
@@ -1020,7 +1030,7 @@ static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
 
 	if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) {
 		dup_sack = 1;
-		tp->rx_opt.sack_ok |= 4;
+		tcp_dsack_seen(tp);
 		NET_INC_STATS_BH(LINUX_MIB_TCPDSACKRECV);
 	} else if (num_sacks > 1) {
 		u32 end_seq_1 = ntohl(get_unaligned(&sp[1].end_seq));
@@ -1029,7 +1039,7 @@ static int tcp_check_dsack(struct tcp_sock *tp, struct sk_buff *ack_skb,
 		if (!after(end_seq_0, end_seq_1) &&
 		    !before(start_seq_0, start_seq_1)) {
 			dup_sack = 1;
-			tp->rx_opt.sack_ok |= 4;
+			tcp_dsack_seen(tp);
 			NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOFORECV);
 		}
 	}
@@ -1326,7 +1336,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
 			continue;
 		if ((TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) &&
 		    after(lost_retrans, TCP_SKB_CB(skb)->ack_seq) &&
-		    (IsFack(tp) ||
+		    (tcp_is_fack(tp) ||
 		     !before(lost_retrans,
 			     TCP_SKB_CB(skb)->ack_seq + tp->reordering *
 			     tp->mss_cache))) {
@@ -1526,7 +1536,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
 
 	tp->lost_out = 0;
 	tp->retrans_out = 0;
-	if (IsReno(tp))
+	if (tcp_is_reno(tp))
 		tcp_reset_reno_sack(tp);
 
 	tcp_for_write_queue(skb, sk) {
@@ -1668,7 +1678,7 @@ static int tcp_check_sack_reneging(struct sock *sk)
 
 static inline int tcp_fackets_out(struct tcp_sock *tp)
 {
-	return IsReno(tp) ? tp->sacked_out+1 : tp->fackets_out;
+	return tcp_is_reno(tp) ? tp->sacked_out+1 : tp->fackets_out;
 }
 
 static inline int tcp_skb_timedout(struct sock *sk, struct sk_buff *skb)
@@ -1872,7 +1882,7 @@ static void tcp_update_scoreboard(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	if (IsFack(tp)) {
+	if (tcp_is_fack(tp)) {
 		int lost = tp->fackets_out - tp->reordering;
 		if (lost <= 0)
 			lost = 1;
@@ -1886,7 +1896,7 @@ static void tcp_update_scoreboard(struct sock *sk)
 	 * Hence, we can detect timed out packets during fast
 	 * retransmit without falling to slow start.
 	 */
-	if (!IsReno(tp) && tcp_head_timedout(sk)) {
+	if (!tcp_is_reno(tp) && tcp_head_timedout(sk)) {
 		struct sk_buff *skb;
 
 		skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
@@ -1938,7 +1948,7 @@ static void tcp_cwnd_down(struct sock *sk, int flag)
 		int decr = tp->snd_cwnd_cnt + 1;
 
 		if ((flag&(FLAG_ANY_PROGRESS|FLAG_DSACKING_ACK)) ||
-		    (IsReno(tp) && !(flag&FLAG_NOT_DUP))) {
+		    (tcp_is_reno(tp) && !(flag&FLAG_NOT_DUP))) {
 			tp->snd_cwnd_cnt = decr&1;
 			decr >>= 1;
 
@@ -2029,7 +2039,7 @@ static int tcp_try_undo_recovery(struct sock *sk)
 		NET_INC_STATS_BH(LINUX_MIB_TCPFULLUNDO);
 		tp->undo_marker = 0;
 	}
-	if (tp->snd_una == tp->high_seq && IsReno(tp)) {
+	if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
 		/* Hold old state until something *above* high_seq
 		 * is ACKed. For Reno it is MUST to prevent false
 		 * fast retransmits (RFC2582). SACK TCP is safe. */
@@ -2059,7 +2069,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	/* Partial ACK arrived. Force Hoe's retransmit. */
-	int failed = IsReno(tp) || tp->fackets_out>tp->reordering;
+	int failed = tcp_is_reno(tp) || tp->fackets_out>tp->reordering;
 
 	if (tcp_may_undo(tp)) {
 		/* Plain luck! Hole if filled with delayed
@@ -2104,7 +2114,7 @@ static int tcp_try_undo_loss(struct sock *sk)
 		NET_INC_STATS_BH(LINUX_MIB_TCPLOSSUNDO);
 		inet_csk(sk)->icsk_retransmits = 0;
 		tp->undo_marker = 0;
-		if (!IsReno(tp))
+		if (tcp_is_sack(tp))
 			tcp_set_ca_state(sk, TCP_CA_Open);
 		return 1;
 	}
@@ -2251,14 +2261,14 @@ tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 		if (!tp->undo_marker ||
 		    /* For SACK case do not Open to allow to undo
 		     * catching for all duplicate ACKs. */
-		    IsReno(tp) || tp->snd_una != tp->high_seq) {
+		    tcp_is_reno(tp) || tp->snd_una != tp->high_seq) {
 			tp->undo_marker = 0;
 			tcp_set_ca_state(sk, TCP_CA_Open);
 		}
 		break;
 
 	case TCP_CA_Recovery:
-		if (IsReno(tp))
+		if (tcp_is_reno(tp))
 			tcp_reset_reno_sack(tp);
 		if (tcp_try_undo_recovery(sk))
 			return;
@@ -2271,7 +2281,7 @@ tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 	switch (icsk->icsk_ca_state) {
 	case TCP_CA_Recovery:
 		if (!(flag & FLAG_SND_UNA_ADVANCED)) {
-			if (IsReno(tp) && is_dupack)
+			if (tcp_is_reno(tp) && is_dupack)
 				tcp_add_reno_sack(sk);
 		} else
 			do_lost = tcp_try_undo_partial(sk, pkts_acked);
@@ -2288,7 +2298,7 @@ tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 			return;
 		/* Loss is undone; fall through to processing in Open state. */
 	default:
-		if (IsReno(tp)) {
+		if (tcp_is_reno(tp)) {
 			if (flag & FLAG_SND_UNA_ADVANCED)
 				tcp_reset_reno_sack(tp);
 			if (is_dupack)
@@ -2316,7 +2326,7 @@ tcp_fastretrans_alert(struct sock *sk, int pkts_acked, int flag)
 
 	/* Otherwise enter Recovery state */
 
-	if (IsReno(tp))
+	if (tcp_is_reno(tp))
 		NET_INC_STATS_BH(LINUX_MIB_TCPRENORECOVERY);
 	else
 		NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERY);
@@ -2573,7 +2583,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
 		tcp_ack_update_rtt(sk, acked, seq_rtt);
 		tcp_ack_packets_out(sk);
 
-		if (IsReno(tp))
+		if (tcp_is_reno(tp))
 			tcp_remove_reno_sacks(sk, pkts_acked);
 
 		if (ca_ops->pkts_acked) {
@@ -2599,7 +2609,7 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
 	BUG_TRAP((int)tp->sacked_out >= 0);
 	BUG_TRAP((int)tp->lost_out >= 0);
 	BUG_TRAP((int)tp->retrans_out >= 0);
-	if (!tp->packets_out && tp->rx_opt.sack_ok) {
+	if (!tp->packets_out && tcp_is_sack(tp)) {
 		const struct inet_connection_sock *icsk = inet_csk(sk);
 		if (tp->lost_out) {
 			printk(KERN_DEBUG "Leak l=%u %d\n",
@@ -2779,7 +2789,7 @@ static int tcp_process_frto(struct sock *sk, int flag)
 		return 1;
 	}
 
-	if (!IsSackFrto() || IsReno(tp)) {
+	if (!IsSackFrto() || tcp_is_reno(tp)) {
 		/* RFC4138 shortcoming in step 2; should also have case c):
 		 * ACK isn't duplicate nor advances window, e.g., opposite dir
 		 * data, winupdate
@@ -3263,7 +3273,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
 	 * Probably, we should reset in this case. For now drop them.
 	 */
 	__skb_queue_purge(&tp->out_of_order_queue);
-	if (tp->rx_opt.sack_ok)
+	if (tcp_is_sack(tp))
 		tcp_sack_reset(&tp->rx_opt);
 	sk_stream_mem_reclaim(sk);
 
@@ -3293,7 +3303,7 @@ static inline int tcp_sack_extend(struct tcp_sack_block *sp, u32 seq, u32 end_se
 
 static void tcp_dsack_set(struct tcp_sock *tp, u32 seq, u32 end_seq)
 {
-	if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) {
+	if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
 		if (before(seq, tp->rcv_nxt))
 			NET_INC_STATS_BH(LINUX_MIB_TCPDSACKOLDSENT);
 		else
@@ -3323,7 +3333,7 @@ static void tcp_send_dupack(struct sock *sk, struct sk_buff *skb)
 		NET_INC_STATS_BH(LINUX_MIB_DELAYEDACKLOST);
 		tcp_enter_quickack_mode(sk);
 
-		if (tp->rx_opt.sack_ok && sysctl_tcp_dsack) {
+		if (tcp_is_sack(tp) && sysctl_tcp_dsack) {
 			u32 end_seq = TCP_SKB_CB(skb)->end_seq;
 
 			if (after(TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt))
@@ -3639,7 +3649,7 @@ drop:
 
 	if (!skb_peek(&tp->out_of_order_queue)) {
 		/* Initial out of order segment, build 1 SACK. */
-		if (tp->rx_opt.sack_ok) {
+		if (tcp_is_sack(tp)) {
 			tp->rx_opt.num_sacks = 1;
 			tp->rx_opt.dsack = 0;
 			tp->rx_opt.eff_sacks = 1;
@@ -3704,7 +3714,7 @@ drop:
 		}
 
 add_sack:
-		if (tp->rx_opt.sack_ok)
+		if (tcp_is_sack(tp))
 			tcp_sack_new_ofo_skb(sk, seq, end_seq);
 	}
 }
@@ -3893,7 +3903,7 @@ static int tcp_prune_queue(struct sock *sk)
 		 * is in a sad state like this, we care only about integrity
 		 * of the connection not performance.
 		 */
-		if (tp->rx_opt.sack_ok)
+		if (tcp_is_sack(tp))
 			tcp_sack_reset(&tp->rx_opt);
 		sk_stream_mem_reclaim(sk);
 	}
@@ -4594,8 +4604,8 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
 			tp->tcp_header_len = sizeof(struct tcphdr);
 		}
 
-		if (tp->rx_opt.sack_ok && sysctl_tcp_fack)
-			tp->rx_opt.sack_ok |= 2;
+		if (tcp_is_sack(tp) && sysctl_tcp_fack)
+			tcp_enable_fack(tp);
 
 		tcp_mtup_init(sk);
 		tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index fdfe89fe646b..b61b76847ad9 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -445,7 +445,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
 		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
 		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
 			if (sysctl_tcp_fack)
-				newtp->rx_opt.sack_ok |= 2;
+				tcp_enable_fack(newtp);
 		}
 		newtp->window_clamp = req->window_clamp;
 		newtp->rcv_ssthresh = req->rcv_wnd;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index a92fad55cd32..a3679174e78a 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -737,7 +737,7 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
 
 	if (diff > 0) {
 		/* Adjust Reno SACK estimate. */
-		if (!tp->rx_opt.sack_ok) {
+		if (tcp_is_reno(tp)) {
 			tcp_dec_pcount_approx_int(&tp->sacked_out, diff);
 			tcp_verify_left_out(tp);
 		}
@@ -1728,7 +1728,7 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
 	if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST)
 		tp->lost_out -= tcp_skb_pcount(next_skb);
 	/* Reno case is special. Sigh... */
-	if (!tp->rx_opt.sack_ok && tp->sacked_out)
+	if (tcp_is_reno(tp) && tp->sacked_out)
 		tcp_dec_pcount_approx(&tp->sacked_out, next_skb);
 
 	/* Not quite right: it can be > snd.fack, but
@@ -1976,7 +1976,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 		return;
 
 	/* No forward retransmissions in Reno are possible. */
-	if (!tp->rx_opt.sack_ok)
+	if (tcp_is_reno(tp))
 		return;
 
 	/* Yeah, we have to make difficult choice between forward transmission
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index e9b151b3a598..d8970ecfcfc8 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -315,7 +315,7 @@ static void tcp_retransmit_timer(struct sock *sk)
 	if (icsk->icsk_retransmits == 0) {
 		if (icsk->icsk_ca_state == TCP_CA_Disorder ||
 		    icsk->icsk_ca_state == TCP_CA_Recovery) {
-			if (tp->rx_opt.sack_ok) {
+			if (tcp_is_sack(tp)) {
 				if (icsk->icsk_ca_state == TCP_CA_Recovery)
 					NET_INC_STATS_BH(LINUX_MIB_TCPSACKRECOVERYFAIL);
 				else