author     Yuchung Cheng <ycheng@google.com>       2013-05-29 10:20:12 -0400
committer  David S. Miller <davem@davemloft.net>   2013-05-30 21:06:11 -0400
commit     6a63df46a7363833a0dc0c431027f522b3487972 (patch)
tree       c3d2b0919c7ee440bf3d9ad7e67e6485767d7539 /net/ipv4/tcp_input.c
parent     6804973ffb4288bba14d53223e2fbb2bbd1d2e1b (diff)
tcp: refactor undo functions
Refactor and relocate various functions and variables to prepare for the
undo fix. Remove some unused function arguments. Rename tcp_undo_cwr to
tcp_undo_cwnd_reduction to be consistent with the rest of the CWR-related
function names.
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Acked-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
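
For orientation, the sketch below distills the consolidated helper as it appears
in the hunks that follow. It is a simplified reading of the patch, not verbatim
kernel code, and it elides the cwnd/ssthresh restoration the function already
performed before this change:

    /* Simplified sketch distilled from the diff below (not verbatim kernel code).
     * The loss unmarking that tcp_try_undo_loss() used to do inline, and the
     * undo_marker clearing that each caller repeated, now live in one helper.
     */
    static void tcp_undo_cwnd_reduction(struct sock *sk, const bool undo_ssthresh,
                                        bool unmark_loss)
    {
            struct tcp_sock *tp = tcp_sk(sk);

            if (unmark_loss) {
                    struct sk_buff *skb;

                    /* Drop the LOST mark from every skb still in the write queue. */
                    tcp_for_write_queue(skb, sk) {
                            if (skb == tcp_send_head(sk))
                                    break;
                            TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
                    }
                    tp->lost_out = 0;
                    tcp_clear_all_retrans_hints(tp);
            }

            /* ... restore snd_cwnd (and ssthresh when undo_ssthresh) as before ... */
            tp->snd_cwnd_stamp = tcp_time_stamp;

            if (undo_ssthresh)
                    tp->undo_marker = 0;
    }

Callers then select the behaviour they need through the two flags, as the
call-site changes in the diff show.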
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--  net/ipv4/tcp_input.c  97
1 file changed, 50 insertions, 47 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 86b5fa72ff9e..fcb668d1860d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2243,10 +2243,23 @@ static void DBGUNDO(struct sock *sk, const char *msg)
 #define DBGUNDO(x...) do { } while (0)
 #endif
 
-static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh)
+static void tcp_undo_cwnd_reduction(struct sock *sk, const bool undo_ssthresh,
+                                    bool unmark_loss)
 {
         struct tcp_sock *tp = tcp_sk(sk);
 
+        if (unmark_loss) {
+                struct sk_buff *skb;
+
+                tcp_for_write_queue(skb, sk) {
+                        if (skb == tcp_send_head(sk))
+                                break;
+                        TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
+                }
+                tp->lost_out = 0;
+                tcp_clear_all_retrans_hints(tp);
+        }
+
         if (tp->prior_ssthresh) {
                 const struct inet_connection_sock *icsk = inet_csk(sk);
 
@@ -2263,6 +2276,9 @@ static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh)
                 tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh);
         }
         tp->snd_cwnd_stamp = tcp_time_stamp;
+
+        if (undo_ssthresh)
+                tp->undo_marker = 0;
 }
 
 static inline bool tcp_may_undo(const struct tcp_sock *tp)
@@ -2282,14 +2298,13 @@ static bool tcp_try_undo_recovery(struct sock *sk)
                  * or our original transmission succeeded.
                  */
                 DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
-                tcp_undo_cwr(sk, true);
+                tcp_undo_cwnd_reduction(sk, true, false);
                 if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
                         mib_idx = LINUX_MIB_TCPLOSSUNDO;
                 else
                         mib_idx = LINUX_MIB_TCPFULLUNDO;
 
                 NET_INC_STATS_BH(sock_net(sk), mib_idx);
-                tp->undo_marker = 0;
         }
         if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) {
                 /* Hold old state until something *above* high_seq
@@ -2309,8 +2324,7 @@ static void tcp_try_undo_dsack(struct sock *sk)
 
         if (tp->undo_marker && !tp->undo_retrans) {
                 DBGUNDO(sk, "D-SACK");
-                tcp_undo_cwr(sk, true);
-                tp->undo_marker = 0;
+                tcp_undo_cwnd_reduction(sk, true, false);
                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
         }
 }
@@ -2344,60 +2358,20 @@ static bool tcp_any_retrans_done(const struct sock *sk)
         return false;
 }
 
-/* Undo during fast recovery after partial ACK. */
-
-static int tcp_try_undo_partial(struct sock *sk, int acked)
-{
-        struct tcp_sock *tp = tcp_sk(sk);
-        /* Partial ACK arrived. Force Hoe's retransmit. */
-        int failed = tcp_is_reno(tp) || (tcp_fackets_out(tp) > tp->reordering);
-
-        if (tcp_may_undo(tp)) {
-                /* Plain luck! Hole if filled with delayed
-                 * packet, rather than with a retransmit.
-                 */
-                if (!tcp_any_retrans_done(sk))
-                        tp->retrans_stamp = 0;
-
-                tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
-
-                DBGUNDO(sk, "Hoe");
-                tcp_undo_cwr(sk, false);
-                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
-
-                /* So... Do not make Hoe's retransmit yet.
-                 * If the first packet was delayed, the rest
-                 * ones are most probably delayed as well.
-                 */
-                failed = 0;
-        }
-        return failed;
-}
-
 /* Undo during loss recovery after partial ACK or using F-RTO. */
 static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
 {
         struct tcp_sock *tp = tcp_sk(sk);
 
         if (frto_undo || tcp_may_undo(tp)) {
-                struct sk_buff *skb;
-                tcp_for_write_queue(skb, sk) {
-                        if (skb == tcp_send_head(sk))
-                                break;
-                        TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
-                }
-
-                tcp_clear_all_retrans_hints(tp);
+                tcp_undo_cwnd_reduction(sk, true, true);
 
                 DBGUNDO(sk, "partial loss");
-                tp->lost_out = 0;
-                tcp_undo_cwr(sk, true);
                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
                 if (frto_undo)
                         NET_INC_STATS_BH(sock_net(sk),
                                          LINUX_MIB_TCPSPURIOUSRTOS);
                 inet_csk(sk)->icsk_retransmits = 0;
-                tp->undo_marker = 0;
                 if (frto_undo || tcp_is_sack(tp))
                         tcp_set_ca_state(sk, TCP_CA_Open);
                 return true;
@@ -2669,6 +2643,35 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
         tcp_xmit_retransmit_queue(sk);
 }
 
+/* Undo during fast recovery after partial ACK. */
+static bool tcp_try_undo_partial(struct sock *sk, int acked)
+{
+        struct tcp_sock *tp = tcp_sk(sk);
+        /* Partial ACK arrived. Force Hoe's retransmit. */
+        bool failed = tcp_is_reno(tp) || (tcp_fackets_out(tp) > tp->reordering);
+
+        if (tcp_may_undo(tp)) {
+                /* Plain luck! Hole if filled with delayed
+                 * packet, rather than with a retransmit.
+                 */
+                if (!tcp_any_retrans_done(sk))
+                        tp->retrans_stamp = 0;
+
+                tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
+
+                DBGUNDO(sk, "Hoe");
+                tcp_undo_cwnd_reduction(sk, false, false);
+                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
+
+                /* So... Do not make Hoe's retransmit yet.
+                 * If the first packet was delayed, the rest
+                 * ones are most probably delayed as well.
+                 */
+                failed = false;
+        }
+        return failed;
+}
+
 /* Process an event, which can update packets-in-flight not trivially.
  * Main goal of this function is to calculate new estimate for left_out,
  * taking into account both packets sitting in receiver's buffer and
@@ -2686,7 +2689,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
 {
         struct inet_connection_sock *icsk = inet_csk(sk);
         struct tcp_sock *tp = tcp_sk(sk);
-        int do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
+        bool do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) &&
                                     (tcp_fackets_out(tp) > tp->reordering));
         int fast_rexmit = 0;
 
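
To summarize the call-site changes above, the old tcp_undo_cwr() invocations map
onto the renamed helper as follows; the argument values are taken straight from
the hunks, nothing here is new behaviour:

    /* Old call                       New call (per the hunks above)
     *
     * tcp_try_undo_recovery():
     *   tcp_undo_cwr(sk, true);   -> tcp_undo_cwnd_reduction(sk, true,  false);
     * tcp_try_undo_dsack():
     *   tcp_undo_cwr(sk, true);   -> tcp_undo_cwnd_reduction(sk, true,  false);
     * tcp_try_undo_loss():
     *   tcp_undo_cwr(sk, true);   -> tcp_undo_cwnd_reduction(sk, true,  true);
     * tcp_try_undo_partial():
     *   tcp_undo_cwr(sk, false);  -> tcp_undo_cwnd_reduction(sk, false, false);
     */

The explicit tp->undo_marker = 0 and loss-unmarking statements that used to
follow the first three calls disappear from the callers because the helper now
performs them when the corresponding flag is set.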