Diffstat (limited to 'net')
 net/ipv4/tcp_input.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index e72af7e31d1c..bef9f04c22ba 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2659,7 +2659,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
 #define DBGUNDO(x...) do { } while (0)
 #endif
 
-static void tcp_undo_cwr(struct sock *sk, const int undo_ssthresh)
+static void tcp_undo_cwr(struct sock *sk, const bool undo_ssthresh)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2698,7 +2698,7 @@ static int tcp_try_undo_recovery(struct sock *sk)
 	 * or our original transmission succeeded.
 	 */
 	DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans");
-	tcp_undo_cwr(sk, 1);
+	tcp_undo_cwr(sk, true);
 	if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
 		mib_idx = LINUX_MIB_TCPLOSSUNDO;
 	else
@@ -2725,7 +2725,7 @@ static void tcp_try_undo_dsack(struct sock *sk)
 
 	if (tp->undo_marker && !tp->undo_retrans) {
 		DBGUNDO(sk, "D-SACK");
-		tcp_undo_cwr(sk, 1);
+		tcp_undo_cwr(sk, true);
 		tp->undo_marker = 0;
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDSACKUNDO);
 	}
@@ -2778,7 +2778,7 @@ static int tcp_try_undo_partial(struct sock *sk, int acked)
 		tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1);
 
 		DBGUNDO(sk, "Hoe");
-		tcp_undo_cwr(sk, 0);
+		tcp_undo_cwr(sk, false);
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO);
 
 		/* So... Do not make Hoe's retransmit yet.
@@ -2807,7 +2807,7 @@ static int tcp_try_undo_loss(struct sock *sk)
 
 		DBGUNDO(sk, "partial loss");
 		tp->lost_out = 0;
-		tcp_undo_cwr(sk, 1);
+		tcp_undo_cwr(sk, true);
 		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSSUNDO);
 		inet_csk(sk)->icsk_retransmits = 0;
 		tp->undo_marker = 0;
@@ -3496,7 +3496,7 @@ static void tcp_undo_spur_to_response(struct sock *sk, int flag)
 	if (flag & FLAG_ECE)
 		tcp_ratehalving_spur_to_response(sk);
 	else
-		tcp_undo_cwr(sk, 1);
+		tcp_undo_cwr(sk, true);
 }
 
 /* F-RTO spurious RTO detection algorithm (RFC4138)
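
The whole patch is one mechanical conversion: the undo_ssthresh flag of tcp_undo_cwr() changes from const int to const bool, and every 0/1 literal at the call sites becomes false/true, with no behavioural change. The standalone sketch below shows the same before/after pattern in isolation; it is not kernel code, and the names (sock_stub, undo_cwr, the prior_cwnd/prior_ssthresh fields) and the simplified undo logic are made up for illustration only.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the socket state the real helper touches. */
struct sock_stub {
	unsigned int snd_cwnd, prior_cwnd;
	unsigned int snd_ssthresh, prior_ssthresh;
};

/* Before the patch the flag would have been "const int undo_ssthresh";
 * after it, the parameter is a bool and callers pass true/false. */
static void undo_cwr(struct sock_stub *sk, const bool undo_ssthresh)
{
	/* Restore the congestion window remembered before the reduction. */
	if (sk->prior_cwnd > sk->snd_cwnd)
		sk->snd_cwnd = sk->prior_cwnd;

	/* Only roll back ssthresh when the caller asks for a full undo. */
	if (undo_ssthresh && sk->prior_ssthresh > sk->snd_ssthresh)
		sk->snd_ssthresh = sk->prior_ssthresh;
}

int main(void)
{
	struct sock_stub sk = {
		.snd_cwnd = 5,  .prior_cwnd = 10,
		.snd_ssthresh = 4, .prior_ssthresh = 8,
	};

	undo_cwr(&sk, true);   /* was undo_cwr(&sk, 1): full undo, as in tcp_try_undo_recovery() */
	undo_cwr(&sk, false);  /* was undo_cwr(&sk, 0): cwnd-only undo, as in tcp_try_undo_partial() */

	printf("cwnd=%u ssthresh=%u\n", sk.snd_cwnd, sk.snd_ssthresh);
	return 0;
}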