diff options
author | Nikolay Borisov <kernel@kyup.com> | 2016-02-03 02:46:53 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2016-02-07 14:35:10 -0500 |
commit | ae5c3f406cffe15ffd2aa544961b7cd027468d46 (patch) | |
tree | 4f5499a05fc3b033982b0678980f5e5415b3ab29 /net/ipv4/tcp_timer.c | |
parent | 1043e25ff96a1efc7bd34d11f5f32203a28a3bd7 (diff) |
ipv4: Namespaceify tcp_retries1 sysctl knob
Signed-off-by: Nikolay Borisov <kernel@kyup.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_timer.c')
-rw-r--r-- | net/ipv4/tcp_timer.c | 8 |
1 file changed, 4 insertions, 4 deletions
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index ca25fdf0c525..6694e33149b9 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/gfp.h> | 22 | #include <linux/gfp.h> |
23 | #include <net/tcp.h> | 23 | #include <net/tcp.h> |
24 | 24 | ||
25 | int sysctl_tcp_retries1 __read_mostly = TCP_RETR1; | ||
26 | int sysctl_tcp_retries2 __read_mostly = TCP_RETR2; | 25 | int sysctl_tcp_retries2 __read_mostly = TCP_RETR2; |
27 | int sysctl_tcp_orphan_retries __read_mostly; | 26 | int sysctl_tcp_orphan_retries __read_mostly; |
28 | int sysctl_tcp_thin_linear_timeouts __read_mostly; | 27 | int sysctl_tcp_thin_linear_timeouts __read_mostly; |
@@ -171,7 +170,7 @@ static int tcp_write_timeout(struct sock *sk) | |||
171 | retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries; | 170 | retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries; |
172 | syn_set = true; | 171 | syn_set = true; |
173 | } else { | 172 | } else { |
174 | if (retransmits_timed_out(sk, sysctl_tcp_retries1, 0, 0)) { | 173 | if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0, 0)) { |
175 | /* Some middle-boxes may black-hole Fast Open _after_ | 174 | /* Some middle-boxes may black-hole Fast Open _after_ |
176 | * the handshake. Therefore we conservatively disable | 175 | * the handshake. Therefore we conservatively disable |
177 | * Fast Open on this path on recurring timeouts with | 176 | * Fast Open on this path on recurring timeouts with |
@@ -180,7 +179,7 @@ static int tcp_write_timeout(struct sock *sk) | |||
180 | if (tp->syn_data_acked && | 179 | if (tp->syn_data_acked && |
181 | tp->bytes_acked <= tp->rx_opt.mss_clamp) { | 180 | tp->bytes_acked <= tp->rx_opt.mss_clamp) { |
182 | tcp_fastopen_cache_set(sk, 0, NULL, true, 0); | 181 | tcp_fastopen_cache_set(sk, 0, NULL, true, 0); |
183 | if (icsk->icsk_retransmits == sysctl_tcp_retries1) | 182 | if (icsk->icsk_retransmits == net->ipv4.sysctl_tcp_retries1) |
184 | NET_INC_STATS_BH(sock_net(sk), | 183 | NET_INC_STATS_BH(sock_net(sk), |
185 | LINUX_MIB_TCPFASTOPENACTIVEFAIL); | 184 | LINUX_MIB_TCPFASTOPENACTIVEFAIL); |
186 | } | 185 | } |
@@ -359,6 +358,7 @@ static void tcp_fastopen_synack_timer(struct sock *sk) | |||
359 | void tcp_retransmit_timer(struct sock *sk) | 358 | void tcp_retransmit_timer(struct sock *sk) |
360 | { | 359 | { |
361 | struct tcp_sock *tp = tcp_sk(sk); | 360 | struct tcp_sock *tp = tcp_sk(sk); |
361 | struct net *net = sock_net(sk); | ||
362 | struct inet_connection_sock *icsk = inet_csk(sk); | 362 | struct inet_connection_sock *icsk = inet_csk(sk); |
363 | 363 | ||
364 | if (tp->fastopen_rsk) { | 364 | if (tp->fastopen_rsk) { |
@@ -489,7 +489,7 @@ out_reset_timer: | |||
489 | icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); | 489 | icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); |
490 | } | 490 | } |
491 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); | 491 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, icsk->icsk_rto, TCP_RTO_MAX); |
492 | if (retransmits_timed_out(sk, sysctl_tcp_retries1 + 1, 0, 0)) | 492 | if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0, 0)) |
493 | __sk_dst_reset(sk); | 493 | __sk_dst_reset(sk); |
494 | 494 | ||
495 | out:; | 495 | out:; |