author | Soheil Hassas Yeganeh <soheil@google.com> | 2017-03-15 16:30:46 -0400
committer | David S. Miller <davem@davemloft.net> | 2017-03-16 23:33:56 -0400
commit | 4396e46187ca5070219b81773c4e65088dac50cc (patch)
tree | e38eaa381c005da5bcd3d979d53c8dc699e3333d /net/ipv4/tcp_ipv4.c
parent | d82bae12dc38d79a2b77473f5eb0612a3d69c55b (diff)
tcp: remove tcp_tw_recycle
tcp_tw_recycle was already broken for connections behind NAT,
since the per-destination timestamp is not monotonically
increasing for multiple machines behind a single destination
address.

After the randomization of TCP timestamp offsets in commit
8a5bd45f6616 ("tcp: randomize tcp timestamp offsets for each
connection"), tcp_tw_recycle is broken for all types of
connections for the same reason: the timestamps received from a
single machine are no longer monotonically increasing.

Remove tcp_tw_recycle, since it is no longer functional. Also
remove the PAWSPassive SNMP counter, since it is only used by
tcp_tw_recycle, and simplify tcp_v4_route_req and tcp_v6_route_req,
since their strict argument is only set when tcp_tw_recycle is
enabled.
Signed-off-by: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Cc: Lutz Vieweg <lvml@5t9.de>
Cc: Florian Westphal <fw@strlen.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
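As an aside, the failure mode described in the message can be reproduced with a minimal userspace sketch. This is an illustration only, not kernel code; peer_metrics and syn_acceptable() are hypothetical stand-ins for the per-destination state the kernel kept in tcp_metrics. The point is simply that one cached per-destination timestamp cannot stay monotonic when several unrelated timestamp clocks share one address.

/*
 * Minimal userspace sketch (not kernel code) of the per-destination
 * timestamp heuristic that tcp_tw_recycle relied on.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct peer_metrics {            /* one entry per destination IP */
        uint32_t ts_recent;      /* highest TSval seen from that IP */
};

/* Accept a SYN only if its timestamp is not older than the cached one. */
static bool syn_acceptable(struct peer_metrics *pm, uint32_t tsval)
{
        if ((int32_t)(tsval - pm->ts_recent) < 0)
                return false;    /* looks like an old duplicate */
        pm->ts_recent = tsval;
        return true;
}

int main(void)
{
        struct peer_metrics nat_ip = { 0 };

        /* Two hosts (or two connections with randomized timestamp
         * offsets) sharing one NAT address: their TSval clocks are
         * unrelated, so the monotonicity assumption fails. */
        uint32_t host_a_ts = 4000000;   /* arbitrary example values */
        uint32_t host_b_ts = 1000;

        printf("host A SYN: %s\n",
               syn_acceptable(&nat_ip, host_a_ts) ? "accepted" : "dropped");
        printf("host B SYN: %s\n",
               syn_acceptable(&nat_ip, host_b_ts) ? "accepted" : "dropped");
        return 0;
}

Running this prints "accepted" for host A and "dropped" for host B: the legitimate SYN from the second host behind the NAT is discarded, which is exactly the breakage the commit message describes.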
Diffstat (limited to 'net/ipv4/tcp_ipv4.c')
-rw-r--r-- | net/ipv4/tcp_ipv4.c | 15 ++-------------
1 file changed, 2 insertions(+), 13 deletions(-)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index d8b401fff9fe..7482b5d11861 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1213,19 +1213,9 @@ static void tcp_v4_init_req(struct request_sock *req,
 
 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
                                           struct flowi *fl,
-                                          const struct request_sock *req,
-                                          bool *strict)
+                                          const struct request_sock *req)
 {
-        struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
-
-        if (strict) {
-                if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
-                        *strict = true;
-                else
-                        *strict = false;
-        }
-
-        return dst;
+        return inet_csk_route_req(sk, &fl->u.ip4, req);
 }
 
 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
@@ -2462,7 +2452,6 @@ static int __net_init tcp_sk_init(struct net *net)
         net->ipv4.sysctl_tcp_tw_reuse = 0;
 
         cnt = tcp_hashinfo.ehash_mask + 1;
-        net->ipv4.tcp_death_row.sysctl_tw_recycle = 0;
         net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
         net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;
 
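The commit message also simplifies tcp_v6_route_req in the same way; that change lives in net/ipv6/tcp_ipv6.c and is outside this diffstat, which is limited to net/ipv4/tcp_ipv4.c. As a sketch of what the simplified IPv6 helper plausibly looks like after this patch (assumed, not shown in this diff), it reduces to the same kind of one-line wrapper:

static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
                                          struct flowi *fl,
                                          const struct request_sock *req)
{
        /* With the strict argument gone, this is just a thin wrapper
         * around the existing IPv6 request routing call. */
        return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}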