author     Haishuang Yan <yanhaishuang@cmss.chinamobile.com>  2017-09-26 23:35:43 -0400
committer  David S. Miller <davem@davemloft.net>              2017-10-01 20:55:54 -0400
commit     3733be14a32bae288b61ed28341e593baba983af (patch)
tree       972622b76e08246aee043832bdc8224f6ac50501 /net/ipv4/tcp_fastopen.c
parent     437138485656c41e32b8c63c0987cfa0348be0e6 (diff)
ipv4: Namespaceify tcp_fastopen_blackhole_timeout knob
Applications in different network namespaces may require different time periods (in
seconds) for which Fastopen is disabled on active TCP sockets.
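
As a usage sketch only (the sysctl name below is the knob's existing name,
net.ipv4.tcp_fastopen_blackhole_timeout_sec, and ns1/ns2 are hypothetical namespaces,
not part of this patch), the timeout can then be tuned independently per namespace:

# Hypothetical namespaces, created beforehand with "ip netns add".
# Keep active TFO disabled for 1 hour after a blackhole is detected in ns1 ...
ip netns exec ns1 sysctl -w net.ipv4.tcp_fastopen_blackhole_timeout_sec=3600
# ... but only for 10 minutes in ns2.
ip netns exec ns2 sysctl -w net.ipv4.tcp_fastopen_blackhole_timeout_sec=600
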
Tested:
Simulate a situation similar to the following, where the server's data gets dropped
after the 3WHS (three-way handshake).
C ---- syn-data ---> S
C <--- syn/ack ----- S
C ---- ack --------> S
S (accept & write)
C? X <- data ------ S
[retry and timeout]
Then check the TCPFastOpenBlackhole counter in netstat; it increases as expected when
the firewall blackhole issue is detected and active TFO is disabled.
# cat /proc/net/netstat | awk '{print $91}'
TCPFastOpenBlackhole
1
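
With the state now kept in struct net, the same check can be run against an individual
namespace. A minimal sketch (ns1 is a hypothetical namespace; the counter is looked up
by name instead of relying on the fixed column $91 used above):

# Print the TCPFastOpenBlackhole counter as seen from inside ns1;
# /proc/net/netstat reflects the network namespace of the reading process.
ip netns exec ns1 awk '
    $1 == "TcpExt:" && !col { for (i = 2; i <= NF; i++) if ($i == "TCPFastOpenBlackhole") col = i; next }
    col                     { print $col; exit }
' /proc/net/netstat
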
Signed-off-by: Haishuang Yan <yanhaishuang@cmss.chinamobile.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_fastopen.c')
-rw-r--r--  net/ipv4/tcp_fastopen.c | 30 +++++++++++-------------------
1 file changed, 11 insertions(+), 19 deletions(-)
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 4eae44ac3cb0..de470e7e586f 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -422,25 +422,16 @@ EXPORT_SYMBOL(tcp_fastopen_defer_connect);
  * TFO connection with data exchanges.
  */
 
-/* Default to 1hr */
-unsigned int sysctl_tcp_fastopen_blackhole_timeout __read_mostly = 60 * 60;
-static atomic_t tfo_active_disable_times __read_mostly = ATOMIC_INIT(0);
-static unsigned long tfo_active_disable_stamp __read_mostly;
-
 /* Disable active TFO and record current jiffies and
  * tfo_active_disable_times
  */
 void tcp_fastopen_active_disable(struct sock *sk)
 {
-        atomic_inc(&tfo_active_disable_times);
-        tfo_active_disable_stamp = jiffies;
-        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPFASTOPENBLACKHOLE);
-}
+        struct net *net = sock_net(sk);
 
-/* Reset tfo_active_disable_times to 0 */
-void tcp_fastopen_active_timeout_reset(void)
-{
-        atomic_set(&tfo_active_disable_times, 0);
+        atomic_inc(&net->ipv4.tfo_active_disable_times);
+        net->ipv4.tfo_active_disable_stamp = jiffies;
+        NET_INC_STATS(net, LINUX_MIB_TCPFASTOPENBLACKHOLE);
 }
 
 /* Calculate timeout for tfo active disable
@@ -449,17 +440,18 @@ void tcp_fastopen_active_timeout_reset(void)
  */
 bool tcp_fastopen_active_should_disable(struct sock *sk)
 {
-        int tfo_da_times = atomic_read(&tfo_active_disable_times);
-        int multiplier;
+        unsigned int tfo_bh_timeout = sock_net(sk)->ipv4.sysctl_tcp_fastopen_blackhole_timeout;
+        int tfo_da_times = atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times);
         unsigned long timeout;
+        int multiplier;
 
         if (!tfo_da_times)
                 return false;
 
         /* Limit timout to max: 2^6 * initial timeout */
         multiplier = 1 << min(tfo_da_times - 1, 6);
-        timeout = multiplier * sysctl_tcp_fastopen_blackhole_timeout * HZ;
-        if (time_before(jiffies, tfo_active_disable_stamp + timeout))
+        timeout = multiplier * tfo_bh_timeout * HZ;
+        if (time_before(jiffies, sock_net(sk)->ipv4.tfo_active_disable_stamp + timeout))
                 return true;
 
         /* Mark check bit so we can check for successful active TFO
@@ -495,10 +487,10 @@ void tcp_fastopen_active_disable_ofo_check(struct sock *sk)
                         }
                 }
         } else if (tp->syn_fastopen_ch &&
-                   atomic_read(&tfo_active_disable_times)) {
+                   atomic_read(&sock_net(sk)->ipv4.tfo_active_disable_times)) {
                 dst = sk_dst_get(sk);
                 if (!(dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK)))
-                        tcp_fastopen_active_timeout_reset();
+                        atomic_set(&sock_net(sk)->ipv4.tfo_active_disable_times, 0);
                 dst_release(dst);
         }
 }
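
As a worked example of the (unchanged) backoff arithmetic above: with the default of
60 * 60 seconds, the Nth consecutive disable event keeps active TFO off for
2^min(N - 1, 6) hours, i.e. 1h, 2h, 4h, ..., capped at 64h. After this patch that
schedule is computed from each namespace's own sysctl_tcp_fastopen_blackhole_timeout
and tracked via that namespace's tfo_active_disable_times and tfo_active_disable_stamp.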