aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/tcp_input.c
diff options
context:
space:
mode:
authorEric Dumazet <edumazet@google.com>2016-04-14 01:05:39 -0400
committerDavid S. Miller <davem@davemloft.net>2016-04-15 16:45:44 -0400
commitb3d051477cf94e9d71d6acadb8a90de15237b9c1 (patch)
tree59009bc698f472b31b15059972e6904cd9272d32 /net/ipv4/tcp_input.c
parentac18dd9e842294377dbaf1e8d169493567a81fa1 (diff)
tcp: do not mess with listener sk_wmem_alloc
When removing sk_refcnt manipulation on synflood, I missed that using skb_set_owner_w() was racy, if sk->sk_wmem_alloc had already transitioned to 0. We should hold sk_refcnt instead, but this is a big deal under attack. (Doing so increase performance from 3.2 Mpps to 3.8 Mpps only) In this patch, I chose to not attach a socket to syncookies skb. Performance is now 5 Mpps instead of 3.2 Mpps. Following patch will remove last known false sharing in tcp_rcv_state_process() Fixes: 3b24d854cb35 ("tcp/dccp: do not touch listener sk_refcnt under synflood") Signed-off-by: Eric Dumazet <edumazet@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/tcp_input.c')
-rw-r--r--net/ipv4/tcp_input.c7
1 file changed, 4 insertions, 3 deletions
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 983f04c11177..7ea7034af83f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -6327,7 +6327,7 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 	}
 	if (fastopen_sk) {
 		af_ops->send_synack(fastopen_sk, dst, &fl, req,
-				    &foc, false);
+				    &foc, TCP_SYNACK_FASTOPEN);
 		/* Add the child socket directly into the accept queue */
 		inet_csk_reqsk_queue_add(sk, req, fastopen_sk);
 		sk->sk_data_ready(sk);
@@ -6337,8 +6337,9 @@ int tcp_conn_request(struct request_sock_ops *rsk_ops,
 		tcp_rsk(req)->tfo_listener = false;
 		if (!want_cookie)
 			inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
-		af_ops->send_synack(sk, dst, &fl, req,
-				    &foc, !want_cookie);
+		af_ops->send_synack(sk, dst, &fl, req, &foc,
+				    !want_cookie ? TCP_SYNACK_NORMAL :
+						   TCP_SYNACK_COOKIE);
 		if (want_cookie) {
 			reqsk_free(req);
 			return 0;