aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv6/inet6_connection_sock.c
diff options
context:
space:
mode:
authorEric Dumazet <edumazet@google.com>2015-03-19 22:04:20 -0400
committerDavid S. Miller <davem@davemloft.net>2015-03-20 12:40:25 -0400
commitfa76ce7328b289b6edd476e24eb52fd634261720 (patch)
tree2e4c116a4e299700c185d73018bbb3518e46e1bb /net/ipv6/inet6_connection_sock.c
parent52452c542559ac980b48dbf22a30ee7fa0af507c (diff)
inet: get rid of central tcp/dccp listener timer
One of the major issues for TCP is the SYNACK rtx handling, done by inet_csk_reqsk_queue_prune(), fired by the keepalive timer of a TCP_LISTEN socket. This function runs for awfully long times, with the socket lock held, meaning that other cpus needing this lock have to spin for hundreds of ms. SYNACK are sent in huge bursts, likely to cause severe drops anyway. This model was OK 15 years ago when memory was very tight. We now can afford to have a timer per request sock. Timer invocations no longer need to lock the listener, and can be run from all cpus in parallel. With the following patch increasing somaxconn width to 32 bits, I tested a listener with more than 4 million active request sockets, and a steady SYNFLOOD of ~200,000 SYN per second. Host was sending ~830,000 SYNACK per second. This is ~100 times more than what we could achieve before this patch. Later, we will get rid of the listener hash and use ehash instead. Signed-off-by: Eric Dumazet <edumazet@google.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv6/inet6_connection_sock.c')
-rw-r--r--net/ipv6/inet6_connection_sock.c19
1 file changed, 10 insertions, 9 deletions
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index b7acb9ebc4f5..2f3bbe569e8f 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -112,21 +112,20 @@ static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
112 return c & (synq_hsize - 1); 112 return c & (synq_hsize - 1);
113} 113}
114 114
115struct request_sock *inet6_csk_search_req(const struct sock *sk, 115struct request_sock *inet6_csk_search_req(struct sock *sk,
116 const __be16 rport, 116 const __be16 rport,
117 const struct in6_addr *raddr, 117 const struct in6_addr *raddr,
118 const struct in6_addr *laddr, 118 const struct in6_addr *laddr,
119 const int iif) 119 const int iif)
120{ 120{
121 const struct inet_connection_sock *icsk = inet_csk(sk); 121 struct inet_connection_sock *icsk = inet_csk(sk);
122 struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt; 122 struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
123 struct request_sock *req; 123 struct request_sock *req;
124 u32 hash = inet6_synq_hash(raddr, rport, lopt->hash_rnd,
125 lopt->nr_table_entries);
124 126
125 for (req = lopt->syn_table[inet6_synq_hash(raddr, rport, 127 write_lock(&icsk->icsk_accept_queue.syn_wait_lock);
126 lopt->hash_rnd, 128 for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
127 lopt->nr_table_entries)];
128 req != NULL;
129 req = req->dl_next) {
130 const struct inet_request_sock *ireq = inet_rsk(req); 129 const struct inet_request_sock *ireq = inet_rsk(req);
131 130
132 if (ireq->ir_rmt_port == rport && 131 if (ireq->ir_rmt_port == rport &&
@@ -134,12 +133,14 @@ struct request_sock *inet6_csk_search_req(const struct sock *sk,
134 ipv6_addr_equal(&ireq->ir_v6_rmt_addr, raddr) && 133 ipv6_addr_equal(&ireq->ir_v6_rmt_addr, raddr) &&
135 ipv6_addr_equal(&ireq->ir_v6_loc_addr, laddr) && 134 ipv6_addr_equal(&ireq->ir_v6_loc_addr, laddr) &&
136 (!ireq->ir_iif || ireq->ir_iif == iif)) { 135 (!ireq->ir_iif || ireq->ir_iif == iif)) {
136 atomic_inc(&req->rsk_refcnt);
137 WARN_ON(req->sk != NULL); 137 WARN_ON(req->sk != NULL);
138 return req; 138 break;
139 } 139 }
140 } 140 }
141 write_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
141 142
142 return NULL; 143 return req;
143} 144}
144EXPORT_SYMBOL_GPL(inet6_csk_search_req); 145EXPORT_SYMBOL_GPL(inet6_csk_search_req);
145 146