author	Eric Dumazet <edumazet@google.com>	2015-03-19 22:04:20 -0400
committer	David S. Miller <davem@davemloft.net>	2015-03-20 12:40:25 -0400
commit	fa76ce7328b289b6edd476e24eb52fd634261720 (patch)
tree	2e4c116a4e299700c185d73018bbb3518e46e1bb
parent	52452c542559ac980b48dbf22a30ee7fa0af507c (diff)
inet: get rid of central tcp/dccp listener timer
One of the major issues for TCP is the SYNACK rtx handling, done by inet_csk_reqsk_queue_prune(), fired by the keepalive timer of a TCP_LISTEN socket.

This function runs for awfully long times, with the socket lock held, meaning that other cpus needing this lock have to spin for hundreds of ms.

SYNACKs are sent in huge bursts, likely to cause severe drops anyway.

This model was OK 15 years ago when memory was very tight. We can now afford to have a timer per request sock.

Timer invocations no longer need to lock the listener, and can be run from all cpus in parallel.

With the following patch increasing somaxconn width to 32 bits, I tested a listener with more than 4 million active request sockets, and a steady SYNFLOOD of ~200,000 SYN per second. The host was sending ~830,000 SYNACK per second. This is ~100 times more than we could achieve before this patch.

Later, we will get rid of the listener hash and use ehash instead.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
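The lifetime rule at the heart of the change: each request sock is now created with a refcount of 2, one reference owned by the listener's SYN table and one by its pending rsk_timer, and is freed only when both owners have dropped theirs (see atomic_set(&req->rsk_refcnt, 2) in reqsk_queue_hash_req() below). A minimal userspace sketch of that rule, in C11 with illustrative names rather than kernel code:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
	atomic_int refcnt;	/* ref 1: hash table; ref 2: pending timer */
	int num_timeout;	/* SYNACK retransmits so far */
};

static struct request *req_alloc(void)
{
	struct request *req = calloc(1, sizeof(*req));

	/* mirrors atomic_set(&req->rsk_refcnt, 2) in reqsk_queue_hash_req() */
	atomic_init(&req->refcnt, 2);
	return req;
}

static void req_put(struct request *req)
{
	/* mirrors reqsk_put(): free on the last reference only */
	if (atomic_fetch_sub(&req->refcnt, 1) == 1)
		free(req);
}

int main(void)
{
	struct request *req = req_alloc();

	req_put(req);	/* expiring timer drops its reference */
	req_put(req);	/* unlinking from the table drops the last one */
	puts("request freed exactly once");
	return 0;
}

Because the timer path and the lookup/unlink path can race on different cpus, neither may free the request directly; each only drops its own reference, and whoever drops the last one frees.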
-rw-r--r--	include/net/inet6_connection_sock.h	2
-rw-r--r--	include/net/inet_connection_sock.h	15
-rw-r--r--	include/net/request_sock.h	87
-rw-r--r--	net/core/request_sock.c	13
-rw-r--r--	net/core/sock.c	2
-rw-r--r--	net/dccp/ipv4.c	10
-rw-r--r--	net/dccp/ipv6.c	12
-rw-r--r--	net/dccp/timer.c	24
-rw-r--r--	net/ipv4/inet_connection_sock.c	139
-rw-r--r--	net/ipv4/inet_diag.c	4
-rw-r--r--	net/ipv4/syncookies.c	1
-rw-r--r--	net/ipv4/tcp_fastopen.c	2
-rw-r--r--	net/ipv4/tcp_ipv4.c	11
-rw-r--r--	net/ipv4/tcp_minisocks.c	5
-rw-r--r--	net/ipv4/tcp_timer.c	12
-rw-r--r--	net/ipv6/inet6_connection_sock.c	19
-rw-r--r--	net/ipv6/syncookies.c	1
-rw-r--r--	net/ipv6/tcp_ipv6.c	12
18 files changed, 173 insertions, 198 deletions
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index 15bd40878d2a..6d539e4e5ba7 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -28,7 +28,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
 struct dst_entry *inet6_csk_route_req(struct sock *sk, struct flowi6 *fl6,
				      const struct request_sock *req);
 
-struct request_sock *inet6_csk_search_req(const struct sock *sk,
+struct request_sock *inet6_csk_search_req(struct sock *sk,
					  const __be16 rport,
					  const struct in6_addr *raddr,
					  const struct in6_addr *laddr,
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 423a46106e57..7b5887cd1172 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -256,7 +256,7 @@ inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
 
 struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
 
-struct request_sock *inet_csk_search_req(const struct sock *sk,
+struct request_sock *inet_csk_search_req(struct sock *sk,
					 const __be16 rport,
					 const __be32 raddr,
					 const __be32 laddr);
@@ -282,15 +282,13 @@ void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
 static inline void inet_csk_reqsk_queue_removed(struct sock *sk,
						struct request_sock *req)
 {
-	if (reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req) == 0)
-		inet_csk_delete_keepalive_timer(sk);
+	reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
 }
 
 static inline void inet_csk_reqsk_queue_added(struct sock *sk,
					      const unsigned long timeout)
 {
-	if (reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue) == 0)
-		inet_csk_reset_keepalive_timer(sk, timeout);
+	reqsk_queue_added(&inet_csk(sk)->icsk_accept_queue);
 }
 
 static inline int inet_csk_reqsk_queue_len(const struct sock *sk)
@@ -319,14 +317,9 @@ static inline void inet_csk_reqsk_queue_drop(struct sock *sk,
 {
	inet_csk_reqsk_queue_unlink(sk, req);
	inet_csk_reqsk_queue_removed(sk, req);
-	reqsk_free(req);
+	reqsk_put(req);
 }
 
-void inet_csk_reqsk_queue_prune(struct sock *parent,
-				const unsigned long interval,
-				const unsigned long timeout,
-				const unsigned long max_rto);
-
 void inet_csk_destroy_sock(struct sock *sk);
 void inet_csk_prepare_forced_close(struct sock *sk);
 
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index 65223905d139..6a91261d9b7b 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -62,7 +62,7 @@ struct request_sock {
	u32				window_clamp; /* window clamp at creation time */
	u32				rcv_wnd;	  /* rcv_wnd offered first time */
	u32				ts_recent;
-	unsigned long			expires;
+	struct timer_list		rsk_timer;
	const struct request_sock_ops	*rsk_ops;
	struct sock			*sk;
	u32				secid;
@@ -110,9 +110,6 @@ static inline void reqsk_free(struct request_sock *req)
 
 static inline void reqsk_put(struct request_sock *req)
 {
-	/* temporary debugging, until req sock are put into ehash table */
-	WARN_ON_ONCE(atomic_read(&req->rsk_refcnt) != 1);
-
	if (atomic_dec_and_test(&req->rsk_refcnt))
		reqsk_free(req);
 }
@@ -124,12 +121,16 @@ extern int sysctl_max_syn_backlog;
  * @max_qlen_log - log_2 of maximal queued SYNs/REQUESTs
  */
 struct listen_sock {
-	u8			max_qlen_log;
+	int			qlen_inc; /* protected by listener lock */
+	int			young_inc;/* protected by listener lock */
+
+	/* following fields can be updated by timer */
+	atomic_t		qlen_dec; /* qlen = qlen_inc - qlen_dec */
+	atomic_t		young_dec;
+
+	u8			max_qlen_log ____cacheline_aligned_in_smp;
	u8			synflood_warned;
	/* 2 bytes hole, try to use */
-	int			qlen;
-	int			qlen_young;
-	int			clock_hand;
	u32			hash_rnd;
	u32			nr_table_entries;
	struct request_sock	*syn_table[0];
@@ -182,9 +183,7 @@ struct fastopen_queue {
 struct request_sock_queue {
	struct request_sock	*rskq_accept_head;
	struct request_sock	*rskq_accept_tail;
-	rwlock_t		syn_wait_lock;
	u8			rskq_defer_accept;
-	/* 3 bytes hole, try to pack */
	struct listen_sock	*listen_opt;
	struct fastopen_queue	*fastopenq; /* This is non-NULL iff TFO has been
					     * enabled on this listener. Check
@@ -192,6 +191,9 @@ struct request_sock_queue {
					     * to determine if TFO is enabled
					     * right at this moment.
					     */
+
+	/* temporary alignment, our goal is to get rid of this lock */
+	rwlock_t		syn_wait_lock ____cacheline_aligned_in_smp;
 };
 
 int reqsk_queue_alloc(struct request_sock_queue *queue,
@@ -223,11 +225,15 @@ static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
	struct request_sock **prev;
 
	write_lock(&queue->syn_wait_lock);
+
	prev = &lopt->syn_table[req->rsk_hash];
	while (*prev != req)
		prev = &(*prev)->dl_next;
	*prev = req->dl_next;
+
	write_unlock(&queue->syn_wait_lock);
+	if (del_timer(&req->rsk_timer))
+		reqsk_put(req);
 }
 
 static inline void reqsk_queue_add(struct request_sock_queue *queue,
@@ -260,64 +266,53 @@ static inline struct request_sock *reqsk_queue_remove(struct request_sock_queue
	return req;
 }
 
-static inline int reqsk_queue_removed(struct request_sock_queue *queue,
-				      struct request_sock *req)
+static inline void reqsk_queue_removed(struct request_sock_queue *queue,
+				       const struct request_sock *req)
 {
	struct listen_sock *lopt = queue->listen_opt;
 
	if (req->num_timeout == 0)
-		--lopt->qlen_young;
-
-	return --lopt->qlen;
+		atomic_inc(&lopt->young_dec);
+	atomic_inc(&lopt->qlen_dec);
 }
 
-static inline int reqsk_queue_added(struct request_sock_queue *queue)
+static inline void reqsk_queue_added(struct request_sock_queue *queue)
 {
	struct listen_sock *lopt = queue->listen_opt;
-	const int prev_qlen = lopt->qlen;
 
-	lopt->qlen_young++;
-	lopt->qlen++;
-	return prev_qlen;
+	lopt->young_inc++;
+	lopt->qlen_inc++;
 }
 
-static inline int reqsk_queue_len(const struct request_sock_queue *queue)
+static inline int listen_sock_qlen(const struct listen_sock *lopt)
 {
-	return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
+	return lopt->qlen_inc - atomic_read(&lopt->qlen_dec);
 }
 
-static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
+static inline int listen_sock_young(const struct listen_sock *lopt)
 {
-	return queue->listen_opt->qlen_young;
+	return lopt->young_inc - atomic_read(&lopt->young_dec);
 }
 
-static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
+static inline int reqsk_queue_len(const struct request_sock_queue *queue)
 {
-	return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
+	const struct listen_sock *lopt = queue->listen_opt;
+
+	return lopt ? listen_sock_qlen(lopt) : 0;
 }
 
-static inline void reqsk_queue_hash_req(struct request_sock_queue *queue,
-					u32 hash, struct request_sock *req,
-					unsigned long timeout)
+static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
 {
-	struct listen_sock *lopt = queue->listen_opt;
-
-	req->expires = jiffies + timeout;
-	req->num_retrans = 0;
-	req->num_timeout = 0;
-	req->sk = NULL;
-
-	/* before letting lookups find us, make sure all req fields
-	 * are committed to memory and refcnt initialized.
-	 */
-	smp_wmb();
-	atomic_set(&req->rsk_refcnt, 1);
-
-	req->rsk_hash = hash;
-	write_lock(&queue->syn_wait_lock);
-	req->dl_next = lopt->syn_table[hash];
-	lopt->syn_table[hash] = req;
-	write_unlock(&queue->syn_wait_lock);
+	return listen_sock_young(queue->listen_opt);
 }
 
+static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
+{
+	return reqsk_queue_len(queue) >> queue->listen_opt->max_qlen_log;
+}
+
+void reqsk_queue_hash_req(struct request_sock_queue *queue,
+			  u32 hash, struct request_sock *req,
+			  unsigned long timeout);
+
 #endif /* _REQUEST_SOCK_H */
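The listen_sock accounting above encodes a split counter: qlen_inc and young_inc are plain ints written only by the listener under its socket lock, while qlen_dec and young_dec are atomics that any timer callback may bump without taking that lock, and listen_sock_qlen() reads the length as the difference. A standalone sketch of the idea (userspace C11, hypothetical names):

#include <stdatomic.h>
#include <stdio.h>

struct listen_queue {
	int qlen_inc;		/* single writer: listener, under its lock */
	atomic_int qlen_dec;	/* many writers: timer callbacks, lockless */
};

static void queue_added(struct listen_queue *q)		/* listener context */
{
	q->qlen_inc++;
}

static void queue_removed(struct listen_queue *q)	/* timer context */
{
	atomic_fetch_add(&q->qlen_dec, 1);
}

static int queue_len(const struct listen_queue *q)
{
	/* current length = increments minus decrements */
	return q->qlen_inc - atomic_load(&q->qlen_dec);
}

int main(void)
{
	struct listen_queue q = { .qlen_inc = 0 };

	atomic_init(&q.qlen_dec, 0);
	queue_added(&q);
	queue_added(&q);
	queue_removed(&q);
	printf("qlen = %d\n", queue_len(&q));	/* prints: qlen = 1 */
	return 0;
}

Both halves only ever grow, so a reader that sees a slightly stale qlen_dec merely overestimates the queue length for a moment; no lock is needed on the timer side.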
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index cc39a2aa663a..cdc0ddd9ac9f 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -94,21 +94,26 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
	/* make all the listen_opt local to us */
	struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);
 
-	if (lopt->qlen != 0) {
+	if (listen_sock_qlen(lopt) != 0) {
		unsigned int i;
 
		for (i = 0; i < lopt->nr_table_entries; i++) {
			struct request_sock *req;
 
+			write_lock_bh(&queue->syn_wait_lock);
			while ((req = lopt->syn_table[i]) != NULL) {
				lopt->syn_table[i] = req->dl_next;
-				lopt->qlen--;
+				atomic_inc(&lopt->qlen_dec);
+				if (del_timer(&req->rsk_timer))
+					reqsk_put(req);
				reqsk_put(req);
			}
+			write_unlock_bh(&queue->syn_wait_lock);
		}
	}
 
-	WARN_ON(lopt->qlen != 0);
+	if (WARN_ON(listen_sock_qlen(lopt) != 0))
+		pr_err("qlen %u\n", listen_sock_qlen(lopt));
	kvfree(lopt);
 }
 
@@ -187,7 +192,7 @@ void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
	 *
	 * For more details see CoNext'11 "TCP Fast Open" paper.
	 */
-	req->expires = jiffies + 60*HZ;
+	req->rsk_timer.expires = jiffies + 60*HZ;
	if (fastopenq->rskq_rst_head == NULL)
		fastopenq->rskq_rst_head = req;
	else
diff --git a/net/core/sock.c b/net/core/sock.c
index d9f9e4825362..744a04ddb61c 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2739,7 +2739,7 @@ static int req_prot_init(const struct proto *prot)
 
	rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
					   rsk_prot->obj_size, 0,
-					   SLAB_HWCACHE_ALIGN, NULL);
+					   0, NULL);
 
	if (!rsk_prot->slab) {
		pr_crit("%s: Can't create request sock SLAB cache!\n",
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 5bffbbaf1fac..25a9615b3b88 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -306,6 +306,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
		if (!between48(seq, dccp_rsk(req)->dreq_iss,
			       dccp_rsk(req)->dreq_gss)) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+			reqsk_put(req);
			goto out;
		}
		/*
@@ -315,6 +316,7 @@ static void dccp_v4_err(struct sk_buff *skb, u32 info)
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(sk, req);
+		reqsk_put(req);
		goto out;
 
	case DCCP_REQUESTING:
@@ -451,9 +453,11 @@ static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
	/* Find possible connection requests. */
	struct request_sock *req = inet_csk_search_req(sk, dh->dccph_sport,
						       iph->saddr, iph->daddr);
-	if (req)
-		return dccp_check_req(sk, skb, req);
-
+	if (req) {
+		nsk = dccp_check_req(sk, skb, req);
+		reqsk_put(req);
+		return nsk;
+	}
	nsk = inet_lookup_established(sock_net(sk), &dccp_hashinfo,
				      iph->saddr, dh->dccph_sport,
				      iph->daddr, dh->dccph_dport,
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index ae2184039fe3..69d8f13895ba 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -157,7 +157,7 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	req = inet6_csk_search_req(sk, dh->dccph_dport,
				   &hdr->daddr, &hdr->saddr,
				   inet6_iif(skb));
-	if (req == NULL)
+	if (!req)
		goto out;
 
	/*
@@ -169,10 +169,12 @@ static void dccp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
	if (!between48(seq, dccp_rsk(req)->dreq_iss,
		       dccp_rsk(req)->dreq_gss)) {
		NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+		reqsk_put(req);
		goto out;
	}
 
	inet_csk_reqsk_queue_drop(sk, req);
+	reqsk_put(req);
	goto out;
 
	case DCCP_REQUESTING:
@@ -322,9 +324,11 @@ static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
 
	req = inet6_csk_search_req(sk, dh->dccph_sport, &iph->saddr,
				   &iph->daddr, inet6_iif(skb));
-	if (req != NULL)
-		return dccp_check_req(sk, skb, req);
-
+	if (req) {
+		nsk = dccp_check_req(sk, skb, req);
+		reqsk_put(req);
+		return nsk;
+	}
	nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo,
					 &iph->saddr, dh->dccph_sport,
					 &iph->daddr, ntohs(dh->dccph_dport),
diff --git a/net/dccp/timer.c b/net/dccp/timer.c
index 1cd46a345cb0..3ef7acef3ce8 100644
--- a/net/dccp/timer.c
+++ b/net/dccp/timer.c
@@ -161,33 +161,11 @@ out:
	sock_put(sk);
 }
 
-/*
- * Timer for listening sockets
- */
-static void dccp_response_timer(struct sock *sk)
-{
-	inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL, DCCP_TIMEOUT_INIT,
-				   DCCP_RTO_MAX);
-}
-
 static void dccp_keepalive_timer(unsigned long data)
 {
	struct sock *sk = (struct sock *)data;
 
-	/* Only process if socket is not in use. */
-	bh_lock_sock(sk);
-	if (sock_owned_by_user(sk)) {
-		/* Try again later. */
-		inet_csk_reset_keepalive_timer(sk, HZ / 20);
-		goto out;
-	}
-
-	if (sk->sk_state == DCCP_LISTEN) {
-		dccp_response_timer(sk);
-		goto out;
-	}
-out:
-	bh_unlock_sock(sk);
+	pr_err("dccp should not use a keepalive timer !\n");
	sock_put(sk);
 }
 
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 4f57a017928c..126a37a156cf 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -23,6 +23,7 @@
 #include <net/route.h>
 #include <net/tcp_states.h>
 #include <net/xfrm.h>
+#include <net/tcp.h>
 
 #ifdef INET_CSK_DEBUG
 const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
@@ -476,31 +477,37 @@ static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
 #if IS_ENABLED(CONFIG_IPV6)
 #define AF_INET_FAMILY(fam) ((fam) == AF_INET)
 #else
-#define AF_INET_FAMILY(fam) 1
+#define AF_INET_FAMILY(fam) true
 #endif
 
-struct request_sock *inet_csk_search_req(const struct sock *sk,
-					 const __be16 rport, const __be32 raddr,
+/* Note: this is temporary :
+ * req sock will no longer be in listener hash table
+*/
+struct request_sock *inet_csk_search_req(struct sock *sk,
+					 const __be16 rport,
+					 const __be32 raddr,
					 const __be32 laddr)
 {
-	const struct inet_connection_sock *icsk = inet_csk(sk);
+	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req;
+	u32 hash = inet_synq_hash(raddr, rport, lopt->hash_rnd,
+				  lopt->nr_table_entries);
 
-	for (req = lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
-						  lopt->nr_table_entries)];
-	     req != NULL;
-	     req = req->dl_next) {
+	write_lock(&icsk->icsk_accept_queue.syn_wait_lock);
+	for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
		const struct inet_request_sock *ireq = inet_rsk(req);
 
		if (ireq->ir_rmt_port == rport &&
		    ireq->ir_rmt_addr == raddr &&
		    ireq->ir_loc_addr == laddr &&
		    AF_INET_FAMILY(req->rsk_ops->family)) {
+			atomic_inc(&req->rsk_refcnt);
			WARN_ON(req->sk);
			break;
		}
	}
+	write_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
 
	return req;
 }
@@ -556,23 +563,23 @@ int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req)
 }
 EXPORT_SYMBOL(inet_rtx_syn_ack);
 
-void inet_csk_reqsk_queue_prune(struct sock *parent,
-				const unsigned long interval,
-				const unsigned long timeout,
-				const unsigned long max_rto)
+static void reqsk_timer_handler(unsigned long data)
 {
-	struct inet_connection_sock *icsk = inet_csk(parent);
+	struct request_sock *req = (struct request_sock *)data;
+	struct sock *sk_listener = req->rsk_listener;
+	struct inet_connection_sock *icsk = inet_csk(sk_listener);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct listen_sock *lopt = queue->listen_opt;
-	int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
-	int thresh = max_retries;
-	unsigned long now = jiffies;
-	struct request_sock **reqp, *req;
-	int i, budget;
+	int expire = 0, resend = 0;
+	int max_retries, thresh;
 
-	if (lopt == NULL || lopt->qlen == 0)
+	if (sk_listener->sk_state != TCP_LISTEN || !lopt) {
+		reqsk_put(req);
		return;
+	}
 
+	max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
+	thresh = max_retries;
	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) for first timeout.
	 * If synack was not acknowledged for 1 second, it means
@@ -590,71 +597,63 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
	 * embrions; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
-	if (lopt->qlen>>(lopt->max_qlen_log-1)) {
-		int young = (lopt->qlen_young<<1);
+	if (listen_sock_qlen(lopt) >> (lopt->max_qlen_log - 1)) {
+		int young = listen_sock_young(lopt) << 1;
 
		while (thresh > 2) {
-			if (lopt->qlen < young)
+			if (listen_sock_qlen(lopt) < young)
				break;
			thresh--;
			young <<= 1;
		}
	}
-
	if (queue->rskq_defer_accept)
		max_retries = queue->rskq_defer_accept;
+	syn_ack_recalc(req, thresh, max_retries, queue->rskq_defer_accept,
+		       &expire, &resend);
+	req->rsk_ops->syn_ack_timeout(sk_listener, req);
+	if (!expire &&
+	    (!resend ||
+	     !inet_rtx_syn_ack(sk_listener, req) ||
+	     inet_rsk(req)->acked)) {
+		unsigned long timeo;
+
+		if (req->num_timeout++ == 0)
+			atomic_inc(&lopt->young_dec);
+		timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
+		mod_timer_pinned(&req->rsk_timer, jiffies + timeo);
+		return;
+	}
+	inet_csk_reqsk_queue_drop(sk_listener, req);
+	reqsk_put(req);
+}
 
-	budget = 2 * (lopt->nr_table_entries / (timeout / interval));
-	i = lopt->clock_hand;
-
-	do {
-		reqp = &lopt->syn_table[i];
-		if (!*reqp)
-			goto next_bucket;
-		write_lock(&queue->syn_wait_lock);
-		while ((req = *reqp) != NULL) {
-			if (time_after_eq(now, req->expires)) {
-				int expire = 0, resend = 0;
-
-				syn_ack_recalc(req, thresh, max_retries,
-					       queue->rskq_defer_accept,
-					       &expire, &resend);
-				req->rsk_ops->syn_ack_timeout(parent, req);
-				if (!expire &&
-				    (!resend ||
-				     !inet_rtx_syn_ack(parent, req) ||
-				     inet_rsk(req)->acked)) {
-					unsigned long timeo;
-
-					if (req->num_timeout++ == 0)
-						lopt->qlen_young--;
-					timeo = min(timeout << req->num_timeout,
-						    max_rto);
-					req->expires = now + timeo;
-					reqp = &req->dl_next;
-					continue;
-				}
-
-				/* Drop this request */
-				*reqp = req->dl_next;
-				reqsk_queue_removed(queue, req);
-				reqsk_put(req);
-				continue;
-			}
-			reqp = &req->dl_next;
-		}
-		write_unlock(&queue->syn_wait_lock);
-next_bucket:
-		i = (i + 1) & (lopt->nr_table_entries - 1);
-
-	} while (--budget > 0);
+void reqsk_queue_hash_req(struct request_sock_queue *queue,
+			  u32 hash, struct request_sock *req,
+			  unsigned long timeout)
+{
+	struct listen_sock *lopt = queue->listen_opt;
 
-	lopt->clock_hand = i;
+	req->num_retrans = 0;
+	req->num_timeout = 0;
+	req->sk = NULL;
 
-	if (lopt->qlen)
-		inet_csk_reset_keepalive_timer(parent, interval);
-}
-EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);
+	/* before letting lookups find us, make sure all req fields
+	 * are committed to memory and refcnt initialized.
+	 */
+	smp_wmb();
+	atomic_set(&req->rsk_refcnt, 2);
+	setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req);
+	req->rsk_hash = hash;
 
+	write_lock(&queue->syn_wait_lock);
+	req->dl_next = lopt->syn_table[hash];
+	lopt->syn_table[hash] = req;
+	write_unlock(&queue->syn_wait_lock);
 
+	mod_timer_pinned(&req->rsk_timer, jiffies + timeout);
+}
+EXPORT_SYMBOL(reqsk_queue_hash_req);
 
 /**
  * inet_csk_clone_lock - clone an inet socket, and lock its clone
@@ -790,8 +789,6 @@ void inet_csk_listen_stop(struct sock *sk)
	struct request_sock *acc_req;
	struct request_sock *req;
 
-	inet_csk_delete_keepalive_timer(sk);
-
	/* make all the listen_opt local to us */
	acc_req = reqsk_queue_yank_acceptq(queue);
 
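The rearm path in reqsk_timer_handler() above backs off exponentially per request, min(TCP_TIMEOUT_INIT << num_timeout, TCP_RTO_MAX), instead of rescanning hash buckets on a fixed interval. A quick sketch of the schedule this produces, assuming HZ=1000 so jiffies map to milliseconds (constants approximated here, not read from any particular kernel config):

#include <stdio.h>

#define HZ		 1000			/* assumed tick rate */
#define TCP_TIMEOUT_INIT (1 * HZ)		/* initial SYNACK timeout: 1 s */
#define TCP_RTO_MAX	 (120 * HZ)		/* retransmit timeout cap: 120 s */

static unsigned long synack_timeo(int num_timeout)
{
	unsigned long timeo = (unsigned long)TCP_TIMEOUT_INIT << num_timeout;

	/* mirrors: timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX) */
	return timeo < TCP_RTO_MAX ? timeo : TCP_RTO_MAX;
}

int main(void)
{
	/* num_timeout is incremented before the rearm, so start at 1 */
	for (int n = 1; n <= 8; n++)
		printf("after timeout #%d, rearm in %lu ms\n", n, synack_timeo(n));
	return 0;	/* 2000, 4000, 8000, ... capped at 120000 ms */
}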
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 74c39c9f3e11..34073bbe2700 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -285,7 +285,7 @@ static int inet_req_diag_fill(struct sock *sk, struct sk_buff *skb,
	BUILD_BUG_ON(offsetof(struct inet_request_sock, ir_cookie) !=
		     offsetof(struct sock, sk_cookie));
 
-	tmo = inet_reqsk(sk)->expires - jiffies;
+	tmo = inet_reqsk(sk)->rsk_timer.expires - jiffies;
	r->idiag_expires = (tmo >= 0) ? jiffies_to_msecs(tmo) : 0;
	r->idiag_rqueue = 0;
	r->idiag_wqueue = 0;
@@ -719,7 +719,7 @@ static int inet_diag_dump_reqs(struct sk_buff *skb, struct sock *sk,
	read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
 
	lopt = icsk->icsk_accept_queue.listen_opt;
-	if (!lopt || !lopt->qlen)
+	if (!lopt || !listen_sock_qlen(lopt))
		goto out;
 
	if (bc) {
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index ef01d8570358..805dc444741d 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -361,7 +361,6 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
		goto out;
	}
 
-	req->expires = 0UL;
	req->num_retrans = 0;
 
	/*
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 82e375a0cbcf..2eb887ec0ce3 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -240,7 +240,7 @@ static bool tcp_fastopen_queue_check(struct sock *sk)
		struct request_sock *req1;
		spin_lock(&fastopenq->lock);
		req1 = fastopenq->rskq_rst_head;
-		if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
+		if (!req1 || time_after(req1->rsk_timer.expires, jiffies)) {
			spin_unlock(&fastopenq->lock);
			NET_INC_STATS_BH(sock_net(sk),
					 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 19c3770f1e97..5554b8f33d41 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -475,6 +475,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 
		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+			reqsk_put(req);
			goto out;
		}
 
@@ -486,6 +487,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
		 */
		inet_csk_reqsk_queue_drop(sk, req);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+		reqsk_put(req);
		goto out;
 
	case TCP_SYN_SENT:
@@ -1398,8 +1400,11 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
	struct sock *nsk;
 
	req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
-	if (req)
-		return tcp_check_req(sk, skb, req, false);
+	if (req) {
+		nsk = tcp_check_req(sk, skb, req, false);
+		reqsk_put(req);
+		return nsk;
+	}
 
	nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
			th->source, iph->daddr, th->dest, inet_iif(skb));
@@ -2208,7 +2213,7 @@ static void get_openreq4(const struct request_sock *req,
			  struct seq_file *f, int i, kuid_t uid)
 {
	const struct inet_request_sock *ireq = inet_rsk(req);
-	long delta = req->expires - jiffies;
+	long delta = req->rsk_timer.expires - jiffies;
 
	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 848bcab358e4..274e96fb369b 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -629,8 +629,9 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			  &tcp_rsk(req)->last_oow_ack_time) &&
 
		    !inet_rtx_syn_ack(sk, req))
-			req->expires = min(TCP_TIMEOUT_INIT << req->num_timeout,
-					   TCP_RTO_MAX) + jiffies;
+			mod_timer_pending(&req->rsk_timer, jiffies +
+				min(TCP_TIMEOUT_INIT << req->num_timeout,
+				    TCP_RTO_MAX));
		return NULL;
	}
 
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 15505936511d..3daa6b5d766d 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -539,16 +539,6 @@ static void tcp_write_timer(unsigned long data)
	sock_put(sk);
 }
 
-/*
- * Timer for listening sockets
- */
-
-static void tcp_synack_timer(struct sock *sk)
-{
-	inet_csk_reqsk_queue_prune(sk, TCP_SYNQ_INTERVAL,
-				   TCP_TIMEOUT_INIT, TCP_RTO_MAX);
-}
-
 void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req)
 {
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEOUTS);
@@ -583,7 +573,7 @@ static void tcp_keepalive_timer (unsigned long data)
	}
 
	if (sk->sk_state == TCP_LISTEN) {
-		tcp_synack_timer(sk);
+		pr_err("Hmm... keepalive on a LISTEN ???\n");
		goto out;
	}
 
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index b7acb9ebc4f5..2f3bbe569e8f 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -112,21 +112,20 @@ static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport,
	return c & (synq_hsize - 1);
 }
 
-struct request_sock *inet6_csk_search_req(const struct sock *sk,
+struct request_sock *inet6_csk_search_req(struct sock *sk,
					  const __be16 rport,
					  const struct in6_addr *raddr,
					  const struct in6_addr *laddr,
					  const int iif)
 {
-	const struct inet_connection_sock *icsk = inet_csk(sk);
+	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req;
+	u32 hash = inet6_synq_hash(raddr, rport, lopt->hash_rnd,
+				   lopt->nr_table_entries);
 
-	for (req = lopt->syn_table[inet6_synq_hash(raddr, rport,
-						   lopt->hash_rnd,
-						   lopt->nr_table_entries)];
-	     req != NULL;
-	     req = req->dl_next) {
+	write_lock(&icsk->icsk_accept_queue.syn_wait_lock);
+	for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) {
		const struct inet_request_sock *ireq = inet_rsk(req);
 
		if (ireq->ir_rmt_port == rport &&
@@ -134,12 +133,14 @@ struct request_sock *inet6_csk_search_req(const struct sock *sk,
		    ipv6_addr_equal(&ireq->ir_v6_rmt_addr, raddr) &&
		    ipv6_addr_equal(&ireq->ir_v6_loc_addr, laddr) &&
		    (!ireq->ir_iif || ireq->ir_iif == iif)) {
+			atomic_inc(&req->rsk_refcnt);
			WARN_ON(req->sk != NULL);
-			return req;
+			break;
		}
	}
+	write_unlock(&icsk->icsk_accept_queue.syn_wait_lock);
 
-	return NULL;
+	return req;
 }
 EXPORT_SYMBOL_GPL(inet6_csk_search_req);
 
diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c
index da5823e5e5a7..2819137fc87d 100644
--- a/net/ipv6/syncookies.c
+++ b/net/ipv6/syncookies.c
@@ -222,7 +222,6 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 
	ireq->ir_mark = inet_request_mark(sk, skb);
 
-	req->expires = 0UL;
	req->num_retrans = 0;
	ireq->snd_wscale = tcp_opt.snd_wscale;
	ireq->sack_ok = tcp_opt.sack_ok;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 146f123b52c9..6e3f90db038c 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -421,11 +421,13 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 
		if (seq != tcp_rsk(req)->snt_isn) {
			NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
+			reqsk_put(req);
			goto out;
		}
 
		inet_csk_reqsk_queue_drop(sk, req);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+		reqsk_put(req);
		goto out;
 
	case TCP_SYN_SENT:
@@ -988,9 +990,11 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
	req = inet6_csk_search_req(sk, th->source,
				   &ipv6_hdr(skb)->saddr,
				   &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
-	if (req)
-		return tcp_check_req(sk, skb, req, false);
-
+	if (req) {
+		nsk = tcp_check_req(sk, skb, req, false);
+		reqsk_put(req);
+		return nsk;
+	}
	nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
					 &ipv6_hdr(skb)->saddr, th->source,
					 &ipv6_hdr(skb)->daddr, ntohs(th->dest),
@@ -1670,7 +1674,7 @@ static void tcp_v6_destroy_sock(struct sock *sk)
 static void get_openreq6(struct seq_file *seq,
			 struct request_sock *req, int i, kuid_t uid)
 {
-	int ttd = req->expires - jiffies;
+	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
 