aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv4/inet_hashtables.c
diff options
context:
space:
mode:
authorEric Dumazet <eric.dumazet@gmail.com>2009-12-02 17:31:19 -0500
committerDavid S. Miller <davem@davemloft.net>2009-12-03 19:17:43 -0500
commit13475a30b66cdb9250a34052c19ac98847373030 (patch)
tree5f28f671092c2948726fdde92e20c3371cfceb77 /net/ipv4/inet_hashtables.c
parentff33a6e2ab97f4cde484cdf1a41778af6d6b7cff (diff)
tcp: connect() race with timewait reuse
It's currently possible that several threads issuing a connect() find the same timewait socket and try to reuse it, leading to list corruptions. The condition for the bug is that these threads bound their sockets to the same address/port as the to-be-found timewait socket, and connected to the same target. (SO_REUSEADDR needed) To fix this problem, we unhash the timewait socket while holding the ehash lock, to make sure lookups/changes will be serialized. Only the first thread finds the timewait socket; the other ones find the established socket and return an EADDRNOTAVAIL error. This second version takes into account Evgeniy's review and makes sure inet_twsk_put() is called outside of locked sections. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/inet_hashtables.c')
-rw-r--r--net/ipv4/inet_hashtables.c10
1 files changed, 7 insertions, 3 deletions
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 94ef51aa5bc9..21e5e32d8c60 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -286,6 +286,7 @@ static int __inet_check_established(struct inet_timewait_death_row *death_row,
286 struct sock *sk2; 286 struct sock *sk2;
287 const struct hlist_nulls_node *node; 287 const struct hlist_nulls_node *node;
288 struct inet_timewait_sock *tw; 288 struct inet_timewait_sock *tw;
289 int twrefcnt = 0;
289 290
290 spin_lock(lock); 291 spin_lock(lock);
291 292
@@ -318,20 +319,23 @@ unique:
318 sk->sk_hash = hash; 319 sk->sk_hash = hash;
319 WARN_ON(!sk_unhashed(sk)); 320 WARN_ON(!sk_unhashed(sk));
320 __sk_nulls_add_node_rcu(sk, &head->chain); 321 __sk_nulls_add_node_rcu(sk, &head->chain);
322 if (tw) {
323 twrefcnt = inet_twsk_unhash(tw);
324 NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
325 }
321 spin_unlock(lock); 326 spin_unlock(lock);
327 if (twrefcnt)
328 inet_twsk_put(tw);
322 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 329 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
323 330
324 if (twp) { 331 if (twp) {
325 *twp = tw; 332 *twp = tw;
326 NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
327 } else if (tw) { 333 } else if (tw) {
328 /* Silly. Should hash-dance instead... */ 334 /* Silly. Should hash-dance instead... */
329 inet_twsk_deschedule(tw, death_row); 335 inet_twsk_deschedule(tw, death_row);
330 NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
331 336
332 inet_twsk_put(tw); 337 inet_twsk_put(tw);
333 } 338 }
334
335 return 0; 339 return 0;
336 340
337not_unique: 341not_unique: