aboutsummaryrefslogtreecommitdiffstats
path: root/net/ipv6
diff options
context:
space:
mode:
authorEric Dumazet <eric.dumazet@gmail.com>2009-12-02 17:31:19 -0500
committerDavid S. Miller <davem@davemloft.net>2009-12-03 19:17:43 -0500
commit13475a30b66cdb9250a34052c19ac98847373030 (patch)
tree5f28f671092c2948726fdde92e20c3371cfceb77 /net/ipv6
parentff33a6e2ab97f4cde484cdf1a41778af6d6b7cff (diff)
tcp: connect() race with timewait reuse
It's currently possible that several threads issuing a connect() find the same timewait socket and try to reuse it, leading to list corruptions. The condition for the bug is that these threads bound their sockets to the same address/port as the to-be-found timewait socket, and connected to the same target. (SO_REUSEADDR needed) To fix this problem, we could unhash the timewait socket while holding the ehash lock, to make sure lookups/changes will be serialized. Only the first thread finds the timewait socket; the other ones find the established socket and return an EADDRNOTAVAIL error. This second version takes into account Evgeniy's review and makes sure inet_twsk_put() is called outside of locked sections. Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv6')
-rw-r--r--net/ipv6/inet6_hashtables.c15
1 files changed, 10 insertions, 5 deletions
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 00c6a3e6cddf..c813e294ec0c 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -223,6 +223,7 @@ static int __inet6_check_established(struct inet_timewait_death_row *death_row,
223 struct sock *sk2; 223 struct sock *sk2;
224 const struct hlist_nulls_node *node; 224 const struct hlist_nulls_node *node;
225 struct inet_timewait_sock *tw; 225 struct inet_timewait_sock *tw;
226 int twrefcnt = 0;
226 227
227 spin_lock(lock); 228 spin_lock(lock);
228 229
@@ -250,19 +251,23 @@ unique:
250 * in hash table socket with a funny identity. */ 251 * in hash table socket with a funny identity. */
251 inet->inet_num = lport; 252 inet->inet_num = lport;
252 inet->inet_sport = htons(lport); 253 inet->inet_sport = htons(lport);
254 sk->sk_hash = hash;
253 WARN_ON(!sk_unhashed(sk)); 255 WARN_ON(!sk_unhashed(sk));
254 __sk_nulls_add_node_rcu(sk, &head->chain); 256 __sk_nulls_add_node_rcu(sk, &head->chain);
255 sk->sk_hash = hash; 257 if (tw) {
258 twrefcnt = inet_twsk_unhash(tw);
259 NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
260 }
256 spin_unlock(lock); 261 spin_unlock(lock);
262 if (twrefcnt)
263 inet_twsk_put(tw);
257 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1); 264 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, 1);
258 265
259 if (twp != NULL) { 266 if (twp) {
260 *twp = tw; 267 *twp = tw;
261 NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED); 268 } else if (tw) {
262 } else if (tw != NULL) {
263 /* Silly. Should hash-dance instead... */ 269 /* Silly. Should hash-dance instead... */
264 inet_twsk_deschedule(tw, death_row); 270 inet_twsk_deschedule(tw, death_row);
265 NET_INC_STATS_BH(net, LINUX_MIB_TIMEWAITRECYCLED);
266 271
267 inet_twsk_put(tw); 272 inet_twsk_put(tw);
268 } 273 }