path: root/net/ipv4
author    Eric Dumazet <eric.dumazet@gmail.com>  2009-12-03 22:47:42 -0500
committer David S. Miller <davem@davemloft.net>  2009-12-08 23:17:51 -0500
commit    3cdaedae635b17ce23c738ce7d364b442310cdec (patch)
tree      af07cdf6c31cca8d1a094bd104efa65e1e95e270 /net/ipv4
parent    9327f7053e3993c125944fdb137a0618319ef2a0 (diff)
tcp: Fix a connect() race with timewait sockets
When we find a timewait connection in __inet_hash_connect() and reuse it
for a new connection request, we have a race window, releasing bind
list lock and reacquiring it in __inet_twsk_kill() to remove timewait
socket from list.

Another thread might find the timewait socket we already chose, leading
to list corruption and crashes.

Fix is to remove timewait socket from bind list before releasing the
bind lock.

Note: This problem happens if sysctl_tcp_tw_reuse is set.

Reported-by: kapil dakhane <kdakhane@gmail.com>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
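The locking pattern the patch moves away from, and the one it moves to, can be illustrated outside the kernel. Below is a minimal userspace sketch (hypothetical names; a pthread mutex stands in for the bind-hash spinlock and a plain singly linked list for the bind bucket). It only illustrates the pattern: pick_racy() leaves the chosen entry visible to other threads after dropping the lock, much as the old code deferred the unlink to __inet_twsk_kill(), while pick_fixed() unlinks the entry before the lock is released, as __inet_hash_connect() now does via the new inet_twsk_bind_unhash() helper.

/* Userspace sketch only, not kernel code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
        struct entry *next;
        int port;
};

static pthread_mutex_t bind_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *bind_list;

/* Racy variant: choose an entry under the lock but leave it on the
 * list; the unlink happens "later", after the lock has been dropped
 * and reacquired, so another thread can find the same entry. */
static struct entry *pick_racy(int port)
{
        struct entry *e;

        pthread_mutex_lock(&bind_lock);
        for (e = bind_list; e; e = e->next)
                if (e->port == port)
                        break;
        pthread_mutex_unlock(&bind_lock);  /* race window: e still listed */
        return e;
}

/* Fixed variant: unlink the chosen entry before releasing the lock,
 * so no other thread can find and reuse it. */
static struct entry *pick_fixed(int port)
{
        struct entry **pp, *e = NULL;

        pthread_mutex_lock(&bind_lock);
        for (pp = &bind_list; *pp; pp = &(*pp)->next) {
                if ((*pp)->port == port) {
                        e = *pp;
                        *pp = e->next;     /* removed while still locked */
                        e->next = NULL;
                        break;
                }
        }
        pthread_mutex_unlock(&bind_lock);
        return e;
}

int main(void)
{
        struct entry *e = calloc(1, sizeof(*e));

        e->port = 8080;
        e->next = bind_list;
        bind_list = e;

        printf("racy pick : %p (still on the list)\n", (void *)pick_racy(8080));
        printf("fixed pick: %p (already unlinked)\n", (void *)pick_fixed(8080));
        free(e);
        return 0;
}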
Diffstat (limited to 'net/ipv4')
-rw-r--r--	net/ipv4/inet_hashtables.c	|  2 ++
-rw-r--r--	net/ipv4/inet_timewait_sock.c	| 29 +++++++++++++++++++++--------
2 files changed, 23 insertions, 8 deletions
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index c4201b7ece38..2b79377b468d 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -502,6 +502,8 @@ ok:
                         inet_sk(sk)->inet_sport = htons(port);
                         twrefcnt += hash(sk, tw);
                 }
+                if (tw)
+                        twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
                 spin_unlock(&head->lock);
 
                 if (tw) {
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 0fdf45e4c90c..bf4b1e2a4305 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -29,12 +29,29 @@ int inet_twsk_unhash(struct inet_timewait_sock *tw)
         return 1;
 }
 
+/*
+ * unhash a timewait socket from bind hash
+ * lock must be hold by caller
+ */
+int inet_twsk_bind_unhash(struct inet_timewait_sock *tw,
+                          struct inet_hashinfo *hashinfo)
+{
+        struct inet_bind_bucket *tb = tw->tw_tb;
+
+        if (!tb)
+                return 0;
+
+        __hlist_del(&tw->tw_bind_node);
+        tw->tw_tb = NULL;
+        inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
+        return 1;
+}
+
 /* Must be called with locally disabled BHs. */
 static void __inet_twsk_kill(struct inet_timewait_sock *tw,
                              struct inet_hashinfo *hashinfo)
 {
         struct inet_bind_hashbucket *bhead;
-        struct inet_bind_bucket *tb;
         int refcnt;
         /* Unlink from established hashes. */
         spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash);
@@ -46,15 +63,11 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw,
         /* Disassociate with bind bucket. */
         bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num,
                         hashinfo->bhash_size)];
+
         spin_lock(&bhead->lock);
-        tb = tw->tw_tb;
-        if (tb) {
-                __hlist_del(&tw->tw_bind_node);
-                tw->tw_tb = NULL;
-                inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
-                refcnt++;
-        }
+        refcnt += inet_twsk_bind_unhash(tw, hashinfo);
         spin_unlock(&bhead->lock);
+
 #ifdef SOCK_REFCNT_DEBUG
         if (atomic_read(&tw->tw_refcnt) != 1) {
                 printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n",