diff options
author | Eric Dumazet <eric.dumazet@gmail.com> | 2009-12-03 22:47:42 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2009-12-08 23:17:51 -0500 |
commit | 3cdaedae635b17ce23c738ce7d364b442310cdec (patch) | |
tree | af07cdf6c31cca8d1a094bd104efa65e1e95e270 /net/ipv4/inet_timewait_sock.c | |
parent | 9327f7053e3993c125944fdb137a0618319ef2a0 (diff) |
tcp: Fix a connect() race with timewait sockets
When we find a timewait connection in __inet_hash_connect() and reuse
it for a new connection request, we have a race window, releasing bind
list lock and reacquiring it in __inet_twsk_kill() to remove timewait
socket from list.
Another thread might find the timewait socket we already chose, leading to
list corruption and crashes.
Fix is to remove timewait socket from bind list before releasing the bind lock.
Note: This problem happens if sysctl_tcp_tw_reuse is set.
Reported-by: kapil dakhane <kdakhane@gmail.com>
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/ipv4/inet_timewait_sock.c')
-rw-r--r-- | net/ipv4/inet_timewait_sock.c | 29 |
1 files changed, 21 insertions, 8 deletions
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index 0fdf45e4c90c..bf4b1e2a4305 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c | |||
@@ -29,12 +29,29 @@ int inet_twsk_unhash(struct inet_timewait_sock *tw) | |||
29 | return 1; | 29 | return 1; |
30 | } | 30 | } |
31 | 31 | ||
32 | /* | ||
33 | * unhash a timewait socket from bind hash | ||
34 | * lock must be held by caller | ||
35 | */ | ||
36 | int inet_twsk_bind_unhash(struct inet_timewait_sock *tw, | ||
37 | struct inet_hashinfo *hashinfo) | ||
38 | { | ||
39 | struct inet_bind_bucket *tb = tw->tw_tb; | ||
40 | |||
41 | if (!tb) | ||
42 | return 0; | ||
43 | |||
44 | __hlist_del(&tw->tw_bind_node); | ||
45 | tw->tw_tb = NULL; | ||
46 | inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb); | ||
47 | return 1; | ||
48 | } | ||
49 | |||
32 | /* Must be called with locally disabled BHs. */ | 50 | /* Must be called with locally disabled BHs. */ |
33 | static void __inet_twsk_kill(struct inet_timewait_sock *tw, | 51 | static void __inet_twsk_kill(struct inet_timewait_sock *tw, |
34 | struct inet_hashinfo *hashinfo) | 52 | struct inet_hashinfo *hashinfo) |
35 | { | 53 | { |
36 | struct inet_bind_hashbucket *bhead; | 54 | struct inet_bind_hashbucket *bhead; |
37 | struct inet_bind_bucket *tb; | ||
38 | int refcnt; | 55 | int refcnt; |
39 | /* Unlink from established hashes. */ | 56 | /* Unlink from established hashes. */ |
40 | spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash); | 57 | spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash); |
@@ -46,15 +63,11 @@ static void __inet_twsk_kill(struct inet_timewait_sock *tw, | |||
46 | /* Disassociate with bind bucket. */ | 63 | /* Disassociate with bind bucket. */ |
47 | bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num, | 64 | bhead = &hashinfo->bhash[inet_bhashfn(twsk_net(tw), tw->tw_num, |
48 | hashinfo->bhash_size)]; | 65 | hashinfo->bhash_size)]; |
66 | |||
49 | spin_lock(&bhead->lock); | 67 | spin_lock(&bhead->lock); |
50 | tb = tw->tw_tb; | 68 | refcnt += inet_twsk_bind_unhash(tw, hashinfo); |
51 | if (tb) { | ||
52 | __hlist_del(&tw->tw_bind_node); | ||
53 | tw->tw_tb = NULL; | ||
54 | inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb); | ||
55 | refcnt++; | ||
56 | } | ||
57 | spin_unlock(&bhead->lock); | 69 | spin_unlock(&bhead->lock); |
70 | |||
58 | #ifdef SOCK_REFCNT_DEBUG | 71 | #ifdef SOCK_REFCNT_DEBUG |
59 | if (atomic_read(&tw->tw_refcnt) != 1) { | 72 | if (atomic_read(&tw->tw_refcnt) != 1) { |
60 | printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n", | 73 | printk(KERN_DEBUG "%s timewait_sock %p refcnt=%d\n", |