author		Eric Dumazet <eric.dumazet@gmail.com>	2009-12-02 19:49:01 -0500
committer	David S. Miller <davem@davemloft.net>	2009-12-03 19:17:44 -0500
commit		47e1c323069bcef0acb8a2b48921688573f5ca63
tree		bebfe86ae36055adae2100200e87727d21a7b762 /net
parent		13475a30b66cdb9250a34052c19ac98847373030
tcp: fix a timewait refcnt race
After the TCP RCU conversion, tw->tw_refcnt should not be set to 1 in
inet_twsk_alloc(): that would allow an RCU reader to get this timewait
socket before we have stabilized it.

The only choice we have is to set tw_refcnt to 0 in inet_twsk_alloc(),
then atomic_add() the needed references later, once everything is done.

The location of this atomic_add() is tricky, because we don't want
another writer to find this timewait in ehash while tw_refcnt is still
zero!

Thanks to Kapil Dakhane for tests and reports.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
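The refcount discipline the patch adopts can be illustrated outside the kernel. Below is a minimal userspace sketch of the publish side, using C11 atomics rather than the kernel's atomic_t, with hypothetical tw_obj/tw_alloc/tw_publish names: the object starts with a refcount of zero, and all references are granted in one step only after setup is complete (in the kernel this happens under the ehash bucket lock, right after the timewait has been linked).

#include <stdatomic.h>
#include <stdlib.h>

/* Hypothetical stand-in for inet_timewait_sock; not kernel code. */
struct tw_obj {
	atomic_int refcnt;	/* plays the role of tw->tw_refcnt */
	int state;		/* stands in for the remaining timewait fields */
};

static struct tw_obj *tw_alloc(void)
{
	struct tw_obj *tw = malloc(sizeof(*tw));

	if (!tw)
		return NULL;
	/*
	 * Keep the refcount at 0, as the patched inet_twsk_alloc() does,
	 * so a lockless reader cannot grab a half-built object.
	 */
	atomic_init(&tw->refcnt, 0);
	tw->state = 0;
	return tw;
}

static void tw_publish(struct tw_obj *tw)
{
	/* Finish every field first, while no reader holds a reference... */
	tw->state = 1;
	/*
	 * ...then grant all references at once, mirroring
	 * atomic_add(1 + 1 + 1, &tw->tw_refcnt): one per hash link, plus
	 * (an assumption about the third "+ 1") the creator's own reference.
	 */
	atomic_fetch_add(&tw->refcnt, 1 + 1 + 1);
}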
Diffstat (limited to 'net')
-rw-r--r--	net/ipv4/inet_timewait_sock.c	19
1 file changed, 16 insertions(+), 3 deletions(-)
diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c
index 11a107a5af4..0fdf45e4c90 100644
--- a/net/ipv4/inet_timewait_sock.c
+++ b/net/ipv4/inet_timewait_sock.c
@@ -109,7 +109,6 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
         tw->tw_tb = icsk->icsk_bind_hash;
         WARN_ON(!icsk->icsk_bind_hash);
         inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
-        atomic_inc(&tw->tw_refcnt);
         spin_unlock(&bhead->lock);
 
         spin_lock(lock);
@@ -119,13 +118,22 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
          * Should be done before removing sk from established chain
          * because readers are lockless and search established first.
          */
-        atomic_inc(&tw->tw_refcnt);
         inet_twsk_add_node_rcu(tw, &ehead->twchain);
 
         /* Step 3: Remove SK from established hash. */
         if (__sk_nulls_del_node_init_rcu(sk))
                 sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
 
+        /*
+         * Notes :
+         * - We initially set tw_refcnt to 0 in inet_twsk_alloc()
+         * - We add one reference for the bhash link
+         * - We add one reference for the ehash link
+         * - We want this refcnt update done before allowing other
+         *   threads to find this tw in ehash chain.
+         */
+        atomic_add(1 + 1 + 1, &tw->tw_refcnt);
+
         spin_unlock(lock);
 }
 
@@ -157,7 +165,12 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, const int stat
                 tw->tw_transparent = inet->transparent;
                 tw->tw_prot = sk->sk_prot_creator;
                 twsk_net_set(tw, hold_net(sock_net(sk)));
-                atomic_set(&tw->tw_refcnt, 1);
+                /*
+                 * Because we use RCU lookups, we should not set tw_refcnt
+                 * to a non null value before everything is setup for this
+                 * timewait socket.
+                 */
+                atomic_set(&tw->tw_refcnt, 0);
                 inet_twsk_dead_node_init(tw);
                 __module_get(tw->tw_prot->owner);
         }
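For context, the reason a zero refcount keeps a just-linked timewait safe from lockless readers is that an RCU-style lookup may only take a reference with an increment-if-not-zero operation (atomic_inc_not_zero() on the socket refcount in the kernel), after which the lookup result is re-validated or the search restarted. A minimal userspace sketch of that reader-side check follows, reusing the hypothetical tw_obj type from the sketch above.

#include <stdatomic.h>
#include <stdbool.h>

/* Reader-side helper for the hypothetical tw_obj type sketched earlier. */
static bool tw_try_get(struct tw_obj *tw)
{
	int old = atomic_load(&tw->refcnt);

	/*
	 * Increment-if-not-zero: an object still at refcnt 0 (not yet fully
	 * published, or already on its way to destruction) is refused, and
	 * the caller must drop it and retry the lookup.
	 */
	while (old != 0) {
		if (atomic_compare_exchange_weak(&tw->refcnt, &old, old + 1))
			return true;
	}
	return false;
}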