Diffstat (limited to 'net/ipv4/ip_sockglue.c')
-rw-r--r--	net/ipv4/ip_sockglue.c	19
1 file changed, 15 insertions(+), 4 deletions(-)
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 08b9519a24f..47fff528ff3 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -241,9 +241,13 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc)
 struct ip_ra_chain *ip_ra_chain;
 static DEFINE_SPINLOCK(ip_ra_lock);
 
-static void ip_ra_free_rcu(struct rcu_head *head)
+
+static void ip_ra_destroy_rcu(struct rcu_head *head)
 {
-	kfree(container_of(head, struct ip_ra_chain, rcu));
+	struct ip_ra_chain *ra = container_of(head, struct ip_ra_chain, rcu);
+
+	sock_put(ra->saved_sk);
+	kfree(ra);
 }
 
 int ip_ra_control(struct sock *sk, unsigned char on,
@@ -264,13 +268,20 @@ int ip_ra_control(struct sock *sk, unsigned char on,
 			kfree(new_ra);
 			return -EADDRINUSE;
 		}
+		/* don't let ip_call_ra_chain() use sk again */
+		ra->sk = NULL;
 		rcu_assign_pointer(*rap, ra->next);
 		spin_unlock_bh(&ip_ra_lock);
 
 		if (ra->destructor)
 			ra->destructor(sk);
-		sock_put(sk);
-		call_rcu(&ra->rcu, ip_ra_free_rcu);
+		/*
+		 * Delay sock_put(sk) and kfree(ra) until after one RCU
+		 * grace period, so that ip_call_ra_chain() never needs
+		 * to mess with socket refcounts.
+		 */
+		ra->saved_sk = sk;
+		call_rcu(&ra->rcu, ip_ra_destroy_rcu);
 		return 0;
 	}
 }
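
The pattern the patch adopts, in a minimal sketch: readers walk the chain under rcu_read_lock() and use ra->sk without ever taking a reference, because the writer first clears ra->sk under the lock (so readers skip the entry) and then hands the final sock_put() to an RCU callback, which cannot run until every reader that might still see the unlinked entry has finished. The names below (ra_entry, ra_list, ra_walk, ra_unlink, deliver) are illustrative stand-ins, not the kernel's own; the real reader is ip_call_ra_chain() in this file, and the real struct is ip_ra_chain in include/net/ip.h.

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <net/sock.h>

/* Stand-in for struct ip_ra_chain, reduced to the fields this patch
 * relies on. saved_sk carries the deferred sock_put() target into the
 * RCU callback. */
struct ra_entry {
	struct ra_entry	*next;
	struct sock	*sk;		/* NULL once an unlink has begun */
	struct sock	*saved_sk;	/* consumed by the RCU callback */
	struct rcu_head	rcu;
};

static struct ra_entry *ra_list;
static DEFINE_SPINLOCK(ra_lock);

/* Runs only after a grace period, when no rcu_read_lock() section that
 * could still see this entry remains, so dropping the last reference
 * and freeing the node here is safe. */
static void ra_destroy_rcu(struct rcu_head *head)
{
	struct ra_entry *ra = container_of(head, struct ra_entry, rcu);

	sock_put(ra->saved_sk);
	kfree(ra);
}

/* Reader side, shaped like ip_call_ra_chain(): no sock_hold() or
 * sock_put() anywhere on this path. */
static void ra_walk(void (*deliver)(struct sock *sk))
{
	struct ra_entry *ra;

	rcu_read_lock();
	for (ra = rcu_dereference(ra_list); ra; ra = rcu_dereference(ra->next)) {
		struct sock *sk = ra->sk;

		if (sk)			/* NULL: unlink already in progress */
			deliver(sk);
	}
	rcu_read_unlock();
}

/* Writer side, shaped like the removal branch of ip_ra_control(). */
static int ra_unlink(struct sock *sk)
{
	struct ra_entry *ra, **rap;

	spin_lock_bh(&ra_lock);
	for (rap = &ra_list; (ra = *rap) != NULL; rap = &ra->next) {
		if (ra->sk != sk)
			continue;
		ra->sk = NULL;			/* readers skip it from now on */
		rcu_assign_pointer(*rap, ra->next);
		spin_unlock_bh(&ra_lock);

		ra->saved_sk = sk;		/* sock_put() deferred to ra_destroy_rcu() */
		call_rcu(&ra->rcu, ra_destroy_rcu);
		return 0;
	}
	spin_unlock_bh(&ra_lock);
	return -ENOENT;
}

Note the ordering on the writer side: the entry is unlinked and ra->sk cleared while still holding the lock, and only then is call_rcu() queued. By the time ra_destroy_rcu() executes, no reader can be using the socket, so the one reference the chain held is dropped without any reader-side refcounting.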