aboutsummaryrefslogtreecommitdiffstats
path: root/net/netlink/af_netlink.c
diff options
context:
space:
mode:
authorHerbert Xu <herbert@gondor.apana.org.au>2007-05-03 06:17:14 -0400
committerDavid S. Miller <davem@davemloft.net>2007-05-03 06:17:14 -0400
commit3f660d66dfbc13ea4b61d3865851b348444c24b4 (patch)
tree3e2e67f1589d3568423673651d682fd14322b93b /net/netlink/af_netlink.c
parentbe52178b9f73969b583c6a781ca613f4e601221a (diff)
[NETLINK]: Kill CB only when socket is unused
Since we can still receive packets until all references to the socket are gone, we don't need to kill the CB until that happens. This also aligns ourselves with the receive-queue purging, which happens at that point.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/netlink/af_netlink.c')
-rw-r--r--net/netlink/af_netlink.c29
1 file changed, 15 insertions(+), 14 deletions(-)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 42d2fb94eff1..7fc6b4da4f02 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -140,6 +140,15 @@ static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
140 140
141static void netlink_sock_destruct(struct sock *sk) 141static void netlink_sock_destruct(struct sock *sk)
142{ 142{
143 struct netlink_sock *nlk = nlk_sk(sk);
144
145 BUG_ON(mutex_is_locked(nlk_sk(sk)->cb_mutex));
146 if (nlk->cb) {
147 if (nlk->cb->done)
148 nlk->cb->done(nlk->cb);
149 netlink_destroy_callback(nlk->cb);
150 }
151
143 skb_queue_purge(&sk->sk_receive_queue); 152 skb_queue_purge(&sk->sk_receive_queue);
144 153
145 if (!sock_flag(sk, SOCK_DEAD)) { 154 if (!sock_flag(sk, SOCK_DEAD)) {
@@ -148,7 +157,6 @@ static void netlink_sock_destruct(struct sock *sk)
148 } 157 }
149 BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc)); 158 BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
150 BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc)); 159 BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
151 BUG_TRAP(!nlk_sk(sk)->cb);
152 BUG_TRAP(!nlk_sk(sk)->groups); 160 BUG_TRAP(!nlk_sk(sk)->groups);
153} 161}
154 162
@@ -456,17 +464,10 @@ static int netlink_release(struct socket *sock)
456 sock_orphan(sk); 464 sock_orphan(sk);
457 nlk = nlk_sk(sk); 465 nlk = nlk_sk(sk);
458 466
459 mutex_lock(nlk->cb_mutex); 467 /*
460 if (nlk->cb) { 468 * OK. Socket is unlinked, any packets that arrive now
461 if (nlk->cb->done) 469 * will be purged.
462 nlk->cb->done(nlk->cb); 470 */
463 netlink_destroy_callback(nlk->cb);
464 nlk->cb = NULL;
465 }
466 mutex_unlock(nlk->cb_mutex);
467
468 /* OK. Socket is unlinked, and, therefore,
469 no new packets will arrive */
470 471
471 sock->sk = NULL; 472 sock->sk = NULL;
472 wake_up_interruptible_all(&nlk->wait); 473 wake_up_interruptible_all(&nlk->wait);
@@ -1426,9 +1427,9 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
1426 return -ECONNREFUSED; 1427 return -ECONNREFUSED;
1427 } 1428 }
1428 nlk = nlk_sk(sk); 1429 nlk = nlk_sk(sk);
1429 /* A dump or destruction is in progress... */ 1430 /* A dump is in progress... */
1430 mutex_lock(nlk->cb_mutex); 1431 mutex_lock(nlk->cb_mutex);
1431 if (nlk->cb || sock_flag(sk, SOCK_DEAD)) { 1432 if (nlk->cb) {
1432 mutex_unlock(nlk->cb_mutex); 1433 mutex_unlock(nlk->cb_mutex);
1433 netlink_destroy_callback(cb); 1434 netlink_destroy_callback(cb);
1434 sock_put(sk); 1435 sock_put(sk);