author	Herbert Xu <herbert@gondor.apana.org.au>	2016-12-05 02:28:21 -0500
committer	David S. Miller <davem@davemloft.net>	2016-12-05 19:43:42 -0500
commit	ed5d7788a934a4b6d6d025e948ed4da496b4f12e (patch)
tree	9f10caf66c776a0ea3d55504e647897408105996 /net
parent	ffe3bb85c19e1dbf96cc13aad823ae0a8855d066 (diff)
netlink: Do not schedule work from sk_destruct
It is wrong to schedule work from sk_destruct with the socket as the
work's memory reserve, because the socket is freed immediately after
sk_destruct returns.

Instead, the deferral should happen prior to sk_free.

This patch does just that.

Fixes: 707693c8a498 ("netlink: Call cb->done from a worker thread")
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Tested-by: Andrey Konovalov <andreyknvl@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
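[Note: the following is a minimal userspace sketch, not kernel code. It
reproduces the buggy shape this patch removes: a destructor schedules
deferred work whose work item is embedded in the object itself, while the
caller frees that object as soon as the destructor returns, so the worker
later touches freed memory. All names here (fake_sock, fake_work,
schedule_fake_work) are hypothetical.]

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct fake_work {
	void (*fn)(struct fake_work *);
};

struct fake_sock {
	struct fake_work work;	/* embedded work item, like nlk->work */
	int payload;
};

static void *worker(void *arg)
{
	struct fake_work *w = arg;

	sleep(1);		/* the object is long gone by now */
	w->fn(w);		/* use-after-free: w lives in freed memory */
	return NULL;
}

static void schedule_fake_work(struct fake_work *w)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, w);
	pthread_detach(t);
}

static void done(struct fake_work *w)
{
	struct fake_sock *sk = (struct fake_sock *)w;	/* work is the first member */

	printf("payload = %d\n", sk->payload);	/* reads freed memory */
}

/* Buggy pattern: defer work from the destructor itself. */
static void sock_destruct(struct fake_sock *sk)
{
	sk->work.fn = done;
	schedule_fake_work(&sk->work);	/* wrong: sk is freed right after this */
}

static void sock_free(struct fake_sock *sk)
{
	sock_destruct(sk);
	free(sk);		/* frees sk before the worker ever runs */
}

int main(void)
{
	struct fake_sock *sk = calloc(1, sizeof(*sk));

	sk->payload = 42;
	sock_free(sk);
	sleep(2);		/* give the racing worker time to fire */
	return 0;
}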
Diffstat (limited to 'net')
-rw-r--r--  net/netlink/af_netlink.c | 32 +++++++++++++++-----------------
 1 file changed, 15 insertions(+), 17 deletions(-)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 602e5ebe9db3..246f29d365c0 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -322,11 +322,13 @@ static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
 	sk_mem_charge(sk, skb->truesize);
 }
 
-static void __netlink_sock_destruct(struct sock *sk)
+static void netlink_sock_destruct(struct sock *sk)
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
 
 	if (nlk->cb_running) {
+		if (nlk->cb.done)
+			nlk->cb.done(&nlk->cb);
 		module_put(nlk->cb.module);
 		kfree_skb(nlk->cb.skb);
 	}
@@ -348,21 +350,7 @@ static void netlink_sock_destruct_work(struct work_struct *work)
 	struct netlink_sock *nlk = container_of(work, struct netlink_sock,
 						work);
 
-	nlk->cb.done(&nlk->cb);
-	__netlink_sock_destruct(&nlk->sk);
-}
-
-static void netlink_sock_destruct(struct sock *sk)
-{
-	struct netlink_sock *nlk = nlk_sk(sk);
-
-	if (nlk->cb_running && nlk->cb.done) {
-		INIT_WORK(&nlk->work, netlink_sock_destruct_work);
-		schedule_work(&nlk->work);
-		return;
-	}
-
-	__netlink_sock_destruct(sk);
+	sk_free(&nlk->sk);
 }
 
 /* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on
@@ -667,8 +655,18 @@ out_module:
 static void deferred_put_nlk_sk(struct rcu_head *head)
 {
 	struct netlink_sock *nlk = container_of(head, struct netlink_sock, rcu);
+	struct sock *sk = &nlk->sk;
+
+	if (!atomic_dec_and_test(&sk->sk_refcnt))
+		return;
+
+	if (nlk->cb_running && nlk->cb.done) {
+		INIT_WORK(&nlk->work, netlink_sock_destruct_work);
+		schedule_work(&nlk->work);
+		return;
+	}
 
-	sock_put(&nlk->sk);
+	sk_free(sk);
 }
 
 static int netlink_release(struct socket *sock)
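[Note: continuing the hypothetical userspace sketch above, this is an
illustrative rendering of the corrected ordering the patch adopts: the
holder of the last reference decides *before* the final free whether
deferral is needed, hands the whole object to the worker, and the worker
performs the free itself once the deferred callback has run. worker_fixed
and sock_free_fixed are invented names, not kernel APIs.]

/* Corrected shape, reusing the fake_sock/fake_work types from the
 * sketch above: the worker owns the object and frees it only after
 * the deferred callback has completed. */
static void *worker_fixed(void *arg)
{
	struct fake_sock *sk = arg;

	sk->work.fn(&sk->work);	/* safe: sk is still allocated here */
	free(sk);		/* the worker performs the final free */
	return NULL;
}

static void sock_free_fixed(struct fake_sock *sk)
{
	if (sk->work.fn) {	/* deferral decided prior to the free */
		pthread_t t;

		pthread_create(&t, NULL, worker_fixed, sk);
		pthread_detach(t);
		return;
	}
	free(sk);
}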