aboutsummaryrefslogtreecommitdiffstats
path: root/net/netlink
diff options
context:
space:
mode:
authorPatrick McHardy <kaber@trash.net>2013-04-17 02:47:00 -0400
committerDavid S. Miller <davem@davemloft.net>2013-04-19 14:57:57 -0400
commitcf0a018ac669955c10e4fca24fa55dde58434e9a (patch)
treeb2c087fe536127cf30639a773a8da78452994041 /net/netlink
parent1298ca4671acb10310baa550ed044c553e3a3387 (diff)
netlink: add netlink_skb_set_owner_r()
For mmap'ed I/O a netlink specific skb destructor needs to be invoked after the final kfree_skb() to clean up state. This doesn't work currently since the skb's ownership is transferred to the receiving socket using skb_set_owner_r(), which orphans the skb, thereby invoking the destructor prematurely. Since netlink doesn't account skbs to the originating socket, there's no need to orphan the skb. Add a netlink specific skb_set_owner_r() variant that does not orphan the skb and use a netlink specific destructor to call sock_rfree(). Signed-off-by: Patrick McHardy <kaber@trash.net> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/netlink')
-rw-r--r--net/netlink/af_netlink.c20
1 file changed, 17 insertions, 3 deletions
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 26779c24b1d4..58b9025978fa 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -119,6 +119,20 @@ static void netlink_consume_callback(struct netlink_callback *cb)
119 kfree(cb); 119 kfree(cb);
120} 120}
121 121
122static void netlink_skb_destructor(struct sk_buff *skb)
123{
124 sock_rfree(skb);
125}
126
127static void netlink_skb_set_owner_r(struct sk_buff *skb, struct sock *sk)
128{
129 WARN_ON(skb->sk != NULL);
130 skb->sk = sk;
131 skb->destructor = netlink_skb_destructor;
132 atomic_add(skb->truesize, &sk->sk_rmem_alloc);
133 sk_mem_charge(sk, skb->truesize);
134}
135
122static void netlink_sock_destruct(struct sock *sk) 136static void netlink_sock_destruct(struct sock *sk)
123{ 137{
124 struct netlink_sock *nlk = nlk_sk(sk); 138 struct netlink_sock *nlk = nlk_sk(sk);
@@ -820,7 +834,7 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
820 } 834 }
821 return 1; 835 return 1;
822 } 836 }
823 skb_set_owner_r(skb, sk); 837 netlink_skb_set_owner_r(skb, sk);
824 return 0; 838 return 0;
825} 839}
826 840
@@ -890,7 +904,7 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
890 ret = -ECONNREFUSED; 904 ret = -ECONNREFUSED;
891 if (nlk->netlink_rcv != NULL) { 905 if (nlk->netlink_rcv != NULL) {
892 ret = skb->len; 906 ret = skb->len;
893 skb_set_owner_r(skb, sk); 907 netlink_skb_set_owner_r(skb, sk);
894 NETLINK_CB(skb).sk = ssk; 908 NETLINK_CB(skb).sk = ssk;
895 nlk->netlink_rcv(skb); 909 nlk->netlink_rcv(skb);
896 consume_skb(skb); 910 consume_skb(skb);
@@ -962,7 +976,7 @@ static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
962 976
963 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && 977 if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
964 !test_bit(NETLINK_CONGESTED, &nlk->state)) { 978 !test_bit(NETLINK_CONGESTED, &nlk->state)) {
965 skb_set_owner_r(skb, sk); 979 netlink_skb_set_owner_r(skb, sk);
966 __netlink_sendskb(sk, skb); 980 __netlink_sendskb(sk, skb);
967 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1); 981 return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
968 } 982 }