aboutsummaryrefslogtreecommitdiffstats
path: root/net/netlink/af_netlink.c
diff options
context:
space:
mode:
authorDaniel Borkmann <dborkman@redhat.com>2013-06-21 13:38:07 -0400
committerDavid S. Miller <davem@davemloft.net>2013-06-24 19:39:05 -0400
commitbcbde0d449eda7afa8f63280b165c8300dbd00e2 (patch)
treedbe6689bfee850c54006291e939718c685de1158 /net/netlink/af_netlink.c
parent77e2af0312b12dccd5043a7cc9cd49ab6a212996 (diff)
net: netlink: virtual tap device management
Similarly to the networking receive path with ptype_all taps, we add the possibility to register netdevices that are for ARPHRD_NETLINK to the netlink subsystem, so that those can be used for netlink analyzers or debuggers. We do not offer a direct callback function as out-of-tree modules could do crap with it. Instead, a netdevice must be registered properly and only receives a clone, managed by the netlink layer. Symbols are exported as GPL-only. Signed-off-by: Daniel Borkmann <dborkman@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/netlink/af_netlink.c')
-rw-r--r--net/netlink/af_netlink.c107
1 files changed, 107 insertions, 0 deletions
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 275d901d7e46..6967fbcca6c5 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -57,6 +57,7 @@
57#include <linux/audit.h> 57#include <linux/audit.h>
58#include <linux/mutex.h> 58#include <linux/mutex.h>
59#include <linux/vmalloc.h> 59#include <linux/vmalloc.h>
60#include <linux/if_arp.h>
60#include <asm/cacheflush.h> 61#include <asm/cacheflush.h>
61 62
62#include <net/net_namespace.h> 63#include <net/net_namespace.h>
@@ -101,6 +102,9 @@ static atomic_t nl_table_users = ATOMIC_INIT(0);
101 102
102static ATOMIC_NOTIFIER_HEAD(netlink_chain); 103static ATOMIC_NOTIFIER_HEAD(netlink_chain);
103 104
105static DEFINE_SPINLOCK(netlink_tap_lock);
106static struct list_head netlink_tap_all __read_mostly;
107
104static inline u32 netlink_group_mask(u32 group) 108static inline u32 netlink_group_mask(u32 group)
105{ 109{
106 return group ? 1 << (group - 1) : 0; 110 return group ? 1 << (group - 1) : 0;
@@ -111,6 +115,100 @@ static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u
111 return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask]; 115 return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask];
112} 116}
113 117
118int netlink_add_tap(struct netlink_tap *nt)
119{
120 if (unlikely(nt->dev->type != ARPHRD_NETLINK))
121 return -EINVAL;
122
123 spin_lock(&netlink_tap_lock);
124 list_add_rcu(&nt->list, &netlink_tap_all);
125 spin_unlock(&netlink_tap_lock);
126
127 if (nt->module)
128 __module_get(nt->module);
129
130 return 0;
131}
132EXPORT_SYMBOL_GPL(netlink_add_tap);
133
134int __netlink_remove_tap(struct netlink_tap *nt)
135{
136 bool found = false;
137 struct netlink_tap *tmp;
138
139 spin_lock(&netlink_tap_lock);
140
141 list_for_each_entry(tmp, &netlink_tap_all, list) {
142 if (nt == tmp) {
143 list_del_rcu(&nt->list);
144 found = true;
145 goto out;
146 }
147 }
148
149 pr_warn("__netlink_remove_tap: %p not found\n", nt);
150out:
151 spin_unlock(&netlink_tap_lock);
152
153 if (found && nt->module)
154 module_put(nt->module);
155
156 return found ? 0 : -ENODEV;
157}
158EXPORT_SYMBOL_GPL(__netlink_remove_tap);
159
/* Like __netlink_remove_tap(), but additionally waits (synchronize_net)
 * until all in-flight RCU readers of the tap list have finished, so the
 * caller may free @nt immediately afterwards.
 */
int netlink_remove_tap(struct netlink_tap *nt)
{
	int err = __netlink_remove_tap(nt);

	synchronize_net();

	return err;
}
EXPORT_SYMBOL_GPL(netlink_remove_tap);
170
171static int __netlink_deliver_tap_skb(struct sk_buff *skb,
172 struct net_device *dev)
173{
174 struct sk_buff *nskb;
175 int ret = -ENOMEM;
176
177 dev_hold(dev);
178 nskb = skb_clone(skb, GFP_ATOMIC);
179 if (nskb) {
180 nskb->dev = dev;
181 ret = dev_queue_xmit(nskb);
182 if (unlikely(ret > 0))
183 ret = net_xmit_errno(ret);
184 }
185
186 dev_put(dev);
187 return ret;
188}
189
190static void __netlink_deliver_tap(struct sk_buff *skb)
191{
192 int ret;
193 struct netlink_tap *tmp;
194
195 list_for_each_entry_rcu(tmp, &netlink_tap_all, list) {
196 ret = __netlink_deliver_tap_skb(skb, tmp->dev);
197 if (unlikely(ret))
198 break;
199 }
200}
201
202static void netlink_deliver_tap(struct sk_buff *skb)
203{
204 rcu_read_lock();
205
206 if (unlikely(!list_empty(&netlink_tap_all)))
207 __netlink_deliver_tap(skb);
208
209 rcu_read_unlock();
210}
211
114static void netlink_overrun(struct sock *sk) 212static void netlink_overrun(struct sock *sk)
115{ 213{
116 struct netlink_sock *nlk = nlk_sk(sk); 214 struct netlink_sock *nlk = nlk_sk(sk);
@@ -1518,6 +1616,8 @@ static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
1518{ 1616{
1519 int len = skb->len; 1617 int len = skb->len;
1520 1618
1619 netlink_deliver_tap(skb);
1620
1521#ifdef CONFIG_NETLINK_MMAP 1621#ifdef CONFIG_NETLINK_MMAP
1522 if (netlink_skb_is_mmaped(skb)) 1622 if (netlink_skb_is_mmaped(skb))
1523 netlink_queue_mmaped_skb(sk, skb); 1623 netlink_queue_mmaped_skb(sk, skb);
@@ -1578,6 +1678,11 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
1578 1678
1579 ret = -ECONNREFUSED; 1679 ret = -ECONNREFUSED;
1580 if (nlk->netlink_rcv != NULL) { 1680 if (nlk->netlink_rcv != NULL) {
1681 /* We could do a netlink_deliver_tap(skb) here as well
1682 * but since this is intended for the kernel only, we
1683 * should rather let it stay under the hood.
1684 */
1685
1581 ret = skb->len; 1686 ret = skb->len;
1582 netlink_skb_set_owner_r(skb, sk); 1687 netlink_skb_set_owner_r(skb, sk);
1583 NETLINK_CB(skb).sk = ssk; 1688 NETLINK_CB(skb).sk = ssk;
@@ -2975,6 +3080,8 @@ static int __init netlink_proto_init(void)
2975 nl_table[i].compare = netlink_compare; 3080 nl_table[i].compare = netlink_compare;
2976 } 3081 }
2977 3082
3083 INIT_LIST_HEAD(&netlink_tap_all);
3084
2978 netlink_add_usersock_entry(); 3085 netlink_add_usersock_entry();
2979 3086
2980 sock_register(&netlink_family_ops); 3087 sock_register(&netlink_family_ops);