Diffstat (limited to 'net/netlink')
-rw-r--r--  net/netlink/Makefile      |    5
-rw-r--r--  net/netlink/af_netlink.c  | 1454
2 files changed, 1459 insertions, 0 deletions
diff --git a/net/netlink/Makefile b/net/netlink/Makefile
new file mode 100644
index 000000000000..39d9c2dcd03c
--- /dev/null
+++ b/net/netlink/Makefile
@@ -0,0 +1,5 @@
#
# Makefile for the netlink driver.
#

obj-y := af_netlink.o
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
new file mode 100644
index 000000000000..1d5905c90cd4
--- /dev/null
+++ b/net/netlink/af_netlink.c
@@ -0,0 +1,1454 @@
/*
 * NETLINK      Kernel-user communication protocol.
 *
 *              Authors:        Alan Cox <alan@redhat.com>
 *                              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Tue Jun 26 14:36:48 MEST 2001 Herbert "herp" Rosmanith
 *                               added netlink_proto_exit
 * Tue Jan 22 18:32:44 BRST 2002 Arnaldo C. de Melo <acme@conectiva.com.br>
 *                               use nlk_sk, as sk->protinfo is on a diet 8)
 *
 */

#include <linux/config.h>
#include <linux/module.h>

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/major.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/fcntl.h>
#include <linux/termios.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/smp_lock.h>
#include <linux/notifier.h>
#include <linux/security.h>
#include <linux/jhash.h>
#include <linux/jiffies.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <net/sock.h>
#include <net/scm.h>

#define Nprintk(a...)

struct netlink_sock {
        /* struct sock has to be the first member of netlink_sock */
        struct sock             sk;
        u32                     pid;
        unsigned int            groups;
        u32                     dst_pid;
        unsigned int            dst_groups;
        unsigned long           state;
        wait_queue_head_t       wait;
        struct netlink_callback *cb;
        spinlock_t              cb_lock;
        void                    (*data_ready)(struct sock *sk, int bytes);
};

static inline struct netlink_sock *nlk_sk(struct sock *sk)
{
        return (struct netlink_sock *)sk;
}

struct nl_pid_hash {
        struct hlist_head *table;
        unsigned long rehash_time;

        unsigned int mask;
        unsigned int shift;

        unsigned int entries;
        unsigned int max_shift;

        u32 rnd;
};

struct netlink_table {
        struct nl_pid_hash hash;
        struct hlist_head mc_list;
        unsigned int nl_nonroot;
};

static struct netlink_table *nl_table;

static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);

static int netlink_dump(struct sock *sk);
static void netlink_destroy_callback(struct netlink_callback *cb);

static DEFINE_RWLOCK(nl_table_lock);
static atomic_t nl_table_users = ATOMIC_INIT(0);

static struct notifier_block *netlink_chain;

static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
{
        return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
}

static void netlink_sock_destruct(struct sock *sk)
{
        skb_queue_purge(&sk->sk_receive_queue);

        if (!sock_flag(sk, SOCK_DEAD)) {
                printk("Freeing alive netlink socket %p\n", sk);
                return;
        }
        BUG_TRAP(!atomic_read(&sk->sk_rmem_alloc));
        BUG_TRAP(!atomic_read(&sk->sk_wmem_alloc));
        BUG_TRAP(!nlk_sk(sk)->cb);
}

/* This lock without WQ_FLAG_EXCLUSIVE is good on UP and it is _very_ bad on SMP.
 * Look, when several writers sleep and reader wakes them up, all but one
 * immediately hit write lock and grab all the cpus. Exclusive sleep solves
 * this, _but_ remember, it adds useless work on UP machines.
 */

static void netlink_table_grab(void)
{
        write_lock_bh(&nl_table_lock);

        if (atomic_read(&nl_table_users)) {
                DECLARE_WAITQUEUE(wait, current);

                add_wait_queue_exclusive(&nl_table_wait, &wait);
                for(;;) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        if (atomic_read(&nl_table_users) == 0)
                                break;
                        write_unlock_bh(&nl_table_lock);
                        schedule();
                        write_lock_bh(&nl_table_lock);
                }

                __set_current_state(TASK_RUNNING);
                remove_wait_queue(&nl_table_wait, &wait);
        }
}

static __inline__ void netlink_table_ungrab(void)
{
        write_unlock_bh(&nl_table_lock);
        wake_up(&nl_table_wait);
}

static __inline__ void
netlink_lock_table(void)
{
        /* read_lock() synchronizes us to netlink_table_grab */

        read_lock(&nl_table_lock);
        atomic_inc(&nl_table_users);
        read_unlock(&nl_table_lock);
}

static __inline__ void
netlink_unlock_table(void)
{
        if (atomic_dec_and_test(&nl_table_users))
                wake_up(&nl_table_wait);
}

static __inline__ struct sock *netlink_lookup(int protocol, u32 pid)
{
        struct nl_pid_hash *hash = &nl_table[protocol].hash;
        struct hlist_head *head;
        struct sock *sk;
        struct hlist_node *node;

        read_lock(&nl_table_lock);
        head = nl_pid_hashfn(hash, pid);
        sk_for_each(sk, node, head) {
                if (nlk_sk(sk)->pid == pid) {
                        sock_hold(sk);
                        goto found;
                }
        }
        sk = NULL;
found:
        read_unlock(&nl_table_lock);
        return sk;
}

static inline struct hlist_head *nl_pid_hash_alloc(size_t size)
{
        if (size <= PAGE_SIZE)
                return kmalloc(size, GFP_ATOMIC);
        else
                return (struct hlist_head *)
                        __get_free_pages(GFP_ATOMIC, get_order(size));
}

static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
{
        if (size <= PAGE_SIZE)
                kfree(table);
        else
                free_pages((unsigned long)table, get_order(size));
}

static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
{
        unsigned int omask, mask, shift;
        size_t osize, size;
        struct hlist_head *otable, *table;
        int i;

        omask = mask = hash->mask;
        osize = size = (mask + 1) * sizeof(*table);
        shift = hash->shift;

        if (grow) {
                if (++shift > hash->max_shift)
                        return 0;
                mask = mask * 2 + 1;
                size *= 2;
        }

        table = nl_pid_hash_alloc(size);
        if (!table)
                return 0;

        memset(table, 0, size);
        otable = hash->table;
        hash->table = table;
        hash->mask = mask;
        hash->shift = shift;
        get_random_bytes(&hash->rnd, sizeof(hash->rnd));

        for (i = 0; i <= omask; i++) {
                struct sock *sk;
                struct hlist_node *node, *tmp;

                sk_for_each_safe(sk, node, tmp, &otable[i])
                        __sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
        }

        nl_pid_hash_free(otable, osize);
        hash->rehash_time = jiffies + 10 * 60 * HZ;
        return 1;
}

static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
{
        int avg = hash->entries >> hash->shift;

        if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
                return 1;

        if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
                nl_pid_hash_rehash(hash, 0);
                return 1;
        }

        return 0;
}
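For intuition about the thresholds above, a worked example with illustrative numbers (not part of this commit):

/* Example: shift = 4 gives 2^4 = 16 buckets (mask = 15), and
 * avg = entries >> shift is the integer average chain length.
 * avg first exceeds 1 at entries = 32, so the table grows (to 32
 * buckets) once it averages two sockets per bucket, until max_shift
 * caps further growth.  Independently, if the chain just searched is
 * longer than avg and the 10-minute rehash_time armed in
 * nl_pid_hash_rehash() has expired, the table is rehashed at the same
 * size with a fresh random seed to break up pathological chains.
 */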
271 | |||
272 | static struct proto_ops netlink_ops; | ||
273 | |||
274 | static int netlink_insert(struct sock *sk, u32 pid) | ||
275 | { | ||
276 | struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash; | ||
277 | struct hlist_head *head; | ||
278 | int err = -EADDRINUSE; | ||
279 | struct sock *osk; | ||
280 | struct hlist_node *node; | ||
281 | int len; | ||
282 | |||
283 | netlink_table_grab(); | ||
284 | head = nl_pid_hashfn(hash, pid); | ||
285 | len = 0; | ||
286 | sk_for_each(osk, node, head) { | ||
287 | if (nlk_sk(osk)->pid == pid) | ||
288 | break; | ||
289 | len++; | ||
290 | } | ||
291 | if (node) | ||
292 | goto err; | ||
293 | |||
294 | err = -EBUSY; | ||
295 | if (nlk_sk(sk)->pid) | ||
296 | goto err; | ||
297 | |||
298 | err = -ENOMEM; | ||
299 | if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX)) | ||
300 | goto err; | ||
301 | |||
302 | if (len && nl_pid_hash_dilute(hash, len)) | ||
303 | head = nl_pid_hashfn(hash, pid); | ||
304 | hash->entries++; | ||
305 | nlk_sk(sk)->pid = pid; | ||
306 | sk_add_node(sk, head); | ||
307 | err = 0; | ||
308 | |||
309 | err: | ||
310 | netlink_table_ungrab(); | ||
311 | return err; | ||
312 | } | ||
313 | |||
314 | static void netlink_remove(struct sock *sk) | ||
315 | { | ||
316 | netlink_table_grab(); | ||
317 | nl_table[sk->sk_protocol].hash.entries--; | ||
318 | sk_del_node_init(sk); | ||
319 | if (nlk_sk(sk)->groups) | ||
320 | __sk_del_bind_node(sk); | ||
321 | netlink_table_ungrab(); | ||
322 | } | ||
323 | |||
324 | static struct proto netlink_proto = { | ||
325 | .name = "NETLINK", | ||
326 | .owner = THIS_MODULE, | ||
327 | .obj_size = sizeof(struct netlink_sock), | ||
328 | }; | ||
329 | |||
330 | static int netlink_create(struct socket *sock, int protocol) | ||
331 | { | ||
332 | struct sock *sk; | ||
333 | struct netlink_sock *nlk; | ||
334 | |||
335 | sock->state = SS_UNCONNECTED; | ||
336 | |||
337 | if (sock->type != SOCK_RAW && sock->type != SOCK_DGRAM) | ||
338 | return -ESOCKTNOSUPPORT; | ||
339 | |||
340 | if (protocol<0 || protocol >= MAX_LINKS) | ||
341 | return -EPROTONOSUPPORT; | ||
342 | |||
343 | sock->ops = &netlink_ops; | ||
344 | |||
345 | sk = sk_alloc(PF_NETLINK, GFP_KERNEL, &netlink_proto, 1); | ||
346 | if (!sk) | ||
347 | return -ENOMEM; | ||
348 | |||
349 | sock_init_data(sock, sk); | ||
350 | |||
351 | nlk = nlk_sk(sk); | ||
352 | |||
353 | spin_lock_init(&nlk->cb_lock); | ||
354 | init_waitqueue_head(&nlk->wait); | ||
355 | sk->sk_destruct = netlink_sock_destruct; | ||
356 | |||
357 | sk->sk_protocol = protocol; | ||
358 | return 0; | ||
359 | } | ||
360 | |||
361 | static int netlink_release(struct socket *sock) | ||
362 | { | ||
363 | struct sock *sk = sock->sk; | ||
364 | struct netlink_sock *nlk; | ||
365 | |||
366 | if (!sk) | ||
367 | return 0; | ||
368 | |||
369 | netlink_remove(sk); | ||
370 | nlk = nlk_sk(sk); | ||
371 | |||
372 | spin_lock(&nlk->cb_lock); | ||
373 | if (nlk->cb) { | ||
374 | nlk->cb->done(nlk->cb); | ||
375 | netlink_destroy_callback(nlk->cb); | ||
376 | nlk->cb = NULL; | ||
377 | __sock_put(sk); | ||
378 | } | ||
379 | spin_unlock(&nlk->cb_lock); | ||
380 | |||
381 | /* OK. Socket is unlinked, and, therefore, | ||
382 | no new packets will arrive */ | ||
383 | |||
384 | sock_orphan(sk); | ||
385 | sock->sk = NULL; | ||
386 | wake_up_interruptible_all(&nlk->wait); | ||
387 | |||
388 | skb_queue_purge(&sk->sk_write_queue); | ||
389 | |||
390 | if (nlk->pid && !nlk->groups) { | ||
391 | struct netlink_notify n = { | ||
392 | .protocol = sk->sk_protocol, | ||
393 | .pid = nlk->pid, | ||
394 | }; | ||
395 | notifier_call_chain(&netlink_chain, NETLINK_URELEASE, &n); | ||
396 | } | ||
397 | |||
398 | sock_put(sk); | ||
399 | return 0; | ||
400 | } | ||
401 | |||
402 | static int netlink_autobind(struct socket *sock) | ||
403 | { | ||
404 | struct sock *sk = sock->sk; | ||
405 | struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash; | ||
406 | struct hlist_head *head; | ||
407 | struct sock *osk; | ||
408 | struct hlist_node *node; | ||
409 | s32 pid = current->pid; | ||
410 | int err; | ||
411 | static s32 rover = -4097; | ||
412 | |||
413 | retry: | ||
414 | cond_resched(); | ||
415 | netlink_table_grab(); | ||
416 | head = nl_pid_hashfn(hash, pid); | ||
417 | sk_for_each(osk, node, head) { | ||
418 | if (nlk_sk(osk)->pid == pid) { | ||
419 | /* Bind collision, search negative pid values. */ | ||
420 | pid = rover--; | ||
421 | if (rover > -4097) | ||
422 | rover = -4097; | ||
423 | netlink_table_ungrab(); | ||
424 | goto retry; | ||
425 | } | ||
426 | } | ||
427 | netlink_table_ungrab(); | ||
428 | |||
429 | err = netlink_insert(sk, pid); | ||
430 | if (err == -EADDRINUSE) | ||
431 | goto retry; | ||
432 | return 0; | ||
433 | } | ||
434 | |||
435 | static inline int netlink_capable(struct socket *sock, unsigned int flag) | ||
436 | { | ||
437 | return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) || | ||
438 | capable(CAP_NET_ADMIN); | ||
439 | } | ||
440 | |||
441 | static int netlink_bind(struct socket *sock, struct sockaddr *addr, int addr_len) | ||
442 | { | ||
443 | struct sock *sk = sock->sk; | ||
444 | struct netlink_sock *nlk = nlk_sk(sk); | ||
445 | struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr; | ||
446 | int err; | ||
447 | |||
448 | if (nladdr->nl_family != AF_NETLINK) | ||
449 | return -EINVAL; | ||
450 | |||
        /* Only the superuser is allowed to listen to multicasts */
        if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_RECV))
                return -EPERM;

        if (nlk->pid) {
                if (nladdr->nl_pid != nlk->pid)
                        return -EINVAL;
        } else {
                err = nladdr->nl_pid ?
                        netlink_insert(sk, nladdr->nl_pid) :
                        netlink_autobind(sock);
                if (err)
                        return err;
        }

        if (!nladdr->nl_groups && !nlk->groups)
                return 0;

        netlink_table_grab();
        if (nlk->groups && !nladdr->nl_groups)
                __sk_del_bind_node(sk);
        else if (!nlk->groups && nladdr->nl_groups)
                sk_add_bind_node(sk, &nl_table[sk->sk_protocol].mc_list);
        nlk->groups = nladdr->nl_groups;
        netlink_table_ungrab();

        return 0;
}
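From user space, the pid and group semantics above look as follows; a minimal sketch assuming the NETLINK_ROUTE protocol and the RTMGRP_LINK multicast group, with error handling omitted (not part of this commit):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

int main(void)
{
        struct sockaddr_nl addr;
        socklen_t alen = sizeof(addr);
        int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

        memset(&addr, 0, sizeof(addr));
        addr.nl_family = AF_NETLINK;
        addr.nl_pid = 0;                /* 0: let netlink_autobind() pick a pid */
        addr.nl_groups = RTMGRP_LINK;   /* join a multicast group via nl_groups */
        bind(fd, (struct sockaddr *)&addr, sizeof(addr));

        getsockname(fd, (struct sockaddr *)&addr, &alen);
        printf("kernel assigned nl_pid %u\n", addr.nl_pid);
        return 0;
}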
479 | |||
480 | static int netlink_connect(struct socket *sock, struct sockaddr *addr, | ||
481 | int alen, int flags) | ||
482 | { | ||
483 | int err = 0; | ||
484 | struct sock *sk = sock->sk; | ||
485 | struct netlink_sock *nlk = nlk_sk(sk); | ||
486 | struct sockaddr_nl *nladdr=(struct sockaddr_nl*)addr; | ||
487 | |||
488 | if (addr->sa_family == AF_UNSPEC) { | ||
489 | sk->sk_state = NETLINK_UNCONNECTED; | ||
490 | nlk->dst_pid = 0; | ||
491 | nlk->dst_groups = 0; | ||
492 | return 0; | ||
493 | } | ||
494 | if (addr->sa_family != AF_NETLINK) | ||
495 | return -EINVAL; | ||
496 | |||
        /* Only the superuser is allowed to send multicasts */
        if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
                return -EPERM;

        if (!nlk->pid)
                err = netlink_autobind(sock);

        if (err == 0) {
                sk->sk_state = NETLINK_CONNECTED;
                nlk->dst_pid = nladdr->nl_pid;
                nlk->dst_groups = nladdr->nl_groups;
        }

        return err;
}

static int netlink_getname(struct socket *sock, struct sockaddr *addr, int *addr_len, int peer)
{
        struct sock *sk = sock->sk;
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr=(struct sockaddr_nl *)addr;

        nladdr->nl_family = AF_NETLINK;
        nladdr->nl_pad = 0;
        *addr_len = sizeof(*nladdr);

        if (peer) {
                nladdr->nl_pid = nlk->dst_pid;
                nladdr->nl_groups = nlk->dst_groups;
        } else {
                nladdr->nl_pid = nlk->pid;
                nladdr->nl_groups = nlk->groups;
        }
        return 0;
}

static void netlink_overrun(struct sock *sk)
{
        if (!test_and_set_bit(0, &nlk_sk(sk)->state)) {
                sk->sk_err = ENOBUFS;
                sk->sk_error_report(sk);
        }
}

static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
{
        int protocol = ssk->sk_protocol;
        struct sock *sock;
        struct netlink_sock *nlk;

        sock = netlink_lookup(protocol, pid);
        if (!sock)
                return ERR_PTR(-ECONNREFUSED);

        /* Don't bother queuing skb if kernel socket has no input function */
        nlk = nlk_sk(sock);
        if ((nlk->pid == 0 && !nlk->data_ready) ||
            (sock->sk_state == NETLINK_CONNECTED &&
             nlk->dst_pid != nlk_sk(ssk)->pid)) {
                sock_put(sock);
                return ERR_PTR(-ECONNREFUSED);
        }
        return sock;
}

struct sock *netlink_getsockbyfilp(struct file *filp)
{
        struct inode *inode = filp->f_dentry->d_inode;
        struct sock *sock;

        if (!S_ISSOCK(inode->i_mode))
                return ERR_PTR(-ENOTSOCK);

        sock = SOCKET_I(inode)->sk;
        if (sock->sk_family != AF_NETLINK)
                return ERR_PTR(-EINVAL);

        sock_hold(sock);
        return sock;
}

/*
 * Attach a skb to a netlink socket.
 * The caller must hold a reference to the destination socket. On error, the
 * reference is dropped. The skb is not sent to the destination; just all
 * error checks are performed and memory in the queue is reserved.
 * Return values:
 * < 0: error. skb freed, reference to sock dropped.
 * 0: continue
 * 1: repeat lookup - reference dropped while waiting for socket memory.
 */
int netlink_attachskb(struct sock *sk, struct sk_buff *skb, int nonblock, long timeo)
{
        struct netlink_sock *nlk;

        nlk = nlk_sk(sk);

        if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
            test_bit(0, &nlk->state)) {
                DECLARE_WAITQUEUE(wait, current);
                if (!timeo) {
                        if (!nlk->pid)
                                netlink_overrun(sk);
                        sock_put(sk);
                        kfree_skb(skb);
                        return -EAGAIN;
                }

                __set_current_state(TASK_INTERRUPTIBLE);
                add_wait_queue(&nlk->wait, &wait);

                if ((atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf ||
                     test_bit(0, &nlk->state)) &&
                    !sock_flag(sk, SOCK_DEAD))
                        timeo = schedule_timeout(timeo);

                __set_current_state(TASK_RUNNING);
                remove_wait_queue(&nlk->wait, &wait);
                sock_put(sk);

                if (signal_pending(current)) {
                        kfree_skb(skb);
                        return sock_intr_errno(timeo);
                }
                return 1;
        }
        skb_set_owner_r(skb, sk);
        return 0;
}

int netlink_sendskb(struct sock *sk, struct sk_buff *skb, int protocol)
{
        struct netlink_sock *nlk;
        int len = skb->len;

        nlk = nlk_sk(sk);

        skb_queue_tail(&sk->sk_receive_queue, skb);
        sk->sk_data_ready(sk, len);
        sock_put(sk);
        return len;
}

void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
{
        kfree_skb(skb);
        sock_put(sk);
}

static inline struct sk_buff *netlink_trim(struct sk_buff *skb, int allocation)
{
        int delta;

        skb_orphan(skb);

        delta = skb->end - skb->tail;
        if (delta * 2 < skb->truesize)
                return skb;

        if (skb_shared(skb)) {
                struct sk_buff *nskb = skb_clone(skb, allocation);
                if (!nskb)
                        return skb;
                kfree_skb(skb);
                skb = nskb;
        }

        if (!pskb_expand_head(skb, 0, -delta, allocation))
                skb->truesize -= delta;

        return skb;
}

int netlink_unicast(struct sock *ssk, struct sk_buff *skb, u32 pid, int nonblock)
{
        struct sock *sk;
        int err;
        long timeo;

        skb = netlink_trim(skb, gfp_any());

        timeo = sock_sndtimeo(ssk, nonblock);
retry:
        sk = netlink_getsockbypid(ssk, pid);
        if (IS_ERR(sk)) {
                kfree_skb(skb);
                return PTR_ERR(sk);
        }
        err = netlink_attachskb(sk, skb, nonblock, timeo);
        if (err == 1)
                goto retry;
        if (err)
                return err;

        return netlink_sendskb(sk, skb, ssk->sk_protocol);
}
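A kernel-side caller typically builds the message with the NLMSG_* macros and hands it off with netlink_unicast(); a minimal sketch, where nl_sk is assumed to come from netlink_kernel_create() below and dst_pid is a bound user-space pid (not part of this commit):

static int example_send(struct sock *nl_sk, u32 dst_pid, int type,
                        const void *payload, int size)
{
        struct sk_buff *skb = alloc_skb(NLMSG_SPACE(size), GFP_KERNEL);
        struct nlmsghdr *nlh;

        if (!skb)
                return -ENOBUFS;
        nlh = __nlmsg_put(skb, 0, 0 /* seq */, type, size);
        memcpy(NLMSG_DATA(nlh), payload, size);
        /* netlink_unicast() consumes the skb and returns its length,
         * or a negative error such as -ECONNREFUSED or -EAGAIN. */
        return netlink_unicast(nl_sk, skb, dst_pid, MSG_DONTWAIT);
}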
693 | |||
694 | static __inline__ int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb) | ||
695 | { | ||
696 | struct netlink_sock *nlk = nlk_sk(sk); | ||
697 | |||
698 | if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf && | ||
699 | !test_bit(0, &nlk->state)) { | ||
700 | skb_set_owner_r(skb, sk); | ||
701 | skb_queue_tail(&sk->sk_receive_queue, skb); | ||
702 | sk->sk_data_ready(sk, skb->len); | ||
703 | return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf; | ||
704 | } | ||
705 | return -1; | ||
706 | } | ||
707 | |||
708 | struct netlink_broadcast_data { | ||
709 | struct sock *exclude_sk; | ||
710 | u32 pid; | ||
711 | u32 group; | ||
712 | int failure; | ||
713 | int congested; | ||
714 | int delivered; | ||
715 | int allocation; | ||
716 | struct sk_buff *skb, *skb2; | ||
717 | }; | ||
718 | |||
719 | static inline int do_one_broadcast(struct sock *sk, | ||
720 | struct netlink_broadcast_data *p) | ||
721 | { | ||
722 | struct netlink_sock *nlk = nlk_sk(sk); | ||
723 | int val; | ||
724 | |||
725 | if (p->exclude_sk == sk) | ||
726 | goto out; | ||
727 | |||
728 | if (nlk->pid == p->pid || !(nlk->groups & p->group)) | ||
729 | goto out; | ||
730 | |||
731 | if (p->failure) { | ||
732 | netlink_overrun(sk); | ||
733 | goto out; | ||
734 | } | ||
735 | |||
736 | sock_hold(sk); | ||
737 | if (p->skb2 == NULL) { | ||
738 | if (atomic_read(&p->skb->users) != 1) { | ||
739 | p->skb2 = skb_clone(p->skb, p->allocation); | ||
740 | } else { | ||
741 | p->skb2 = p->skb; | ||
742 | atomic_inc(&p->skb->users); | ||
743 | } | ||
744 | } | ||
745 | if (p->skb2 == NULL) { | ||
746 | netlink_overrun(sk); | ||
747 | /* Clone failed. Notify ALL listeners. */ | ||
748 | p->failure = 1; | ||
749 | } else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) { | ||
750 | netlink_overrun(sk); | ||
751 | } else { | ||
752 | p->congested |= val; | ||
753 | p->delivered = 1; | ||
754 | p->skb2 = NULL; | ||
755 | } | ||
756 | sock_put(sk); | ||
757 | |||
758 | out: | ||
759 | return 0; | ||
760 | } | ||
761 | |||
762 | int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid, | ||
763 | u32 group, int allocation) | ||
764 | { | ||
765 | struct netlink_broadcast_data info; | ||
766 | struct hlist_node *node; | ||
767 | struct sock *sk; | ||
768 | |||
769 | skb = netlink_trim(skb, allocation); | ||
770 | |||
771 | info.exclude_sk = ssk; | ||
772 | info.pid = pid; | ||
773 | info.group = group; | ||
774 | info.failure = 0; | ||
775 | info.congested = 0; | ||
776 | info.delivered = 0; | ||
777 | info.allocation = allocation; | ||
778 | info.skb = skb; | ||
779 | info.skb2 = NULL; | ||
780 | |||
        /* While we sleep in clone, do not allow the socket list to change */
782 | |||
783 | netlink_lock_table(); | ||
784 | |||
785 | sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list) | ||
786 | do_one_broadcast(sk, &info); | ||
787 | |||
788 | netlink_unlock_table(); | ||
789 | |||
790 | if (info.skb2) | ||
791 | kfree_skb(info.skb2); | ||
792 | kfree_skb(skb); | ||
793 | |||
794 | if (info.delivered) { | ||
795 | if (info.congested && (allocation & __GFP_WAIT)) | ||
796 | yield(); | ||
797 | return 0; | ||
798 | } | ||
799 | if (info.failure) | ||
800 | return -ENOBUFS; | ||
801 | return -ESRCH; | ||
802 | } | ||
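Kernel-originated multicast follows the same pattern rtnetlink uses; a sketch assuming the hypothetical nl_sk from the previous example and a single-bit group mask (not part of this commit):

static void example_notify(struct sock *nl_sk, struct sk_buff *skb, u32 group)
{
        /* group is a bitmask matched against each receiver's
         * sockaddr_nl.nl_groups; passing pid 0 means no bound
         * listener is treated as the sender and skipped. */
        NETLINK_CB(skb).dst_groups = group;
        netlink_broadcast(nl_sk, skb, 0, group, GFP_KERNEL);
        /* returns -ESRCH if nobody listens on the group, and
         * -ENOBUFS if cloning for some listener failed */
}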
803 | |||
804 | struct netlink_set_err_data { | ||
805 | struct sock *exclude_sk; | ||
806 | u32 pid; | ||
807 | u32 group; | ||
808 | int code; | ||
809 | }; | ||
810 | |||
811 | static inline int do_one_set_err(struct sock *sk, | ||
812 | struct netlink_set_err_data *p) | ||
813 | { | ||
814 | struct netlink_sock *nlk = nlk_sk(sk); | ||
815 | |||
816 | if (sk == p->exclude_sk) | ||
817 | goto out; | ||
818 | |||
819 | if (nlk->pid == p->pid || !(nlk->groups & p->group)) | ||
820 | goto out; | ||
821 | |||
822 | sk->sk_err = p->code; | ||
823 | sk->sk_error_report(sk); | ||
824 | out: | ||
825 | return 0; | ||
826 | } | ||
827 | |||
828 | void netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code) | ||
829 | { | ||
830 | struct netlink_set_err_data info; | ||
831 | struct hlist_node *node; | ||
832 | struct sock *sk; | ||
833 | |||
834 | info.exclude_sk = ssk; | ||
835 | info.pid = pid; | ||
836 | info.group = group; | ||
837 | info.code = code; | ||
838 | |||
839 | read_lock(&nl_table_lock); | ||
840 | |||
841 | sk_for_each_bound(sk, node, &nl_table[ssk->sk_protocol].mc_list) | ||
842 | do_one_set_err(sk, &info); | ||
843 | |||
844 | read_unlock(&nl_table_lock); | ||
845 | } | ||
846 | |||
847 | static inline void netlink_rcv_wake(struct sock *sk) | ||
848 | { | ||
849 | struct netlink_sock *nlk = nlk_sk(sk); | ||
850 | |||
851 | if (!skb_queue_len(&sk->sk_receive_queue)) | ||
852 | clear_bit(0, &nlk->state); | ||
853 | if (!test_bit(0, &nlk->state)) | ||
854 | wake_up_interruptible(&nlk->wait); | ||
855 | } | ||
856 | |||
857 | static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock, | ||
858 | struct msghdr *msg, size_t len) | ||
859 | { | ||
860 | struct sock_iocb *siocb = kiocb_to_siocb(kiocb); | ||
861 | struct sock *sk = sock->sk; | ||
862 | struct netlink_sock *nlk = nlk_sk(sk); | ||
863 | struct sockaddr_nl *addr=msg->msg_name; | ||
864 | u32 dst_pid; | ||
865 | u32 dst_groups; | ||
866 | struct sk_buff *skb; | ||
867 | int err; | ||
868 | struct scm_cookie scm; | ||
869 | |||
870 | if (msg->msg_flags&MSG_OOB) | ||
871 | return -EOPNOTSUPP; | ||
872 | |||
873 | if (NULL == siocb->scm) | ||
874 | siocb->scm = &scm; | ||
875 | err = scm_send(sock, msg, siocb->scm); | ||
876 | if (err < 0) | ||
877 | return err; | ||
878 | |||
879 | if (msg->msg_namelen) { | ||
880 | if (addr->nl_family != AF_NETLINK) | ||
881 | return -EINVAL; | ||
882 | dst_pid = addr->nl_pid; | ||
883 | dst_groups = addr->nl_groups; | ||
884 | if (dst_groups && !netlink_capable(sock, NL_NONROOT_SEND)) | ||
885 | return -EPERM; | ||
886 | } else { | ||
887 | dst_pid = nlk->dst_pid; | ||
888 | dst_groups = nlk->dst_groups; | ||
889 | } | ||
890 | |||
891 | if (!nlk->pid) { | ||
892 | err = netlink_autobind(sock); | ||
893 | if (err) | ||
894 | goto out; | ||
895 | } | ||
896 | |||
897 | err = -EMSGSIZE; | ||
898 | if (len > sk->sk_sndbuf - 32) | ||
899 | goto out; | ||
900 | err = -ENOBUFS; | ||
901 | skb = alloc_skb(len, GFP_KERNEL); | ||
902 | if (skb==NULL) | ||
903 | goto out; | ||
904 | |||
905 | NETLINK_CB(skb).pid = nlk->pid; | ||
906 | NETLINK_CB(skb).groups = nlk->groups; | ||
907 | NETLINK_CB(skb).dst_pid = dst_pid; | ||
908 | NETLINK_CB(skb).dst_groups = dst_groups; | ||
909 | memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred)); | ||
910 | |||
911 | /* What can I do? Netlink is asynchronous, so that | ||
912 | we will have to save current capabilities to | ||
913 | check them, when this message will be delivered | ||
914 | to corresponding kernel module. --ANK (980802) | ||
915 | */ | ||
916 | |||
917 | err = -EFAULT; | ||
918 | if (memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len)) { | ||
919 | kfree_skb(skb); | ||
920 | goto out; | ||
921 | } | ||
922 | |||
923 | err = security_netlink_send(sk, skb); | ||
924 | if (err) { | ||
925 | kfree_skb(skb); | ||
926 | goto out; | ||
927 | } | ||
928 | |||
929 | if (dst_groups) { | ||
930 | atomic_inc(&skb->users); | ||
931 | netlink_broadcast(sk, skb, dst_pid, dst_groups, GFP_KERNEL); | ||
932 | } | ||
933 | err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT); | ||
934 | |||
935 | out: | ||
936 | return err; | ||
937 | } | ||
938 | |||
939 | static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock, | ||
940 | struct msghdr *msg, size_t len, | ||
941 | int flags) | ||
942 | { | ||
943 | struct sock_iocb *siocb = kiocb_to_siocb(kiocb); | ||
944 | struct scm_cookie scm; | ||
945 | struct sock *sk = sock->sk; | ||
946 | struct netlink_sock *nlk = nlk_sk(sk); | ||
947 | int noblock = flags&MSG_DONTWAIT; | ||
948 | size_t copied; | ||
949 | struct sk_buff *skb; | ||
950 | int err; | ||
951 | |||
952 | if (flags&MSG_OOB) | ||
953 | return -EOPNOTSUPP; | ||
954 | |||
955 | copied = 0; | ||
956 | |||
957 | skb = skb_recv_datagram(sk,flags,noblock,&err); | ||
958 | if (skb==NULL) | ||
959 | goto out; | ||
960 | |||
961 | msg->msg_namelen = 0; | ||
962 | |||
963 | copied = skb->len; | ||
964 | if (len < copied) { | ||
965 | msg->msg_flags |= MSG_TRUNC; | ||
966 | copied = len; | ||
967 | } | ||
968 | |||
969 | skb->h.raw = skb->data; | ||
970 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | ||
971 | |||
972 | if (msg->msg_name) { | ||
973 | struct sockaddr_nl *addr = (struct sockaddr_nl*)msg->msg_name; | ||
974 | addr->nl_family = AF_NETLINK; | ||
975 | addr->nl_pad = 0; | ||
976 | addr->nl_pid = NETLINK_CB(skb).pid; | ||
977 | addr->nl_groups = NETLINK_CB(skb).dst_groups; | ||
978 | msg->msg_namelen = sizeof(*addr); | ||
979 | } | ||
980 | |||
981 | if (NULL == siocb->scm) { | ||
982 | memset(&scm, 0, sizeof(scm)); | ||
983 | siocb->scm = &scm; | ||
984 | } | ||
985 | siocb->scm->creds = *NETLINK_CREDS(skb); | ||
986 | skb_free_datagram(sk, skb); | ||
987 | |||
988 | if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) | ||
989 | netlink_dump(sk); | ||
990 | |||
991 | scm_recv(sock, msg, siocb->scm, flags); | ||
992 | |||
993 | out: | ||
994 | netlink_rcv_wake(sk); | ||
995 | return err ? : copied; | ||
996 | } | ||
997 | |||
998 | static void netlink_data_ready(struct sock *sk, int len) | ||
999 | { | ||
1000 | struct netlink_sock *nlk = nlk_sk(sk); | ||
1001 | |||
1002 | if (nlk->data_ready) | ||
1003 | nlk->data_ready(sk, len); | ||
1004 | netlink_rcv_wake(sk); | ||
1005 | } | ||
1006 | |||
1007 | /* | ||
1008 | * We export these functions to other modules. They provide a | ||
1009 | * complete set of kernel non-blocking support for message | ||
1010 | * queueing. | ||
1011 | */ | ||
1012 | |||
1013 | struct sock * | ||
1014 | netlink_kernel_create(int unit, void (*input)(struct sock *sk, int len)) | ||
1015 | { | ||
1016 | struct socket *sock; | ||
1017 | struct sock *sk; | ||
1018 | |||
1019 | if (!nl_table) | ||
1020 | return NULL; | ||
1021 | |||
1022 | if (unit<0 || unit>=MAX_LINKS) | ||
1023 | return NULL; | ||
1024 | |||
1025 | if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock)) | ||
1026 | return NULL; | ||
1027 | |||
1028 | if (netlink_create(sock, unit) < 0) { | ||
1029 | sock_release(sock); | ||
1030 | return NULL; | ||
1031 | } | ||
1032 | sk = sock->sk; | ||
1033 | sk->sk_data_ready = netlink_data_ready; | ||
1034 | if (input) | ||
1035 | nlk_sk(sk)->data_ready = input; | ||
1036 | |||
1037 | if (netlink_insert(sk, 0)) { | ||
1038 | sock_release(sock); | ||
1039 | return NULL; | ||
1040 | } | ||
1041 | return sk; | ||
1042 | } | ||
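A typical in-kernel user registers an input callback and drains the receive queue from it, much as rtnetlink does; a minimal module sketch, with EXAMPLE_PROTO standing in for a free protocol slot below MAX_LINKS (hypothetical, not part of this commit):

static struct sock *example_sk;

static void example_input(struct sock *sk, int len)
{
        struct sk_buff *skb;

        /* Runs from netlink_data_ready(), i.e. in the context of the
         * sending process, so keep the work short or defer it. */
        while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                struct nlmsghdr *nlh = (struct nlmsghdr *)skb->data;

                printk(KERN_DEBUG "type %d from pid %u\n",
                       nlh->nlmsg_type, NETLINK_CB(skb).pid);
                kfree_skb(skb);
        }
}

static int __init example_init(void)
{
        example_sk = netlink_kernel_create(EXAMPLE_PROTO, example_input);
        /* NULL covers every failure mode; -ENOMEM is a simplification */
        return example_sk ? 0 : -ENOMEM;
}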
1043 | |||
1044 | void netlink_set_nonroot(int protocol, unsigned int flags) | ||
1045 | { | ||
1046 | if ((unsigned int)protocol < MAX_LINKS) | ||
1047 | nl_table[protocol].nl_nonroot = flags; | ||
1048 | } | ||
1049 | |||
1050 | static void netlink_destroy_callback(struct netlink_callback *cb) | ||
1051 | { | ||
1052 | if (cb->skb) | ||
1053 | kfree_skb(cb->skb); | ||
1054 | kfree(cb); | ||
1055 | } | ||
1056 | |||
/*
 * It looks a bit ugly.
 * It would be better to create a kernel thread.
 */
1061 | |||
1062 | static int netlink_dump(struct sock *sk) | ||
1063 | { | ||
1064 | struct netlink_sock *nlk = nlk_sk(sk); | ||
1065 | struct netlink_callback *cb; | ||
1066 | struct sk_buff *skb; | ||
1067 | struct nlmsghdr *nlh; | ||
1068 | int len; | ||
1069 | |||
1070 | skb = sock_rmalloc(sk, NLMSG_GOODSIZE, 0, GFP_KERNEL); | ||
1071 | if (!skb) | ||
1072 | return -ENOBUFS; | ||
1073 | |||
1074 | spin_lock(&nlk->cb_lock); | ||
1075 | |||
1076 | cb = nlk->cb; | ||
1077 | if (cb == NULL) { | ||
1078 | spin_unlock(&nlk->cb_lock); | ||
1079 | kfree_skb(skb); | ||
1080 | return -EINVAL; | ||
1081 | } | ||
1082 | |||
1083 | len = cb->dump(skb, cb); | ||
1084 | |||
1085 | if (len > 0) { | ||
1086 | spin_unlock(&nlk->cb_lock); | ||
1087 | skb_queue_tail(&sk->sk_receive_queue, skb); | ||
1088 | sk->sk_data_ready(sk, len); | ||
1089 | return 0; | ||
1090 | } | ||
1091 | |||
1092 | nlh = __nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLMSG_DONE, sizeof(int)); | ||
1093 | nlh->nlmsg_flags |= NLM_F_MULTI; | ||
1094 | memcpy(NLMSG_DATA(nlh), &len, sizeof(len)); | ||
1095 | skb_queue_tail(&sk->sk_receive_queue, skb); | ||
1096 | sk->sk_data_ready(sk, skb->len); | ||
1097 | |||
1098 | cb->done(cb); | ||
1099 | nlk->cb = NULL; | ||
1100 | spin_unlock(&nlk->cb_lock); | ||
1101 | |||
1102 | netlink_destroy_callback(cb); | ||
1103 | __sock_put(sk); | ||
1104 | return 0; | ||
1105 | } | ||
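On the receiving side, a dump arrives as a stream of NLM_F_MULTI messages closed by the NLMSG_DONE message queued above; a user-space read-loop sketch, reusing the fd from the earlier bind example (not part of this commit):

char buf[8192];
int len, done = 0;

while (!done && (len = recv(fd, buf, sizeof(buf), 0)) > 0) {
        struct nlmsghdr *nlh;

        for (nlh = (struct nlmsghdr *)buf; NLMSG_OK(nlh, len);
             nlh = NLMSG_NEXT(nlh, len)) {
                if (nlh->nlmsg_type == NLMSG_DONE) {
                        done = 1;       /* end of dump; the payload holds
                                           the dump callback's final return */
                        break;
                }
                if (nlh->nlmsg_type == NLMSG_ERROR)
                        break;          /* struct nlmsgerr follows */
                /* handle one NLM_F_MULTI part here */
        }
}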
1106 | |||
1107 | int netlink_dump_start(struct sock *ssk, struct sk_buff *skb, | ||
1108 | struct nlmsghdr *nlh, | ||
1109 | int (*dump)(struct sk_buff *skb, struct netlink_callback*), | ||
1110 | int (*done)(struct netlink_callback*)) | ||
1111 | { | ||
1112 | struct netlink_callback *cb; | ||
1113 | struct sock *sk; | ||
1114 | struct netlink_sock *nlk; | ||
1115 | |||
1116 | cb = kmalloc(sizeof(*cb), GFP_KERNEL); | ||
1117 | if (cb == NULL) | ||
1118 | return -ENOBUFS; | ||
1119 | |||
1120 | memset(cb, 0, sizeof(*cb)); | ||
1121 | cb->dump = dump; | ||
1122 | cb->done = done; | ||
1123 | cb->nlh = nlh; | ||
1124 | atomic_inc(&skb->users); | ||
1125 | cb->skb = skb; | ||
1126 | |||
1127 | sk = netlink_lookup(ssk->sk_protocol, NETLINK_CB(skb).pid); | ||
1128 | if (sk == NULL) { | ||
1129 | netlink_destroy_callback(cb); | ||
1130 | return -ECONNREFUSED; | ||
1131 | } | ||
1132 | nlk = nlk_sk(sk); | ||
1133 | /* A dump is in progress... */ | ||
1134 | spin_lock(&nlk->cb_lock); | ||
1135 | if (nlk->cb) { | ||
1136 | spin_unlock(&nlk->cb_lock); | ||
1137 | netlink_destroy_callback(cb); | ||
1138 | sock_put(sk); | ||
1139 | return -EBUSY; | ||
1140 | } | ||
1141 | nlk->cb = cb; | ||
1142 | sock_hold(sk); | ||
1143 | spin_unlock(&nlk->cb_lock); | ||
1144 | |||
1145 | netlink_dump(sk); | ||
1146 | sock_put(sk); | ||
1147 | return 0; | ||
1148 | } | ||
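Protocols invoke netlink_dump_start() from their request handler when a message asks for a table dump (NLM_F_ROOT|NLM_F_MATCH, i.e. NLM_F_DUMP); a sketch of the calling convention with hypothetical handler names (not part of this commit):

static int example_rcv_msg(struct sock *nl_sk, struct sk_buff *skb,
                           struct nlmsghdr *nlh)
{
        if (nlh->nlmsg_flags & NLM_F_DUMP)
                /* Hands the socket over to the dump machinery, which
                 * calls example_dump() to fill one rcvbuf-sized skb at
                 * a time and example_done() when the dump completes. */
                return netlink_dump_start(nl_sk, skb, nlh,
                                          example_dump, example_done);

        /* ... handle non-dump requests synchronously ... */
        return 0;
}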
1149 | |||
1150 | void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err) | ||
1151 | { | ||
1152 | struct sk_buff *skb; | ||
1153 | struct nlmsghdr *rep; | ||
1154 | struct nlmsgerr *errmsg; | ||
1155 | int size; | ||
1156 | |||
1157 | if (err == 0) | ||
1158 | size = NLMSG_SPACE(sizeof(struct nlmsgerr)); | ||
1159 | else | ||
1160 | size = NLMSG_SPACE(4 + NLMSG_ALIGN(nlh->nlmsg_len)); | ||
1161 | |||
1162 | skb = alloc_skb(size, GFP_KERNEL); | ||
1163 | if (!skb) { | ||
1164 | struct sock *sk; | ||
1165 | |||
1166 | sk = netlink_lookup(in_skb->sk->sk_protocol, | ||
1167 | NETLINK_CB(in_skb).pid); | ||
1168 | if (sk) { | ||
1169 | sk->sk_err = ENOBUFS; | ||
1170 | sk->sk_error_report(sk); | ||
1171 | sock_put(sk); | ||
1172 | } | ||
1173 | return; | ||
1174 | } | ||
1175 | |||
1176 | rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, | ||
1177 | NLMSG_ERROR, sizeof(struct nlmsgerr)); | ||
1178 | errmsg = NLMSG_DATA(rep); | ||
1179 | errmsg->error = err; | ||
1180 | memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(struct nlmsghdr)); | ||
1181 | netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT); | ||
1182 | } | ||
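netlink_ack() pairs with a protocol's receive loop: a request is acknowledged when it failed or when the sender set NLM_F_ACK; a sketch with a hypothetical example_process() handler (not part of this commit):

static void example_rcv_one(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        int err;

        if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
                return;                         /* malformed: drop silently */

        err = example_process(skb, nlh);        /* hypothetical; returns 0
                                                   or a negative errno */
        if (err || (nlh->nlmsg_flags & NLM_F_ACK))
                /* err == 0 sends the short positive ack; a negative err
                 * echoes the whole failed request back to the sender */
                netlink_ack(skb, nlh, err);
}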
1183 | |||
1184 | |||
1185 | #ifdef CONFIG_PROC_FS | ||
1186 | struct nl_seq_iter { | ||
1187 | int link; | ||
1188 | int hash_idx; | ||
1189 | }; | ||
1190 | |||
1191 | static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos) | ||
1192 | { | ||
1193 | struct nl_seq_iter *iter = seq->private; | ||
1194 | int i, j; | ||
1195 | struct sock *s; | ||
1196 | struct hlist_node *node; | ||
1197 | loff_t off = 0; | ||
1198 | |||
1199 | for (i=0; i<MAX_LINKS; i++) { | ||
1200 | struct nl_pid_hash *hash = &nl_table[i].hash; | ||
1201 | |||
1202 | for (j = 0; j <= hash->mask; j++) { | ||
1203 | sk_for_each(s, node, &hash->table[j]) { | ||
1204 | if (off == pos) { | ||
1205 | iter->link = i; | ||
1206 | iter->hash_idx = j; | ||
1207 | return s; | ||
1208 | } | ||
1209 | ++off; | ||
1210 | } | ||
1211 | } | ||
1212 | } | ||
1213 | return NULL; | ||
1214 | } | ||
1215 | |||
1216 | static void *netlink_seq_start(struct seq_file *seq, loff_t *pos) | ||
1217 | { | ||
1218 | read_lock(&nl_table_lock); | ||
1219 | return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN; | ||
1220 | } | ||
1221 | |||
1222 | static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) | ||
1223 | { | ||
1224 | struct sock *s; | ||
1225 | struct nl_seq_iter *iter; | ||
1226 | int i, j; | ||
1227 | |||
1228 | ++*pos; | ||
1229 | |||
1230 | if (v == SEQ_START_TOKEN) | ||
1231 | return netlink_seq_socket_idx(seq, 0); | ||
1232 | |||
1233 | s = sk_next(v); | ||
1234 | if (s) | ||
1235 | return s; | ||
1236 | |||
1237 | iter = seq->private; | ||
1238 | i = iter->link; | ||
1239 | j = iter->hash_idx + 1; | ||
1240 | |||
1241 | do { | ||
1242 | struct nl_pid_hash *hash = &nl_table[i].hash; | ||
1243 | |||
1244 | for (; j <= hash->mask; j++) { | ||
1245 | s = sk_head(&hash->table[j]); | ||
1246 | if (s) { | ||
1247 | iter->link = i; | ||
1248 | iter->hash_idx = j; | ||
1249 | return s; | ||
1250 | } | ||
1251 | } | ||
1252 | |||
1253 | j = 0; | ||
1254 | } while (++i < MAX_LINKS); | ||
1255 | |||
1256 | return NULL; | ||
1257 | } | ||
1258 | |||
1259 | static void netlink_seq_stop(struct seq_file *seq, void *v) | ||
1260 | { | ||
1261 | read_unlock(&nl_table_lock); | ||
1262 | } | ||
1263 | |||
1264 | |||
1265 | static int netlink_seq_show(struct seq_file *seq, void *v) | ||
1266 | { | ||
1267 | if (v == SEQ_START_TOKEN) | ||
1268 | seq_puts(seq, | ||
1269 | "sk Eth Pid Groups " | ||
1270 | "Rmem Wmem Dump Locks\n"); | ||
1271 | else { | ||
1272 | struct sock *s = v; | ||
1273 | struct netlink_sock *nlk = nlk_sk(s); | ||
1274 | |||
1275 | seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %d\n", | ||
1276 | s, | ||
1277 | s->sk_protocol, | ||
1278 | nlk->pid, | ||
1279 | nlk->groups, | ||
1280 | atomic_read(&s->sk_rmem_alloc), | ||
1281 | atomic_read(&s->sk_wmem_alloc), | ||
1282 | nlk->cb, | ||
1283 | atomic_read(&s->sk_refcnt) | ||
1284 | ); | ||
1285 | |||
1286 | } | ||
1287 | return 0; | ||
1288 | } | ||
1289 | |||
1290 | static struct seq_operations netlink_seq_ops = { | ||
1291 | .start = netlink_seq_start, | ||
1292 | .next = netlink_seq_next, | ||
1293 | .stop = netlink_seq_stop, | ||
1294 | .show = netlink_seq_show, | ||
1295 | }; | ||
1296 | |||
1297 | |||
1298 | static int netlink_seq_open(struct inode *inode, struct file *file) | ||
1299 | { | ||
1300 | struct seq_file *seq; | ||
1301 | struct nl_seq_iter *iter; | ||
1302 | int err; | ||
1303 | |||
1304 | iter = kmalloc(sizeof(*iter), GFP_KERNEL); | ||
1305 | if (!iter) | ||
1306 | return -ENOMEM; | ||
1307 | |||
1308 | err = seq_open(file, &netlink_seq_ops); | ||
1309 | if (err) { | ||
1310 | kfree(iter); | ||
1311 | return err; | ||
1312 | } | ||
1313 | |||
1314 | memset(iter, 0, sizeof(*iter)); | ||
1315 | seq = file->private_data; | ||
1316 | seq->private = iter; | ||
1317 | return 0; | ||
1318 | } | ||
1319 | |||
1320 | static struct file_operations netlink_seq_fops = { | ||
1321 | .owner = THIS_MODULE, | ||
1322 | .open = netlink_seq_open, | ||
1323 | .read = seq_read, | ||
1324 | .llseek = seq_lseek, | ||
1325 | .release = seq_release_private, | ||
1326 | }; | ||
1327 | |||
1328 | #endif | ||
1329 | |||
1330 | int netlink_register_notifier(struct notifier_block *nb) | ||
1331 | { | ||
1332 | return notifier_chain_register(&netlink_chain, nb); | ||
1333 | } | ||
1334 | |||
1335 | int netlink_unregister_notifier(struct notifier_block *nb) | ||
1336 | { | ||
1337 | return notifier_chain_unregister(&netlink_chain, nb); | ||
1338 | } | ||
1339 | |||
1340 | static struct proto_ops netlink_ops = { | ||
1341 | .family = PF_NETLINK, | ||
1342 | .owner = THIS_MODULE, | ||
1343 | .release = netlink_release, | ||
1344 | .bind = netlink_bind, | ||
1345 | .connect = netlink_connect, | ||
1346 | .socketpair = sock_no_socketpair, | ||
1347 | .accept = sock_no_accept, | ||
1348 | .getname = netlink_getname, | ||
1349 | .poll = datagram_poll, | ||
1350 | .ioctl = sock_no_ioctl, | ||
1351 | .listen = sock_no_listen, | ||
1352 | .shutdown = sock_no_shutdown, | ||
1353 | .setsockopt = sock_no_setsockopt, | ||
1354 | .getsockopt = sock_no_getsockopt, | ||
1355 | .sendmsg = netlink_sendmsg, | ||
1356 | .recvmsg = netlink_recvmsg, | ||
1357 | .mmap = sock_no_mmap, | ||
1358 | .sendpage = sock_no_sendpage, | ||
1359 | }; | ||
1360 | |||
1361 | static struct net_proto_family netlink_family_ops = { | ||
1362 | .family = PF_NETLINK, | ||
1363 | .create = netlink_create, | ||
1364 | .owner = THIS_MODULE, /* for consistency 8) */ | ||
1365 | }; | ||
1366 | |||
1367 | extern void netlink_skb_parms_too_large(void); | ||
1368 | |||
1369 | static int __init netlink_proto_init(void) | ||
1370 | { | ||
1371 | struct sk_buff *dummy_skb; | ||
1372 | int i; | ||
1373 | unsigned long max; | ||
1374 | unsigned int order; | ||
1375 | int err = proto_register(&netlink_proto, 0); | ||
1376 | |||
1377 | if (err != 0) | ||
1378 | goto out; | ||
1379 | |||
1380 | if (sizeof(struct netlink_skb_parms) > sizeof(dummy_skb->cb)) | ||
1381 | netlink_skb_parms_too_large(); | ||
1382 | |||
1383 | nl_table = kmalloc(sizeof(*nl_table) * MAX_LINKS, GFP_KERNEL); | ||
1384 | if (!nl_table) { | ||
1385 | enomem: | ||
1386 | printk(KERN_CRIT "netlink_init: Cannot allocate nl_table\n"); | ||
1387 | return -ENOMEM; | ||
1388 | } | ||
1389 | |||
1390 | memset(nl_table, 0, sizeof(*nl_table) * MAX_LINKS); | ||
1391 | |||
1392 | if (num_physpages >= (128 * 1024)) | ||
1393 | max = num_physpages >> (21 - PAGE_SHIFT); | ||
1394 | else | ||
1395 | max = num_physpages >> (23 - PAGE_SHIFT); | ||
1396 | |||
1397 | order = get_bitmask_order(max) - 1 + PAGE_SHIFT; | ||
1398 | max = (1UL << order) / sizeof(struct hlist_head); | ||
1399 | order = get_bitmask_order(max > UINT_MAX ? UINT_MAX : max) - 1; | ||
1400 | |||
1401 | for (i = 0; i < MAX_LINKS; i++) { | ||
1402 | struct nl_pid_hash *hash = &nl_table[i].hash; | ||
1403 | |||
1404 | hash->table = nl_pid_hash_alloc(1 * sizeof(*hash->table)); | ||
1405 | if (!hash->table) { | ||
1406 | while (i-- > 0) | ||
1407 | nl_pid_hash_free(nl_table[i].hash.table, | ||
1408 | 1 * sizeof(*hash->table)); | ||
1409 | kfree(nl_table); | ||
1410 | goto enomem; | ||
1411 | } | ||
1412 | memset(hash->table, 0, 1 * sizeof(*hash->table)); | ||
1413 | hash->max_shift = order; | ||
1414 | hash->shift = 0; | ||
1415 | hash->mask = 0; | ||
1416 | hash->rehash_time = jiffies; | ||
1417 | } | ||
1418 | |||
1419 | sock_register(&netlink_family_ops); | ||
1420 | #ifdef CONFIG_PROC_FS | ||
1421 | proc_net_fops_create("netlink", 0, &netlink_seq_fops); | ||
1422 | #endif | ||
1423 | /* The netlink device handler may be needed early. */ | ||
1424 | rtnetlink_init(); | ||
1425 | out: | ||
1426 | return err; | ||
1427 | } | ||
1428 | |||
1429 | static void __exit netlink_proto_exit(void) | ||
1430 | { | ||
1431 | sock_unregister(PF_NETLINK); | ||
1432 | proc_net_remove("netlink"); | ||
1433 | kfree(nl_table); | ||
1434 | nl_table = NULL; | ||
1435 | proto_unregister(&netlink_proto); | ||
1436 | } | ||
1437 | |||
1438 | core_initcall(netlink_proto_init); | ||
1439 | module_exit(netlink_proto_exit); | ||
1440 | |||
1441 | MODULE_LICENSE("GPL"); | ||
1442 | |||
1443 | MODULE_ALIAS_NETPROTO(PF_NETLINK); | ||
1444 | |||
1445 | EXPORT_SYMBOL(netlink_ack); | ||
1446 | EXPORT_SYMBOL(netlink_broadcast); | ||
1447 | EXPORT_SYMBOL(netlink_dump_start); | ||
1448 | EXPORT_SYMBOL(netlink_kernel_create); | ||
1449 | EXPORT_SYMBOL(netlink_register_notifier); | ||
1450 | EXPORT_SYMBOL(netlink_set_err); | ||
1451 | EXPORT_SYMBOL(netlink_set_nonroot); | ||
1452 | EXPORT_SYMBOL(netlink_unicast); | ||
1453 | EXPORT_SYMBOL(netlink_unregister_notifier); | ||
1454 | |||