author    Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
committer Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-17 16:15:55 -0500
commit    8dea78da5cee153b8af9c07a2745f6c55057fe12 (patch)
tree      a8f4d49d63b1ecc92f2fddceba0655b2472c5bd9 /net/netlink
parent    406089d01562f1e2bf9f089fd7637009ebaad589 (diff)

Patched in Tegra support.
Diffstat (limited to 'net/netlink')
-rw-r--r--  net/netlink/af_netlink.c  434
-rw-r--r--  net/netlink/genetlink.c   153
2 files changed, 221 insertions, 366 deletions
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index c0353d55d56..0a4db0211da 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -67,8 +67,8 @@
 struct netlink_sock {
 	/* struct sock has to be the first member of netlink_sock */
 	struct sock		sk;
-	u32			portid;
-	u32			dst_portid;
+	u32			pid;
+	u32			dst_pid;
 	u32			dst_group;
 	u32			flags;
 	u32			subscriptions;
@@ -80,7 +80,6 @@ struct netlink_sock {
 	struct mutex		*cb_mutex;
 	struct mutex		cb_def_mutex;
 	void			(*netlink_rcv)(struct sk_buff *skb);
-	void			(*netlink_bind)(int group);
 	struct module		*module;
 };
 
@@ -104,29 +103,28 @@ static inline int netlink_is_kernel(struct sock *sk)
 	return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
 }
 
-struct nl_portid_hash {
+struct nl_pid_hash {
 	struct hlist_head	*table;
 	unsigned long		rehash_time;
 
 	unsigned int		mask;
 	unsigned int		shift;
 
 	unsigned int		entries;
 	unsigned int		max_shift;
 
 	u32			rnd;
 };
 
 struct netlink_table {
-	struct nl_portid_hash	hash;
+	struct nl_pid_hash	hash;
 	struct hlist_head	mc_list;
 	struct listeners __rcu	*listeners;
-	unsigned int		flags;
+	unsigned int		nl_nonroot;
 	unsigned int		groups;
 	struct mutex		*cb_mutex;
 	struct module		*module;
-	void			(*bind)(int group);
 	int			registered;
 };
 
 static struct netlink_table *nl_table;
@@ -134,34 +132,21 @@ static struct netlink_table *nl_table;
 static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
 
 static int netlink_dump(struct sock *sk);
+static void netlink_destroy_callback(struct netlink_callback *cb);
 
 static DEFINE_RWLOCK(nl_table_lock);
 static atomic_t nl_table_users = ATOMIC_INIT(0);
 
-#define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));
-
 static ATOMIC_NOTIFIER_HEAD(netlink_chain);
 
-static inline u32 netlink_group_mask(u32 group)
+static u32 netlink_group_mask(u32 group)
 {
 	return group ? 1 << (group - 1) : 0;
 }
 
-static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u32 portid)
-{
-	return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask];
-}
-
-static void netlink_destroy_callback(struct netlink_callback *cb)
-{
-	kfree_skb(cb->skb);
-	kfree(cb);
-}
-
-static void netlink_consume_callback(struct netlink_callback *cb)
+static struct hlist_head *nl_pid_hashfn(struct nl_pid_hash *hash, u32 pid)
 {
-	consume_skb(cb->skb);
-	kfree(cb);
+	return &hash->table[jhash_1word(pid, hash->rnd) & hash->mask];
 }
 
 static void netlink_sock_destruct(struct sock *sk)
@@ -171,8 +156,6 @@ static void netlink_sock_destruct(struct sock *sk)
 	if (nlk->cb) {
 		if (nlk->cb->done)
 			nlk->cb->done(nlk->cb);
-
-		module_put(nlk->cb->module);
 		netlink_destroy_callback(nlk->cb);
 	}
 
@@ -243,17 +226,18 @@ netlink_unlock_table(void)
 	wake_up(&nl_table_wait);
 }
 
-static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
+static inline struct sock *netlink_lookup(struct net *net, int protocol,
+					  u32 pid)
 {
-	struct nl_portid_hash *hash = &nl_table[protocol].hash;
+	struct nl_pid_hash *hash = &nl_table[protocol].hash;
 	struct hlist_head *head;
 	struct sock *sk;
 	struct hlist_node *node;
 
 	read_lock(&nl_table_lock);
-	head = nl_portid_hashfn(hash, portid);
+	head = nl_pid_hashfn(hash, pid);
 	sk_for_each(sk, node, head) {
-		if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->portid == portid)) {
+		if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->pid == pid)) {
 			sock_hold(sk);
 			goto found;
 		}
@@ -264,7 +248,7 @@ found:
 	return sk;
 }
 
-static struct hlist_head *nl_portid_hash_zalloc(size_t size)
+static inline struct hlist_head *nl_pid_hash_zalloc(size_t size)
 {
 	if (size <= PAGE_SIZE)
 		return kzalloc(size, GFP_ATOMIC);
@@ -274,7 +258,7 @@ static struct hlist_head *nl_portid_hash_zalloc(size_t size)
 					 get_order(size));
 }
 
-static void nl_portid_hash_free(struct hlist_head *table, size_t size)
+static inline void nl_pid_hash_free(struct hlist_head *table, size_t size)
 {
 	if (size <= PAGE_SIZE)
 		kfree(table);
@@ -282,7 +266,7 @@ static void nl_portid_hash_free(struct hlist_head *table, size_t size)
 		free_pages((unsigned long)table, get_order(size));
 }
 
-static int nl_portid_hash_rehash(struct nl_portid_hash *hash, int grow)
+static int nl_pid_hash_rehash(struct nl_pid_hash *hash, int grow)
 {
 	unsigned int omask, mask, shift;
 	size_t osize, size;
@@ -300,7 +284,7 @@ static int nl_portid_hash_rehash(struct nl_portid_hash *hash, int grow)
 		size *= 2;
 	}
 
-	table = nl_portid_hash_zalloc(size);
+	table = nl_pid_hash_zalloc(size);
 	if (!table)
 		return 0;
 
@@ -315,23 +299,23 @@ static int nl_portid_hash_rehash(struct nl_portid_hash *hash, int grow)
 		struct hlist_node *node, *tmp;
 
 		sk_for_each_safe(sk, node, tmp, &otable[i])
-			__sk_add_node(sk, nl_portid_hashfn(hash, nlk_sk(sk)->portid));
+			__sk_add_node(sk, nl_pid_hashfn(hash, nlk_sk(sk)->pid));
 	}
 
-	nl_portid_hash_free(otable, osize);
+	nl_pid_hash_free(otable, osize);
 	hash->rehash_time = jiffies + 10 * 60 * HZ;
 	return 1;
 }
 
-static inline int nl_portid_hash_dilute(struct nl_portid_hash *hash, int len)
+static inline int nl_pid_hash_dilute(struct nl_pid_hash *hash, int len)
 {
 	int avg = hash->entries >> hash->shift;
 
-	if (unlikely(avg > 1) && nl_portid_hash_rehash(hash, 1))
+	if (unlikely(avg > 1) && nl_pid_hash_rehash(hash, 1))
 		return 1;
 
 	if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
-		nl_portid_hash_rehash(hash, 0);
+		nl_pid_hash_rehash(hash, 0);
 		return 1;
 	}
 
@@ -347,11 +331,6 @@ netlink_update_listeners(struct sock *sk)
 	struct hlist_node *node;
 	unsigned long mask;
 	unsigned int i;
-	struct listeners *listeners;
-
-	listeners = nl_deref_protected(tbl->listeners);
-	if (!listeners)
-		return;
 
 	for (i = 0; i < NLGRPLONGS(tbl->groups); i++) {
 		mask = 0;
@@ -359,15 +338,15 @@ netlink_update_listeners(struct sock *sk)
 			if (i < NLGRPLONGS(nlk_sk(sk)->ngroups))
 				mask |= nlk_sk(sk)->groups[i];
 		}
-		listeners->masks[i] = mask;
+		tbl->listeners->masks[i] = mask;
 	}
 	/* this function is only called with the netlink table "grabbed", which
 	 * makes sure updates are visible before bind or setsockopt return. */
 }
 
-static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
+static int netlink_insert(struct sock *sk, struct net *net, u32 pid)
 {
-	struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash;
+	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
 	struct hlist_head *head;
 	int err = -EADDRINUSE;
 	struct sock *osk;
@@ -375,10 +354,10 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
 	int len;
 
 	netlink_table_grab();
-	head = nl_portid_hashfn(hash, portid);
+	head = nl_pid_hashfn(hash, pid);
 	len = 0;
 	sk_for_each(osk, node, head) {
-		if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->portid == portid))
+		if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->pid == pid))
 			break;
 		len++;
 	}
@@ -386,17 +365,17 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
 		goto err;
 
 	err = -EBUSY;
-	if (nlk_sk(sk)->portid)
+	if (nlk_sk(sk)->pid)
 		goto err;
 
 	err = -ENOMEM;
 	if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
 		goto err;
 
-	if (len && nl_portid_hash_dilute(hash, len))
-		head = nl_portid_hashfn(hash, portid);
+	if (len && nl_pid_hash_dilute(hash, len))
+		head = nl_pid_hashfn(hash, pid);
 	hash->entries++;
-	nlk_sk(sk)->portid = portid;
+	nlk_sk(sk)->pid = pid;
 	sk_add_node(sk, head);
 	err = 0;
 
@@ -436,9 +415,9 @@ static int __netlink_create(struct net *net, struct socket *sock,
 	sock_init_data(sock, sk);
 
 	nlk = nlk_sk(sk);
-	if (cb_mutex) {
+	if (cb_mutex)
 		nlk->cb_mutex = cb_mutex;
-	} else {
+	else {
 		nlk->cb_mutex = &nlk->cb_def_mutex;
 		mutex_init(nlk->cb_mutex);
 	}
@@ -455,7 +434,6 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
 	struct module *module = NULL;
 	struct mutex *cb_mutex;
 	struct netlink_sock *nlk;
-	void (*bind)(int group);
 	int err = 0;
 
 	sock->state = SS_UNCONNECTED;
@@ -480,7 +458,6 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
 	else
 		err = -EPROTONOSUPPORT;
 	cb_mutex = nl_table[protocol].cb_mutex;
-	bind = nl_table[protocol].bind;
 	netlink_unlock_table();
 
 	if (err < 0)
@@ -496,7 +473,6 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
 
 	nlk = nlk_sk(sock->sk);
 	nlk->module = module;
-	nlk->netlink_bind = bind;
out:
 	return err;
 
@@ -527,11 +503,11 @@ static int netlink_release(struct socket *sock)
 
 	skb_queue_purge(&sk->sk_write_queue);
 
-	if (nlk->portid) {
+	if (nlk->pid) {
 		struct netlink_notify n = {
 			.net = sock_net(sk),
 			.protocol = sk->sk_protocol,
-			.portid = nlk->portid,
+			.pid = nlk->pid,
 		};
 		atomic_notifier_call_chain(&netlink_chain,
 				NETLINK_URELEASE, &n);
@@ -543,19 +519,12 @@ static int netlink_release(struct socket *sock)
 	if (netlink_is_kernel(sk)) {
 		BUG_ON(nl_table[sk->sk_protocol].registered == 0);
 		if (--nl_table[sk->sk_protocol].registered == 0) {
-			struct listeners *old;
-
-			old = nl_deref_protected(nl_table[sk->sk_protocol].listeners);
-			RCU_INIT_POINTER(nl_table[sk->sk_protocol].listeners, NULL);
-			kfree_rcu(old, rcu);
+			kfree(nl_table[sk->sk_protocol].listeners);
 			nl_table[sk->sk_protocol].module = NULL;
-			nl_table[sk->sk_protocol].bind = NULL;
-			nl_table[sk->sk_protocol].flags = 0;
 			nl_table[sk->sk_protocol].registered = 0;
 		}
-	} else if (nlk->subscriptions) {
+	} else if (nlk->subscriptions)
 		netlink_update_listeners(sk);
-	}
 	netlink_table_ungrab();
 
 	kfree(nlk->groups);
@@ -572,24 +541,24 @@ static int netlink_autobind(struct socket *sock)
 {
 	struct sock *sk = sock->sk;
 	struct net *net = sock_net(sk);
-	struct nl_portid_hash *hash = &nl_table[sk->sk_protocol].hash;
+	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
 	struct hlist_head *head;
 	struct sock *osk;
 	struct hlist_node *node;
-	s32 portid = task_tgid_vnr(current);
+	s32 pid = task_tgid_vnr(current);
 	int err;
 	static s32 rover = -4097;
 
retry:
 	cond_resched();
 	netlink_table_grab();
-	head = nl_portid_hashfn(hash, portid);
+	head = nl_pid_hashfn(hash, pid);
 	sk_for_each(osk, node, head) {
 		if (!net_eq(sock_net(osk), net))
 			continue;
-		if (nlk_sk(osk)->portid == portid) {
-			/* Bind collision, search negative portid values. */
-			portid = rover--;
+		if (nlk_sk(osk)->pid == pid) {
+			/* Bind collision, search negative pid values. */
+			pid = rover--;
 			if (rover > -4097)
 				rover = -4097;
 			netlink_table_ungrab();
@@ -598,7 +567,7 @@ retry:
 	}
 	netlink_table_ungrab();
 
-	err = netlink_insert(sk, net, portid);
+	err = netlink_insert(sk, net, pid);
 	if (err == -EADDRINUSE)
 		goto retry;
 
@@ -609,10 +578,10 @@ retry:
 	return err;
 }
 
-static inline int netlink_capable(const struct socket *sock, unsigned int flag)
+static inline int netlink_capable(struct socket *sock, unsigned int flag)
 {
-	return (nl_table[sock->sk->sk_protocol].flags & flag) ||
-	       ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
+	return (nl_table[sock->sk->sk_protocol].nl_nonroot & flag) ||
+	       capable(CAP_NET_ADMIN);
 }
 
 static void
@@ -669,23 +638,20 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
 	int err;
 
-	if (addr_len < sizeof(struct sockaddr_nl))
-		return -EINVAL;
-
 	if (nladdr->nl_family != AF_NETLINK)
 		return -EINVAL;
 
 	/* Only superuser is allowed to listen multicasts */
 	if (nladdr->nl_groups) {
-		if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
+		if (!netlink_capable(sock, NL_NONROOT_RECV))
 			return -EPERM;
 		err = netlink_realloc_groups(sk);
 		if (err)
 			return err;
 	}
 
-	if (nlk->portid) {
-		if (nladdr->nl_pid != nlk->portid)
+	if (nlk->pid) {
+		if (nladdr->nl_pid != nlk->pid)
 			return -EINVAL;
 	} else {
 		err = nladdr->nl_pid ?
@@ -706,15 +672,6 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 	netlink_update_listeners(sk);
 	netlink_table_ungrab();
 
-	if (nlk->netlink_bind && nlk->groups[0]) {
-		int i;
-
-		for (i=0; i<nlk->ngroups; i++) {
-			if (test_bit(i, nlk->groups))
-				nlk->netlink_bind(i);
-		}
-	}
-
 	return 0;
 }
 
@@ -731,7 +688,7 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
 
 	if (addr->sa_family == AF_UNSPEC) {
 		sk->sk_state = NETLINK_UNCONNECTED;
-		nlk->dst_portid = 0;
+		nlk->dst_pid = 0;
 		nlk->dst_group = 0;
 		return 0;
 	}
@@ -739,15 +696,15 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
 		return -EINVAL;
 
 	/* Only superuser is allowed to send multicasts */
-	if (nladdr->nl_groups && !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
+	if (nladdr->nl_groups && !netlink_capable(sock, NL_NONROOT_SEND))
 		return -EPERM;
 
-	if (!nlk->portid)
+	if (!nlk->pid)
 		err = netlink_autobind(sock);
 
 	if (err == 0) {
 		sk->sk_state = NETLINK_CONNECTED;
-		nlk->dst_portid = nladdr->nl_pid;
+		nlk->dst_pid = nladdr->nl_pid;
 		nlk->dst_group = ffs(nladdr->nl_groups);
 	}
 
@@ -766,10 +723,10 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr,
 	*addr_len = sizeof(*nladdr);
 
 	if (peer) {
-		nladdr->nl_pid = nlk->dst_portid;
+		nladdr->nl_pid = nlk->dst_pid;
 		nladdr->nl_groups = netlink_group_mask(nlk->dst_group);
 	} else {
-		nladdr->nl_pid = nlk->portid;
+		nladdr->nl_pid = nlk->pid;
 		nladdr->nl_groups = nlk->groups ? nlk->groups[0] : 0;
 	}
 	return 0;
@@ -788,19 +745,19 @@ static void netlink_overrun(struct sock *sk)
 	atomic_inc(&sk->sk_drops);
 }
 
-static struct sock *netlink_getsockbyportid(struct sock *ssk, u32 portid)
+static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
 {
 	struct sock *sock;
 	struct netlink_sock *nlk;
 
-	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, portid);
+	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, pid);
 	if (!sock)
 		return ERR_PTR(-ECONNREFUSED);
 
 	/* Don't bother queuing skb if kernel socket has no input function */
 	nlk = nlk_sk(sock);
 	if (sock->sk_state == NETLINK_CONNECTED &&
-	    nlk->dst_portid != nlk_sk(ssk)->portid) {
+	    nlk->dst_pid != nlk_sk(ssk)->pid) {
 		sock_put(sock);
 		return ERR_PTR(-ECONNREFUSED);
 	}
@@ -873,19 +830,12 @@ int netlink_attachskb(struct sock *sk, struct sk_buff *skb,
 	return 0;
 }
 
-static int __netlink_sendskb(struct sock *sk, struct sk_buff *skb)
+int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
 {
 	int len = skb->len;
 
 	skb_queue_tail(&sk->sk_receive_queue, skb);
 	sk->sk_data_ready(sk, len);
-	return len;
-}
-
-int netlink_sendskb(struct sock *sk, struct sk_buff *skb)
-{
-	int len = __netlink_sendskb(sk, skb);
-
 	sock_put(sk);
 	return len;
 }
@@ -896,7 +846,8 @@ void netlink_detachskb(struct sock *sk, struct sk_buff *skb)
 	sock_put(sk);
 }
 
-static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
+static inline struct sk_buff *netlink_trim(struct sk_buff *skb,
+					   gfp_t allocation)
 {
 	int delta;
 
@@ -910,7 +861,7 @@ static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
 		struct sk_buff *nskb = skb_clone(skb, allocation);
 		if (!nskb)
 			return skb;
-		consume_skb(skb);
+		kfree_skb(skb);
 		skb = nskb;
 	}
 
@@ -920,7 +871,7 @@ static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
 	return skb;
 }
 
-static void netlink_rcv_wake(struct sock *sk)
+static inline void netlink_rcv_wake(struct sock *sk)
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
 
@@ -930,8 +881,7 @@ static void netlink_rcv_wake(struct sock *sk)
 		wake_up_interruptible(&nlk->wait);
 }
 
-static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
-				  struct sock *ssk)
+static inline int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb)
 {
 	int ret;
 	struct netlink_sock *nlk = nlk_sk(sk);
@@ -940,18 +890,15 @@ static int netlink_unicast_kernel(struct sock *sk, struct sk_buff *skb,
 	if (nlk->netlink_rcv != NULL) {
 		ret = skb->len;
 		skb_set_owner_r(skb, sk);
-		NETLINK_CB(skb).ssk = ssk;
 		nlk->netlink_rcv(skb);
-		consume_skb(skb);
-	} else {
-		kfree_skb(skb);
 	}
+	kfree_skb(skb);
 	sock_put(sk);
 	return ret;
 }
 
 int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
-		    u32 portid, int nonblock)
+		    u32 pid, int nonblock)
 {
 	struct sock *sk;
 	int err;
@@ -961,13 +908,13 @@ int netlink_unicast(struct sock *ssk, struct sk_buff *skb,
 
 	timeo = sock_sndtimeo(ssk, nonblock);
retry:
-	sk = netlink_getsockbyportid(ssk, portid);
+	sk = netlink_getsockbypid(ssk, pid);
 	if (IS_ERR(sk)) {
 		kfree_skb(skb);
 		return PTR_ERR(sk);
 	}
 	if (netlink_is_kernel(sk))
-		return netlink_unicast_kernel(sk, skb, ssk);
+		return netlink_unicast_kernel(sk, skb);
 
 	if (sk_filter(sk, skb)) {
 		err = skb->len;
@@ -996,7 +943,7 @@ int netlink_has_listeners(struct sock *sk, unsigned int group)
 	rcu_read_lock();
 	listeners = rcu_dereference(nl_table[sk->sk_protocol].listeners);
 
-	if (listeners && group - 1 < nl_table[sk->sk_protocol].groups)
+	if (group - 1 < nl_table[sk->sk_protocol].groups)
 		res = test_bit(group - 1, listeners->masks);
 
 	rcu_read_unlock();
@@ -1005,15 +952,17 @@ int netlink_has_listeners(struct sock *sk, unsigned int group)
 }
 EXPORT_SYMBOL_GPL(netlink_has_listeners);
 
-static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
+static inline int netlink_broadcast_deliver(struct sock *sk,
+					    struct sk_buff *skb)
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
 
 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
 	    !test_bit(0, &nlk->state)) {
 		skb_set_owner_r(skb, sk);
-		__netlink_sendskb(sk, skb);
-		return atomic_read(&sk->sk_rmem_alloc) > (sk->sk_rcvbuf >> 1);
+		skb_queue_tail(&sk->sk_receive_queue, skb);
+		sk->sk_data_ready(sk, skb->len);
+		return atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf;
 	}
 	return -1;
 }
@@ -1021,7 +970,7 @@ static int netlink_broadcast_deliver(struct sock *sk, struct sk_buff *skb)
struct netlink_broadcast_data {
 	struct sock *exclude_sk;
 	struct net *net;
-	u32 portid;
+	u32 pid;
 	u32 group;
 	int failure;
 	int delivery_failure;
@@ -1033,7 +982,7 @@ struct netlink_broadcast_data {
 	void *tx_data;
 };
 
-static int do_one_broadcast(struct sock *sk,
+static inline int do_one_broadcast(struct sock *sk,
 				   struct netlink_broadcast_data *p)
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
@@ -1042,7 +991,7 @@ static int do_one_broadcast(struct sock *sk,
 	if (p->exclude_sk == sk)
 		goto out;
 
-	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
+	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
 	    !test_bit(p->group - 1, nlk->groups))
 		goto out;
 
@@ -1094,7 +1043,7 @@ out:
 	return 0;
 }
 
-int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
+int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 pid,
 	u32 group, gfp_t allocation,
 	int (*filter)(struct sock *dsk, struct sk_buff *skb, void *data),
 	void *filter_data)
@@ -1108,7 +1057,7 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid
 
 	info.exclude_sk = ssk;
 	info.net = net;
-	info.portid = portid;
+	info.pid = pid;
 	info.group = group;
 	info.failure = 0;
 	info.delivery_failure = 0;
@@ -1134,8 +1083,8 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid
 	if (info.delivery_failure) {
 		kfree_skb(info.skb2);
 		return -ENOBUFS;
-	}
-	consume_skb(info.skb2);
+	} else
+		consume_skb(info.skb2);
 
 	if (info.delivered) {
 		if (info.congested && (allocation & __GFP_WAIT))
@@ -1146,22 +1095,23 @@ int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid
 }
 EXPORT_SYMBOL(netlink_broadcast_filtered);
 
-int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 portid,
+int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
 		      u32 group, gfp_t allocation)
 {
-	return netlink_broadcast_filtered(ssk, skb, portid, group, allocation,
+	return netlink_broadcast_filtered(ssk, skb, pid, group, allocation,
 					  NULL, NULL);
 }
 EXPORT_SYMBOL(netlink_broadcast);
 
struct netlink_set_err_data {
 	struct sock *exclude_sk;
-	u32 portid;
+	u32 pid;
 	u32 group;
 	int code;
 };
 
-static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
+static inline int do_one_set_err(struct sock *sk,
+				 struct netlink_set_err_data *p)
 {
 	struct netlink_sock *nlk = nlk_sk(sk);
 	int ret = 0;
@@ -1172,7 +1122,7 @@ static int do_one_set_err(struct sock *sk, struct netlink_set_err_data *p)
 	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
 		goto out;
 
-	if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
+	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
 	    !test_bit(p->group - 1, nlk->groups))
 		goto out;
 
@@ -1190,14 +1140,14 @@ out:
/**
 * netlink_set_err - report error to broadcast listeners
 * @ssk: the kernel netlink socket, as returned by netlink_kernel_create()
- * @portid: the PORTID of a process that we want to skip (if any)
+ * @pid: the PID of a process that we want to skip (if any)
 * @groups: the broadcast group that will notice the error
 * @code: error code, must be negative (as usual in kernelspace)
 *
 * This function returns the number of broadcast listeners that have set the
 * NETLINK_RECV_NO_ENOBUFS socket option.
 */
-int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
+int netlink_set_err(struct sock *ssk, u32 pid, u32 group, int code)
{
 	struct netlink_set_err_data info;
 	struct hlist_node *node;
@@ -1205,7 +1155,7 @@ int netlink_set_err(struct sock *ssk, u32 portid, u32 group, int code)
 	int ret = 0;
 
 	info.exclude_sk = ssk;
-	info.portid = portid;
+	info.pid = pid;
 	info.group = group;
 	/* sk->sk_err wants a positive error value */
 	info.code = -code;
@@ -1262,7 +1212,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
 		break;
 	case NETLINK_ADD_MEMBERSHIP:
 	case NETLINK_DROP_MEMBERSHIP: {
-		if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
+		if (!netlink_capable(sock, NL_NONROOT_RECV))
 			return -EPERM;
 		err = netlink_realloc_groups(sk);
 		if (err)
@@ -1273,10 +1223,6 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
 		netlink_update_socket_mc(nlk, val,
 					 optname == NETLINK_ADD_MEMBERSHIP);
 		netlink_table_ungrab();
-
-		if (nlk->netlink_bind)
-			nlk->netlink_bind(val);
-
 		err = 0;
 		break;
 	}
@@ -1292,9 +1238,8 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
 			nlk->flags |= NETLINK_RECV_NO_ENOBUFS;
 			clear_bit(0, &nlk->state);
 			wake_up_interruptible(&nlk->wait);
-		} else {
+		} else
 			nlk->flags &= ~NETLINK_RECV_NO_ENOBUFS;
-		}
 		err = 0;
 		break;
 	default:
@@ -1370,7 +1315,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
 	struct sock *sk = sock->sk;
 	struct netlink_sock *nlk = nlk_sk(sk);
 	struct sockaddr_nl *addr = msg->msg_name;
-	u32 dst_portid;
+	u32 dst_pid;
 	u32 dst_group;
 	struct sk_buff *skb;
 	int err;
@@ -1379,10 +1324,11 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
 	if (msg->msg_flags&MSG_OOB)
 		return -EOPNOTSUPP;
 
-	if (NULL == siocb->scm)
+	if (NULL == siocb->scm) {
 		siocb->scm = &scm;
-
-	err = scm_send(sock, msg, siocb->scm, true);
+		memset(&scm, 0, sizeof(scm));
+	}
+	err = scm_send(sock, msg, siocb->scm);
 	if (err < 0)
 		return err;
 
@@ -1390,18 +1336,17 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
 		err = -EINVAL;
 		if (addr->nl_family != AF_NETLINK)
 			goto out;
-		dst_portid = addr->nl_pid;
+		dst_pid = addr->nl_pid;
 		dst_group = ffs(addr->nl_groups);
 		err = -EPERM;
-		if ((dst_group || dst_portid) &&
-		    !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
+		if (dst_group && !netlink_capable(sock, NL_NONROOT_SEND))
 			goto out;
 	} else {
-		dst_portid = nlk->dst_portid;
+		dst_pid = nlk->dst_pid;
 		dst_group = nlk->dst_group;
 	}
 
-	if (!nlk->portid) {
+	if (!nlk->pid) {
 		err = netlink_autobind(sock);
 		if (err)
 			goto out;
@@ -1415,9 +1360,9 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
 	if (skb == NULL)
 		goto out;
 
-	NETLINK_CB(skb).portid = nlk->portid;
+	NETLINK_CB(skb).pid = nlk->pid;
 	NETLINK_CB(skb).dst_group = dst_group;
-	NETLINK_CB(skb).creds = siocb->scm->creds;
+	memcpy(NETLINK_CREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
 
 	err = -EFAULT;
 	if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
@@ -1433,9 +1378,9 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
 
 	if (dst_group) {
 		atomic_inc(&skb->users);
-		netlink_broadcast(sk, skb, dst_portid, dst_group, GFP_KERNEL);
+		netlink_broadcast(sk, skb, dst_pid, dst_group, GFP_KERNEL);
 	}
-	err = netlink_unicast(sk, skb, dst_portid, msg->msg_flags&MSG_DONTWAIT);
+	err = netlink_unicast(sk, skb, dst_pid, msg->msg_flags&MSG_DONTWAIT);
 
out:
 	scm_destroy(siocb->scm);
@@ -1498,7 +1443,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
 		struct sockaddr_nl *addr = (struct sockaddr_nl *)msg->msg_name;
 		addr->nl_family = AF_NETLINK;
 		addr->nl_pad = 0;
-		addr->nl_pid = NETLINK_CB(skb).portid;
+		addr->nl_pid = NETLINK_CB(skb).pid;
 		addr->nl_groups = netlink_group_mask(NETLINK_CB(skb).dst_group);
 		msg->msg_namelen = sizeof(*addr);
 	}
@@ -1542,15 +1487,14 @@ static void netlink_data_ready(struct sock *sk, int len)
 */
 
struct sock *
-__netlink_kernel_create(struct net *net, int unit, struct module *module,
-			struct netlink_kernel_cfg *cfg)
+netlink_kernel_create(struct net *net, int unit, unsigned int groups,
+		      void (*input)(struct sk_buff *skb),
+		      struct mutex *cb_mutex, struct module *module)
{
 	struct socket *sock;
 	struct sock *sk;
 	struct netlink_sock *nlk;
 	struct listeners *listeners = NULL;
-	struct mutex *cb_mutex = cfg ? cfg->cb_mutex : NULL;
-	unsigned int groups;
 
 	BUG_ON(!nl_table);
 
@@ -1572,18 +1516,16 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module,
 	sk = sock->sk;
 	sk_change_net(sk, net);
 
-	if (!cfg || cfg->groups < 32)
+	if (groups < 32)
 		groups = 32;
-	else
-		groups = cfg->groups;
 
 	listeners = kzalloc(sizeof(*listeners) + NLGRPSZ(groups), GFP_KERNEL);
 	if (!listeners)
 		goto out_sock_release;
 
 	sk->sk_data_ready = netlink_data_ready;
-	if (cfg && cfg->input)
-		nlk_sk(sk)->netlink_rcv = cfg->input;
+	if (input)
+		nlk_sk(sk)->netlink_rcv = input;
 
 	if (netlink_insert(sk, net, 0))
 		goto out_sock_release;
@@ -1597,10 +1539,6 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module,
 		rcu_assign_pointer(nl_table[unit].listeners, listeners);
 		nl_table[unit].cb_mutex = cb_mutex;
 		nl_table[unit].module = module;
-		if (cfg) {
-			nl_table[unit].bind = cfg->bind;
-			nl_table[unit].flags = cfg->flags;
-		}
 		nl_table[unit].registered = 1;
 	} else {
 		kfree(listeners);
@@ -1618,7 +1556,8 @@ out_sock_release_nosk:
 	sock_release(sock);
 	return NULL;
}
-EXPORT_SYMBOL(__netlink_kernel_create);
+EXPORT_SYMBOL(netlink_kernel_create);
+
 
void
netlink_kernel_release(struct sock *sk)
@@ -1639,7 +1578,7 @@ int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
 	new = kzalloc(sizeof(*new) + NLGRPSZ(groups), GFP_ATOMIC);
 	if (!new)
 		return -ENOMEM;
-	old = nl_deref_protected(tbl->listeners);
+	old = rcu_dereference_raw(tbl->listeners);
 	memcpy(new->masks, old->masks, NLGRPSZ(tbl->groups));
 	rcu_assign_pointer(tbl->listeners, new);
 
@@ -1698,23 +1637,18 @@ void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
 	netlink_table_ungrab();
}
 
-struct nlmsghdr *
-__nlmsg_put(struct sk_buff *skb, u32 portid, u32 seq, int type, int len, int flags)
+void netlink_set_nonroot(int protocol, unsigned int flags)
{
-	struct nlmsghdr *nlh;
-	int size = NLMSG_LENGTH(len);
+	if ((unsigned int)protocol < MAX_LINKS)
+		nl_table[protocol].nl_nonroot = flags;
+}
+EXPORT_SYMBOL(netlink_set_nonroot);
 
-	nlh = (struct nlmsghdr*)skb_put(skb, NLMSG_ALIGN(size));
-	nlh->nlmsg_type = type;
-	nlh->nlmsg_len = size;
-	nlh->nlmsg_flags = flags;
-	nlh->nlmsg_pid = portid;
-	nlh->nlmsg_seq = seq;
-	if (!__builtin_constant_p(size) || NLMSG_ALIGN(size) - size != 0)
-		memset(NLMSG_DATA(nlh) + len, 0, NLMSG_ALIGN(size) - size);
-	return nlh;
+static void netlink_destroy_callback(struct netlink_callback *cb)
+{
+	kfree_skb(cb->skb);
+	kfree(cb);
}
-EXPORT_SYMBOL(__nlmsg_put);
 
/*
 * It looks a bit ugly.
@@ -1751,8 +1685,10 @@ static int netlink_dump(struct sock *sk)
 
 		if (sk_filter(sk, skb))
 			kfree_skb(skb);
-		else
-			__netlink_sendskb(sk, skb);
+		else {
+			skb_queue_tail(&sk->sk_receive_queue, skb);
+			sk->sk_data_ready(sk, skb->len);
+		}
 		return 0;
 	}
 
@@ -1766,16 +1702,17 @@ static int netlink_dump(struct sock *sk)
 
 	if (sk_filter(sk, skb))
 		kfree_skb(skb);
-	else
-		__netlink_sendskb(sk, skb);
+	else {
+		skb_queue_tail(&sk->sk_receive_queue, skb);
+		sk->sk_data_ready(sk, skb->len);
+	}
 
 	if (cb->done)
 		cb->done(cb);
 	nlk->cb = NULL;
 	mutex_unlock(nlk->cb_mutex);
 
-	module_put(cb->module);
-	netlink_consume_callback(cb);
+	netlink_destroy_callback(cb);
 	return 0;
 
errout_skb:
@@ -1784,9 +1721,12 @@ errout_skb:
 	return err;
}
 
-int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
-			 const struct nlmsghdr *nlh,
-			 struct netlink_dump_control *control)
+int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
+		       const struct nlmsghdr *nlh,
+		       int (*dump)(struct sk_buff *skb,
+				   struct netlink_callback *),
+		       int (*done)(struct netlink_callback *),
+		       u16 min_dump_alloc)
{
 	struct netlink_callback *cb;
 	struct sock *sk;
@@ -1797,43 +1737,32 @@ int __netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
 	if (cb == NULL)
 		return -ENOBUFS;
 
-	cb->dump = control->dump;
-	cb->done = control->done;
+	cb->dump = dump;
+	cb->done = done;
 	cb->nlh = nlh;
-	cb->data = control->data;
-	cb->module = control->module;
-	cb->min_dump_alloc = control->min_dump_alloc;
+	cb->min_dump_alloc = min_dump_alloc;
 	atomic_inc(&skb->users);
 	cb->skb = skb;
 
-	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).portid);
+	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).pid);
 	if (sk == NULL) {
 		netlink_destroy_callback(cb);
 		return -ECONNREFUSED;
 	}
 	nlk = nlk_sk(sk);
-
-	mutex_lock(nlk->cb_mutex);
 	/* A dump is in progress... */
+	mutex_lock(nlk->cb_mutex);
 	if (nlk->cb) {
 		mutex_unlock(nlk->cb_mutex);
 		netlink_destroy_callback(cb);
-		ret = -EBUSY;
-		goto out;
-	}
-	/* add reference of module which cb->dump belongs to */
-	if (!try_module_get(cb->module)) {
-		mutex_unlock(nlk->cb_mutex);
-		netlink_destroy_callback(cb);
-		ret = -EPROTONOSUPPORT;
-		goto out;
+		sock_put(sk);
+		return -EBUSY;
 	}
-
 	nlk->cb = cb;
 	mutex_unlock(nlk->cb_mutex);
 
 	ret = netlink_dump(sk);
-out:
+
 	sock_put(sk);
 
 	if (ret)
@@ -1844,7 +1773,7 @@ out:
 	 */
 	return -EINTR;
}
-EXPORT_SYMBOL(__netlink_dump_start);
+EXPORT_SYMBOL(netlink_dump_start);
 
void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
{
@@ -1863,7 +1792,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
 
 		sk = netlink_lookup(sock_net(in_skb->sk),
 				    in_skb->sk->sk_protocol,
-				    NETLINK_CB(in_skb).portid);
+				    NETLINK_CB(in_skb).pid);
 		if (sk) {
 			sk->sk_err = ENOBUFS;
 			sk->sk_error_report(sk);
@@ -1872,12 +1801,12 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
 		return;
 	}
 
-	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
+	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
 			  NLMSG_ERROR, payload, 0);
 	errmsg = nlmsg_data(rep);
 	errmsg->error = err;
 	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
-	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).portid, MSG_DONTWAIT);
+	netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
}
EXPORT_SYMBOL(netlink_ack);
 
@@ -1927,33 +1856,33 @@ EXPORT_SYMBOL(netlink_rcv_skb);
 * nlmsg_notify - send a notification netlink message
 * @sk: netlink socket to use
 * @skb: notification message
- * @portid: destination netlink portid for reports or 0
+ * @pid: destination netlink pid for reports or 0
 * @group: destination multicast group or 0
 * @report: 1 to report back, 0 to disable
 * @flags: allocation flags
 */
-int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
+int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 pid,
 		 unsigned int group, int report, gfp_t flags)
{
 	int err = 0;
 
 	if (group) {
-		int exclude_portid = 0;
+		int exclude_pid = 0;
 
 		if (report) {
 			atomic_inc(&skb->users);
-			exclude_portid = portid;
+			exclude_pid = pid;
 		}
 
 		/* errors reported via destination sk->sk_err, but propagate
 		 * delivery errors if NETLINK_BROADCAST_ERROR flag is set */
-		err = nlmsg_multicast(sk, skb, exclude_portid, group, flags);
+		err = nlmsg_multicast(sk, skb, exclude_pid, group, flags);
 	}
 
 	if (report) {
 		int err2;
 
-		err2 = nlmsg_unicast(sk, skb, portid);
+		err2 = nlmsg_unicast(sk, skb, pid);
 		if (!err || err == -ESRCH)
 			err = err2;
 	}
@@ -1978,7 +1907,7 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
 	loff_t off = 0;
 
 	for (i = 0; i < MAX_LINKS; i++) {
-		struct nl_portid_hash *hash = &nl_table[i].hash;
+		struct nl_pid_hash *hash = &nl_table[i].hash;
 
 		for (j = 0; j <= hash->mask; j++) {
 			sk_for_each(s, node, &hash->table[j]) {
@@ -2026,7 +1955,7 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	j = iter->hash_idx + 1;
 
 	do {
-		struct nl_portid_hash *hash = &nl_table[i].hash;
+		struct nl_pid_hash *hash = &nl_table[i].hash;
 
 		for (; j <= hash->mask; j++) {
 			s = sk_head(&hash->table[j]);
@@ -2054,18 +1983,18 @@ static void netlink_seq_stop(struct seq_file *seq, void *v)
 
static int netlink_seq_show(struct seq_file *seq, void *v)
{
-	if (v == SEQ_START_TOKEN) {
+	if (v == SEQ_START_TOKEN)
 		seq_puts(seq,
 			 "sk       Eth Pid        Groups   "
 			 "Rmem     Wmem     Dump     Locks     Drops     Inode\n");
-	} else {
+	else {
 		struct sock *s = v;
 		struct netlink_sock *nlk = nlk_sk(s);
 
-		seq_printf(seq, "%pK %-3d %-6u %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
+		seq_printf(seq, "%pK %-3d %-6d %08x %-8d %-8d %pK %-8d %-8d %-8lu\n",
 			   s,
 			   s->sk_protocol,
-			   nlk->portid,
+			   nlk->pid,
 			   nlk->groups ? (u32)nlk->groups[0] : 0,
 			   sk_rmem_alloc_get(s),
 			   sk_wmem_alloc_get(s),
@@ -2173,7 +2102,6 @@ static void __init netlink_add_usersock_entry(void)
 	rcu_assign_pointer(nl_table[NETLINK_USERSOCK].listeners, listeners);
 	nl_table[NETLINK_USERSOCK].module = THIS_MODULE;
 	nl_table[NETLINK_USERSOCK].registered = 1;
-	nl_table[NETLINK_USERSOCK].flags = NL_CFG_F_NONROOT_SEND;
 
 	netlink_table_ungrab();
}
@@ -2210,12 +2138,12 @@ static int __init netlink_proto_init(void)
 	order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1;
 
 	for (i = 0; i < MAX_LINKS; i++) {
-		struct nl_portid_hash *hash = &nl_table[i].hash;
+		struct nl_pid_hash *hash = &nl_table[i].hash;
 
-		hash->table = nl_portid_hash_zalloc(1 * sizeof(*hash->table));
+		hash->table = nl_pid_hash_zalloc(1 * sizeof(*hash->table));
 		if (!hash->table) {
 			while (i-- > 0)
-				nl_portid_hash_free(nl_table[i].hash.table,
+				nl_pid_hash_free(nl_table[i].hash.table,
 						 1 * sizeof(*hash->table));
 			kfree(nl_table);
 			goto panic;
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index f2aabb6f410..482fa571b4e 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -33,14 +33,6 @@ void genl_unlock(void)
33} 33}
34EXPORT_SYMBOL(genl_unlock); 34EXPORT_SYMBOL(genl_unlock);
35 35
36#ifdef CONFIG_LOCKDEP
37int lockdep_genl_is_held(void)
38{
39 return lockdep_is_held(&genl_mutex);
40}
41EXPORT_SYMBOL(lockdep_genl_is_held);
42#endif
43
44#define GENL_FAM_TAB_SIZE 16 36#define GENL_FAM_TAB_SIZE 16
45#define GENL_FAM_TAB_MASK (GENL_FAM_TAB_SIZE - 1) 37#define GENL_FAM_TAB_MASK (GENL_FAM_TAB_SIZE - 1)
46 38
@@ -106,7 +98,7 @@ static struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family)
106/* Of course we are going to have problems once we hit 98/* Of course we are going to have problems once we hit
107 * 2^16 alive types, but that can only happen by year 2K 99 * 2^16 alive types, but that can only happen by year 2K
108*/ 100*/
109static u16 genl_generate_id(void) 101static inline u16 genl_generate_id(void)
110{ 102{
111 static u16 id_gen_idx = GENL_MIN_ID; 103 static u16 id_gen_idx = GENL_MIN_ID;
112 int i; 104 int i;
@@ -498,37 +490,6 @@ int genl_unregister_family(struct genl_family *family)
498} 490}
499EXPORT_SYMBOL(genl_unregister_family); 491EXPORT_SYMBOL(genl_unregister_family);
500 492
501/**
502 * genlmsg_put - Add generic netlink header to netlink message
503 * @skb: socket buffer holding the message
504 * @portid: netlink portid the message is addressed to
505 * @seq: sequence number (usually the one of the sender)
506 * @family: generic netlink family
507 * @flags: netlink message flags
508 * @cmd: generic netlink command
509 *
510 * Returns pointer to user specific header
511 */
512void *genlmsg_put(struct sk_buff *skb, u32 portid, u32 seq,
513 struct genl_family *family, int flags, u8 cmd)
514{
515 struct nlmsghdr *nlh;
516 struct genlmsghdr *hdr;
517
518 nlh = nlmsg_put(skb, portid, seq, family->id, GENL_HDRLEN +
519 family->hdrsize, flags);
520 if (nlh == NULL)
521 return NULL;
522
523 hdr = nlmsg_data(nlh);
524 hdr->cmd = cmd;
525 hdr->version = family->version;
526 hdr->reserved = 0;
527
528 return (char *) hdr + GENL_HDRLEN;
529}
530EXPORT_SYMBOL(genlmsg_put);
531
532static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) 493static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
533{ 494{
534 struct genl_ops *ops; 495 struct genl_ops *ops;
@@ -555,7 +516,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
555 return -EOPNOTSUPP; 516 return -EOPNOTSUPP;
556 517
557 if ((ops->flags & GENL_ADMIN_PERM) && 518 if ((ops->flags & GENL_ADMIN_PERM) &&
558 !capable(CAP_NET_ADMIN)) 519 security_netlink_recv(skb, CAP_NET_ADMIN))
559 return -EPERM; 520 return -EPERM;
560 521
561 if (nlh->nlmsg_flags & NLM_F_DUMP) { 522 if (nlh->nlmsg_flags & NLM_F_DUMP) {
@@ -563,13 +524,8 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 			return -EOPNOTSUPP;
 
 		genl_unlock();
-		{
-			struct netlink_dump_control c = {
-				.dump = ops->dumpit,
-				.done = ops->done,
-			};
-			err = netlink_dump_start(net->genl_sock, skb, nlh, &c);
-		}
+		err = netlink_dump_start(net->genl_sock, skb, nlh,
+					 ops->dumpit, ops->done, 0);
 		genl_lock();
 		return err;
 	}
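This hunk trades the newer convention, in which the dump callbacks are wrapped in a struct netlink_dump_control, for the older netlink_dump_start() call that takes them as separate arguments. A hedged sketch of the newer form on its own, with my_dumpit and my_done standing in for a family's real handlers:

/* Hedged sketch of the netlink_dump_control form removed by this hunk.
 * my_dumpit, my_done and my_start_dump are hypothetical. */
#include <linux/netlink.h>

static int my_dumpit(struct sk_buff *skb, struct netlink_callback *cb);
static int my_done(struct netlink_callback *cb);

static int my_start_dump(struct sock *nlsk, struct sk_buff *skb,
			 struct nlmsghdr *nlh)
{
	struct netlink_dump_control c = {
		.dump = my_dumpit,	/* fills one skb per invocation */
		.done = my_done,	/* optional cleanup when the dump ends */
	};

	return netlink_dump_start(nlsk, skb, nlh, &c);
}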
@@ -585,7 +541,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	}
 
 	info.snd_seq = nlh->nlmsg_seq;
-	info.snd_portid = NETLINK_CB(skb).portid;
+	info.snd_pid = NETLINK_CB(skb).pid;
 	info.nlhdr = nlh;
 	info.genlhdr = nlmsg_data(nlh);
 	info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
@@ -626,21 +582,20 @@ static struct genl_family genl_ctrl = {
 	.netnsok = true,
 };
 
-static int ctrl_fill_info(struct genl_family *family, u32 portid, u32 seq,
+static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq,
 			  u32 flags, struct sk_buff *skb, u8 cmd)
 {
 	void *hdr;
 
-	hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
+	hdr = genlmsg_put(skb, pid, seq, &genl_ctrl, flags, cmd);
 	if (hdr == NULL)
 		return -1;
 
-	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, family->name) ||
-	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, family->id) ||
-	    nla_put_u32(skb, CTRL_ATTR_VERSION, family->version) ||
-	    nla_put_u32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize) ||
-	    nla_put_u32(skb, CTRL_ATTR_MAXATTR, family->maxattr))
-		goto nla_put_failure;
+	NLA_PUT_STRING(skb, CTRL_ATTR_FAMILY_NAME, family->name);
+	NLA_PUT_U16(skb, CTRL_ATTR_FAMILY_ID, family->id);
+	NLA_PUT_U32(skb, CTRL_ATTR_VERSION, family->version);
+	NLA_PUT_U32(skb, CTRL_ATTR_HDRSIZE, family->hdrsize);
+	NLA_PUT_U32(skb, CTRL_ATTR_MAXATTR, family->maxattr);
 
 	if (!list_empty(&family->ops_list)) {
 		struct nlattr *nla_ops;
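The ctrl_fill_info() hunk above converts error-returning nla_put_*() calls, chained with || and checked once, back to the older NLA_PUT_*() macros, which jump to a local nla_put_failure label when the message runs out of room. A hedged, stand-alone contrast of the two styles; MY_ATTR_ID, MY_ATTR_NAME and the fill helpers are hypothetical, and only one style compiles against any given tree:

/* Hedged illustration of the two attribute-filling styles in this hunk. */
#include <net/netlink.h>

enum { MY_ATTR_ID = 1, MY_ATTR_NAME };	/* hypothetical attribute types */

/* Newer style (left column): nla_put_*() returns nonzero on failure. */
static int my_fill_newer(struct sk_buff *skb, u32 id, const char *name)
{
	if (nla_put_u32(skb, MY_ATTR_ID, id) ||
	    nla_put_string(skb, MY_ATTR_NAME, name))
		return -EMSGSIZE;
	return 0;
}

/* Older style (right column): NLA_PUT_*() macros expand to a put plus an
 * implicit "goto nla_put_failure" when there is no room left in skb. */
static int my_fill_older(struct sk_buff *skb, u32 id, const char *name)
{
	NLA_PUT_U32(skb, MY_ATTR_ID, id);
	NLA_PUT_STRING(skb, MY_ATTR_NAME, name);
	return 0;

nla_put_failure:
	return -EMSGSIZE;
}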
@@ -658,9 +613,8 @@ static int ctrl_fill_info(struct genl_family *family, u32 portid, u32 seq,
 			if (nest == NULL)
 				goto nla_put_failure;
 
-			if (nla_put_u32(skb, CTRL_ATTR_OP_ID, ops->cmd) ||
-			    nla_put_u32(skb, CTRL_ATTR_OP_FLAGS, ops->flags))
-				goto nla_put_failure;
+			NLA_PUT_U32(skb, CTRL_ATTR_OP_ID, ops->cmd);
+			NLA_PUT_U32(skb, CTRL_ATTR_OP_FLAGS, ops->flags);
 
 			nla_nest_end(skb, nest);
 		}
@@ -684,10 +638,9 @@ static int ctrl_fill_info(struct genl_family *family, u32 portid, u32 seq,
 			if (nest == NULL)
 				goto nla_put_failure;
 
-			if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id) ||
-			    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
-					   grp->name))
-				goto nla_put_failure;
+			NLA_PUT_U32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id);
+			NLA_PUT_STRING(skb, CTRL_ATTR_MCAST_GRP_NAME,
+				       grp->name);
 
 			nla_nest_end(skb, nest);
 		}
@@ -701,7 +654,7 @@ nla_put_failure:
 	return -EMSGSIZE;
 }
 
-static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 portid,
+static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 pid,
 				u32 seq, u32 flags, struct sk_buff *skb,
 				u8 cmd)
 {
@@ -709,13 +662,12 @@ static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 portid,
 	struct nlattr *nla_grps;
 	struct nlattr *nest;
 
-	hdr = genlmsg_put(skb, portid, seq, &genl_ctrl, flags, cmd);
+	hdr = genlmsg_put(skb, pid, seq, &genl_ctrl, flags, cmd);
 	if (hdr == NULL)
 		return -1;
 
-	if (nla_put_string(skb, CTRL_ATTR_FAMILY_NAME, grp->family->name) ||
-	    nla_put_u16(skb, CTRL_ATTR_FAMILY_ID, grp->family->id))
-		goto nla_put_failure;
+	NLA_PUT_STRING(skb, CTRL_ATTR_FAMILY_NAME, grp->family->name);
+	NLA_PUT_U16(skb, CTRL_ATTR_FAMILY_ID, grp->family->id);
 
 	nla_grps = nla_nest_start(skb, CTRL_ATTR_MCAST_GROUPS);
 	if (nla_grps == NULL)
@@ -725,10 +677,9 @@ static int ctrl_fill_mcgrp_info(struct genl_multicast_group *grp, u32 portid,
 	if (nest == NULL)
 		goto nla_put_failure;
 
-	if (nla_put_u32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id) ||
-	    nla_put_string(skb, CTRL_ATTR_MCAST_GRP_NAME,
-			   grp->name))
-		goto nla_put_failure;
+	NLA_PUT_U32(skb, CTRL_ATTR_MCAST_GRP_ID, grp->id);
+	NLA_PUT_STRING(skb, CTRL_ATTR_MCAST_GRP_NAME,
+		       grp->name);
 
 	nla_nest_end(skb, nest);
 	nla_nest_end(skb, nla_grps);
@@ -756,7 +707,7 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
 			continue;
 		if (++n < fams_to_skip)
 			continue;
-		if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).portid,
+		if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).pid,
 				   cb->nlh->nlmsg_seq, NLM_F_MULTI,
 				   skb, CTRL_CMD_NEWFAMILY) < 0)
 			goto errout;
@@ -773,7 +724,7 @@ errout:
 }
 
 static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
-					     u32 portid, int seq, u8 cmd)
+					     u32 pid, int seq, u8 cmd)
 {
 	struct sk_buff *skb;
 	int err;
@@ -782,7 +733,7 @@ static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
 	if (skb == NULL)
 		return ERR_PTR(-ENOBUFS);
 
-	err = ctrl_fill_info(family, portid, seq, 0, skb, cmd);
+	err = ctrl_fill_info(family, pid, seq, 0, skb, cmd);
 	if (err < 0) {
 		nlmsg_free(skb);
 		return ERR_PTR(err);
@@ -792,7 +743,7 @@ static struct sk_buff *ctrl_build_family_msg(struct genl_family *family,
 }
 
 static struct sk_buff *ctrl_build_mcgrp_msg(struct genl_multicast_group *grp,
-					    u32 portid, int seq, u8 cmd)
+					    u32 pid, int seq, u8 cmd)
 {
 	struct sk_buff *skb;
 	int err;
@@ -801,7 +752,7 @@ static struct sk_buff *ctrl_build_mcgrp_msg(struct genl_multicast_group *grp,
 	if (skb == NULL)
 		return ERR_PTR(-ENOBUFS);
 
-	err = ctrl_fill_mcgrp_info(grp, portid, seq, 0, skb, cmd);
+	err = ctrl_fill_mcgrp_info(grp, pid, seq, 0, skb, cmd);
 	if (err < 0) {
 		nlmsg_free(skb);
 		return ERR_PTR(err);
@@ -833,15 +784,6 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
 
 		name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
 		res = genl_family_find_byname(name);
-#ifdef CONFIG_MODULES
-		if (res == NULL) {
-			genl_unlock();
-			request_module("net-pf-%d-proto-%d-family-%s",
-				       PF_NETLINK, NETLINK_GENERIC, name);
-			genl_lock();
-			res = genl_family_find_byname(name);
-		}
-#endif
 		err = -ENOENT;
 	}
 
@@ -853,7 +795,7 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
 		return -ENOENT;
 	}
 
-	msg = ctrl_build_family_msg(res, info->snd_portid, info->snd_seq,
+	msg = ctrl_build_family_msg(res, info->snd_pid, info->snd_seq,
 				    CTRL_CMD_NEWFAMILY);
 	if (IS_ERR(msg))
 		return PTR_ERR(msg);
@@ -915,14 +857,10 @@ static struct genl_multicast_group notify_grp = {
 
 static int __net_init genl_pernet_init(struct net *net)
 {
-	struct netlink_kernel_cfg cfg = {
-		.input = genl_rcv,
-		.cb_mutex = &genl_mutex,
-		.flags = NL_CFG_F_NONROOT_RECV,
-	};
-
 	/* we'll bump the group number right afterwards */
-	net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, &cfg);
+	net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, 0,
+					       genl_rcv, &genl_mutex,
+					       THIS_MODULE);
 
 	if (!net->genl_sock && net_eq(net, &init_net))
 		panic("GENL: Cannot initialize generic netlink\n");
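Both netlink_kernel_create() conventions appear in this hunk: the newer one bundles the input handler, callback mutex and flags into a struct netlink_kernel_cfg, while the older one passes group count, input handler, mutex and owning module as separate arguments. A hedged side-by-side sketch for a hypothetical NETLINK_MYPROTO socket with input handler my_input(); only one of the two signatures exists in any given tree:

/* Hedged sketch; NETLINK_MYPROTO, my_input() and the wrappers are
 * hypothetical, not part of this patch. */
#include <linux/netlink.h>

static void my_input(struct sk_buff *skb);

static struct sock *my_create_newer(struct net *net)
{
	/* Newer convention (left column): options live in a cfg struct. */
	struct netlink_kernel_cfg cfg = {
		.input = my_input,
	};

	return netlink_kernel_create(net, NETLINK_MYPROTO, &cfg);
}

static struct sock *my_create_older(struct net *net)
{
	/* Older convention (right column): groups, input handler, callback
	 * mutex and owning module are passed positionally. */
	return netlink_kernel_create(net, NETLINK_MYPROTO, 0, my_input,
				     NULL, THIS_MODULE);
}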
@@ -955,6 +893,8 @@ static int __init genl_init(void)
 	if (err < 0)
 		goto problem;
 
+	netlink_set_nonroot(NETLINK_GENERIC, NL_NONROOT_RECV);
+
 	err = register_pernet_subsys(&genl_pernet_ops);
 	if (err)
 		goto problem;
@@ -971,7 +911,7 @@ problem:
 
 subsys_initcall(genl_init);
 
-static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
+static int genlmsg_mcast(struct sk_buff *skb, u32 pid, unsigned long group,
 			 gfp_t flags)
 {
 	struct sk_buff *tmp;
@@ -986,7 +926,7 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
 				goto error;
 			}
 			err = nlmsg_multicast(prev->genl_sock, tmp,
-					      portid, group, flags);
+					      pid, group, flags);
 			if (err)
 				goto error;
 		}
@@ -994,28 +934,15 @@ static int genlmsg_mcast(struct sk_buff *skb, u32 portid, unsigned long group,
 		prev = net;
 	}
 
-	return nlmsg_multicast(prev->genl_sock, skb, portid, group, flags);
+	return nlmsg_multicast(prev->genl_sock, skb, pid, group, flags);
 error:
 	kfree_skb(skb);
 	return err;
 }
 
-int genlmsg_multicast_allns(struct sk_buff *skb, u32 portid, unsigned int group,
+int genlmsg_multicast_allns(struct sk_buff *skb, u32 pid, unsigned int group,
 			    gfp_t flags)
 {
-	return genlmsg_mcast(skb, portid, group, flags);
+	return genlmsg_mcast(skb, pid, group, flags);
 }
 EXPORT_SYMBOL(genlmsg_multicast_allns);
-
-void genl_notify(struct sk_buff *skb, struct net *net, u32 portid, u32 group,
-		 struct nlmsghdr *nlh, gfp_t flags)
-{
-	struct sock *sk = net->genl_sock;
-	int report = 0;
-
-	if (nlh)
-		report = nlmsg_report(nlh);
-
-	nlmsg_notify(sk, skb, portid, group, report, flags);
-}
-EXPORT_SYMBOL(genl_notify);
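The removed genl_notify() is a thin wrapper that derives the report flag from the request via nlmsg_report() and hands the message to nlmsg_notify() on the per-namespace generic netlink socket. A hedged sketch of how a family built on the newer (left-column) API might call it after a state change; my_notify_change() and my_mcgrp_id are hypothetical:

/* Hedged sketch; the function and group id are hypothetical placeholders. */
#include <net/genetlink.h>

static void my_notify_change(struct sk_buff *msg, struct genl_info *info,
			     u32 my_mcgrp_id)
{
	/* Fan the finished message out to the multicast group; genl_notify()
	 * echoes it back to the sender only if NLM_F_ECHO was requested. */
	genl_notify(msg, genl_info_net(info), info->snd_portid,
		    my_mcgrp_id, info->nlhdr, GFP_KERNEL);
}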