author    Linus Torvalds <torvalds@linux-foundation.org>  2008-04-18 21:02:35 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2008-04-18 21:02:35 -0400
commit    334d094504c2fe1c44211ecb49146ae6bca8c321 (patch)
tree      d3c0f68e4b9f8e3d2ccc39e7dfe5de0534a5fad9 /net/netlink/af_netlink.c
parent    d1a4be630fb068f251d64b62919f143c49ca8057 (diff)
parent    d1643d24c61b725bef399cc1cf2944b4c9c23177 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.26
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6.26: (1090 commits)
  [NET]: Fix and allocate less memory for ->priv'less netdevices
  [IPV6]: Fix dangling references on error in fib6_add().
  [NETLABEL]: Fix NULL deref in netlbl_unlabel_staticlist_gen() if ifindex not found
  [PKT_SCHED]: Fix datalen check in tcf_simp_init().
  [INET]: Uninline the __inet_inherit_port call.
  [INET]: Drop the inet_inherit_port() call.
  SCTP: Initialize partial_bytes_acked to 0, when all of the data is acked.
  [netdrvr] forcedeth: internal simplifications; changelog removal
  phylib: factor out get_phy_id from within get_phy_device
  PHY: add BCM5464 support to broadcom PHY driver
  cxgb3: Fix __must_check warning with dev_dbg.
  tc35815: Statistics cleanup
  natsemi: fix MMIO for PPC 44x platforms
  [TIPC]: Cleanup of TIPC reference table code
  [TIPC]: Optimized initialization of TIPC reference table
  [TIPC]: Remove inlining of reference table locking routines
  e1000: convert uint16_t style integers to u16
  ixgb: convert uint16_t style integers to u16
  sb1000.c: make const arrays static
  sb1000.c: stop inlining largish static functions
  ...
Diffstat (limited to 'net/netlink/af_netlink.c')
-rw-r--r--  net/netlink/af_netlink.c  83
1 file changed, 41 insertions(+), 42 deletions(-)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 1ab0da2632e1..36f75d873898 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -228,7 +228,7 @@ static inline struct sock *netlink_lookup(struct net *net, int protocol,
 	read_lock(&nl_table_lock);
 	head = nl_pid_hashfn(hash, pid);
 	sk_for_each(sk, node, head) {
-		if ((sk->sk_net == net) && (nlk_sk(sk)->pid == pid)) {
+		if (net_eq(sock_net(sk), net) && (nlk_sk(sk)->pid == pid)) {
 			sock_hold(sk);
 			goto found;
 		}
@@ -348,7 +348,7 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 pid)
 	head = nl_pid_hashfn(hash, pid);
 	len = 0;
 	sk_for_each(osk, node, head) {
-		if ((osk->sk_net == net) && (nlk_sk(osk)->pid == pid))
+		if (net_eq(sock_net(osk), net) && (nlk_sk(osk)->pid == pid))
 			break;
 		len++;
 	}
@@ -486,7 +486,7 @@ static int netlink_release(struct socket *sock)
 
 	if (nlk->pid && !nlk->subscriptions) {
 		struct netlink_notify n = {
-						.net = sk->sk_net,
+						.net = sock_net(sk),
 						.protocol = sk->sk_protocol,
 						.pid = nlk->pid,
 					  };
@@ -518,7 +518,7 @@ static int netlink_release(struct socket *sock)
 static int netlink_autobind(struct socket *sock)
 {
 	struct sock *sk = sock->sk;
-	struct net *net = sk->sk_net;
+	struct net *net = sock_net(sk);
 	struct nl_pid_hash *hash = &nl_table[sk->sk_protocol].hash;
 	struct hlist_head *head;
 	struct sock *osk;
@@ -532,7 +532,7 @@ retry:
 	netlink_table_grab();
 	head = nl_pid_hashfn(hash, pid);
 	sk_for_each(osk, node, head) {
-		if ((osk->sk_net != net))
+		if (!net_eq(sock_net(osk), net))
 			continue;
 		if (nlk_sk(osk)->pid == pid) {
 			/* Bind collision, search negative pid values. */
@@ -611,7 +611,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 			int addr_len)
 {
 	struct sock *sk = sock->sk;
-	struct net *net = sk->sk_net;
+	struct net *net = sock_net(sk);
 	struct netlink_sock *nlk = nlk_sk(sk);
 	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
 	int err;
@@ -720,7 +720,7 @@ static struct sock *netlink_getsockbypid(struct sock *ssk, u32 pid)
 	struct sock *sock;
 	struct netlink_sock *nlk;
 
-	sock = netlink_lookup(ssk->sk_net, ssk->sk_protocol, pid);
+	sock = netlink_lookup(sock_net(ssk), ssk->sk_protocol, pid);
 	if (!sock)
 		return ERR_PTR(-ECONNREFUSED);
 
@@ -886,6 +886,13 @@ retry:
 	if (netlink_is_kernel(sk))
 		return netlink_unicast_kernel(sk, skb);
 
+	if (sk_filter(sk, skb)) {
+		int err = skb->len;
+		kfree_skb(skb);
+		sock_put(sk);
+		return err;
+	}
+
 	err = netlink_attachskb(sk, skb, nonblock, &timeo, ssk);
 	if (err == 1)
 		goto retry;
@@ -955,7 +962,7 @@ static inline int do_one_broadcast(struct sock *sk,
 	    !test_bit(p->group - 1, nlk->groups))
 		goto out;
 
-	if ((sk->sk_net != p->net))
+	if (!net_eq(sock_net(sk), p->net))
 		goto out;
 
 	if (p->failure) {
@@ -980,6 +987,9 @@ static inline int do_one_broadcast(struct sock *sk,
 		netlink_overrun(sk);
 		/* Clone failed. Notify ALL listeners. */
 		p->failure = 1;
+	} else if (sk_filter(sk, p->skb2)) {
+		kfree_skb(p->skb2);
+		p->skb2 = NULL;
 	} else if ((val = netlink_broadcast_deliver(sk, p->skb2)) < 0) {
 		netlink_overrun(sk);
 	} else {
@@ -996,7 +1006,7 @@ out:
 int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, u32 pid,
 		      u32 group, gfp_t allocation)
 {
-	struct net *net = ssk->sk_net;
+	struct net *net = sock_net(ssk);
 	struct netlink_broadcast_data info;
 	struct hlist_node *node;
 	struct sock *sk;
@@ -1054,7 +1064,7 @@ static inline int do_one_set_err(struct sock *sk,
 	if (sk == p->exclude_sk)
 		goto out;
 
-	if (sk->sk_net != p->exclude_sk->sk_net)
+	if (sock_net(sk) != sock_net(p->exclude_sk))
 		goto out;
 
 	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
@@ -1344,22 +1354,6 @@ static void netlink_data_ready(struct sock *sk, int len)
  *	queueing.
  */
 
-static void __netlink_release(struct sock *sk)
-{
-	/*
-	 * Last sock_put should drop referrence to sk->sk_net. It has already
-	 * been dropped in netlink_kernel_create. Taking referrence to stopping
-	 * namespace is not an option.
-	 * Take referrence to a socket to remove it from netlink lookup table
-	 * _alive_ and after that destroy it in the context of init_net.
-	 */
-
-	sock_hold(sk);
-	sock_release(sk->sk_socket);
-	sk->sk_net = get_net(&init_net);
-	sock_put(sk);
-}
-
 struct sock *
 netlink_kernel_create(struct net *net, int unit, unsigned int groups,
 		      void (*input)(struct sk_buff *skb),
@@ -1388,8 +1382,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
 		goto out_sock_release_nosk;
 
 	sk = sock->sk;
-	put_net(sk->sk_net);
-	sk->sk_net = net;
+	sk_change_net(sk, net);
 
 	if (groups < 32)
 		groups = 32;
@@ -1424,7 +1417,7 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
 
 out_sock_release:
 	kfree(listeners);
-	__netlink_release(sk);
+	netlink_kernel_release(sk);
 	return NULL;
 
 out_sock_release_nosk:
@@ -1437,10 +1430,7 @@ EXPORT_SYMBOL(netlink_kernel_create);
 void
 netlink_kernel_release(struct sock *sk)
 {
-	if (sk == NULL || sk->sk_socket == NULL)
-		return;
-
-	__netlink_release(sk);
+	sk_release_kernel(sk);
 }
 EXPORT_SYMBOL(netlink_kernel_release);
 
@@ -1553,8 +1543,13 @@ static int netlink_dump(struct sock *sk)
 
 	if (len > 0) {
 		mutex_unlock(nlk->cb_mutex);
-		skb_queue_tail(&sk->sk_receive_queue, skb);
-		sk->sk_data_ready(sk, len);
+
+		if (sk_filter(sk, skb))
+			kfree_skb(skb);
+		else {
+			skb_queue_tail(&sk->sk_receive_queue, skb);
+			sk->sk_data_ready(sk, skb->len);
+		}
 		return 0;
 	}
 
@@ -1564,8 +1559,12 @@ static int netlink_dump(struct sock *sk)
 
 	memcpy(nlmsg_data(nlh), &len, sizeof(len));
 
-	skb_queue_tail(&sk->sk_receive_queue, skb);
-	sk->sk_data_ready(sk, skb->len);
+	if (sk_filter(sk, skb))
+		kfree_skb(skb);
+	else {
+		skb_queue_tail(&sk->sk_receive_queue, skb);
+		sk->sk_data_ready(sk, skb->len);
+	}
 
 	if (cb->done)
 		cb->done(cb);
@@ -1602,7 +1601,7 @@ int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
 	atomic_inc(&skb->users);
 	cb->skb = skb;
 
-	sk = netlink_lookup(ssk->sk_net, ssk->sk_protocol, NETLINK_CB(skb).pid);
+	sk = netlink_lookup(sock_net(ssk), ssk->sk_protocol, NETLINK_CB(skb).pid);
 	if (sk == NULL) {
 		netlink_destroy_callback(cb);
 		return -ECONNREFUSED;
@@ -1644,7 +1643,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
 	if (!skb) {
 		struct sock *sk;
 
-		sk = netlink_lookup(in_skb->sk->sk_net,
+		sk = netlink_lookup(sock_net(in_skb->sk),
 				    in_skb->sk->sk_protocol,
 				    NETLINK_CB(in_skb).pid);
 		if (sk) {
@@ -1759,7 +1758,7 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
 
 		for (j = 0; j <= hash->mask; j++) {
 			sk_for_each(s, node, &hash->table[j]) {
-				if (iter->p.net != s->sk_net)
+				if (sock_net(s) != seq_file_net(seq))
 					continue;
 				if (off == pos) {
 					iter->link = i;
@@ -1795,7 +1794,7 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 	s = v;
 	do {
 		s = sk_next(s);
-	} while (s && (iter->p.net != s->sk_net));
+	} while (s && sock_net(s) != seq_file_net(seq));
 	if (s)
 		return s;
 
@@ -1807,7 +1806,7 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 
 	for (; j <= hash->mask; j++) {
 		s = sk_head(&hash->table[j]);
-		while (s && (iter->p.net != s->sk_net))
+		while (s && sock_net(s) != seq_file_net(seq))
 			s = sk_next(s);
 		if (s) {
 			iter->link = i;
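
Most hunks above apply two recurring patterns: namespace checks on netlink sockets go through the sock_net() accessor and the net_eq() helper instead of reading sk->sk_net directly, and every path that queues an skb onto a socket first runs it through sk_filter() so an attached socket filter can drop it before delivery. The following is a minimal illustrative sketch of those two patterns only, not code from this commit: sock_net(), net_eq(), sk_filter(), skb_queue_tail() and sk_data_ready() are the real kernel interfaces used in the diff, while the helper names nl_socket_in_net() and nl_queue_filtered() are hypothetical and exist only for this sketch.

/*
 * Sketch only (kernel context assumed); helper names are hypothetical
 * and do not appear in net/netlink/af_netlink.c.
 */
#include <net/sock.h>
#include <net/net_namespace.h>
#include <linux/skbuff.h>

/* Test whether a socket belongs to a given namespace without
 * dereferencing sk->sk_net directly (the conversion the diff makes). */
static inline int nl_socket_in_net(const struct sock *sk, const struct net *net)
{
	return net_eq(sock_net(sk), net);
}

/* Queue an skb only if the socket's attached filter accepts it;
 * sk_filter() returns non-zero when the skb should be dropped. */
static int nl_queue_filtered(struct sock *sk, struct sk_buff *skb)
{
	if (sk_filter(sk, skb)) {
		kfree_skb(skb);
		return -EPERM;
	}
	skb_queue_tail(&sk->sk_receive_queue, skb);
	sk->sk_data_ready(sk, skb->len);
	return 0;
}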