author		Thomas Gleixner <tglx@linutronix.de>	2010-02-21 14:17:22 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2010-02-21 14:17:22 -0500
commit		5f854cfc024622e4aae14d7cf422f6ff86278688 (patch)
tree		426e77c6f6e4939c80440bf1fabcb020e3ee145b /net/netlink
parent		cc24da0742870f152ddf1002aa39dfcd83f7cf9c (diff)
parent		4ec62b2b2e6bd7ddef7b6cea6e5db7b5578a6532 (diff)
Forward to 2.6.33-rc8
Merge branch 'linus' into rt/head with a pile of conflicts.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'net/netlink')
-rw-r--r--	net/netlink/af_netlink.c	166
-rw-r--r--	net/netlink/genetlink.c		225
2 files changed, 283 insertions(+), 108 deletions(-)
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index f4e94fb17477..95826e01d293 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -83,6 +83,11 @@ struct netlink_sock {
 	struct module		*module;
 };
 
+struct listeners_rcu_head {
+	struct rcu_head rcu_head;
+	void *ptr;
+};
+
 #define NETLINK_KERNEL_SOCKET	0x1
 #define NETLINK_RECV_PKTINFO	0x2
 #define NETLINK_BROADCAST_SEND_ERROR	0x4
@@ -172,9 +177,11 @@ static void netlink_sock_destruct(struct sock *sk)
  * this, _but_ remember, it adds useless work on UP machines.
  */
 
-static void netlink_table_grab(void)
+void netlink_table_grab(void)
 	__acquires(nl_table_lock)
 {
+	might_sleep();
+
 	write_lock_irq(&nl_table_lock);
 
 	if (atomic_read(&nl_table_users)) {
@@ -195,7 +202,7 @@ static void netlink_table_grab(void)
 	}
 }
 
-static void netlink_table_ungrab(void)
+void netlink_table_ungrab(void)
 	__releases(nl_table_lock)
 {
 	write_unlock_irq(&nl_table_lock);
@@ -421,7 +428,8 @@ static int __netlink_create(struct net *net, struct socket *sock,
 	return 0;
 }
 
-static int netlink_create(struct net *net, struct socket *sock, int protocol)
+static int netlink_create(struct net *net, struct socket *sock, int protocol,
+			  int kern)
 {
 	struct module *module = NULL;
 	struct mutex *cb_mutex;
@@ -447,9 +455,14 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol)
 	if (nl_table[protocol].registered &&
 	    try_module_get(nl_table[protocol].module))
 		module = nl_table[protocol].module;
+	else
+		err = -EPROTONOSUPPORT;
 	cb_mutex = nl_table[protocol].cb_mutex;
 	netlink_unlock_table();
 
+	if (err < 0)
+		goto out;
+
 	err = __netlink_create(net, sock, cb_mutex, protocol);
 	if (err < 0)
 		goto out_module;
@@ -490,7 +503,7 @@ static int netlink_release(struct socket *sock)
 
 	skb_queue_purge(&sk->sk_write_queue);
 
-	if (nlk->pid && !nlk->subscriptions) {
+	if (nlk->pid) {
 		struct netlink_notify n = {
 					.net = sock_net(sk),
 					.protocol = sk->sk_protocol,
@@ -700,7 +713,7 @@ static int netlink_getname(struct socket *sock, struct sockaddr *addr,
 {
 	struct sock *sk = sock->sk;
 	struct netlink_sock *nlk = nlk_sk(sk);
-	struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
+	DECLARE_SOCKADDR(struct sockaddr_nl *, nladdr, addr);
 
 	nladdr->nl_family = AF_NETLINK;
 	nladdr->nl_pad = 0;
@@ -1084,7 +1097,7 @@ static inline int do_one_set_err(struct sock *sk,
 	if (sk == p->exclude_sk)
 		goto out;
 
-	if (sock_net(sk) != sock_net(p->exclude_sk))
+	if (!net_eq(sock_net(sk), sock_net(p->exclude_sk)))
 		goto out;
 
 	if (nlk->pid == p->pid || p->group - 1 >= nlk->ngroups ||
@@ -1143,7 +1156,7 @@ static void netlink_update_socket_mc(struct netlink_sock *nlk,
 }
 
 static int netlink_setsockopt(struct socket *sock, int level, int optname,
-			      char __user *optval, int optlen)
+			      char __user *optval, unsigned int optlen)
 {
 	struct sock *sk = sock->sk;
 	struct netlink_sock *nlk = nlk_sk(sk);
@@ -1356,7 +1369,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
 	struct netlink_sock *nlk = nlk_sk(sk);
 	int noblock = flags&MSG_DONTWAIT;
 	size_t copied;
-	struct sk_buff *skb;
+	struct sk_buff *skb, *frag __maybe_unused = NULL;
 	int err;
 
 	if (flags&MSG_OOB)
@@ -1368,6 +1381,35 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
 	if (skb == NULL)
 		goto out;
 
+#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
+	if (unlikely(skb_shinfo(skb)->frag_list)) {
+		bool need_compat = !!(flags & MSG_CMSG_COMPAT);
+
+		/*
+		 * If this skb has a frag_list, then here that means that
+		 * we will have to use the frag_list skb for compat tasks
+		 * and the regular skb for non-compat tasks.
+		 *
+		 * The skb might (and likely will) be cloned, so we can't
+		 * just reset frag_list and go on with things -- we need to
+		 * keep that. For the compat case that's easy -- simply get
+		 * a reference to the compat skb and free the regular one
+		 * including the frag. For the non-compat case, we need to
+		 * avoid sending the frag to the user -- so assign NULL but
+		 * restore it below before freeing the skb.
+		 */
+		if (need_compat) {
+			struct sk_buff *compskb = skb_shinfo(skb)->frag_list;
+			skb_get(compskb);
+			kfree_skb(skb);
+			skb = compskb;
+		} else {
+			frag = skb_shinfo(skb)->frag_list;
+			skb_shinfo(skb)->frag_list = NULL;
+		}
+	}
+#endif
+
 	msg->msg_namelen = 0;
 
 	copied = skb->len;
@@ -1398,6 +1440,11 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
 		siocb->scm->creds = *NETLINK_CREDS(skb);
 	if (flags & MSG_TRUNC)
 		copied = skb->len;
+
+#ifdef CONFIG_COMPAT_NETLINK_MESSAGES
+	skb_shinfo(skb)->frag_list = frag;
+#endif
+
 	skb_free_datagram(sk, skb);
 
 	if (nlk->cb && atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2)
@@ -1453,7 +1500,8 @@ netlink_kernel_create(struct net *net, int unit, unsigned int groups,
 	if (groups < 32)
 		groups = 32;
 
-	listeners = kzalloc(NLGRPSZ(groups), GFP_KERNEL);
+	listeners = kzalloc(NLGRPSZ(groups) + sizeof(struct listeners_rcu_head),
+			    GFP_KERNEL);
 	if (!listeners)
 		goto out_sock_release;
 
@@ -1501,6 +1549,49 @@ netlink_kernel_release(struct sock *sk)
 EXPORT_SYMBOL(netlink_kernel_release);
 
 
+static void netlink_free_old_listeners(struct rcu_head *rcu_head)
+{
+	struct listeners_rcu_head *lrh;
+
+	lrh = container_of(rcu_head, struct listeners_rcu_head, rcu_head);
+	kfree(lrh->ptr);
+}
+
+int __netlink_change_ngroups(struct sock *sk, unsigned int groups)
+{
+	unsigned long *listeners, *old = NULL;
+	struct listeners_rcu_head *old_rcu_head;
+	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
+
+	if (groups < 32)
+		groups = 32;
+
+	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
+		listeners = kzalloc(NLGRPSZ(groups) +
+				    sizeof(struct listeners_rcu_head),
+				    GFP_ATOMIC);
+		if (!listeners)
+			return -ENOMEM;
+		old = tbl->listeners;
+		memcpy(listeners, old, NLGRPSZ(tbl->groups));
+		rcu_assign_pointer(tbl->listeners, listeners);
+		/*
+		 * Free the old memory after an RCU grace period so we
+		 * don't leak it. We use call_rcu() here in order to be
+		 * able to call this function from atomic contexts. The
+		 * allocation of this memory will have reserved enough
+		 * space for struct listeners_rcu_head at the end.
+		 */
+		old_rcu_head = (void *)(tbl->listeners +
+					NLGRPLONGS(tbl->groups));
+		old_rcu_head->ptr = old;
+		call_rcu(&old_rcu_head->rcu_head, netlink_free_old_listeners);
+	}
+	tbl->groups = groups;
+
+	return 0;
+}
+
 /**
  * netlink_change_ngroups - change number of multicast groups
  *
@@ -1515,33 +1606,24 @@ EXPORT_SYMBOL(netlink_kernel_release);
  */
 int netlink_change_ngroups(struct sock *sk, unsigned int groups)
 {
-	unsigned long *listeners, *old = NULL;
-	struct netlink_table *tbl = &nl_table[sk->sk_protocol];
-	int err = 0;
-
-	if (groups < 32)
-		groups = 32;
+	int err;
 
 	netlink_table_grab();
-	if (NLGRPSZ(tbl->groups) < NLGRPSZ(groups)) {
-		listeners = kzalloc(NLGRPSZ(groups), GFP_ATOMIC);
-		if (!listeners) {
-			err = -ENOMEM;
-			goto out_ungrab;
-		}
-		old = tbl->listeners;
-		memcpy(listeners, old, NLGRPSZ(tbl->groups));
-		rcu_assign_pointer(tbl->listeners, listeners);
-	}
-	tbl->groups = groups;
-
- out_ungrab:
+	err = __netlink_change_ngroups(sk, groups);
 	netlink_table_ungrab();
-	synchronize_rcu();
-	kfree(old);
+
 	return err;
 }
-EXPORT_SYMBOL(netlink_change_ngroups);
+
+void __netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
+{
+	struct sock *sk;
+	struct hlist_node *node;
+	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
+
+	sk_for_each_bound(sk, node, &tbl->mc_list)
+		netlink_update_socket_mc(nlk_sk(sk), group, 0);
+}
 
 /**
  * netlink_clear_multicast_users - kick off multicast listeners
@@ -1553,18 +1635,10 @@ EXPORT_SYMBOL(netlink_change_ngroups);
  */
 void netlink_clear_multicast_users(struct sock *ksk, unsigned int group)
 {
-	struct sock *sk;
-	struct hlist_node *node;
-	struct netlink_table *tbl = &nl_table[ksk->sk_protocol];
-
 	netlink_table_grab();
-
-	sk_for_each_bound(sk, node, &tbl->mc_list)
-		netlink_update_socket_mc(nlk_sk(sk), group, 0);
-
+	__netlink_clear_multicast_users(ksk, group);
 	netlink_table_ungrab();
 }
-EXPORT_SYMBOL(netlink_clear_multicast_users);
 
 void netlink_set_nonroot(int protocol, unsigned int flags)
 {
@@ -1647,7 +1721,7 @@ errout:
 }
 
 int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
-		       struct nlmsghdr *nlh,
+		       const struct nlmsghdr *nlh,
 		       int (*dump)(struct sk_buff *skb,
 				   struct netlink_callback *),
 		       int (*done)(struct netlink_callback *))
@@ -1720,7 +1794,7 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
 	}
 
 	rep = __nlmsg_put(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
-			  NLMSG_ERROR, sizeof(struct nlmsgerr), 0);
+			  NLMSG_ERROR, payload, 0);
 	errmsg = nlmsg_data(rep);
 	errmsg->error = err;
 	memcpy(&errmsg->msg, nlh, err ? nlh->nlmsg_len : sizeof(*nlh));
@@ -1982,7 +2056,7 @@ static const struct proto_ops netlink_ops = {
 	.sendpage =	sock_no_sendpage,
 };
 
-static struct net_proto_family netlink_family_ops = {
+static const struct net_proto_family netlink_family_ops = {
 	.family = PF_NETLINK,
 	.create = netlink_create,
 	.owner	= THIS_MODULE,	/* for consistency 8) */
@@ -2026,10 +2100,10 @@ static int __init netlink_proto_init(void)
 	if (!nl_table)
 		goto panic;
 
-	if (num_physpages >= (128 * 1024))
-		limit = num_physpages >> (21 - PAGE_SHIFT);
+	if (totalram_pages >= (128 * 1024))
+		limit = totalram_pages >> (21 - PAGE_SHIFT);
 	else
-		limit = num_physpages >> (23 - PAGE_SHIFT);
+		limit = totalram_pages >> (23 - PAGE_SHIFT);
 
 	order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
 	limit = (1UL << order) / sizeof(struct hlist_head);
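
Side note (not part of the patch): the comment in __netlink_change_ngroups() above explains why the listeners bitmap is now freed with call_rcu() instead of synchronize_rcu()/kfree(): the function may be called from atomic context, e.g. by genetlink under netlink_table_grab(). The snippet below is a minimal, illustrative sketch of that trailing-rcu_head idiom in general; the names bitmap_rcu_head, bitmap_free_old and grow_bitmap are hypothetical and do not appear in the patch, and the rcu_head here is placed in the spare tail of the old allocation rather than mirroring the patch's exact pointer arithmetic.

#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/string.h>

/* Reserve an rcu_head behind the bitmap so the old buffer can be freed
 * after a grace period without having to sleep. */
struct bitmap_rcu_head {
	struct rcu_head rcu_head;
	void *ptr;
};

static void bitmap_free_old(struct rcu_head *rcu_head)
{
	struct bitmap_rcu_head *h =
		container_of(rcu_head, struct bitmap_rcu_head, rcu_head);

	kfree(h->ptr);		/* runs after all RCU readers are done */
}

/* Grow *slot from old_sz to new_sz bytes; callable from atomic context. */
static int grow_bitmap(unsigned long **slot, size_t old_sz, size_t new_sz)
{
	unsigned long *new, *old = *slot;
	struct bitmap_rcu_head *h;

	new = kzalloc(new_sz + sizeof(struct bitmap_rcu_head), GFP_ATOMIC);
	if (!new)
		return -ENOMEM;

	memcpy(new, old, old_sz);
	rcu_assign_pointer(*slot, new);	/* publish the larger bitmap */

	/* the rcu_head lives in the spare tail of the old allocation */
	h = (struct bitmap_rcu_head *)((char *)old + old_sz);
	h->ptr = old;
	call_rcu(&h->rcu_head, bitmap_free_old);
	return 0;
}

This is why the patch allocates NLGRPSZ(groups) + sizeof(struct listeners_rcu_head) in both netlink_kernel_create() and __netlink_change_ngroups(), and why netlink_change_ngroups() no longer needs synchronize_rcu() and kfree().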
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index eed4c6a8afc0..d07ecda0a92d 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -18,8 +18,6 @@
 #include <net/sock.h>
 #include <net/genetlink.h>
 
-struct sock *genl_sock = NULL;
-
 static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
 
 static inline void genl_lock(void)
@@ -99,25 +97,17 @@ static struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family)
  */
 static inline u16 genl_generate_id(void)
 {
-	static u16 id_gen_idx;
-	int overflowed = 0;
+	static u16 id_gen_idx = GENL_MIN_ID;
+	int i;
 
-	do {
-		if (id_gen_idx == 0)
+	for (i = 0; i <= GENL_MAX_ID - GENL_MIN_ID; i++) {
+		if (!genl_family_find_byid(id_gen_idx))
+			return id_gen_idx;
+		if (++id_gen_idx > GENL_MAX_ID)
 			id_gen_idx = GENL_MIN_ID;
+	}
 
-		if (++id_gen_idx > GENL_MAX_ID) {
-			if (!overflowed) {
-				overflowed = 1;
-				id_gen_idx = 0;
-				continue;
-			} else
-				return 0;
-		}
-
-	} while (genl_family_find_byid(id_gen_idx));
-
-	return id_gen_idx;
+	return 0;
 }
 
 static struct genl_multicast_group notify_grp;
@@ -138,7 +128,7 @@ int genl_register_mc_group(struct genl_family *family,
 {
 	int id;
 	unsigned long *new_groups;
-	int err;
+	int err = 0;
 
 	BUG_ON(grp->name[0] == '\0');
 
@@ -175,10 +165,34 @@ int genl_register_mc_group(struct genl_family *family,
 		mc_groups_longs++;
 	}
 
-	err = netlink_change_ngroups(genl_sock,
-				     mc_groups_longs * BITS_PER_LONG);
-	if (err)
-		goto out;
+	if (family->netnsok) {
+		struct net *net;
+
+		netlink_table_grab();
+		rcu_read_lock();
+		for_each_net_rcu(net) {
+			err = __netlink_change_ngroups(net->genl_sock,
+					mc_groups_longs * BITS_PER_LONG);
+			if (err) {
+				/*
+				 * No need to roll back, can only fail if
+				 * memory allocation fails and then the
+				 * number of _possible_ groups has been
+				 * increased on some sockets which is ok.
+				 */
+				rcu_read_unlock();
+				netlink_table_ungrab();
+				goto out;
+			}
+		}
+		rcu_read_unlock();
+		netlink_table_ungrab();
+	} else {
+		err = netlink_change_ngroups(init_net.genl_sock,
+					     mc_groups_longs * BITS_PER_LONG);
+		if (err)
+			goto out;
+	}
 
 	grp->id = id;
 	set_bit(id, mc_groups);
@@ -195,8 +209,16 @@ EXPORT_SYMBOL(genl_register_mc_group);
 static void __genl_unregister_mc_group(struct genl_family *family,
 				       struct genl_multicast_group *grp)
 {
+	struct net *net;
 	BUG_ON(grp->family != family);
-	netlink_clear_multicast_users(genl_sock, grp->id);
+
+	netlink_table_grab();
+	rcu_read_lock();
+	for_each_net_rcu(net)
+		__netlink_clear_multicast_users(net->genl_sock, grp->id);
+	rcu_read_unlock();
+	netlink_table_ungrab();
+
 	clear_bit(grp->id, mc_groups);
 	list_del(&grp->list);
 	genl_ctrl_event(CTRL_CMD_DELMCAST_GRP, grp);
@@ -344,11 +366,6 @@ int genl_register_family(struct genl_family *family)
 		goto errout_locked;
 	}
 
-	if (genl_family_find_byid(family->id)) {
-		err = -EEXIST;
-		goto errout_locked;
-	}
-
 	if (family->id == GENL_ID_GENERATE) {
 		u16 newid = genl_generate_id();
 
@@ -358,6 +375,9 @@ int genl_register_family(struct genl_family *family)
 		}
 
 		family->id = newid;
+	} else if (genl_family_find_byid(family->id)) {
+		err = -EEXIST;
+		goto errout_locked;
 	}
 
 	if (family->maxattr) {
@@ -467,6 +487,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 {
 	struct genl_ops *ops;
 	struct genl_family *family;
+	struct net *net = sock_net(skb->sk);
 	struct genl_info info;
 	struct genlmsghdr *hdr = nlmsg_data(nlh);
 	int hdrlen, err;
@@ -475,6 +496,10 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	if (family == NULL)
 		return -ENOENT;
 
+	/* this family doesn't exist in this netns */
+	if (!family->netnsok && !net_eq(net, &init_net))
+		return -ENOENT;
+
 	hdrlen = GENL_HDRLEN + family->hdrsize;
 	if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
 		return -EINVAL;
@@ -492,7 +517,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 			return -EOPNOTSUPP;
 
 		genl_unlock();
-		err = netlink_dump_start(genl_sock, skb, nlh,
+		err = netlink_dump_start(net->genl_sock, skb, nlh,
 					 ops->dumpit, ops->done);
 		genl_lock();
 		return err;
@@ -514,6 +539,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	info.genlhdr = nlmsg_data(nlh);
 	info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
 	info.attrs = family->attrbuf;
+	genl_info_net_set(&info, net);
 
 	return ops->doit(skb, &info);
 }
@@ -534,6 +560,7 @@ static struct genl_family genl_ctrl = {
 	.name = "nlctrl",
 	.version = 0x2,
 	.maxattr = CTRL_ATTR_MAX,
+	.netnsok = true,
 };
 
 static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq,
@@ -650,6 +677,7 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
 
 	int i, n = 0;
 	struct genl_family *rt;
+	struct net *net = sock_net(skb->sk);
 	int chains_to_skip = cb->args[0];
 	int fams_to_skip = cb->args[1];
 
@@ -658,6 +686,8 @@ static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
 			continue;
 		n = 0;
 		list_for_each_entry(rt, genl_family_chain(i), family_list) {
+			if (!rt->netnsok && !net_eq(net, &init_net))
+				continue;
 			if (++n < fams_to_skip)
 				continue;
 			if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).pid,
@@ -729,6 +759,7 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
 	if (info->attrs[CTRL_ATTR_FAMILY_ID]) {
 		u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]);
 		res = genl_family_find_byid(id);
+		err = -ENOENT;
 	}
 
 	if (info->attrs[CTRL_ATTR_FAMILY_NAME]) {
@@ -736,49 +767,61 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
 
 		name = nla_data(info->attrs[CTRL_ATTR_FAMILY_NAME]);
 		res = genl_family_find_byname(name);
+		err = -ENOENT;
 	}
 
-	if (res == NULL) {
-		err = -ENOENT;
-		goto errout;
+	if (res == NULL)
+		return err;
+
+	if (!res->netnsok && !net_eq(genl_info_net(info), &init_net)) {
+		/* family doesn't exist here */
+		return -ENOENT;
 	}
 
 	msg = ctrl_build_family_msg(res, info->snd_pid, info->snd_seq,
 				    CTRL_CMD_NEWFAMILY);
-	if (IS_ERR(msg)) {
-		err = PTR_ERR(msg);
-		goto errout;
-	}
+	if (IS_ERR(msg))
+		return PTR_ERR(msg);
 
-	err = genlmsg_reply(msg, info);
-errout:
-	return err;
+	return genlmsg_reply(msg, info);
 }
 
 static int genl_ctrl_event(int event, void *data)
 {
 	struct sk_buff *msg;
+	struct genl_family *family;
+	struct genl_multicast_group *grp;
 
-	if (genl_sock == NULL)
+	/* genl is still initialising */
+	if (!init_net.genl_sock)
 		return 0;
 
 	switch (event) {
 	case CTRL_CMD_NEWFAMILY:
 	case CTRL_CMD_DELFAMILY:
-		msg = ctrl_build_family_msg(data, 0, 0, event);
-		if (IS_ERR(msg))
-			return PTR_ERR(msg);
-
-		genlmsg_multicast(msg, 0, GENL_ID_CTRL, GFP_KERNEL);
+		family = data;
+		msg = ctrl_build_family_msg(family, 0, 0, event);
 		break;
 	case CTRL_CMD_NEWMCAST_GRP:
 	case CTRL_CMD_DELMCAST_GRP:
+		grp = data;
+		family = grp->family;
 		msg = ctrl_build_mcgrp_msg(data, 0, 0, event);
-		if (IS_ERR(msg))
-			return PTR_ERR(msg);
-
-		genlmsg_multicast(msg, 0, GENL_ID_CTRL, GFP_KERNEL);
 		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (IS_ERR(msg))
+		return PTR_ERR(msg);
+
+	if (!family->netnsok) {
+		genlmsg_multicast_netns(&init_net, msg, 0,
+					GENL_ID_CTRL, GFP_KERNEL);
+	} else {
+		rcu_read_lock();
+		genlmsg_multicast_allns(msg, 0, GENL_ID_CTRL, GFP_ATOMIC);
+		rcu_read_unlock();
 	}
 
 	return 0;
@@ -795,6 +838,33 @@ static struct genl_multicast_group notify_grp = {
 	.name = "notify",
 };
 
+static int __net_init genl_pernet_init(struct net *net)
+{
+	/* we'll bump the group number right afterwards */
+	net->genl_sock = netlink_kernel_create(net, NETLINK_GENERIC, 0,
+					       genl_rcv, &genl_mutex,
+					       THIS_MODULE);
+
+	if (!net->genl_sock && net_eq(net, &init_net))
+		panic("GENL: Cannot initialize generic netlink\n");
+
+	if (!net->genl_sock)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void __net_exit genl_pernet_exit(struct net *net)
+{
+	netlink_kernel_release(net->genl_sock);
+	net->genl_sock = NULL;
+}
+
+static struct pernet_operations genl_pernet_ops = {
+	.init = genl_pernet_init,
+	.exit = genl_pernet_exit,
+};
+
 static int __init genl_init(void)
 {
 	int i, err;
@@ -804,36 +874,67 @@ static int __init genl_init(void)
 
 	err = genl_register_family(&genl_ctrl);
 	if (err < 0)
-		goto errout;
+		goto problem;
 
 	err = genl_register_ops(&genl_ctrl, &genl_ctrl_ops);
 	if (err < 0)
-		goto errout_register;
+		goto problem;
 
 	netlink_set_nonroot(NETLINK_GENERIC, NL_NONROOT_RECV);
 
-	/* we'll bump the group number right afterwards */
-	genl_sock = netlink_kernel_create(&init_net, NETLINK_GENERIC, 0,
-					  genl_rcv, &genl_mutex, THIS_MODULE);
-	if (genl_sock == NULL)
-		panic("GENL: Cannot initialize generic netlink\n");
+	err = register_pernet_subsys(&genl_pernet_ops);
+	if (err)
+		goto problem;
 
 	err = genl_register_mc_group(&genl_ctrl, &notify_grp);
 	if (err < 0)
-		goto errout_register;
+		goto problem;
 
 	return 0;
 
-errout_register:
-	genl_unregister_family(&genl_ctrl);
-errout:
+problem:
 	panic("GENL: Cannot register controller: %d\n", err);
 }
 
 subsys_initcall(genl_init);
 
-EXPORT_SYMBOL(genl_sock);
 EXPORT_SYMBOL(genl_register_ops);
 EXPORT_SYMBOL(genl_unregister_ops);
 EXPORT_SYMBOL(genl_register_family);
 EXPORT_SYMBOL(genl_unregister_family);
+
+static int genlmsg_mcast(struct sk_buff *skb, u32 pid, unsigned long group,
+			 gfp_t flags)
+{
+	struct sk_buff *tmp;
+	struct net *net, *prev = NULL;
+	int err;
+
+	for_each_net_rcu(net) {
+		if (prev) {
+			tmp = skb_clone(skb, flags);
+			if (!tmp) {
+				err = -ENOMEM;
+				goto error;
+			}
+			err = nlmsg_multicast(prev->genl_sock, tmp,
+					      pid, group, flags);
+			if (err)
+				goto error;
+		}
+
+		prev = net;
+	}
+
+	return nlmsg_multicast(prev->genl_sock, skb, pid, group, flags);
+error:
+	kfree_skb(skb);
+	return err;
+}
+
+int genlmsg_multicast_allns(struct sk_buff *skb, u32 pid, unsigned int group,
+			    gfp_t flags)
+{
+	return genlmsg_mcast(skb, pid, group, flags);
+}
+EXPORT_SYMBOL(genlmsg_multicast_allns);
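
Side note (not part of the patch): a generic netlink family opts into the per-namespace behaviour introduced above by setting .netnsok and resolving the sender's namespace with genl_info_net(); families that leave netnsok unset stay visible only in init_net, as genl_rcv_msg() and ctrl_dumpfamily() now enforce. The sketch below is illustrative only; the family name "example", the command number and the callback names are hypothetical.

#include <net/genetlink.h>

static int example_doit(struct sk_buff *skb, struct genl_info *info)
{
	struct net *net = genl_info_net(info);	/* namespace of the sender */

	/* ... look up per-namespace state hanging off 'net' and reply ... */
	(void)net;
	return 0;
}

static struct genl_ops example_ops = {
	.cmd  = 1,			/* hypothetical command number */
	.doit = example_doit,
};

static struct genl_family example_family = {
	.id	  = GENL_ID_GENERATE,
	.name	  = "example",
	.version  = 1,
	.hdrsize  = 0,
	.maxattr  = 0,
	.netnsok  = true,	/* participate in all network namespaces */
};

static int __init example_init(void)
{
	int err;

	err = genl_register_family(&example_family);
	if (err)
		return err;

	err = genl_register_ops(&example_family, &example_ops);
	if (err)
		genl_unregister_family(&example_family);
	return err;
}

Registration itself is unchanged by this series; only the netnsok flag and the genl_info_net() lookup are new.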