diff options
author | Ying Xue <ying.xue@windriver.com> | 2015-01-09 02:27:05 -0500 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2015-01-12 16:24:32 -0500 |
commit | f2f9800d4955a96d92896841d8ba9b04201deaa1 (patch) | |
tree | 3b817800cfd8fcb2de6d5a3d7eb4fff972fba681 /net/tipc/node.c | |
parent | c93d3baa24095887005647984cff5de8c63d3611 (diff) |
tipc: make tipc node table aware of net namespace
Global variables associated with node table are below:
- node hash table (node_htable)
- node table list (tipc_node_list)
- node table lock (node_list_lock)
- node number counter (tipc_num_nodes)
- node link number counter (tipc_num_links)
To make the node table support namespaces, the above global variables
must be moved into the tipc_net structure so that each namespace keeps
its own private copies. As a consequence, these variables are allocated
and initialized when a namespace is created, and deallocated when the
namespace is destroyed. After this change, functions associated with
these variables have to use a namespace pointer to access them, so
adding a namespace pointer as a parameter to these functions is the
major change made in this commit.
Signed-off-by: Ying Xue <ying.xue@windriver.com>
Tested-by: Tero Aho <Tero.Aho@coriant.com>
Reviewed-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/tipc/node.c')
-rw-r--r-- | net/tipc/node.c | 130 |
1 files changed, 69 insertions, 61 deletions
diff --git a/net/tipc/node.c b/net/tipc/node.c index 8d353ec77a66..a0ca1ac53119 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c | |||
@@ -40,17 +40,9 @@ | |||
40 | #include "name_distr.h" | 40 | #include "name_distr.h" |
41 | #include "socket.h" | 41 | #include "socket.h" |
42 | 42 | ||
43 | #define NODE_HTABLE_SIZE 512 | ||
44 | |||
45 | static void node_lost_contact(struct tipc_node *n_ptr); | 43 | static void node_lost_contact(struct tipc_node *n_ptr); |
46 | static void node_established_contact(struct tipc_node *n_ptr); | 44 | static void node_established_contact(struct tipc_node *n_ptr); |
47 | 45 | ||
48 | static struct hlist_head node_htable[NODE_HTABLE_SIZE]; | ||
49 | LIST_HEAD(tipc_node_list); | ||
50 | static u32 tipc_num_nodes; | ||
51 | static u32 tipc_num_links; | ||
52 | static DEFINE_SPINLOCK(node_list_lock); | ||
53 | |||
54 | struct tipc_sock_conn { | 46 | struct tipc_sock_conn { |
55 | u32 port; | 47 | u32 port; |
56 | u32 peer_port; | 48 | u32 peer_port; |
@@ -78,15 +70,17 @@ static unsigned int tipc_hashfn(u32 addr) | |||
78 | /* | 70 | /* |
79 | * tipc_node_find - locate specified node object, if it exists | 71 | * tipc_node_find - locate specified node object, if it exists |
80 | */ | 72 | */ |
81 | struct tipc_node *tipc_node_find(u32 addr) | 73 | struct tipc_node *tipc_node_find(struct net *net, u32 addr) |
82 | { | 74 | { |
75 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
83 | struct tipc_node *node; | 76 | struct tipc_node *node; |
84 | 77 | ||
85 | if (unlikely(!in_own_cluster_exact(addr))) | 78 | if (unlikely(!in_own_cluster_exact(addr))) |
86 | return NULL; | 79 | return NULL; |
87 | 80 | ||
88 | rcu_read_lock(); | 81 | rcu_read_lock(); |
89 | hlist_for_each_entry_rcu(node, &node_htable[tipc_hashfn(addr)], hash) { | 82 | hlist_for_each_entry_rcu(node, &tn->node_htable[tipc_hashfn(addr)], |
83 | hash) { | ||
90 | if (node->addr == addr) { | 84 | if (node->addr == addr) { |
91 | rcu_read_unlock(); | 85 | rcu_read_unlock(); |
92 | return node; | 86 | return node; |
@@ -96,20 +90,22 @@ struct tipc_node *tipc_node_find(u32 addr) | |||
96 | return NULL; | 90 | return NULL; |
97 | } | 91 | } |
98 | 92 | ||
99 | struct tipc_node *tipc_node_create(u32 addr) | 93 | struct tipc_node *tipc_node_create(struct net *net, u32 addr) |
100 | { | 94 | { |
95 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
101 | struct tipc_node *n_ptr, *temp_node; | 96 | struct tipc_node *n_ptr, *temp_node; |
102 | 97 | ||
103 | spin_lock_bh(&node_list_lock); | 98 | spin_lock_bh(&tn->node_list_lock); |
104 | 99 | ||
105 | n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC); | 100 | n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC); |
106 | if (!n_ptr) { | 101 | if (!n_ptr) { |
107 | spin_unlock_bh(&node_list_lock); | 102 | spin_unlock_bh(&tn->node_list_lock); |
108 | pr_warn("Node creation failed, no memory\n"); | 103 | pr_warn("Node creation failed, no memory\n"); |
109 | return NULL; | 104 | return NULL; |
110 | } | 105 | } |
111 | 106 | ||
112 | n_ptr->addr = addr; | 107 | n_ptr->addr = addr; |
108 | n_ptr->net = net; | ||
113 | spin_lock_init(&n_ptr->lock); | 109 | spin_lock_init(&n_ptr->lock); |
114 | INIT_HLIST_NODE(&n_ptr->hash); | 110 | INIT_HLIST_NODE(&n_ptr->hash); |
115 | INIT_LIST_HEAD(&n_ptr->list); | 111 | INIT_LIST_HEAD(&n_ptr->list); |
@@ -118,9 +114,9 @@ struct tipc_node *tipc_node_create(u32 addr) | |||
118 | skb_queue_head_init(&n_ptr->waiting_sks); | 114 | skb_queue_head_init(&n_ptr->waiting_sks); |
119 | __skb_queue_head_init(&n_ptr->bclink.deferred_queue); | 115 | __skb_queue_head_init(&n_ptr->bclink.deferred_queue); |
120 | 116 | ||
121 | hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]); | 117 | hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]); |
122 | 118 | ||
123 | list_for_each_entry_rcu(temp_node, &tipc_node_list, list) { | 119 | list_for_each_entry_rcu(temp_node, &tn->node_list, list) { |
124 | if (n_ptr->addr < temp_node->addr) | 120 | if (n_ptr->addr < temp_node->addr) |
125 | break; | 121 | break; |
126 | } | 122 | } |
@@ -128,32 +124,33 @@ struct tipc_node *tipc_node_create(u32 addr) | |||
128 | n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN; | 124 | n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN; |
129 | n_ptr->signature = INVALID_NODE_SIG; | 125 | n_ptr->signature = INVALID_NODE_SIG; |
130 | 126 | ||
131 | tipc_num_nodes++; | 127 | tn->num_nodes++; |
132 | 128 | ||
133 | spin_unlock_bh(&node_list_lock); | 129 | spin_unlock_bh(&tn->node_list_lock); |
134 | return n_ptr; | 130 | return n_ptr; |
135 | } | 131 | } |
136 | 132 | ||
137 | static void tipc_node_delete(struct tipc_node *n_ptr) | 133 | static void tipc_node_delete(struct tipc_net *tn, struct tipc_node *n_ptr) |
138 | { | 134 | { |
139 | list_del_rcu(&n_ptr->list); | 135 | list_del_rcu(&n_ptr->list); |
140 | hlist_del_rcu(&n_ptr->hash); | 136 | hlist_del_rcu(&n_ptr->hash); |
141 | kfree_rcu(n_ptr, rcu); | 137 | kfree_rcu(n_ptr, rcu); |
142 | 138 | ||
143 | tipc_num_nodes--; | 139 | tn->num_nodes--; |
144 | } | 140 | } |
145 | 141 | ||
146 | void tipc_node_stop(void) | 142 | void tipc_node_stop(struct net *net) |
147 | { | 143 | { |
144 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
148 | struct tipc_node *node, *t_node; | 145 | struct tipc_node *node, *t_node; |
149 | 146 | ||
150 | spin_lock_bh(&node_list_lock); | 147 | spin_lock_bh(&tn->node_list_lock); |
151 | list_for_each_entry_safe(node, t_node, &tipc_node_list, list) | 148 | list_for_each_entry_safe(node, t_node, &tn->node_list, list) |
152 | tipc_node_delete(node); | 149 | tipc_node_delete(tn, node); |
153 | spin_unlock_bh(&node_list_lock); | 150 | spin_unlock_bh(&tn->node_list_lock); |
154 | } | 151 | } |
155 | 152 | ||
156 | int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port) | 153 | int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port) |
157 | { | 154 | { |
158 | struct tipc_node *node; | 155 | struct tipc_node *node; |
159 | struct tipc_sock_conn *conn; | 156 | struct tipc_sock_conn *conn; |
@@ -161,7 +158,7 @@ int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port) | |||
161 | if (in_own_node(dnode)) | 158 | if (in_own_node(dnode)) |
162 | return 0; | 159 | return 0; |
163 | 160 | ||
164 | node = tipc_node_find(dnode); | 161 | node = tipc_node_find(net, dnode); |
165 | if (!node) { | 162 | if (!node) { |
166 | pr_warn("Connecting sock to node 0x%x failed\n", dnode); | 163 | pr_warn("Connecting sock to node 0x%x failed\n", dnode); |
167 | return -EHOSTUNREACH; | 164 | return -EHOSTUNREACH; |
@@ -179,7 +176,7 @@ int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port) | |||
179 | return 0; | 176 | return 0; |
180 | } | 177 | } |
181 | 178 | ||
182 | void tipc_node_remove_conn(u32 dnode, u32 port) | 179 | void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port) |
183 | { | 180 | { |
184 | struct tipc_node *node; | 181 | struct tipc_node *node; |
185 | struct tipc_sock_conn *conn, *safe; | 182 | struct tipc_sock_conn *conn, *safe; |
@@ -187,7 +184,7 @@ void tipc_node_remove_conn(u32 dnode, u32 port) | |||
187 | if (in_own_node(dnode)) | 184 | if (in_own_node(dnode)) |
188 | return; | 185 | return; |
189 | 186 | ||
190 | node = tipc_node_find(dnode); | 187 | node = tipc_node_find(net, dnode); |
191 | if (!node) | 188 | if (!node) |
192 | return; | 189 | return; |
193 | 190 | ||
@@ -201,7 +198,7 @@ void tipc_node_remove_conn(u32 dnode, u32 port) | |||
201 | tipc_node_unlock(node); | 198 | tipc_node_unlock(node); |
202 | } | 199 | } |
203 | 200 | ||
204 | void tipc_node_abort_sock_conns(struct list_head *conns) | 201 | void tipc_node_abort_sock_conns(struct net *net, struct list_head *conns) |
205 | { | 202 | { |
206 | struct tipc_sock_conn *conn, *safe; | 203 | struct tipc_sock_conn *conn, *safe; |
207 | struct sk_buff *buf; | 204 | struct sk_buff *buf; |
@@ -212,7 +209,7 @@ void tipc_node_abort_sock_conns(struct list_head *conns) | |||
212 | conn->peer_node, conn->port, | 209 | conn->peer_node, conn->port, |
213 | conn->peer_port, TIPC_ERR_NO_NODE); | 210 | conn->peer_port, TIPC_ERR_NO_NODE); |
214 | if (likely(buf)) | 211 | if (likely(buf)) |
215 | tipc_sk_rcv(buf); | 212 | tipc_sk_rcv(net, buf); |
216 | list_del(&conn->list); | 213 | list_del(&conn->list); |
217 | kfree(conn); | 214 | kfree(conn); |
218 | } | 215 | } |
@@ -342,24 +339,27 @@ int tipc_node_is_up(struct tipc_node *n_ptr) | |||
342 | 339 | ||
343 | void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr) | 340 | void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr) |
344 | { | 341 | { |
342 | struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id); | ||
343 | |||
345 | n_ptr->links[l_ptr->bearer_id] = l_ptr; | 344 | n_ptr->links[l_ptr->bearer_id] = l_ptr; |
346 | spin_lock_bh(&node_list_lock); | 345 | spin_lock_bh(&tn->node_list_lock); |
347 | tipc_num_links++; | 346 | tn->num_links++; |
348 | spin_unlock_bh(&node_list_lock); | 347 | spin_unlock_bh(&tn->node_list_lock); |
349 | n_ptr->link_cnt++; | 348 | n_ptr->link_cnt++; |
350 | } | 349 | } |
351 | 350 | ||
352 | void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr) | 351 | void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr) |
353 | { | 352 | { |
353 | struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id); | ||
354 | int i; | 354 | int i; |
355 | 355 | ||
356 | for (i = 0; i < MAX_BEARERS; i++) { | 356 | for (i = 0; i < MAX_BEARERS; i++) { |
357 | if (l_ptr != n_ptr->links[i]) | 357 | if (l_ptr != n_ptr->links[i]) |
358 | continue; | 358 | continue; |
359 | n_ptr->links[i] = NULL; | 359 | n_ptr->links[i] = NULL; |
360 | spin_lock_bh(&node_list_lock); | 360 | spin_lock_bh(&tn->node_list_lock); |
361 | tipc_num_links--; | 361 | tn->num_links--; |
362 | spin_unlock_bh(&node_list_lock); | 362 | spin_unlock_bh(&tn->node_list_lock); |
363 | n_ptr->link_cnt--; | 363 | n_ptr->link_cnt--; |
364 | } | 364 | } |
365 | } | 365 | } |
@@ -414,8 +414,10 @@ static void node_lost_contact(struct tipc_node *n_ptr) | |||
414 | TIPC_NOTIFY_NODE_DOWN; | 414 | TIPC_NOTIFY_NODE_DOWN; |
415 | } | 415 | } |
416 | 416 | ||
417 | struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space) | 417 | struct sk_buff *tipc_node_get_nodes(struct net *net, const void *req_tlv_area, |
418 | int req_tlv_space) | ||
418 | { | 419 | { |
420 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
419 | u32 domain; | 421 | u32 domain; |
420 | struct sk_buff *buf; | 422 | struct sk_buff *buf; |
421 | struct tipc_node *n_ptr; | 423 | struct tipc_node *n_ptr; |
@@ -430,20 +432,20 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space) | |||
430 | return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE | 432 | return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE |
431 | " (network address)"); | 433 | " (network address)"); |
432 | 434 | ||
433 | spin_lock_bh(&node_list_lock); | 435 | spin_lock_bh(&tn->node_list_lock); |
434 | if (!tipc_num_nodes) { | 436 | if (!tn->num_nodes) { |
435 | spin_unlock_bh(&node_list_lock); | 437 | spin_unlock_bh(&tn->node_list_lock); |
436 | return tipc_cfg_reply_none(); | 438 | return tipc_cfg_reply_none(); |
437 | } | 439 | } |
438 | 440 | ||
439 | /* For now, get space for all other nodes */ | 441 | /* For now, get space for all other nodes */ |
440 | payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes; | 442 | payload_size = TLV_SPACE(sizeof(node_info)) * tn->num_nodes; |
441 | if (payload_size > 32768u) { | 443 | if (payload_size > 32768u) { |
442 | spin_unlock_bh(&node_list_lock); | 444 | spin_unlock_bh(&tn->node_list_lock); |
443 | return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED | 445 | return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED |
444 | " (too many nodes)"); | 446 | " (too many nodes)"); |
445 | } | 447 | } |
446 | spin_unlock_bh(&node_list_lock); | 448 | spin_unlock_bh(&tn->node_list_lock); |
447 | 449 | ||
448 | buf = tipc_cfg_reply_alloc(payload_size); | 450 | buf = tipc_cfg_reply_alloc(payload_size); |
449 | if (!buf) | 451 | if (!buf) |
@@ -451,7 +453,7 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space) | |||
451 | 453 | ||
452 | /* Add TLVs for all nodes in scope */ | 454 | /* Add TLVs for all nodes in scope */ |
453 | rcu_read_lock(); | 455 | rcu_read_lock(); |
454 | list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { | 456 | list_for_each_entry_rcu(n_ptr, &tn->node_list, list) { |
455 | if (!tipc_in_scope(domain, n_ptr->addr)) | 457 | if (!tipc_in_scope(domain, n_ptr->addr)) |
456 | continue; | 458 | continue; |
457 | node_info.addr = htonl(n_ptr->addr); | 459 | node_info.addr = htonl(n_ptr->addr); |
@@ -463,8 +465,10 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space) | |||
463 | return buf; | 465 | return buf; |
464 | } | 466 | } |
465 | 467 | ||
466 | struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space) | 468 | struct sk_buff *tipc_node_get_links(struct net *net, const void *req_tlv_area, |
469 | int req_tlv_space) | ||
467 | { | 470 | { |
471 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
468 | u32 domain; | 472 | u32 domain; |
469 | struct sk_buff *buf; | 473 | struct sk_buff *buf; |
470 | struct tipc_node *n_ptr; | 474 | struct tipc_node *n_ptr; |
@@ -482,15 +486,15 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space) | |||
482 | if (!tipc_own_addr) | 486 | if (!tipc_own_addr) |
483 | return tipc_cfg_reply_none(); | 487 | return tipc_cfg_reply_none(); |
484 | 488 | ||
485 | spin_lock_bh(&node_list_lock); | 489 | spin_lock_bh(&tn->node_list_lock); |
486 | /* Get space for all unicast links + broadcast link */ | 490 | /* Get space for all unicast links + broadcast link */ |
487 | payload_size = TLV_SPACE((sizeof(link_info)) * (tipc_num_links + 1)); | 491 | payload_size = TLV_SPACE((sizeof(link_info)) * (tn->num_links + 1)); |
488 | if (payload_size > 32768u) { | 492 | if (payload_size > 32768u) { |
489 | spin_unlock_bh(&node_list_lock); | 493 | spin_unlock_bh(&tn->node_list_lock); |
490 | return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED | 494 | return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED |
491 | " (too many links)"); | 495 | " (too many links)"); |
492 | } | 496 | } |
493 | spin_unlock_bh(&node_list_lock); | 497 | spin_unlock_bh(&tn->node_list_lock); |
494 | 498 | ||
495 | buf = tipc_cfg_reply_alloc(payload_size); | 499 | buf = tipc_cfg_reply_alloc(payload_size); |
496 | if (!buf) | 500 | if (!buf) |
@@ -504,7 +508,7 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space) | |||
504 | 508 | ||
505 | /* Add TLVs for any other links in scope */ | 509 | /* Add TLVs for any other links in scope */ |
506 | rcu_read_lock(); | 510 | rcu_read_lock(); |
507 | list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { | 511 | list_for_each_entry_rcu(n_ptr, &tn->node_list, list) { |
508 | u32 i; | 512 | u32 i; |
509 | 513 | ||
510 | if (!tipc_in_scope(domain, n_ptr->addr)) | 514 | if (!tipc_in_scope(domain, n_ptr->addr)) |
@@ -534,10 +538,11 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space) | |||
534 | * | 538 | * |
535 | * Returns 0 on success | 539 | * Returns 0 on success |
536 | */ | 540 | */ |
537 | int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len) | 541 | int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr, |
542 | char *linkname, size_t len) | ||
538 | { | 543 | { |
539 | struct tipc_link *link; | 544 | struct tipc_link *link; |
540 | struct tipc_node *node = tipc_node_find(addr); | 545 | struct tipc_node *node = tipc_node_find(net, addr); |
541 | 546 | ||
542 | if ((bearer_id >= MAX_BEARERS) || !node) | 547 | if ((bearer_id >= MAX_BEARERS) || !node) |
543 | return -EINVAL; | 548 | return -EINVAL; |
@@ -554,6 +559,7 @@ int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len) | |||
554 | 559 | ||
555 | void tipc_node_unlock(struct tipc_node *node) | 560 | void tipc_node_unlock(struct tipc_node *node) |
556 | { | 561 | { |
562 | struct net *net = node->net; | ||
557 | LIST_HEAD(nsub_list); | 563 | LIST_HEAD(nsub_list); |
558 | LIST_HEAD(conn_sks); | 564 | LIST_HEAD(conn_sks); |
559 | struct sk_buff_head waiting_sks; | 565 | struct sk_buff_head waiting_sks; |
@@ -585,26 +591,26 @@ void tipc_node_unlock(struct tipc_node *node) | |||
585 | spin_unlock_bh(&node->lock); | 591 | spin_unlock_bh(&node->lock); |
586 | 592 | ||
587 | while (!skb_queue_empty(&waiting_sks)) | 593 | while (!skb_queue_empty(&waiting_sks)) |
588 | tipc_sk_rcv(__skb_dequeue(&waiting_sks)); | 594 | tipc_sk_rcv(net, __skb_dequeue(&waiting_sks)); |
589 | 595 | ||
590 | if (!list_empty(&conn_sks)) | 596 | if (!list_empty(&conn_sks)) |
591 | tipc_node_abort_sock_conns(&conn_sks); | 597 | tipc_node_abort_sock_conns(net, &conn_sks); |
592 | 598 | ||
593 | if (!list_empty(&nsub_list)) | 599 | if (!list_empty(&nsub_list)) |
594 | tipc_publ_notify(&nsub_list, addr); | 600 | tipc_publ_notify(net, &nsub_list, addr); |
595 | 601 | ||
596 | if (flags & TIPC_WAKEUP_BCAST_USERS) | 602 | if (flags & TIPC_WAKEUP_BCAST_USERS) |
597 | tipc_bclink_wakeup_users(); | 603 | tipc_bclink_wakeup_users(net); |
598 | 604 | ||
599 | if (flags & TIPC_NOTIFY_NODE_UP) | 605 | if (flags & TIPC_NOTIFY_NODE_UP) |
600 | tipc_named_node_up(addr); | 606 | tipc_named_node_up(net, addr); |
601 | 607 | ||
602 | if (flags & TIPC_NOTIFY_LINK_UP) | 608 | if (flags & TIPC_NOTIFY_LINK_UP) |
603 | tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr, | 609 | tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr, |
604 | TIPC_NODE_SCOPE, link_id, addr); | 610 | TIPC_NODE_SCOPE, link_id, addr); |
605 | 611 | ||
606 | if (flags & TIPC_NOTIFY_LINK_DOWN) | 612 | if (flags & TIPC_NOTIFY_LINK_DOWN) |
607 | tipc_nametbl_withdraw(TIPC_LINK_STATE, addr, | 613 | tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr, |
608 | link_id, addr); | 614 | link_id, addr); |
609 | } | 615 | } |
610 | 616 | ||
@@ -645,6 +651,8 @@ msg_full: | |||
645 | int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb) | 651 | int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb) |
646 | { | 652 | { |
647 | int err; | 653 | int err; |
654 | struct net *net = sock_net(skb->sk); | ||
655 | struct tipc_net *tn = net_generic(net, tipc_net_id); | ||
648 | int done = cb->args[0]; | 656 | int done = cb->args[0]; |
649 | int last_addr = cb->args[1]; | 657 | int last_addr = cb->args[1]; |
650 | struct tipc_node *node; | 658 | struct tipc_node *node; |
@@ -659,7 +667,7 @@ int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
659 | 667 | ||
660 | rcu_read_lock(); | 668 | rcu_read_lock(); |
661 | 669 | ||
662 | if (last_addr && !tipc_node_find(last_addr)) { | 670 | if (last_addr && !tipc_node_find(net, last_addr)) { |
663 | rcu_read_unlock(); | 671 | rcu_read_unlock(); |
664 | /* We never set seq or call nl_dump_check_consistent() this | 672 | /* We never set seq or call nl_dump_check_consistent() this |
665 | * means that setting prev_seq here will cause the consistence | 673 | * means that setting prev_seq here will cause the consistence |
@@ -671,7 +679,7 @@ int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
671 | return -EPIPE; | 679 | return -EPIPE; |
672 | } | 680 | } |
673 | 681 | ||
674 | list_for_each_entry_rcu(node, &tipc_node_list, list) { | 682 | list_for_each_entry_rcu(node, &tn->node_list, list) { |
675 | if (last_addr) { | 683 | if (last_addr) { |
676 | if (node->addr == last_addr) | 684 | if (node->addr == last_addr) |
677 | last_addr = 0; | 685 | last_addr = 0; |