Diffstat (limited to 'net/tipc/node.c')
-rw-r--r-- | net/tipc/node.c | 676
1 files changed, 676 insertions, 0 deletions
diff --git a/net/tipc/node.c b/net/tipc/node.c
new file mode 100644
index 000000000000..e311638b9b3d
--- /dev/null
+++ b/net/tipc/node.c
@@ -0,0 +1,676 @@
/*
 * net/tipc/node.c: TIPC node management routines
 *
 * Copyright (c) 2003-2005, Ericsson Research Canada
 * Copyright (c) 2005, Wind River Systems
 * Copyright (c) 2005-2006, Ericsson AB
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * Redistributions of source code must retain the above copyright notice, this
 * list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 * Neither the names of the copyright holders nor the names of its
 * contributors may be used to endorse or promote products derived from this
 * software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include "core.h"
#include "config.h"
#include "node.h"
#include "cluster.h"
#include "net.h"
#include "addr.h"
#include "node_subscr.h"
#include "link.h"
#include "port.h"
#include "bearer.h"
#include "name_distr.h"
#include "net.h"

void node_print(struct print_buf *buf, struct node *n_ptr, char *str);
static void node_lost_contact(struct node *n_ptr);
static void node_established_contact(struct node *n_ptr);

struct node *nodes = NULL;	/* sorted list of nodes within cluster */

u32 tipc_own_tag = 0;

struct node *node_create(u32 addr)
{
	struct cluster *c_ptr;
	struct node *n_ptr;
	struct node **curr_node;

	n_ptr = kmalloc(sizeof(*n_ptr), GFP_ATOMIC);
	if (n_ptr != NULL) {
		memset(n_ptr, 0, sizeof(*n_ptr));
		n_ptr->addr = addr;
		n_ptr->lock = SPIN_LOCK_UNLOCKED;
		INIT_LIST_HEAD(&n_ptr->nsub);

		c_ptr = cluster_find(addr);
		if (c_ptr == NULL)
			c_ptr = cluster_create(addr);
		if (c_ptr != NULL) {
			n_ptr->owner = c_ptr;
			cluster_attach_node(c_ptr, n_ptr);
			n_ptr->last_router = -1;

			/* Insert node into ordered list */
			for (curr_node = &nodes; *curr_node;
			     curr_node = &(*curr_node)->next) {
				if (addr < (*curr_node)->addr) {
					n_ptr->next = *curr_node;
					break;
				}
			}
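			/* curr_node now addresses either the pointer that
			   precedes the first node with a higher address
			   (n_ptr->next already set above) or the list's
			   terminating NULL slot; hook the new node in here */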
			(*curr_node) = n_ptr;
		} else {
			kfree(n_ptr);
			n_ptr = NULL;
		}
	}
	return n_ptr;
}

void node_delete(struct node *n_ptr)
{
	if (!n_ptr)
		return;

#if 0
	/* Not needed because links are already deleted via bearer_stop() */

	u32 l_num;

	for (l_num = 0; l_num < MAX_BEARERS; l_num++) {
		link_delete(n_ptr->links[l_num]);
	}
#endif

	dbg("node %x deleted\n", n_ptr->addr);
	kfree(n_ptr);
}


/**
 * node_link_up - handle addition of link
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 */

void node_link_up(struct node *n_ptr, struct link *l_ptr)
{
	struct link **active = &n_ptr->active_links[0];
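	/* The two active_links[] slots allow traffic to be shared between
	 * two links of equal priority; senders presumably pick one slot or
	 * the other from a selector bit elsewhere.  With a single active
	 * link, both slots reference it.
	 */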

	info("Established link <%s> on network plane %c\n",
	     l_ptr->name, l_ptr->b_ptr->net_plane);

	if (!active[0]) {
		dbg(" link %x into %x/%x\n", l_ptr, &active[0], &active[1]);
		active[0] = active[1] = l_ptr;
		node_established_contact(n_ptr);
		return;
	}
	if (l_ptr->priority < active[0]->priority) {
		info("Link is standby\n");
		return;
	}
	link_send_duplicate(active[0], l_ptr);
	if (l_ptr->priority == active[0]->priority) {
		active[0] = l_ptr;
		return;
	}
	info("Link <%s> on network plane %c becomes standby\n",
	     active[0]->name, active[0]->b_ptr->net_plane);
	active[0] = active[1] = l_ptr;
}

/**
 * node_select_active_links - select active link
 */

static void node_select_active_links(struct node *n_ptr)
{
	struct link **active = &n_ptr->active_links[0];
	u32 i;
	u32 highest_prio = 0;

	active[0] = active[1] = 0;

	for (i = 0; i < MAX_BEARERS; i++) {
		struct link *l_ptr = n_ptr->links[i];

		if (!l_ptr || !link_is_up(l_ptr) ||
		    (l_ptr->priority < highest_prio))
			continue;

		if (l_ptr->priority > highest_prio) {
			highest_prio = l_ptr->priority;
			active[0] = active[1] = l_ptr;
		} else {
			active[1] = l_ptr;
		}
	}
}

/**
 * node_link_down - handle loss of link
 */

void node_link_down(struct node *n_ptr, struct link *l_ptr)
{
	struct link **active;

	if (!link_is_active(l_ptr)) {
		info("Lost standby link <%s> on network plane %c\n",
		     l_ptr->name, l_ptr->b_ptr->net_plane);
		return;
	}
	info("Lost link <%s> on network plane %c\n",
	     l_ptr->name, l_ptr->b_ptr->net_plane);

	active = &n_ptr->active_links[0];
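	/* Drop the failed link from both active slots; if it occupied both
	 * (i.e. it was the only active link), rescan all links for a new
	 * active pair
	 */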
	if (active[0] == l_ptr)
		active[0] = active[1];
	if (active[1] == l_ptr)
		active[1] = active[0];
	if (active[0] == l_ptr)
		node_select_active_links(n_ptr);
	if (node_is_up(n_ptr))
		link_changeover(l_ptr);
	else
		node_lost_contact(n_ptr);
}

int node_has_active_links(struct node *n_ptr)
{
	return (n_ptr &&
		((n_ptr->active_links[0]) || (n_ptr->active_links[1])));
}

int node_has_redundant_links(struct node *n_ptr)
{
	return (node_has_active_links(n_ptr) &&
		(n_ptr->active_links[0] != n_ptr->active_links[1]));
}

int node_has_active_routes(struct node *n_ptr)
{
	return (n_ptr && (n_ptr->last_router >= 0));
}

int node_is_up(struct node *n_ptr)
{
	return (node_has_active_links(n_ptr) || node_has_active_routes(n_ptr));
}

struct node *node_attach_link(struct link *l_ptr)
{
	struct node *n_ptr = node_find(l_ptr->addr);

	if (!n_ptr)
		n_ptr = node_create(l_ptr->addr);
	if (n_ptr) {
		u32 bearer_id = l_ptr->b_ptr->identity;
		char addr_string[16];

		assert(bearer_id < MAX_BEARERS);
		if (n_ptr->link_cnt >= 2) {
			char addr_string[16];

			err("Attempt to create third link to %s\n",
			    addr_string_fill(addr_string, n_ptr->addr));
			return 0;
		}

		if (!n_ptr->links[bearer_id]) {
			n_ptr->links[bearer_id] = l_ptr;
			net.zones[tipc_zone(l_ptr->addr)]->links++;
			n_ptr->link_cnt++;
			return n_ptr;
		}
		err("Attempt to establish second link on <%s> to <%s>\n",
		    l_ptr->b_ptr->publ.name,
		    addr_string_fill(addr_string, l_ptr->addr));
	}
	return 0;
}

void node_detach_link(struct node *n_ptr, struct link *l_ptr)
{
	n_ptr->links[l_ptr->b_ptr->identity] = 0;
	net.zones[tipc_zone(l_ptr->addr)]->links--;
	n_ptr->link_cnt--;
}

/*
 * Routing table management - five cases to handle:
 *
 * 1: A link towards a zone/cluster external node comes up.
 *    => Send a multicast message updating routing tables of all
 *       system nodes within own cluster that the new destination
 *       can be reached via this node.
 *       (node.establishedContact()=>cluster.multicastNewRoute())
 *
 * 2: A link towards a slave node comes up.
 *    => Send a multicast message updating routing tables of all
 *       system nodes within own cluster that the new destination
 *       can be reached via this node.
 *       (node.establishedContact()=>cluster.multicastNewRoute())
 *    => Send a message to the slave node about existence
 *       of all system nodes within cluster:
 *       (node.establishedContact()=>cluster.sendLocalRoutes())
 *
 * 3: A new cluster local system node becomes available.
 *    => Send message(s) to this particular node containing
 *       information about all cluster external and slave
 *       nodes which can be reached via this node.
 *       (node.establishedContact()==>network.sendExternalRoutes())
 *       (node.establishedContact()==>network.sendSlaveRoutes())
 *    => Send messages to all directly connected slave nodes
 *       containing information about the existence of the new node
 *       (node.establishedContact()=>cluster.multicastNewRoute())
 *
 * 4: The link towards a zone/cluster external node or slave
 *    node goes down.
 *    => Send a multicast message updating routing tables of all
 *       nodes within cluster that the destination can no longer
 *       be reached via this node.
 *       (node.lostAllLinks()=>cluster.bcastLostRoute())
 *
 * 5: A cluster local system node becomes unavailable.
 *    => Remove all references to this node from the local
 *       routing tables. Note: This is a completely node
 *       local operation.
 *       (node.lostAllLinks()=>network.removeAsRouter())
 *    => Send messages to all directly connected slave nodes
 *       containing information about loss of the node
 *       (node.lostAllLinks()=>cluster.multicastLostRoute())
 *
 */
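
/*
 * Example (assuming the usual <zone.cluster.node> address notation): if this
 * node is <1.1.1> and a link to <1.2.3> comes up, that destination is cluster
 * external, so case 1 applies and every system node in cluster <1.1> is told
 * that <1.2.3> can now be reached via <1.1.1>.
 */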

static void node_established_contact(struct node *n_ptr)
{
	struct cluster *c_ptr;

	dbg("node_established_contact:-> %x\n", n_ptr->addr);
	if (!node_has_active_routes(n_ptr)) {
		k_signal((Handler)named_node_up, n_ptr->addr);
	}

	/* Synchronize broadcast acks */
	n_ptr->bclink.acked = bclink_get_last_sent();

	if (is_slave(tipc_own_addr))
		return;
	if (!in_own_cluster(n_ptr->addr)) {
		/* Usage case 1 (see above) */
		c_ptr = cluster_find(tipc_own_addr);
		if (!c_ptr)
			c_ptr = cluster_create(tipc_own_addr);
		if (c_ptr)
			cluster_bcast_new_route(c_ptr, n_ptr->addr, 1,
						tipc_max_nodes);
		return;
	}

	c_ptr = n_ptr->owner;
	if (is_slave(n_ptr->addr)) {
		/* Usage case 2 (see above) */
		cluster_bcast_new_route(c_ptr, n_ptr->addr, 1, tipc_max_nodes);
		cluster_send_local_routes(c_ptr, n_ptr->addr);
		return;
	}

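	/* Track how many broadcast-capable peers have a lower address than
	 * our own; tipc_own_tag is presumably used elsewhere to stagger
	 * broadcast acknowledgements between nodes
	 */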
	if (n_ptr->bclink.supported) {
		nmap_add(&cluster_bcast_nodes, n_ptr->addr);
		if (n_ptr->addr < tipc_own_addr)
			tipc_own_tag++;
	}

	/* Case 3 (see above) */
	net_send_external_routes(n_ptr->addr);
	cluster_send_slave_routes(c_ptr, n_ptr->addr);
	cluster_bcast_new_route(c_ptr, n_ptr->addr, LOWEST_SLAVE,
				highest_allowed_slave);
}

static void node_lost_contact(struct node *n_ptr)
{
	struct cluster *c_ptr;
	struct node_subscr *ns, *tns;
	char addr_string[16];
	u32 i;

	/* Clean up broadcast reception remains */
	n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
	while (n_ptr->bclink.deferred_head) {
		struct sk_buff *buf = n_ptr->bclink.deferred_head;
		n_ptr->bclink.deferred_head = buf->next;
		buf_discard(buf);
	}
	if (n_ptr->bclink.defragm) {
		buf_discard(n_ptr->bclink.defragm);
		n_ptr->bclink.defragm = NULL;
	}
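	/* Acknowledge well past the last packet this node is known to have
	 * seen, presumably so any broadcast buffers still held on its behalf
	 * are released
	 */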
	if (in_own_cluster(n_ptr->addr) && n_ptr->bclink.supported) {
		bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000));
	}

	/* Update routing tables */
	if (is_slave(tipc_own_addr)) {
		net_remove_as_router(n_ptr->addr);
	} else {
		if (!in_own_cluster(n_ptr->addr)) {
			/* Case 4 (see above) */
			c_ptr = cluster_find(tipc_own_addr);
			cluster_bcast_lost_route(c_ptr, n_ptr->addr, 1,
						 tipc_max_nodes);
		} else {
			/* Case 5 (see above) */
			c_ptr = cluster_find(n_ptr->addr);
			if (is_slave(n_ptr->addr)) {
				cluster_bcast_lost_route(c_ptr, n_ptr->addr, 1,
							 tipc_max_nodes);
			} else {
				if (n_ptr->bclink.supported) {
					nmap_remove(&cluster_bcast_nodes,
						    n_ptr->addr);
					if (n_ptr->addr < tipc_own_addr)
						tipc_own_tag--;
				}
				net_remove_as_router(n_ptr->addr);
				cluster_bcast_lost_route(c_ptr, n_ptr->addr,
							 LOWEST_SLAVE,
							 highest_allowed_slave);
			}
		}
	}
	if (node_has_active_routes(n_ptr))
		return;

	info("Lost contact with %s\n",
	     addr_string_fill(addr_string, n_ptr->addr));

	/* Abort link changeover */
	for (i = 0; i < MAX_BEARERS; i++) {
		struct link *l_ptr = n_ptr->links[i];
		if (!l_ptr)
			continue;
		l_ptr->reset_checkpoint = l_ptr->next_in_no;
		l_ptr->exp_msg_count = 0;
		link_reset_fragments(l_ptr);
	}

	/* Notify subscribers */
	list_for_each_entry_safe(ns, tns, &n_ptr->nsub, nodesub_list) {
		ns->node = 0;
		list_del_init(&ns->nodesub_list);
		k_signal((Handler)ns->handle_node_down,
			 (unsigned long)ns->usr_handle);
	}
}

/**
 * node_select_next_hop - find the next-hop node for a message
 *
 * Called when cluster local lookup has failed.
 */

struct node *node_select_next_hop(u32 addr, u32 selector)
{
	struct node *n_ptr;
	u32 router_addr;

	if (!addr_domain_valid(addr))
		return 0;

	/* Look for direct link to destination processor */
	n_ptr = node_find(addr);
	if (n_ptr && node_has_active_links(n_ptr))
		return n_ptr;

	/* Cluster local system nodes *must* have direct links */
	if (!is_slave(addr) && in_own_cluster(addr))
		return 0;

	/* Look for cluster local router with direct link to node */
	router_addr = node_select_router(n_ptr, selector);
	if (router_addr)
		return node_select(router_addr, selector);

	/* Slave nodes can only be accessed within own cluster via a
	   known router with direct link -- if no router was found, give up */
	if (is_slave(addr))
		return 0;

	/* Inter zone/cluster -- find any direct link to remote cluster */
	addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
	n_ptr = net_select_remote_node(addr, selector);
	if (n_ptr && node_has_active_links(n_ptr))
		return n_ptr;

	/* Last resort -- look for any router to anywhere in remote zone */
	router_addr = net_select_router(addr, selector);
	if (router_addr)
		return node_select(router_addr, selector);

	return 0;
}

/**
 * node_select_router - select router to reach specified node
 *
 * Uses a deterministic and fair algorithm for selecting router node.
 */

u32 node_select_router(struct node *n_ptr, u32 ref)
{
	u32 ulim;
	u32 mask;
	u32 start;
	u32 r;

	if (!n_ptr)
		return 0;

	if (n_ptr->last_router < 0)
		return 0;
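
	/* routers[] is a bitmap holding 32 potential router node numbers per
	 * word; last_router indexes the highest non-zero word, so ulim is the
	 * highest node number worth scanning
	 */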
	ulim = ((n_ptr->last_router + 1) * 32) - 1;

	/* Start entry must be random */
	mask = tipc_max_nodes;
	while (mask > ulim)
		mask >>= 1;
	start = ref & mask;
	r = start;

	/* Lookup upwards with wrap-around */
	do {
		if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1)
			break;
	} while (++r <= ulim);
	if (r > ulim) {
		r = 1;
		do {
			if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1)
				break;
		} while (++r < start);
		assert(r != start);
	}
	assert(r && (r <= ulim));
	return tipc_addr(own_zone(), own_cluster(), r);
}

void node_add_router(struct node *n_ptr, u32 router)
{
	u32 r_num = tipc_node(router);

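	/* Set the bit for this router, then recompute last_router as the
	 * index of the highest-order non-zero word in the bitmap
	 */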
	n_ptr->routers[r_num / 32] =
		((1 << (r_num % 32)) | n_ptr->routers[r_num / 32]);
	n_ptr->last_router = tipc_max_nodes / 32;
	while ((--n_ptr->last_router >= 0) &&
	       !n_ptr->routers[n_ptr->last_router]);
}

void node_remove_router(struct node *n_ptr, u32 router)
{
	u32 r_num = tipc_node(router);

	if (n_ptr->last_router < 0)
		return;		/* No routes */

	n_ptr->routers[r_num / 32] =
		((~(1 << (r_num % 32))) & (n_ptr->routers[r_num / 32]));
	n_ptr->last_router = tipc_max_nodes / 32;
	while ((--n_ptr->last_router >= 0) &&
	       !n_ptr->routers[n_ptr->last_router]);

	if (!node_is_up(n_ptr))
		node_lost_contact(n_ptr);
}

#if 0
void node_print(struct print_buf *buf, struct node *n_ptr, char *str)
{
	u32 i;

	tipc_printf(buf, "\n\n%s", str);
	for (i = 0; i < MAX_BEARERS; i++) {
		if (!n_ptr->links[i])
			continue;
		tipc_printf(buf, "Links[%u]: %x, ", i, n_ptr->links[i]);
	}
	tipc_printf(buf, "Active links: [%x,%x]\n",
		    n_ptr->active_links[0], n_ptr->active_links[1]);
}
#endif

u32 tipc_available_nodes(const u32 domain)
{
	struct node *n_ptr;
	u32 cnt = 0;

	for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) {
		if (!in_scope(domain, n_ptr->addr))
			continue;
		if (node_is_up(n_ptr))
			cnt++;
	}
	return cnt;
}

struct sk_buff *node_get_nodes(const void *req_tlv_area, int req_tlv_space)
{
	u32 domain;
	struct sk_buff *buf;
	struct node *n_ptr;
	struct tipc_node_info node_info;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	domain = *(u32 *)TLV_DATA(req_tlv_area);
	domain = ntohl(domain);
	if (!addr_domain_valid(domain))
		return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
					      " (network address)");

	if (!nodes)
		return cfg_reply_none();

	/* For now, get space for all other nodes
	   (will need to modify this when slave nodes are supported) */

	buf = cfg_reply_alloc(TLV_SPACE(sizeof(node_info)) *
			      (tipc_max_nodes - 1));
	if (!buf)
		return NULL;

	/* Add TLVs for all nodes in scope */

	for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) {
		if (!in_scope(domain, n_ptr->addr))
			continue;
		node_info.addr = htonl(n_ptr->addr);
		node_info.up = htonl(node_is_up(n_ptr));
		cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
			       &node_info, sizeof(node_info));
	}

	return buf;
}

struct sk_buff *node_get_links(const void *req_tlv_area, int req_tlv_space)
{
	u32 domain;
	struct sk_buff *buf;
	struct node *n_ptr;
	struct tipc_link_info link_info;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	domain = *(u32 *)TLV_DATA(req_tlv_area);
	domain = ntohl(domain);
	if (!addr_domain_valid(domain))
		return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
					      " (network address)");

	if (!nodes)
		return cfg_reply_none();

	/* For now, get space for 2 links to all other nodes + bcast link
	   (will need to modify this when slave nodes are supported) */

	buf = cfg_reply_alloc(TLV_SPACE(sizeof(link_info)) *
			      (2 * (tipc_max_nodes - 1) + 1));
	if (!buf)
		return NULL;

	/* Add TLV for broadcast link */

	link_info.dest = tipc_own_addr & 0xfffff00;
	link_info.dest = htonl(link_info.dest);
	link_info.up = htonl(1);
	sprintf(link_info.str, bc_link_name);
	cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));

	/* Add TLVs for any other links in scope */

	for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) {
		u32 i;

		if (!in_scope(domain, n_ptr->addr))
			continue;
		for (i = 0; i < MAX_BEARERS; i++) {
			if (!n_ptr->links[i])
				continue;
			link_info.dest = htonl(n_ptr->addr);
			link_info.up = htonl(link_is_up(n_ptr->links[i]));
			strcpy(link_info.str, n_ptr->links[i]->name);
			cfg_append_tlv(buf, TIPC_TLV_LINK_INFO,
				       &link_info, sizeof(link_info));
		}
	}

	return buf;
}