Diffstat (limited to 'net')
-rw-r--r--  net/tipc/addr.c          10
-rw-r--r--  net/tipc/addr.h           4
-rw-r--r--  net/tipc/bcast.c        162
-rw-r--r--  net/tipc/bcast.h         44
-rw-r--r--  net/tipc/bearer.c       170
-rw-r--r--  net/tipc/bearer.h        38
-rw-r--r--  net/tipc/cluster.c      154
-rw-r--r--  net/tipc/cluster.h       40
-rw-r--r--  net/tipc/config.c       218
-rw-r--r--  net/tipc/config.h        38
-rw-r--r--  net/tipc/core.c          86
-rw-r--r--  net/tipc/core.h          30
-rw-r--r--  net/tipc/dbg.c          112
-rw-r--r--  net/tipc/dbg.h           18
-rw-r--r--  net/tipc/discover.c      52
-rw-r--r--  net/tipc/discover.h      16
-rw-r--r--  net/tipc/eth_media.c      8
-rw-r--r--  net/tipc/handler.c        6
-rw-r--r--  net/tipc/link.c         602
-rw-r--r--  net/tipc/link.h          73
-rw-r--r--  net/tipc/msg.c           19
-rw-r--r--  net/tipc/msg.h           16
-rw-r--r--  net/tipc/name_distr.c    92
-rw-r--r--  net/tipc/name_distr.h    10
-rw-r--r--  net/tipc/name_table.c   206
-rw-r--r--  net/tipc/name_table.h    26
-rw-r--r--  net/tipc/net.c          126
-rw-r--r--  net/tipc/net.h           20
-rw-r--r--  net/tipc/netlink.c       16
-rw-r--r--  net/tipc/node.c         215
-rw-r--r--  net/tipc/node.h          50
-rw-r--r--  net/tipc/node_subscr.c   20
-rw-r--r--  net/tipc/node_subscr.h    6
-rw-r--r--  net/tipc/port.c         274
-rw-r--r--  net/tipc/port.h          58
-rw-r--r--  net/tipc/ref.c           72
-rw-r--r--  net/tipc/ref.h           38
-rw-r--r--  net/tipc/socket.c        10
-rw-r--r--  net/tipc/subscr.c        58
-rw-r--r--  net/tipc/subscr.h        24
-rw-r--r--  net/tipc/user_reg.c      22
-rw-r--r--  net/tipc/user_reg.h       8
-rw-r--r--  net/tipc/zone.c          40
-rw-r--r--  net/tipc/zone.h          20
44 files changed, 1654 insertions, 1673 deletions
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index eca22260c98c..0be25e175b93 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -47,7 +47,7 @@ u32 tipc_get_addr(void)
 }
 
 /**
- * addr_domain_valid - validates a network domain address
+ * tipc_addr_domain_valid - validates a network domain address
  *
  * Accepts <Z.C.N>, <Z.C.0>, <Z.0.0>, and <0.0.0>,
  * where Z, C, and N are non-zero and do not exceed the configured limits.
@@ -55,7 +55,7 @@ u32 tipc_get_addr(void)
  * Returns 1 if domain address is valid, otherwise 0
  */
 
-int addr_domain_valid(u32 addr)
+int tipc_addr_domain_valid(u32 addr)
 {
 	u32 n = tipc_node(addr);
 	u32 c = tipc_cluster(addr);
@@ -79,7 +79,7 @@ int addr_domain_valid(u32 addr)
 }
 
 /**
- * addr_node_valid - validates a proposed network address for this node
+ * tipc_addr_node_valid - validates a proposed network address for this node
  *
  * Accepts <Z.C.N>, where Z, C, and N are non-zero and do not exceed
  * the configured limits.
@@ -87,8 +87,8 @@ int addr_domain_valid(u32 addr)
  * Returns 1 if address can be used, otherwise 0
  */
 
-int addr_node_valid(u32 addr)
+int tipc_addr_node_valid(u32 addr)
 {
-	return (addr_domain_valid(addr) && tipc_node(addr));
+	return (tipc_addr_domain_valid(addr) && tipc_node(addr));
 }
 
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
index 02ca71783e2e..bcfebb3cbbf3 100644
--- a/net/tipc/addr.h
+++ b/net/tipc/addr.h
@@ -122,7 +122,7 @@ static inline char *addr_string_fill(char *string, u32 addr)
 	return string;
 }
 
-int addr_domain_valid(u32);
-int addr_node_valid(u32 addr);
+int tipc_addr_domain_valid(u32);
+int tipc_addr_node_valid(u32 addr);
 
 #endif
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index af9743a52d6c..a7b04f397c12 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -104,7 +104,7 @@ static struct bclink *bclink = NULL;
 static struct link *bcl = NULL;
 static spinlock_t bc_lock = SPIN_LOCK_UNLOCKED;
 
-char bc_link_name[] = "multicast-link";
+char tipc_bclink_name[] = "multicast-link";
 
 
 static inline u32 buf_seqno(struct sk_buff *buf)
@@ -178,19 +178,19 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
 		buf = buf->next;
 	}
 	if (buf != NULL)
-		link_retransmit(bcl, buf, mod(to - after));
+		tipc_link_retransmit(bcl, buf, mod(to - after));
 	spin_unlock_bh(&bc_lock);
 }
 
 /**
- * bclink_acknowledge - handle acknowledgement of broadcast packets
+ * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
  * @n_ptr: node that sent acknowledgement info
  * @acked: broadcast sequence # that has been acknowledged
  *
  * Node is locked, bc_lock unlocked.
  */
 
-void bclink_acknowledge(struct node *n_ptr, u32 acked)
+void tipc_bclink_acknowledge(struct node *n_ptr, u32 acked)
 {
 	struct sk_buff *crs;
 	struct sk_buff *next;
@@ -226,16 +226,16 @@ void bclink_acknowledge(struct node *n_ptr, u32 acked)
 	/* Try resolving broadcast link congestion, if necessary */
 
 	if (unlikely(bcl->next_out))
-		link_push_queue(bcl);
+		tipc_link_push_queue(bcl);
 	if (unlikely(released && !list_empty(&bcl->waiting_ports)))
-		link_wakeup_ports(bcl, 0);
+		tipc_link_wakeup_ports(bcl, 0);
 	spin_unlock_bh(&bc_lock);
 }
 
 /**
  * bclink_send_ack - unicast an ACK msg
  *
- * net_lock and node lock set
+ * tipc_net_lock and node lock set
  */
 
 static void bclink_send_ack(struct node *n_ptr)
@@ -243,13 +243,13 @@ static void bclink_send_ack(struct node *n_ptr)
 	struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];
 
 	if (l_ptr != NULL)
-		link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+		tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
 }
 
 /**
  * bclink_send_nack- broadcast a NACK msg
  *
- * net_lock and node lock set
+ * tipc_net_lock and node lock set
  */
 
 static void bclink_send_nack(struct node *n_ptr)
@@ -271,11 +271,11 @@ static void bclink_send_nack(struct node *n_ptr)
 		msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
 		msg_set_bcast_tag(msg, tipc_own_tag);
 
-		if (bearer_send(&bcbearer->bearer, buf, 0)) {
+		if (tipc_bearer_send(&bcbearer->bearer, buf, 0)) {
 			bcl->stats.sent_nacks++;
 			buf_discard(buf);
 		} else {
-			bearer_schedule(bcl->b_ptr, bcl);
+			tipc_bearer_schedule(bcl->b_ptr, bcl);
 			bcl->proto_msg_queue = buf;
 			bcl->stats.bearer_congs++;
 		}
@@ -291,12 +291,12 @@ static void bclink_send_nack(struct node *n_ptr)
 }
 
 /**
- * bclink_check_gap - send a NACK if a sequence gap exists
+ * tipc_bclink_check_gap - send a NACK if a sequence gap exists
  *
- * net_lock and node lock set
+ * tipc_net_lock and node lock set
  */
 
-void bclink_check_gap(struct node *n_ptr, u32 last_sent)
+void tipc_bclink_check_gap(struct node *n_ptr, u32 last_sent)
 {
 	if (!n_ptr->bclink.supported ||
 	    less_eq(last_sent, mod(n_ptr->bclink.last_in)))
@@ -309,19 +309,19 @@ void bclink_check_gap(struct node *n_ptr, u32 last_sent)
 }
 
 /**
- * bclink_peek_nack - process a NACK msg meant for another node
+ * tipc_bclink_peek_nack - process a NACK msg meant for another node
  *
- * Only net_lock set.
+ * Only tipc_net_lock set.
  */
 
-void bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
+void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
 {
-	struct node *n_ptr = node_find(dest);
+	struct node *n_ptr = tipc_node_find(dest);
 	u32 my_after, my_to;
 
-	if (unlikely(!n_ptr || !node_is_up(n_ptr)))
+	if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr)))
 		return;
-	node_lock(n_ptr);
+	tipc_node_lock(n_ptr);
 	/*
 	 * Modify gap to suppress unnecessary NACKs from this node
 	 */
@@ -364,20 +364,20 @@ void bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
 			bclink_set_gap(n_ptr);
 		}
 	}
-	node_unlock(n_ptr);
+	tipc_node_unlock(n_ptr);
 }
 
 /**
- * bclink_send_msg - broadcast a packet to all nodes in cluster
+ * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
  */
 
-int bclink_send_msg(struct sk_buff *buf)
+int tipc_bclink_send_msg(struct sk_buff *buf)
 {
 	int res;
 
 	spin_lock_bh(&bc_lock);
 
-	res = link_send_buf(bcl, buf);
+	res = tipc_link_send_buf(bcl, buf);
 	if (unlikely(res == -ELINKCONG))
 		buf_discard(buf);
 	else
@@ -393,22 +393,22 @@ int bclink_send_msg(struct sk_buff *buf)
 }
 
 /**
- * bclink_recv_pkt - receive a broadcast packet, and deliver upwards
+ * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
  *
- * net_lock is read_locked, no other locks set
+ * tipc_net_lock is read_locked, no other locks set
  */
 
-void bclink_recv_pkt(struct sk_buff *buf)
+void tipc_bclink_recv_pkt(struct sk_buff *buf)
 {
 	struct tipc_msg *msg = buf_msg(buf);
-	struct node* node = node_find(msg_prevnode(msg));
+	struct node* node = tipc_node_find(msg_prevnode(msg));
 	u32 next_in;
 	u32 seqno;
 	struct sk_buff *deferred;
 
 	msg_dbg(msg, "<BC<<<");
 
-	if (unlikely(!node || !node_is_up(node) || !node->bclink.supported ||
+	if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported ||
 		     (msg_mc_netid(msg) != tipc_net_id))) {
 		buf_discard(buf);
 		return;
@@ -417,14 +417,14 @@ void bclink_recv_pkt(struct sk_buff *buf)
 	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
 		msg_dbg(msg, "<BCNACK<<<");
 		if (msg_destnode(msg) == tipc_own_addr) {
-			node_lock(node);
-			bclink_acknowledge(node, msg_bcast_ack(msg));
-			node_unlock(node);
+			tipc_node_lock(node);
+			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
+			tipc_node_unlock(node);
 			bcl->stats.recv_nacks++;
 			bclink_retransmit_pkt(msg_bcgap_after(msg),
 					      msg_bcgap_to(msg));
 		} else {
-			bclink_peek_nack(msg_destnode(msg),
+			tipc_bclink_peek_nack(msg_destnode(msg),
 					 msg_bcast_tag(msg),
 					 msg_bcgap_after(msg),
 					 msg_bcgap_to(msg));
@@ -433,7 +433,7 @@ void bclink_recv_pkt(struct sk_buff *buf)
 		return;
 	}
 
-	node_lock(node);
+	tipc_node_lock(node);
 receive:
 	deferred = node->bclink.deferred_head;
 	next_in = mod(node->bclink.last_in + 1);
@@ -448,26 +448,26 @@ receive:
 			bcl->stats.sent_acks++;
 		}
 		if (likely(msg_isdata(msg))) {
-			node_unlock(node);
-			port_recv_mcast(buf, NULL);
+			tipc_node_unlock(node);
+			tipc_port_recv_mcast(buf, NULL);
 		} else if (msg_user(msg) == MSG_BUNDLER) {
 			bcl->stats.recv_bundles++;
 			bcl->stats.recv_bundled += msg_msgcnt(msg);
-			node_unlock(node);
-			link_recv_bundle(buf);
+			tipc_node_unlock(node);
+			tipc_link_recv_bundle(buf);
 		} else if (msg_user(msg) == MSG_FRAGMENTER) {
 			bcl->stats.recv_fragments++;
-			if (link_recv_fragment(&node->bclink.defragm,
+			if (tipc_link_recv_fragment(&node->bclink.defragm,
 					       &buf, &msg))
 				bcl->stats.recv_fragmented++;
-			node_unlock(node);
-			net_route_msg(buf);
+			tipc_node_unlock(node);
+			tipc_net_route_msg(buf);
 		} else {
-			node_unlock(node);
-			net_route_msg(buf);
+			tipc_node_unlock(node);
+			tipc_net_route_msg(buf);
 		}
 		if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
-			node_lock(node);
+			tipc_node_lock(node);
 			buf = deferred;
 			msg = buf_msg(buf);
 			node->bclink.deferred_head = deferred->next;
@@ -478,9 +478,9 @@ receive:
 		u32 gap_after = node->bclink.gap_after;
 		u32 gap_to = node->bclink.gap_to;
 
-		if (link_defer_pkt(&node->bclink.deferred_head,
+		if (tipc_link_defer_pkt(&node->bclink.deferred_head,
 				   &node->bclink.deferred_tail,
 				   buf)) {
 			node->bclink.nack_sync++;
 			bcl->stats.deferred_recv++;
 			if (seqno == mod(gap_after + 1))
@@ -497,10 +497,10 @@ receive:
 		bcl->stats.duplicates++;
 		buf_discard(buf);
 	}
-	node_unlock(node);
+	tipc_node_unlock(node);
 }
 
-u32 bclink_get_last_sent(void)
+u32 tipc_bclink_get_last_sent(void)
 {
 	u32 last_sent = mod(bcl->next_out_no - 1);
 
@@ -509,15 +509,15 @@ u32 bclink_get_last_sent(void)
 	return last_sent;
 }
 
-u32 bclink_acks_missing(struct node *n_ptr)
+u32 tipc_bclink_acks_missing(struct node *n_ptr)
 {
 	return (n_ptr->bclink.supported &&
-		(bclink_get_last_sent() != n_ptr->bclink.acked));
+		(tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
 }
 
 
 /**
- * bcbearer_send - send a packet through the broadcast pseudo-bearer
+ * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
  *
  * Send through as many bearers as necessary to reach all nodes
  * that support TIPC multicasting.
@@ -525,9 +525,9 @@ u32 bclink_acks_missing(struct node *n_ptr)
  * Returns 0 if packet sent successfully, non-zero if not
  */
 
-int bcbearer_send(struct sk_buff *buf,
+int tipc_bcbearer_send(struct sk_buff *buf,
 		  struct tipc_bearer *unused1,
 		  struct tipc_media_addr *unused2)
 {
 	static int send_count = 0;
 
@@ -541,8 +541,8 @@ int bcbearer_send(struct sk_buff *buf,
 	if (likely(!msg_non_seq(buf_msg(buf)))) {
 		struct tipc_msg *msg;
 
-		assert(cluster_bcast_nodes.count != 0);
-		bcbuf_set_acks(buf, cluster_bcast_nodes.count);
+		assert(tipc_cltr_bcast_nodes.count != 0);
+		bcbuf_set_acks(buf, tipc_cltr_bcast_nodes.count);
 		msg = buf_msg(buf);
 		msg_set_non_seq(msg);
 		msg_set_mc_netid(msg, tipc_net_id);
@@ -555,7 +555,7 @@ int bcbearer_send(struct sk_buff *buf,
 
 	/* Send buffer over bearers until all targets reached */
 
-	remains = cluster_bcast_nodes;
+	remains = tipc_cltr_bcast_nodes;
 
 	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
 		struct bearer *p = bcbearer->bpairs[bp_index].primary;
@@ -564,7 +564,7 @@ int bcbearer_send(struct sk_buff *buf,
 		if (!p)
 			break;	/* no more bearers to try */
 
-		nmap_diff(&remains, &p->nodes, &remains_new);
+		tipc_nmap_diff(&remains, &p->nodes, &remains_new);
 		if (remains_new.count == remains.count)
 			continue;	/* bearer pair doesn't add anything */
 
@@ -597,10 +597,10 @@ update:
 }
 
 /**
- * bcbearer_sort - create sets of bearer pairs used by broadcast bearer
+ * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
  */
 
-void bcbearer_sort(void)
+void tipc_bcbearer_sort(void)
 {
 	struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
 	struct bcbearer_pair *bp_curr;
@@ -614,7 +614,7 @@ void bcbearer_sort(void)
 	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
 
 	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
-		struct bearer *b = &bearers[b_index];
+		struct bearer *b = &tipc_bearers[b_index];
 
 		if (!b->active || !b->nodes.count)
 			continue;
@@ -638,8 +638,8 @@ void bcbearer_sort(void)
 		bp_curr->primary = bp_temp[pri].primary;
 
 		if (bp_temp[pri].secondary) {
-			if (nmap_equal(&bp_temp[pri].primary->nodes,
+			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
 				       &bp_temp[pri].secondary->nodes)) {
 				bp_curr->secondary = bp_temp[pri].secondary;
 			} else {
 				bp_curr++;
@@ -654,14 +654,14 @@ void bcbearer_sort(void)
 }
 
 /**
- * bcbearer_push - resolve bearer congestion
+ * tipc_bcbearer_push - resolve bearer congestion
  *
  * Forces bclink to push out any unsent packets, until all packets are gone
  * or congestion reoccurs.
  * No locks set when function called
  */
 
-void bcbearer_push(void)
+void tipc_bcbearer_push(void)
 {
 	struct bearer *b_ptr;
 
@@ -669,20 +669,20 @@ void bcbearer_push(void)
 	b_ptr = &bcbearer->bearer;
 	if (b_ptr->publ.blocked) {
 		b_ptr->publ.blocked = 0;
-		bearer_lock_push(b_ptr);
+		tipc_bearer_lock_push(b_ptr);
 	}
 	spin_unlock_bh(&bc_lock);
 }
 
 
-int bclink_stats(char *buf, const u32 buf_size)
+int tipc_bclink_stats(char *buf, const u32 buf_size)
 {
 	struct print_buf pb;
 
 	if (!bcl)
 		return 0;
 
-	printbuf_init(&pb, buf, buf_size);
+	tipc_printbuf_init(&pb, buf, buf_size);
 
 	spin_lock_bh(&bc_lock);
 
@@ -718,10 +718,10 @@ int bclink_stats(char *buf, const u32 buf_size)
 			     : 0);
 
 	spin_unlock_bh(&bc_lock);
-	return printbuf_validate(&pb);
+	return tipc_printbuf_validate(&pb);
 }
 
-int bclink_reset_stats(void)
+int tipc_bclink_reset_stats(void)
 {
 	if (!bcl)
 		return -ENOPROTOOPT;
@@ -732,7 +732,7 @@ int bclink_reset_stats(void)
 	return TIPC_OK;
 }
 
-int bclink_set_queue_limits(u32 limit)
+int tipc_bclink_set_queue_limits(u32 limit)
 {
 	if (!bcl)
 		return -ENOPROTOOPT;
@@ -740,12 +740,12 @@ int bclink_set_queue_limits(u32 limit)
 		return -EINVAL;
 
 	spin_lock_bh(&bc_lock);
-	link_set_queue_limits(bcl, limit);
+	tipc_link_set_queue_limits(bcl, limit);
 	spin_unlock_bh(&bc_lock);
 	return TIPC_OK;
 }
 
-int bclink_init(void)
+int tipc_bclink_init(void)
 {
 	bcbearer = kmalloc(sizeof(*bcbearer), GFP_ATOMIC);
 	bclink = kmalloc(sizeof(*bclink), GFP_ATOMIC);
@@ -762,7 +762,7 @@ int bclink_init(void)
 	memset(bcbearer, 0, sizeof(struct bcbearer));
 	INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
 	bcbearer->bearer.media = &bcbearer->media;
-	bcbearer->media.send_msg = bcbearer_send;
+	bcbearer->media.send_msg = tipc_bcbearer_send;
 	sprintf(bcbearer->media.name, "tipc-multicast");
 
 	bcl = &bclink->link;
@@ -772,27 +772,27 @@ int bclink_init(void)
 	bclink->node.lock = SPIN_LOCK_UNLOCKED;
 	bcl->owner = &bclink->node;
 	bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
-	link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
+	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
 	bcl->b_ptr = &bcbearer->bearer;
 	bcl->state = WORKING_WORKING;
-	sprintf(bcl->name, bc_link_name);
+	sprintf(bcl->name, tipc_bclink_name);
 
 	if (BCLINK_LOG_BUF_SIZE) {
 		char *pb = kmalloc(BCLINK_LOG_BUF_SIZE, GFP_ATOMIC);
 
 		if (!pb)
 			goto nomem;
-		printbuf_init(&bcl->print_buf, pb, BCLINK_LOG_BUF_SIZE);
+		tipc_printbuf_init(&bcl->print_buf, pb, BCLINK_LOG_BUF_SIZE);
 	}
 
 	return TIPC_OK;
 }
 
-void bclink_stop(void)
+void tipc_bclink_stop(void)
 {
 	spin_lock_bh(&bc_lock);
 	if (bcbearer) {
-		link_stop(bcl);
+		tipc_link_stop(bcl);
 		if (BCLINK_LOG_BUF_SIZE)
 			kfree(bcl->print_buf.buf);
 		bcl = NULL;
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 5430e524b4f9..0e3be2ab3307 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -70,14 +70,14 @@ struct port_list {
 
 struct node;
 
-extern char bc_link_name[];
+extern char tipc_bclink_name[];
 
 
 /**
  * nmap_get - determine if node exists in a node map
  */
 
-static inline int nmap_get(struct node_map *nm_ptr, u32 node)
+static inline int tipc_nmap_get(struct node_map *nm_ptr, u32 node)
 {
 	int n = tipc_node(node);
 	int w = n / WSIZE;
@@ -90,7 +90,7 @@ static inline int nmap_get(struct node_map *nm_ptr, u32 node)
  * nmap_add - add a node to a node map
  */
 
-static inline void nmap_add(struct node_map *nm_ptr, u32 node)
+static inline void tipc_nmap_add(struct node_map *nm_ptr, u32 node)
 {
 	int n = tipc_node(node);
 	int w = n / WSIZE;
@@ -106,7 +106,7 @@ static inline void nmap_add(struct node_map *nm_ptr, u32 node)
  * nmap_remove - remove a node from a node map
  */
 
-static inline void nmap_remove(struct node_map *nm_ptr, u32 node)
+static inline void tipc_nmap_remove(struct node_map *nm_ptr, u32 node)
 {
 	int n = tipc_node(node);
 	int w = n / WSIZE;
@@ -122,7 +122,7 @@ static inline void nmap_remove(struct node_map *nm_ptr, u32 node)
  * nmap_equal - test for equality of node maps
  */
 
-static inline int nmap_equal(struct node_map *nm_a, struct node_map *nm_b)
+static inline int tipc_nmap_equal(struct node_map *nm_a, struct node_map *nm_b)
 {
 	return !memcmp(nm_a, nm_b, sizeof(*nm_a));
 }
@@ -134,8 +134,8 @@ static inline int nmap_equal(struct node_map *nm_a, struct node_map *nm_b)
  * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
  */
 
-static inline void nmap_diff(struct node_map *nm_a, struct node_map *nm_b,
-			     struct node_map *nm_diff)
+static inline void tipc_nmap_diff(struct node_map *nm_a, struct node_map *nm_b,
+			     struct node_map *nm_diff)
 {
 	int stop = sizeof(nm_a->map) / sizeof(u32);
 	int w;
@@ -159,7 +159,7 @@ static inline void nmap_diff(struct node_map *nm_a, struct node_map *nm_b,
  * port_list_add - add a port to a port list, ensuring no duplicates
  */
 
-static inline void port_list_add(struct port_list *pl_ptr, u32 port)
+static inline void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
 {
 	struct port_list *item = pl_ptr;
 	int i;
@@ -194,7 +194,7 @@ static inline void port_list_add(struct port_list *pl_ptr, u32 port)
  * Note: First item is on stack, so it doesn't need to be released
  */
 
-static inline void port_list_free(struct port_list *pl_ptr)
+static inline void tipc_port_list_free(struct port_list *pl_ptr)
 {
 	struct port_list *item;
 	struct port_list *next;
@@ -206,18 +206,18 @@ static inline void port_list_free(struct port_list *pl_ptr)
 }
 
 
-int bclink_init(void);
-void bclink_stop(void);
-void bclink_acknowledge(struct node *n_ptr, u32 acked);
-int bclink_send_msg(struct sk_buff *buf);
-void bclink_recv_pkt(struct sk_buff *buf);
-u32 bclink_get_last_sent(void);
-u32 bclink_acks_missing(struct node *n_ptr);
-void bclink_check_gap(struct node *n_ptr, u32 seqno);
-int bclink_stats(char *stats_buf, const u32 buf_size);
-int bclink_reset_stats(void);
-int bclink_set_queue_limits(u32 limit);
-void bcbearer_sort(void);
-void bcbearer_push(void);
+int tipc_bclink_init(void);
+void tipc_bclink_stop(void);
+void tipc_bclink_acknowledge(struct node *n_ptr, u32 acked);
+int tipc_bclink_send_msg(struct sk_buff *buf);
+void tipc_bclink_recv_pkt(struct sk_buff *buf);
+u32 tipc_bclink_get_last_sent(void);
+u32 tipc_bclink_acks_missing(struct node *n_ptr);
+void tipc_bclink_check_gap(struct node *n_ptr, u32 seqno);
+int tipc_bclink_stats(char *stats_buf, const u32 buf_size);
+int tipc_bclink_reset_stats(void);
+int tipc_bclink_set_queue_limits(u32 limit);
+void tipc_bcbearer_sort(void);
+void tipc_bcbearer_push(void);
 
 #endif
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 02b6cf6ab7a4..64dcb0f3a8b2 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -48,7 +48,7 @@
 static struct media *media_list = 0;
 static u32 media_count = 0;
 
-struct bearer *bearers = 0;
+struct bearer *tipc_bearers = 0;
 
 /**
  * media_name_valid - validate media name
@@ -107,7 +107,7 @@ int tipc_register_media(u32 media_type,
 	u32 i;
 	int res = -EINVAL;
 
-	write_lock_bh(&net_lock);
+	write_lock_bh(&tipc_net_lock);
 	if (!media_list)
 		goto exit;
 
@@ -165,15 +165,15 @@ int tipc_register_media(u32 media_type,
 	dbg("Media <%s> registered\n", name);
 	res = 0;
 exit:
-	write_unlock_bh(&net_lock);
+	write_unlock_bh(&tipc_net_lock);
 	return res;
 }
 
 /**
- * media_addr_printf - record media address in print buffer
+ * tipc_media_addr_printf - record media address in print buffer
  */
 
-void media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
+void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
 {
 	struct media *m_ptr;
 	u32 media_type;
@@ -201,25 +201,25 @@ void media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
 }
 
 /**
- * media_get_names - record names of registered media in buffer
+ * tipc_media_get_names - record names of registered media in buffer
  */
 
-struct sk_buff *media_get_names(void)
+struct sk_buff *tipc_media_get_names(void)
 {
 	struct sk_buff *buf;
 	struct media *m_ptr;
 	int i;
 
-	buf = cfg_reply_alloc(MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME));
+	buf = tipc_cfg_reply_alloc(MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME));
 	if (!buf)
 		return NULL;
 
-	read_lock_bh(&net_lock);
+	read_lock_bh(&tipc_net_lock);
 	for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
-		cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME, m_ptr->name,
+		tipc_cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME, m_ptr->name,
 			       strlen(m_ptr->name) + 1);
 	}
-	read_unlock_bh(&net_lock);
+	read_unlock_bh(&tipc_net_lock);
 	return buf;
 }
 
@@ -283,7 +283,7 @@ static struct bearer *bearer_find(const char *name)
 	struct bearer *b_ptr;
 	u32 i;
 
-	for (i = 0, b_ptr = bearers; i < MAX_BEARERS; i++, b_ptr++) {
+	for (i = 0, b_ptr = tipc_bearers; i < MAX_BEARERS; i++, b_ptr++) {
 		if (b_ptr->active && (!strcmp(b_ptr->publ.name, name)))
 			return b_ptr;
 	}
@@ -291,16 +291,16 @@ static struct bearer *bearer_find(const char *name)
 }
 
 /**
- * bearer_find - locates bearer object with matching interface name
+ * tipc_bearer_find_interface - locates bearer object with matching interface name
  */
 
-struct bearer *bearer_find_interface(const char *if_name)
+struct bearer *tipc_bearer_find_interface(const char *if_name)
 {
 	struct bearer *b_ptr;
 	char *b_if_name;
 	u32 i;
 
-	for (i = 0, b_ptr = bearers; i < MAX_BEARERS; i++, b_ptr++) {
+	for (i = 0, b_ptr = tipc_bearers; i < MAX_BEARERS; i++, b_ptr++) {
 		if (!b_ptr->active)
 			continue;
 		b_if_name = strchr(b_ptr->publ.name, ':') + 1;
@@ -311,54 +311,54 @@ struct bearer *bearer_find_interface(const char *if_name)
 }
 
 /**
- * bearer_get_names - record names of bearers in buffer
+ * tipc_bearer_get_names - record names of bearers in buffer
  */
 
-struct sk_buff *bearer_get_names(void)
+struct sk_buff *tipc_bearer_get_names(void)
 {
 	struct sk_buff *buf;
 	struct media *m_ptr;
 	struct bearer *b_ptr;
 	int i, j;
 
-	buf = cfg_reply_alloc(MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME));
+	buf = tipc_cfg_reply_alloc(MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME));
 	if (!buf)
 		return NULL;
 
-	read_lock_bh(&net_lock);
+	read_lock_bh(&tipc_net_lock);
 	for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
 		for (j = 0; j < MAX_BEARERS; j++) {
-			b_ptr = &bearers[j];
+			b_ptr = &tipc_bearers[j];
 			if (b_ptr->active && (b_ptr->media == m_ptr)) {
-				cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME,
+				tipc_cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME,
 					       b_ptr->publ.name,
 					       strlen(b_ptr->publ.name) + 1);
 			}
 		}
 	}
-	read_unlock_bh(&net_lock);
+	read_unlock_bh(&tipc_net_lock);
 	return buf;
 }
 
-void bearer_add_dest(struct bearer *b_ptr, u32 dest)
+void tipc_bearer_add_dest(struct bearer *b_ptr, u32 dest)
 {
-	nmap_add(&b_ptr->nodes, dest);
-	disc_update_link_req(b_ptr->link_req);
-	bcbearer_sort();
+	tipc_nmap_add(&b_ptr->nodes, dest);
+	tipc_disc_update_link_req(b_ptr->link_req);
+	tipc_bcbearer_sort();
 }
 
-void bearer_remove_dest(struct bearer *b_ptr, u32 dest)
+void tipc_bearer_remove_dest(struct bearer *b_ptr, u32 dest)
 {
-	nmap_remove(&b_ptr->nodes, dest);
-	disc_update_link_req(b_ptr->link_req);
-	bcbearer_sort();
+	tipc_nmap_remove(&b_ptr->nodes, dest);
+	tipc_disc_update_link_req(b_ptr->link_req);
+	tipc_bcbearer_sort();
 }
 
 /*
  * bearer_push(): Resolve bearer congestion. Force the waiting
  * links to push out their unsent packets, one packet per link
  * per iteration, until all packets are gone or congestion reoccurs.
- * 'net_lock' is read_locked when this function is called
+ * 'tipc_net_lock' is read_locked when this function is called
  * bearer.lock must be taken before calling
  * Returns binary true(1) ore false(0)
  */
@@ -372,7 +372,7 @@ static int bearer_push(struct bearer *b_ptr)
 
 	while (!list_empty(&b_ptr->cong_links) && (res != PUSH_FAILED)) {
 		list_for_each_entry_safe(ln, tln, &b_ptr->cong_links, link_list) {
-			res = link_push_packet(ln);
+			res = tipc_link_push_packet(ln);
 			if (res == PUSH_FAILED)
 				break;
 			if (res == PUSH_FINISHED)
@@ -382,7 +382,7 @@ static int bearer_push(struct bearer *b_ptr)
 	return list_empty(&b_ptr->cong_links);
 }
 
-void bearer_lock_push(struct bearer *b_ptr)
+void tipc_bearer_lock_push(struct bearer *b_ptr)
 {
 	int res;
 
@@ -390,7 +390,7 @@ void bearer_lock_push(struct bearer *b_ptr)
 	res = bearer_push(b_ptr);
 	spin_unlock_bh(&b_ptr->publ.lock);
 	if (res)
-		bcbearer_push();
+		tipc_bcbearer_push();
 }
 
 
@@ -405,7 +405,7 @@ void tipc_continue(struct tipc_bearer *tb_ptr)
 	spin_lock_bh(&b_ptr->publ.lock);
 	b_ptr->continue_count++;
 	if (!list_empty(&b_ptr->cong_links))
-		k_signal((Handler)bearer_lock_push, (unsigned long)b_ptr);
+		tipc_k_signal((Handler)tipc_bearer_lock_push, (unsigned long)b_ptr);
 	b_ptr->publ.blocked = 0;
 	spin_unlock_bh(&b_ptr->publ.lock);
 }
@@ -414,11 +414,11 @@ void tipc_continue(struct tipc_bearer *tb_ptr)
  * Schedule link for sending of messages after the bearer
  * has been deblocked by 'continue()'. This method is called
  * when somebody tries to send a message via this link while
- * the bearer is congested. 'net_lock' is in read_lock here
+ * the bearer is congested. 'tipc_net_lock' is in read_lock here
  * bearer.lock is busy
  */
 
-static void bearer_schedule_unlocked(struct bearer *b_ptr, struct link *l_ptr)
+static void tipc_bearer_schedule_unlocked(struct bearer *b_ptr, struct link *l_ptr)
 {
 	list_move_tail(&l_ptr->link_list, &b_ptr->cong_links);
 }
@@ -427,24 +427,24 @@ static void bearer_schedule_unlocked(struct bearer *b_ptr, struct link *l_ptr)
  * Schedule link for sending of messages after the bearer
  * has been deblocked by 'continue()'. This method is called
  * when somebody tries to send a message via this link while
- * the bearer is congested. 'net_lock' is in read_lock here,
+ * the bearer is congested. 'tipc_net_lock' is in read_lock here,
  * bearer.lock is free
  */
 
-void bearer_schedule(struct bearer *b_ptr, struct link *l_ptr)
+void tipc_bearer_schedule(struct bearer *b_ptr, struct link *l_ptr)
 {
 	spin_lock_bh(&b_ptr->publ.lock);
-	bearer_schedule_unlocked(b_ptr, l_ptr);
+	tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
 	spin_unlock_bh(&b_ptr->publ.lock);
 }
 
 
 /*
- * bearer_resolve_congestion(): Check if there is bearer congestion,
+ * tipc_bearer_resolve_congestion(): Check if there is bearer congestion,
  * and if there is, try to resolve it before returning.
- * 'net_lock' is read_locked when this function is called
+ * 'tipc_net_lock' is read_locked when this function is called
  */
-int bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr)
+int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr)
 {
 	int res = 1;
 
@@ -452,7 +452,7 @@ int bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr)
 		return 1;
 	spin_lock_bh(&b_ptr->publ.lock);
 	if (!bearer_push(b_ptr)) {
-		bearer_schedule_unlocked(b_ptr, l_ptr);
+		tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
 		res = 0;
 	}
 	spin_unlock_bh(&b_ptr->publ.lock);
@@ -479,7 +479,7 @@ int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
 		return -ENOPROTOOPT;
 
 	if (!bearer_name_validate(name, &b_name) ||
-	    !addr_domain_valid(bcast_scope) ||
+	    !tipc_addr_domain_valid(bcast_scope) ||
 	    !in_scope(bcast_scope, tipc_own_addr))
 		return -EINVAL;
 
@@ -488,8 +488,8 @@ int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
 	    (priority != TIPC_MEDIA_LINK_PRI))
 		return -EINVAL;
 
-	write_lock_bh(&net_lock);
-	if (!bearers)
+	write_lock_bh(&tipc_net_lock);
+	if (!tipc_bearers)
 		goto failed;
 
 	m_ptr = media_find(b_name.media_name);
@@ -505,15 +505,15 @@ restart:
 	bearer_id = MAX_BEARERS;
 	with_this_prio = 1;
 	for (i = MAX_BEARERS; i-- != 0; ) {
-		if (!bearers[i].active) {
+		if (!tipc_bearers[i].active) {
 			bearer_id = i;
 			continue;
 		}
-		if (!strcmp(name, bearers[i].publ.name)) {
+		if (!strcmp(name, tipc_bearers[i].publ.name)) {
 			warn("Bearer <%s> already enabled\n", name);
 			goto failed;
 		}
-		if ((bearers[i].priority == priority) &&
+		if ((tipc_bearers[i].priority == priority) &&
 		    (++with_this_prio > 2)) {
 			if (priority-- == 0) {
 				warn("Third bearer <%s> with priority %u, unable to lower to %u\n",
@@ -530,7 +530,7 @@ restart:
 		goto failed;
 	}
 
-	b_ptr = &bearers[bearer_id];
+	b_ptr = &tipc_bearers[bearer_id];
 	memset(b_ptr, 0, sizeof(struct bearer));
 
 	strcpy(b_ptr->publ.name, name);
@@ -549,16 +549,16 @@ restart:
 	INIT_LIST_HEAD(&b_ptr->cong_links);
 	INIT_LIST_HEAD(&b_ptr->links);
 	if (m_ptr->bcast) {
-		b_ptr->link_req = disc_init_link_req(b_ptr, &m_ptr->bcast_addr,
+		b_ptr->link_req = tipc_disc_init_link_req(b_ptr, &m_ptr->bcast_addr,
 						     bcast_scope, 2);
 	}
 	b_ptr->publ.lock = SPIN_LOCK_UNLOCKED;
-	write_unlock_bh(&net_lock);
+	write_unlock_bh(&tipc_net_lock);
 	info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
 	     name, addr_string_fill(addr_string, bcast_scope), priority);
 	return 0;
 failed:
-	write_unlock_bh(&net_lock);
+	write_unlock_bh(&tipc_net_lock);
 	return res;
 }
 
@@ -576,11 +576,11 @@ int tipc_block_bearer(const char *name)
 	if (tipc_mode != TIPC_NET_MODE)
 		return -ENOPROTOOPT;
 
-	read_lock_bh(&net_lock);
+	read_lock_bh(&tipc_net_lock);
 	b_ptr = bearer_find(name);
 	if (!b_ptr) {
 		warn("Attempt to block unknown bearer <%s>\n", name);
-		read_unlock_bh(&net_lock);
+		read_unlock_bh(&tipc_net_lock);
 		return -EINVAL;
 	}
 
@@ -590,11 +590,11 @@ int tipc_block_bearer(const char *name)
 		struct node *n_ptr = l_ptr->owner;
 
 		spin_lock_bh(&n_ptr->lock);
-		link_reset(l_ptr);
+		tipc_link_reset(l_ptr);
 		spin_unlock_bh(&n_ptr->lock);
 	}
 	spin_unlock_bh(&b_ptr->publ.lock);
-	read_unlock_bh(&net_lock);
+	read_unlock_bh(&tipc_net_lock);
 	info("Blocked bearer <%s>\n", name);
 	return TIPC_OK;
 }
@@ -602,7 +602,7 @@ int tipc_block_bearer(const char *name)
 /**
  * bearer_disable -
  *
- * Note: This routine assumes caller holds net_lock.
+ * Note: This routine assumes caller holds tipc_net_lock.
  */
 
 static int bearer_disable(const char *name)
@@ -620,19 +620,19 @@ static int bearer_disable(const char *name)
 		return -EINVAL;
 	}
 
-	disc_stop_link_req(b_ptr->link_req);
+	tipc_disc_stop_link_req(b_ptr->link_req);
 	spin_lock_bh(&b_ptr->publ.lock);
 	b_ptr->link_req = NULL;
 	b_ptr->publ.blocked = 1;
 	if (b_ptr->media->disable_bearer) {
 		spin_unlock_bh(&b_ptr->publ.lock);
-		write_unlock_bh(&net_lock);
+		write_unlock_bh(&tipc_net_lock);
 		b_ptr->media->disable_bearer(&b_ptr->publ);
-		write_lock_bh(&net_lock);
+		write_lock_bh(&tipc_net_lock);
 		spin_lock_bh(&b_ptr->publ.lock);
 	}
 	list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
-		link_delete(l_ptr);
+		tipc_link_delete(l_ptr);
 	}
 	spin_unlock_bh(&b_ptr->publ.lock);
 	info("Disabled bearer <%s>\n", name);
@@ -644,54 +644,54 @@ int tipc_disable_bearer(const char *name)
 {
 	int res;
 
-	write_lock_bh(&net_lock);
+	write_lock_bh(&tipc_net_lock);
 	res = bearer_disable(name);
-	write_unlock_bh(&net_lock);
+	write_unlock_bh(&tipc_net_lock);
 	return res;
 }
 
 
 
-int bearer_init(void)
+int tipc_bearer_init(void)
 {
 	int res;
 
-	write_lock_bh(&net_lock);
-	bearers = kmalloc(MAX_BEARERS * sizeof(struct bearer), GFP_ATOMIC);
+	write_lock_bh(&tipc_net_lock);
+	tipc_bearers = kmalloc(MAX_BEARERS * sizeof(struct bearer), GFP_ATOMIC);
 	media_list = kmalloc(MAX_MEDIA * sizeof(struct media), GFP_ATOMIC);
-	if (bearers && media_list) {
-		memset(bearers, 0, MAX_BEARERS * sizeof(struct bearer));
+	if (tipc_bearers && media_list) {
+		memset(tipc_bearers, 0, MAX_BEARERS * sizeof(struct bearer));
 		memset(media_list, 0, MAX_MEDIA * sizeof(struct media));
 		res = TIPC_OK;
 	} else {
-		kfree(bearers);
+		kfree(tipc_bearers);
 		kfree(media_list);
-		bearers = 0;
+		tipc_bearers = 0;
 		media_list = 0;
 		res = -ENOMEM;
 	}
-	write_unlock_bh(&net_lock);
+	write_unlock_bh(&tipc_net_lock);
 	return res;
 }
 
-void bearer_stop(void)
+void tipc_bearer_stop(void)
 {
 	u32 i;
 
-	if (!bearers)
+	if (!tipc_bearers)
 		return;
 
 	for (i = 0; i < MAX_BEARERS; i++) {
-		if (bearers[i].active)
-			bearers[i].publ.blocked = 1;
+		if (tipc_bearers[i].active)
+			tipc_bearers[i].publ.blocked = 1;
 	}
 	for (i = 0; i < MAX_BEARERS; i++) {
-		if (bearers[i].active)
-			bearer_disable(bearers[i].publ.name);
+		if (tipc_bearers[i].active)
+			bearer_disable(tipc_bearers[i].publ.name);
 	}
-	kfree(bearers);
+	kfree(tipc_bearers);
 	kfree(media_list);
-	bearers = 0;
+	tipc_bearers = 0;
 	media_list = 0;
 	media_count = 0;
 }
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 0c40cc2b43cc..c4e7c1c3655b 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -114,26 +114,24 @@ struct bearer_name {
 
 struct link;
 
-extern struct bearer *bearers;
+extern struct bearer *tipc_bearers;
 
-void media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
-struct sk_buff *media_get_names(void);
+void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
+struct sk_buff *tipc_media_get_names(void);
 
-struct sk_buff *bearer_get_names(void);
-void bearer_add_dest(struct bearer *b_ptr, u32 dest);
-void bearer_remove_dest(struct bearer *b_ptr, u32 dest);
-void bearer_schedule(struct bearer *b_ptr, struct link *l_ptr);
-struct bearer *bearer_find_interface(const char *if_name);
-int bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr);
-int bearer_init(void);
-void bearer_stop(void);
-int bearer_broadcast(struct sk_buff *buf, struct tipc_bearer *b_ptr,
-		     struct tipc_media_addr *dest);
-void bearer_lock_push(struct bearer *b_ptr);
+struct sk_buff *tipc_bearer_get_names(void);
+void tipc_bearer_add_dest(struct bearer *b_ptr, u32 dest);
+void tipc_bearer_remove_dest(struct bearer *b_ptr, u32 dest);
+void tipc_bearer_schedule(struct bearer *b_ptr, struct link *l_ptr);
+struct bearer *tipc_bearer_find_interface(const char *if_name);
+int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr);
+int tipc_bearer_init(void);
+void tipc_bearer_stop(void);
+void tipc_bearer_lock_push(struct bearer *b_ptr);
 
 
 /**
- * bearer_send- sends buffer to destination over bearer
+ * tipc_bearer_send- sends buffer to destination over bearer
  *
  * Returns true (1) if successful, or false (0) if unable to send
  *
@@ -150,23 +148,23 @@ void bearer_lock_push(struct bearer *b_ptr);
  * and let TIPC's link code deal with the undelivered message.
  */
 
-static inline int bearer_send(struct bearer *b_ptr, struct sk_buff *buf,
-			      struct tipc_media_addr *dest)
+static inline int tipc_bearer_send(struct bearer *b_ptr, struct sk_buff *buf,
+			      struct tipc_media_addr *dest)
 {
 	return !b_ptr->media->send_msg(buf, &b_ptr->publ, dest);
 }
 
 /**
- * bearer_congested - determines if bearer is currently congested
+ * tipc_bearer_congested - determines if bearer is currently congested
  */
 
-static inline int bearer_congested(struct bearer *b_ptr, struct link *l_ptr)
+static inline int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr)
 {
 	if (unlikely(b_ptr->publ.blocked))
 		return 1;
 	if (likely(list_empty(&b_ptr->cong_links)))
 		return 0;
-	return !bearer_resolve_congestion(b_ptr, l_ptr);
+	return !tipc_bearer_resolve_congestion(b_ptr, l_ptr);
 }
 
 #endif
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
index f0f7bac51d41..ab974ca19371 100644
--- a/net/tipc/cluster.c
+++ b/net/tipc/cluster.c
@@ -44,15 +44,15 @@
 #include "msg.h"
 #include "bearer.h"
 
-void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf,
+void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf,
 		       u32 lower, u32 upper);
-struct sk_buff *cluster_prepare_routing_msg(u32 data_size, u32 dest);
+struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest);
 
-struct node **local_nodes = 0;
-struct node_map cluster_bcast_nodes = {0,{0,}};
-u32 highest_allowed_slave = 0;
+struct node **tipc_local_nodes = 0;
+struct node_map tipc_cltr_bcast_nodes = {0,{0,}};
+u32 tipc_highest_allowed_slave = 0;
 
-struct cluster *cluster_create(u32 addr)
+struct cluster *tipc_cltr_create(u32 addr)
 {
 	struct _zone *z_ptr;
 	struct cluster *c_ptr;
@@ -77,16 +77,16 @@ struct cluster *cluster_create(u32 addr)
 	}
 	memset(c_ptr->nodes, 0, alloc);
 	if (in_own_cluster(addr))
-		local_nodes = c_ptr->nodes;
+		tipc_local_nodes = c_ptr->nodes;
 	c_ptr->highest_slave = LOWEST_SLAVE - 1;
 	c_ptr->highest_node = 0;
 
-	z_ptr = zone_find(tipc_zone(addr));
+	z_ptr = tipc_zone_find(tipc_zone(addr));
 	if (z_ptr == NULL) {
-		z_ptr = zone_create(addr);
+		z_ptr = tipc_zone_create(addr);
 	}
 	if (z_ptr != NULL) {
-		zone_attach_cluster(z_ptr, c_ptr);
+		tipc_zone_attach_cluster(z_ptr, c_ptr);
 		c_ptr->owner = z_ptr;
 	}
 	else {
@@ -97,23 +97,23 @@ struct cluster *cluster_create(u32 addr)
 	return c_ptr;
 }
 
-void cluster_delete(struct cluster *c_ptr)
+void tipc_cltr_delete(struct cluster *c_ptr)
 {
 	u32 n_num;
 
 	if (!c_ptr)
 		return;
 	for (n_num = 1; n_num <= c_ptr->highest_node; n_num++) {
-		node_delete(c_ptr->nodes[n_num]);
+		tipc_node_delete(c_ptr->nodes[n_num]);
 	}
 	for (n_num = LOWEST_SLAVE; n_num <= c_ptr->highest_slave; n_num++) {
-		node_delete(c_ptr->nodes[n_num]);
+		tipc_node_delete(c_ptr->nodes[n_num]);
 	}
 	kfree(c_ptr->nodes);
 	kfree(c_ptr);
 }
 
-u32 cluster_next_node(struct cluster *c_ptr, u32 addr)
+u32 tipc_cltr_next_node(struct cluster *c_ptr, u32 addr)
 {
 	struct node *n_ptr;
 	u32 n_num = tipc_node(addr) + 1;
@@ -122,24 +122,24 @@ u32 cluster_next_node(struct cluster *c_ptr, u32 addr)
 		return addr;
 	for (; n_num <= c_ptr->highest_node; n_num++) {
 		n_ptr = c_ptr->nodes[n_num];
-		if (n_ptr && node_has_active_links(n_ptr))
+		if (n_ptr && tipc_node_has_active_links(n_ptr))
 			return n_ptr->addr;
 	}
 	for (n_num = 1; n_num < tipc_node(addr); n_num++) {
 		n_ptr = c_ptr->nodes[n_num];
-		if (n_ptr && node_has_active_links(n_ptr))
+		if (n_ptr && tipc_node_has_active_links(n_ptr))
 			return n_ptr->addr;
 	}
 	return 0;
 }
 
-void cluster_attach_node(struct cluster *c_ptr, struct node *n_ptr)
+void tipc_cltr_attach_node(struct cluster *c_ptr, struct node *n_ptr)
 {
 	u32 n_num = tipc_node(n_ptr->addr);
 	u32 max_n_num = tipc_max_nodes;
 
 	if (in_own_cluster(n_ptr->addr))
-		max_n_num = highest_allowed_slave;
+		max_n_num = tipc_highest_allowed_slave;
 	assert(n_num > 0);
 	assert(n_num <= max_n_num);
 	assert(c_ptr->nodes[n_num] == 0);
@@ -149,12 +149,12 @@ void cluster_attach_node(struct cluster *c_ptr, struct node *n_ptr)
149} 149}
150 150
151/** 151/**
152 * cluster_select_router - select router to a cluster 152 * tipc_cltr_select_router - select router to a cluster
153 * 153 *
154 * Uses a deterministic and fair algorithm. 154 * Uses a deterministic and fair algorithm.
155 */ 155 */
156 156
157u32 cluster_select_router(struct cluster *c_ptr, u32 ref) 157u32 tipc_cltr_select_router(struct cluster *c_ptr, u32 ref)
158{ 158{
159 u32 n_num; 159 u32 n_num;
160 u32 ulim = c_ptr->highest_node; 160 u32 ulim = c_ptr->highest_node;
@@ -174,29 +174,29 @@ u32 cluster_select_router(struct cluster *c_ptr, u32 ref)
174 174
175 /* Lookup upwards with wrap-around */ 175 /* Lookup upwards with wrap-around */
176 do { 176 do {
177 if (node_is_up(c_ptr->nodes[n_num])) 177 if (tipc_node_is_up(c_ptr->nodes[n_num]))
178 break; 178 break;
179 } while (++n_num <= ulim); 179 } while (++n_num <= ulim);
180 if (n_num > ulim) { 180 if (n_num > ulim) {
181 n_num = 1; 181 n_num = 1;
182 do { 182 do {
183 if (node_is_up(c_ptr->nodes[n_num])) 183 if (tipc_node_is_up(c_ptr->nodes[n_num]))
184 break; 184 break;
185 } while (++n_num < tstart); 185 } while (++n_num < tstart);
186 if (n_num == tstart) 186 if (n_num == tstart)
187 return 0; 187 return 0;
188 } 188 }
189 assert(n_num <= ulim); 189 assert(n_num <= ulim);
190 return node_select_router(c_ptr->nodes[n_num], ref); 190 return tipc_node_select_router(c_ptr->nodes[n_num], ref);
191} 191}
192 192
193/** 193/**
194 * cluster_select_node - select destination node within a remote cluster 194 * tipc_cltr_select_node - select destination node within a remote cluster
195 * 195 *
196 * Uses a deterministic and fair algorithm. 196 * Uses a deterministic and fair algorithm.
197 */ 197 */
198 198
199struct node *cluster_select_node(struct cluster *c_ptr, u32 selector) 199struct node *tipc_cltr_select_node(struct cluster *c_ptr, u32 selector)
200{ 200{
201 u32 n_num; 201 u32 n_num;
202 u32 mask = tipc_max_nodes; 202 u32 mask = tipc_max_nodes;
@@ -215,11 +215,11 @@ struct node *cluster_select_node(struct cluster *c_ptr, u32 selector)
215 215
216 /* Lookup upwards with wrap-around */ 216 /* Lookup upwards with wrap-around */
217 for (n_num = start_entry; n_num <= c_ptr->highest_node; n_num++) { 217 for (n_num = start_entry; n_num <= c_ptr->highest_node; n_num++) {
218 if (node_has_active_links(c_ptr->nodes[n_num])) 218 if (tipc_node_has_active_links(c_ptr->nodes[n_num]))
219 return c_ptr->nodes[n_num]; 219 return c_ptr->nodes[n_num];
220 } 220 }
221 for (n_num = 1; n_num < start_entry; n_num++) { 221 for (n_num = 1; n_num < start_entry; n_num++) {
222 if (node_has_active_links(c_ptr->nodes[n_num])) 222 if (tipc_node_has_active_links(c_ptr->nodes[n_num]))
223 return c_ptr->nodes[n_num]; 223 return c_ptr->nodes[n_num];
224 } 224 }
225 return 0; 225 return 0;
@@ -229,7 +229,7 @@ struct node *cluster_select_node(struct cluster *c_ptr, u32 selector)
229 * Routing table management: See description in node.c 229 * Routing table management: See description in node.c
230 */ 230 */
231 231
232struct sk_buff *cluster_prepare_routing_msg(u32 data_size, u32 dest) 232struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest)
233{ 233{
234 u32 size = INT_H_SIZE + data_size; 234 u32 size = INT_H_SIZE + data_size;
235 struct sk_buff *buf = buf_acquire(size); 235 struct sk_buff *buf = buf_acquire(size);
@@ -243,39 +243,39 @@ struct sk_buff *cluster_prepare_routing_msg(u32 data_size, u32 dest)
243 return buf; 243 return buf;
244} 244}
245 245
246void cluster_bcast_new_route(struct cluster *c_ptr, u32 dest, 246void tipc_cltr_bcast_new_route(struct cluster *c_ptr, u32 dest,
247 u32 lower, u32 upper) 247 u32 lower, u32 upper)
248{ 248{
249 struct sk_buff *buf = cluster_prepare_routing_msg(0, c_ptr->addr); 249 struct sk_buff *buf = tipc_cltr_prepare_routing_msg(0, c_ptr->addr);
250 struct tipc_msg *msg; 250 struct tipc_msg *msg;
251 251
252 if (buf) { 252 if (buf) {
253 msg = buf_msg(buf); 253 msg = buf_msg(buf);
254 msg_set_remote_node(msg, dest); 254 msg_set_remote_node(msg, dest);
255 msg_set_type(msg, ROUTE_ADDITION); 255 msg_set_type(msg, ROUTE_ADDITION);
256 cluster_multicast(c_ptr, buf, lower, upper); 256 tipc_cltr_multicast(c_ptr, buf, lower, upper);
257 } else { 257 } else {
258 warn("Memory squeeze: broadcast of new route failed\n"); 258 warn("Memory squeeze: broadcast of new route failed\n");
259 } 259 }
260} 260}
261 261
262void cluster_bcast_lost_route(struct cluster *c_ptr, u32 dest, 262void tipc_cltr_bcast_lost_route(struct cluster *c_ptr, u32 dest,
263 u32 lower, u32 upper) 263 u32 lower, u32 upper)
264{ 264{
265 struct sk_buff *buf = cluster_prepare_routing_msg(0, c_ptr->addr); 265 struct sk_buff *buf = tipc_cltr_prepare_routing_msg(0, c_ptr->addr);
266 struct tipc_msg *msg; 266 struct tipc_msg *msg;
267 267
268 if (buf) { 268 if (buf) {
269 msg = buf_msg(buf); 269 msg = buf_msg(buf);
270 msg_set_remote_node(msg, dest); 270 msg_set_remote_node(msg, dest);
271 msg_set_type(msg, ROUTE_REMOVAL); 271 msg_set_type(msg, ROUTE_REMOVAL);
272 cluster_multicast(c_ptr, buf, lower, upper); 272 tipc_cltr_multicast(c_ptr, buf, lower, upper);
273 } else { 273 } else {
274 warn("Memory squeeze: broadcast of lost route failed\n"); 274 warn("Memory squeeze: broadcast of lost route failed\n");
275 } 275 }
276} 276}
277 277
278void cluster_send_slave_routes(struct cluster *c_ptr, u32 dest) 278void tipc_cltr_send_slave_routes(struct cluster *c_ptr, u32 dest)
279{ 279{
280 struct sk_buff *buf; 280 struct sk_buff *buf;
281 struct tipc_msg *msg; 281 struct tipc_msg *msg;
@@ -288,21 +288,21 @@ void cluster_send_slave_routes(struct cluster *c_ptr, u32 dest)
288 assert(in_own_cluster(c_ptr->addr)); 288 assert(in_own_cluster(c_ptr->addr));
289 if (highest <= LOWEST_SLAVE) 289 if (highest <= LOWEST_SLAVE)
290 return; 290 return;
291 buf = cluster_prepare_routing_msg(highest - LOWEST_SLAVE + 1, 291 buf = tipc_cltr_prepare_routing_msg(highest - LOWEST_SLAVE + 1,
292 c_ptr->addr); 292 c_ptr->addr);
293 if (buf) { 293 if (buf) {
294 msg = buf_msg(buf); 294 msg = buf_msg(buf);
295 msg_set_remote_node(msg, c_ptr->addr); 295 msg_set_remote_node(msg, c_ptr->addr);
296 msg_set_type(msg, SLAVE_ROUTING_TABLE); 296 msg_set_type(msg, SLAVE_ROUTING_TABLE);
297 for (n_num = LOWEST_SLAVE; n_num <= highest; n_num++) { 297 for (n_num = LOWEST_SLAVE; n_num <= highest; n_num++) {
298 if (c_ptr->nodes[n_num] && 298 if (c_ptr->nodes[n_num] &&
299 node_has_active_links(c_ptr->nodes[n_num])) { 299 tipc_node_has_active_links(c_ptr->nodes[n_num])) {
300 send = 1; 300 send = 1;
301 msg_set_dataoctet(msg, n_num); 301 msg_set_dataoctet(msg, n_num);
302 } 302 }
303 } 303 }
304 if (send) 304 if (send)
305 link_send(buf, dest, dest); 305 tipc_link_send(buf, dest, dest);
306 else 306 else
307 buf_discard(buf); 307 buf_discard(buf);
308 } else { 308 } else {
@@ -310,7 +310,7 @@ void cluster_send_slave_routes(struct cluster *c_ptr, u32 dest)
310 } 310 }
311} 311}
312 312
313void cluster_send_ext_routes(struct cluster *c_ptr, u32 dest) 313void tipc_cltr_send_ext_routes(struct cluster *c_ptr, u32 dest)
314{ 314{
315 struct sk_buff *buf; 315 struct sk_buff *buf;
316 struct tipc_msg *msg; 316 struct tipc_msg *msg;
@@ -323,20 +323,20 @@ void cluster_send_ext_routes(struct cluster *c_ptr, u32 dest)
323 assert(!is_slave(dest)); 323 assert(!is_slave(dest));
324 assert(in_own_cluster(dest)); 324 assert(in_own_cluster(dest));
325 highest = c_ptr->highest_node; 325 highest = c_ptr->highest_node;
326 buf = cluster_prepare_routing_msg(highest + 1, c_ptr->addr); 326 buf = tipc_cltr_prepare_routing_msg(highest + 1, c_ptr->addr);
327 if (buf) { 327 if (buf) {
328 msg = buf_msg(buf); 328 msg = buf_msg(buf);
329 msg_set_remote_node(msg, c_ptr->addr); 329 msg_set_remote_node(msg, c_ptr->addr);
330 msg_set_type(msg, EXT_ROUTING_TABLE); 330 msg_set_type(msg, EXT_ROUTING_TABLE);
331 for (n_num = 1; n_num <= highest; n_num++) { 331 for (n_num = 1; n_num <= highest; n_num++) {
332 if (c_ptr->nodes[n_num] && 332 if (c_ptr->nodes[n_num] &&
333 node_has_active_links(c_ptr->nodes[n_num])) { 333 tipc_node_has_active_links(c_ptr->nodes[n_num])) {
334 send = 1; 334 send = 1;
335 msg_set_dataoctet(msg, n_num); 335 msg_set_dataoctet(msg, n_num);
336 } 336 }
337 } 337 }
338 if (send) 338 if (send)
339 link_send(buf, dest, dest); 339 tipc_link_send(buf, dest, dest);
340 else 340 else
341 buf_discard(buf); 341 buf_discard(buf);
342 } else { 342 } else {
@@ -344,7 +344,7 @@ void cluster_send_ext_routes(struct cluster *c_ptr, u32 dest)
344 } 344 }
345} 345}
346 346
347void cluster_send_local_routes(struct cluster *c_ptr, u32 dest) 347void tipc_cltr_send_local_routes(struct cluster *c_ptr, u32 dest)
348{ 348{
349 struct sk_buff *buf; 349 struct sk_buff *buf;
350 struct tipc_msg *msg; 350 struct tipc_msg *msg;
@@ -354,20 +354,20 @@ void cluster_send_local_routes(struct cluster *c_ptr, u32 dest)
354 354
355 assert(is_slave(dest)); 355 assert(is_slave(dest));
356 assert(in_own_cluster(c_ptr->addr)); 356 assert(in_own_cluster(c_ptr->addr));
357 buf = cluster_prepare_routing_msg(highest, c_ptr->addr); 357 buf = tipc_cltr_prepare_routing_msg(highest, c_ptr->addr);
358 if (buf) { 358 if (buf) {
359 msg = buf_msg(buf); 359 msg = buf_msg(buf);
360 msg_set_remote_node(msg, c_ptr->addr); 360 msg_set_remote_node(msg, c_ptr->addr);
361 msg_set_type(msg, LOCAL_ROUTING_TABLE); 361 msg_set_type(msg, LOCAL_ROUTING_TABLE);
362 for (n_num = 1; n_num <= highest; n_num++) { 362 for (n_num = 1; n_num <= highest; n_num++) {
363 if (c_ptr->nodes[n_num] && 363 if (c_ptr->nodes[n_num] &&
364 node_has_active_links(c_ptr->nodes[n_num])) { 364 tipc_node_has_active_links(c_ptr->nodes[n_num])) {
365 send = 1; 365 send = 1;
366 msg_set_dataoctet(msg, n_num); 366 msg_set_dataoctet(msg, n_num);
367 } 367 }
368 } 368 }
369 if (send) 369 if (send)
370 link_send(buf, dest, dest); 370 tipc_link_send(buf, dest, dest);
371 else 371 else
372 buf_discard(buf); 372 buf_discard(buf);
373 } else { 373 } else {
@@ -375,7 +375,7 @@ void cluster_send_local_routes(struct cluster *c_ptr, u32 dest)
375 } 375 }
376} 376}
377 377
378void cluster_recv_routing_table(struct sk_buff *buf) 378void tipc_cltr_recv_routing_table(struct sk_buff *buf)
379{ 379{
380 struct tipc_msg *msg = buf_msg(buf); 380 struct tipc_msg *msg = buf_msg(buf);
381 struct cluster *c_ptr; 381 struct cluster *c_ptr;
@@ -388,9 +388,9 @@ void cluster_recv_routing_table(struct sk_buff *buf)
388 u32 c_num; 388 u32 c_num;
389 u32 n_num; 389 u32 n_num;
390 390
391 c_ptr = cluster_find(rem_node); 391 c_ptr = tipc_cltr_find(rem_node);
392 if (!c_ptr) { 392 if (!c_ptr) {
393 c_ptr = cluster_create(rem_node); 393 c_ptr = tipc_cltr_create(rem_node);
394 if (!c_ptr) { 394 if (!c_ptr) {
395 buf_discard(buf); 395 buf_discard(buf);
396 return; 396 return;
@@ -412,10 +412,10 @@ void cluster_recv_routing_table(struct sk_buff *buf)
412 u32 addr = tipc_addr(z_num, c_num, n_num); 412 u32 addr = tipc_addr(z_num, c_num, n_num);
413 n_ptr = c_ptr->nodes[n_num]; 413 n_ptr = c_ptr->nodes[n_num];
414 if (!n_ptr) { 414 if (!n_ptr) {
415 n_ptr = node_create(addr); 415 n_ptr = tipc_node_create(addr);
416 } 416 }
417 if (n_ptr) 417 if (n_ptr)
418 node_add_router(n_ptr, router); 418 tipc_node_add_router(n_ptr, router);
419 } 419 }
420 } 420 }
421 break; 421 break;
@@ -428,10 +428,10 @@ void cluster_recv_routing_table(struct sk_buff *buf)
428 u32 addr = tipc_addr(z_num, c_num, slave_num); 428 u32 addr = tipc_addr(z_num, c_num, slave_num);
429 n_ptr = c_ptr->nodes[slave_num]; 429 n_ptr = c_ptr->nodes[slave_num];
430 if (!n_ptr) { 430 if (!n_ptr) {
431 n_ptr = node_create(addr); 431 n_ptr = tipc_node_create(addr);
432 } 432 }
433 if (n_ptr) 433 if (n_ptr)
434 node_add_router(n_ptr, router); 434 tipc_node_add_router(n_ptr, router);
435 } 435 }
436 } 436 }
437 break; 437 break;
@@ -445,9 +445,9 @@ void cluster_recv_routing_table(struct sk_buff *buf)
445 } 445 }
446 n_ptr = c_ptr->nodes[tipc_node(rem_node)]; 446 n_ptr = c_ptr->nodes[tipc_node(rem_node)];
447 if (!n_ptr) 447 if (!n_ptr)
448 n_ptr = node_create(rem_node); 448 n_ptr = tipc_node_create(rem_node);
449 if (n_ptr) 449 if (n_ptr)
450 node_add_router(n_ptr, router); 450 tipc_node_add_router(n_ptr, router);
451 break; 451 break;
452 case ROUTE_REMOVAL: 452 case ROUTE_REMOVAL:
453 if (!is_slave(tipc_own_addr)) { 453 if (!is_slave(tipc_own_addr)) {
@@ -459,7 +459,7 @@ void cluster_recv_routing_table(struct sk_buff *buf)
459 } 459 }
460 n_ptr = c_ptr->nodes[tipc_node(rem_node)]; 460 n_ptr = c_ptr->nodes[tipc_node(rem_node)];
461 if (n_ptr) 461 if (n_ptr)
462 node_remove_router(n_ptr, router); 462 tipc_node_remove_router(n_ptr, router);
463 break; 463 break;
464 default: 464 default:
465 assert(!"Illegal routing manager message received\n"); 465 assert(!"Illegal routing manager message received\n");
@@ -467,7 +467,7 @@ void cluster_recv_routing_table(struct sk_buff *buf)
467 buf_discard(buf); 467 buf_discard(buf);
468} 468}
469 469
470void cluster_remove_as_router(struct cluster *c_ptr, u32 router) 470void tipc_cltr_remove_as_router(struct cluster *c_ptr, u32 router)
471{ 471{
472 u32 start_entry; 472 u32 start_entry;
473 u32 tstop; 473 u32 tstop;
@@ -486,17 +486,17 @@ void cluster_remove_as_router(struct cluster *c_ptr, u32 router)
486 486
487 for (n_num = start_entry; n_num <= tstop; n_num++) { 487 for (n_num = start_entry; n_num <= tstop; n_num++) {
488 if (c_ptr->nodes[n_num]) { 488 if (c_ptr->nodes[n_num]) {
489 node_remove_router(c_ptr->nodes[n_num], router); 489 tipc_node_remove_router(c_ptr->nodes[n_num], router);
490 } 490 }
491 } 491 }
492} 492}
493 493
494/** 494/**
495 * cluster_multicast - multicast message to local nodes 495 * tipc_cltr_multicast - multicast message to local nodes
496 */ 496 */
497 497
498void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf, 498void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf,
499 u32 lower, u32 upper) 499 u32 lower, u32 upper)
500{ 500{
501 struct sk_buff *buf_copy; 501 struct sk_buff *buf_copy;
502 struct node *n_ptr; 502 struct node *n_ptr;
@@ -505,9 +505,9 @@ void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf,
505 505
506 assert(lower <= upper); 506 assert(lower <= upper);
507 assert(((lower >= 1) && (lower <= tipc_max_nodes)) || 507 assert(((lower >= 1) && (lower <= tipc_max_nodes)) ||
508 ((lower >= LOWEST_SLAVE) && (lower <= highest_allowed_slave))); 508 ((lower >= LOWEST_SLAVE) && (lower <= tipc_highest_allowed_slave)));
509 assert(((upper >= 1) && (upper <= tipc_max_nodes)) || 509 assert(((upper >= 1) && (upper <= tipc_max_nodes)) ||
510 ((upper >= LOWEST_SLAVE) && (upper <= highest_allowed_slave))); 510 ((upper >= LOWEST_SLAVE) && (upper <= tipc_highest_allowed_slave)));
511 assert(in_own_cluster(c_ptr->addr)); 511 assert(in_own_cluster(c_ptr->addr));
512 512
513 tstop = is_slave(upper) ? c_ptr->highest_slave : c_ptr->highest_node; 513 tstop = is_slave(upper) ? c_ptr->highest_slave : c_ptr->highest_node;
@@ -515,22 +515,22 @@ void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf,
515 tstop = upper; 515 tstop = upper;
516 for (n_num = lower; n_num <= tstop; n_num++) { 516 for (n_num = lower; n_num <= tstop; n_num++) {
517 n_ptr = c_ptr->nodes[n_num]; 517 n_ptr = c_ptr->nodes[n_num];
518 if (n_ptr && node_has_active_links(n_ptr)) { 518 if (n_ptr && tipc_node_has_active_links(n_ptr)) {
519 buf_copy = skb_copy(buf, GFP_ATOMIC); 519 buf_copy = skb_copy(buf, GFP_ATOMIC);
520 if (buf_copy == NULL) 520 if (buf_copy == NULL)
521 break; 521 break;
522 msg_set_destnode(buf_msg(buf_copy), n_ptr->addr); 522 msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
523 link_send(buf_copy, n_ptr->addr, n_ptr->addr); 523 tipc_link_send(buf_copy, n_ptr->addr, n_ptr->addr);
524 } 524 }
525 } 525 }
526 buf_discard(buf); 526 buf_discard(buf);
527} 527}
528 528
529/** 529/**
530 * cluster_broadcast - broadcast message to all nodes within cluster 530 * tipc_cltr_broadcast - broadcast message to all nodes within cluster
531 */ 531 */
532 532
533void cluster_broadcast(struct sk_buff *buf) 533void tipc_cltr_broadcast(struct sk_buff *buf)
534{ 534{
535 struct sk_buff *buf_copy; 535 struct sk_buff *buf_copy;
536 struct cluster *c_ptr; 536 struct cluster *c_ptr;
@@ -541,7 +541,7 @@ void cluster_broadcast(struct sk_buff *buf)
541 u32 node_type; 541 u32 node_type;
542 542
543 if (tipc_mode == TIPC_NET_MODE) { 543 if (tipc_mode == TIPC_NET_MODE) {
544 c_ptr = cluster_find(tipc_own_addr); 544 c_ptr = tipc_cltr_find(tipc_own_addr);
545 assert(in_own_cluster(c_ptr->addr)); /* For now */ 545 assert(in_own_cluster(c_ptr->addr)); /* For now */
546 546
547 /* Send to standard nodes, then repeat loop sending to slaves */ 547 /* Send to standard nodes, then repeat loop sending to slaves */
@@ -550,14 +550,14 @@ void cluster_broadcast(struct sk_buff *buf)
550 for (node_type = 1; node_type <= 2; node_type++) { 550 for (node_type = 1; node_type <= 2; node_type++) {
551 for (n_num = tstart; n_num <= tstop; n_num++) { 551 for (n_num = tstart; n_num <= tstop; n_num++) {
552 n_ptr = c_ptr->nodes[n_num]; 552 n_ptr = c_ptr->nodes[n_num];
553 if (n_ptr && node_has_active_links(n_ptr)) { 553 if (n_ptr && tipc_node_has_active_links(n_ptr)) {
554 buf_copy = skb_copy(buf, GFP_ATOMIC); 554 buf_copy = skb_copy(buf, GFP_ATOMIC);
555 if (buf_copy == NULL) 555 if (buf_copy == NULL)
556 goto exit; 556 goto exit;
557 msg_set_destnode(buf_msg(buf_copy), 557 msg_set_destnode(buf_msg(buf_copy),
558 n_ptr->addr); 558 n_ptr->addr);
559 link_send(buf_copy, n_ptr->addr, 559 tipc_link_send(buf_copy, n_ptr->addr,
560 n_ptr->addr); 560 n_ptr->addr);
561 } 561 }
562 } 562 }
563 tstart = LOWEST_SLAVE; 563 tstart = LOWEST_SLAVE;
@@ -568,9 +568,9 @@ exit:
568 buf_discard(buf); 568 buf_discard(buf);
569} 569}
570 570
571int cluster_init(void) 571int tipc_cltr_init(void)
572{ 572{
573 highest_allowed_slave = LOWEST_SLAVE + tipc_max_slaves; 573 tipc_highest_allowed_slave = LOWEST_SLAVE + tipc_max_slaves;
574 return cluster_create(tipc_own_addr) ? TIPC_OK : -ENOMEM; 574 return tipc_cltr_create(tipc_own_addr) ? TIPC_OK : -ENOMEM;
575} 575}
576 576
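Both tipc_cltr_select_router() and tipc_cltr_select_node() above use the same pattern: derive a 1-based start index from the caller's reference or selector, scan the node table upward, and wrap around to entry 1 if the top of the table yields nothing usable. Below is a minimal userspace sketch of that wrap-around scan; struct fake_node and its usable flag are stand-ins for the kernel's struct node and the tipc_node_is_up()/tipc_node_has_active_links() checks, not the actual API.

/* Stand-in for the kernel's struct node; illustration only. */
struct fake_node {
	int usable;	/* plays the role of tipc_node_is_up() / has_active_links() */
};

/*
 * Scan entries[1..highest] starting at an index derived from 'selector',
 * wrapping around to entry 1 if the upper part of the table is empty.
 * Returns the index of the first usable entry, or 0 if none exists.
 */
static unsigned int select_wraparound(struct fake_node **entries,
				      unsigned int highest,
				      unsigned int selector)
{
	unsigned int start, n;

	if (highest == 0)
		return 0;
	start = (selector % highest) + 1;	/* 1-based start entry */
	for (n = start; n <= highest; n++)
		if (entries[n] && entries[n]->usable)
			return n;
	for (n = 1; n < start; n++)
		if (entries[n] && entries[n]->usable)
			return n;
	return 0;
}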
diff --git a/net/tipc/cluster.h b/net/tipc/cluster.h
index 1ffb095991df..9963642e1058 100644
--- a/net/tipc/cluster.h
+++ b/net/tipc/cluster.h
@@ -60,29 +60,29 @@ struct cluster {
60}; 60};
61 61
62 62
63extern struct node **local_nodes; 63extern struct node **tipc_local_nodes;
64extern u32 highest_allowed_slave; 64extern u32 tipc_highest_allowed_slave;
65extern struct node_map cluster_bcast_nodes; 65extern struct node_map tipc_cltr_bcast_nodes;
66 66
67void cluster_remove_as_router(struct cluster *c_ptr, u32 router); 67void tipc_cltr_remove_as_router(struct cluster *c_ptr, u32 router);
68void cluster_send_ext_routes(struct cluster *c_ptr, u32 dest); 68void tipc_cltr_send_ext_routes(struct cluster *c_ptr, u32 dest);
69struct node *cluster_select_node(struct cluster *c_ptr, u32 selector); 69struct node *tipc_cltr_select_node(struct cluster *c_ptr, u32 selector);
70u32 cluster_select_router(struct cluster *c_ptr, u32 ref); 70u32 tipc_cltr_select_router(struct cluster *c_ptr, u32 ref);
71void cluster_recv_routing_table(struct sk_buff *buf); 71void tipc_cltr_recv_routing_table(struct sk_buff *buf);
72struct cluster *cluster_create(u32 addr); 72struct cluster *tipc_cltr_create(u32 addr);
73void cluster_delete(struct cluster *c_ptr); 73void tipc_cltr_delete(struct cluster *c_ptr);
74void cluster_attach_node(struct cluster *c_ptr, struct node *n_ptr); 74void tipc_cltr_attach_node(struct cluster *c_ptr, struct node *n_ptr);
75void cluster_send_slave_routes(struct cluster *c_ptr, u32 dest); 75void tipc_cltr_send_slave_routes(struct cluster *c_ptr, u32 dest);
76void cluster_broadcast(struct sk_buff *buf); 76void tipc_cltr_broadcast(struct sk_buff *buf);
77int cluster_init(void); 77int tipc_cltr_init(void);
78u32 cluster_next_node(struct cluster *c_ptr, u32 addr); 78u32 tipc_cltr_next_node(struct cluster *c_ptr, u32 addr);
79void cluster_bcast_new_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi); 79void tipc_cltr_bcast_new_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi);
80void cluster_send_local_routes(struct cluster *c_ptr, u32 dest); 80void tipc_cltr_send_local_routes(struct cluster *c_ptr, u32 dest);
81void cluster_bcast_lost_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi); 81void tipc_cltr_bcast_lost_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi);
82 82
83static inline struct cluster *cluster_find(u32 addr) 83static inline struct cluster *tipc_cltr_find(u32 addr)
84{ 84{
85 struct _zone *z_ptr = zone_find(addr); 85 struct _zone *z_ptr = tipc_zone_find(addr);
86 86
87 if (z_ptr) 87 if (z_ptr)
88 return z_ptr->clusters[1]; 88 return z_ptr->clusters[1];
diff --git a/net/tipc/config.c b/net/tipc/config.c
index 8ddef4fce2c2..3c8e6740e5ae 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -70,13 +70,13 @@ static int req_tlv_space; /* request message TLV area size */
70static int rep_headroom; /* reply message headroom to use */ 70static int rep_headroom; /* reply message headroom to use */
71 71
72 72
73void cfg_link_event(u32 addr, char *name, int up) 73void tipc_cfg_link_event(u32 addr, char *name, int up)
74{ 74{
75 /* TIPC DOESN'T HANDLE LINK EVENT SUBSCRIPTIONS AT THE MOMENT */ 75 /* TIPC DOESN'T HANDLE LINK EVENT SUBSCRIPTIONS AT THE MOMENT */
76} 76}
77 77
78 78
79struct sk_buff *cfg_reply_alloc(int payload_size) 79struct sk_buff *tipc_cfg_reply_alloc(int payload_size)
80{ 80{
81 struct sk_buff *buf; 81 struct sk_buff *buf;
82 82
@@ -86,14 +86,14 @@ struct sk_buff *cfg_reply_alloc(int payload_size)
86 return buf; 86 return buf;
87} 87}
88 88
89int cfg_append_tlv(struct sk_buff *buf, int tlv_type, 89int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type,
90 void *tlv_data, int tlv_data_size) 90 void *tlv_data, int tlv_data_size)
91{ 91{
92 struct tlv_desc *tlv = (struct tlv_desc *)buf->tail; 92 struct tlv_desc *tlv = (struct tlv_desc *)buf->tail;
93 int new_tlv_space = TLV_SPACE(tlv_data_size); 93 int new_tlv_space = TLV_SPACE(tlv_data_size);
94 94
95 if (skb_tailroom(buf) < new_tlv_space) { 95 if (skb_tailroom(buf) < new_tlv_space) {
96 dbg("cfg_append_tlv unable to append TLV\n"); 96 dbg("tipc_cfg_append_tlv unable to append TLV\n");
97 return 0; 97 return 0;
98 } 98 }
99 skb_put(buf, new_tlv_space); 99 skb_put(buf, new_tlv_space);
@@ -104,28 +104,28 @@ int cfg_append_tlv(struct sk_buff *buf, int tlv_type,
104 return 1; 104 return 1;
105} 105}
106 106
107struct sk_buff *cfg_reply_unsigned_type(u16 tlv_type, u32 value) 107struct sk_buff *tipc_cfg_reply_unsigned_type(u16 tlv_type, u32 value)
108{ 108{
109 struct sk_buff *buf; 109 struct sk_buff *buf;
110 u32 value_net; 110 u32 value_net;
111 111
112 buf = cfg_reply_alloc(TLV_SPACE(sizeof(value))); 112 buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(value)));
113 if (buf) { 113 if (buf) {
114 value_net = htonl(value); 114 value_net = htonl(value);
115 cfg_append_tlv(buf, tlv_type, &value_net, 115 tipc_cfg_append_tlv(buf, tlv_type, &value_net,
116 sizeof(value_net)); 116 sizeof(value_net));
117 } 117 }
118 return buf; 118 return buf;
119} 119}
120 120
121struct sk_buff *cfg_reply_string_type(u16 tlv_type, char *string) 121struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string)
122{ 122{
123 struct sk_buff *buf; 123 struct sk_buff *buf;
124 int string_len = strlen(string) + 1; 124 int string_len = strlen(string) + 1;
125 125
126 buf = cfg_reply_alloc(TLV_SPACE(string_len)); 126 buf = tipc_cfg_reply_alloc(TLV_SPACE(string_len));
127 if (buf) 127 if (buf)
128 cfg_append_tlv(buf, tlv_type, string, string_len); 128 tipc_cfg_append_tlv(buf, tlv_type, string, string_len);
129 return buf; 129 return buf;
130} 130}
131 131
@@ -246,7 +246,7 @@ static void cfg_cmd_event(struct tipc_cmd_msg *msg,
246 exit: 246 exit:
247 rmsg.result_len = htonl(msg_sect[1].iov_len); 247 rmsg.result_len = htonl(msg_sect[1].iov_len);
248 rmsg.retval = htonl(rv); 248 rmsg.retval = htonl(rv);
249 cfg_respond(msg_sect, 2u, orig); 249 tipc_cfg_respond(msg_sect, 2u, orig);
250} 250}
251#endif 251#endif
252 252
@@ -255,26 +255,26 @@ static struct sk_buff *cfg_enable_bearer(void)
255 struct tipc_bearer_config *args; 255 struct tipc_bearer_config *args;
256 256
257 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_CONFIG)) 257 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_CONFIG))
258 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 258 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
259 259
260 args = (struct tipc_bearer_config *)TLV_DATA(req_tlv_area); 260 args = (struct tipc_bearer_config *)TLV_DATA(req_tlv_area);
261 if (tipc_enable_bearer(args->name, 261 if (tipc_enable_bearer(args->name,
262 ntohl(args->detect_scope), 262 ntohl(args->detect_scope),
263 ntohl(args->priority))) 263 ntohl(args->priority)))
264 return cfg_reply_error_string("unable to enable bearer"); 264 return tipc_cfg_reply_error_string("unable to enable bearer");
265 265
266 return cfg_reply_none(); 266 return tipc_cfg_reply_none();
267} 267}
268 268
269static struct sk_buff *cfg_disable_bearer(void) 269static struct sk_buff *cfg_disable_bearer(void)
270{ 270{
271 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_NAME)) 271 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_NAME))
272 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 272 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
273 273
274 if (tipc_disable_bearer((char *)TLV_DATA(req_tlv_area))) 274 if (tipc_disable_bearer((char *)TLV_DATA(req_tlv_area)))
275 return cfg_reply_error_string("unable to disable bearer"); 275 return tipc_cfg_reply_error_string("unable to disable bearer");
276 276
277 return cfg_reply_none(); 277 return tipc_cfg_reply_none();
278} 278}
279 279
280static struct sk_buff *cfg_set_own_addr(void) 280static struct sk_buff *cfg_set_own_addr(void)
@@ -282,25 +282,25 @@ static struct sk_buff *cfg_set_own_addr(void)
282 u32 addr; 282 u32 addr;
283 283
284 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) 284 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
285 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 285 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
286 286
287 addr = *(u32 *)TLV_DATA(req_tlv_area); 287 addr = *(u32 *)TLV_DATA(req_tlv_area);
288 addr = ntohl(addr); 288 addr = ntohl(addr);
289 if (addr == tipc_own_addr) 289 if (addr == tipc_own_addr)
290 return cfg_reply_none(); 290 return tipc_cfg_reply_none();
291 if (!addr_node_valid(addr)) 291 if (!tipc_addr_node_valid(addr))
292 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 292 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
293 " (node address)"); 293 " (node address)");
294 if (tipc_own_addr) 294 if (tipc_own_addr)
295 return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 295 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
296 " (cannot change node address once assigned)"); 296 " (cannot change node address once assigned)");
297 297
298 spin_unlock_bh(&config_lock); 298 spin_unlock_bh(&config_lock);
299 stop_net(); 299 tipc_core_stop_net();
300 tipc_own_addr = addr; 300 tipc_own_addr = addr;
301 start_net(); 301 tipc_core_start_net();
302 spin_lock_bh(&config_lock); 302 spin_lock_bh(&config_lock);
303 return cfg_reply_none(); 303 return tipc_cfg_reply_none();
304} 304}
305 305
306static struct sk_buff *cfg_set_remote_mng(void) 306static struct sk_buff *cfg_set_remote_mng(void)
@@ -308,12 +308,12 @@ static struct sk_buff *cfg_set_remote_mng(void)
308 u32 value; 308 u32 value;
309 309
310 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) 310 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
311 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 311 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
312 312
313 value = *(u32 *)TLV_DATA(req_tlv_area); 313 value = *(u32 *)TLV_DATA(req_tlv_area);
314 value = ntohl(value); 314 value = ntohl(value);
315 tipc_remote_management = (value != 0); 315 tipc_remote_management = (value != 0);
316 return cfg_reply_none(); 316 return tipc_cfg_reply_none();
317} 317}
318 318
319static struct sk_buff *cfg_set_max_publications(void) 319static struct sk_buff *cfg_set_max_publications(void)
@@ -321,15 +321,15 @@ static struct sk_buff *cfg_set_max_publications(void)
321 u32 value; 321 u32 value;
322 322
323 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) 323 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
324 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 324 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
325 325
326 value = *(u32 *)TLV_DATA(req_tlv_area); 326 value = *(u32 *)TLV_DATA(req_tlv_area);
327 value = ntohl(value); 327 value = ntohl(value);
328 if (value != delimit(value, 1, 65535)) 328 if (value != delimit(value, 1, 65535))
329 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 329 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
330 " (max publications must be 1-65535)"); 330 " (max publications must be 1-65535)");
331 tipc_max_publications = value; 331 tipc_max_publications = value;
332 return cfg_reply_none(); 332 return tipc_cfg_reply_none();
333} 333}
334 334
335static struct sk_buff *cfg_set_max_subscriptions(void) 335static struct sk_buff *cfg_set_max_subscriptions(void)
@@ -337,15 +337,15 @@ static struct sk_buff *cfg_set_max_subscriptions(void)
337 u32 value; 337 u32 value;
338 338
339 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) 339 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
340 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 340 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
341 341
342 value = *(u32 *)TLV_DATA(req_tlv_area); 342 value = *(u32 *)TLV_DATA(req_tlv_area);
343 value = ntohl(value); 343 value = ntohl(value);
344 if (value != delimit(value, 1, 65535)) 344 if (value != delimit(value, 1, 65535))
345 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 345 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
346 " (max subscriptions must be 1-65535"); 346 " (max subscriptions must be 1-65535");
347 tipc_max_subscriptions = value; 347 tipc_max_subscriptions = value;
348 return cfg_reply_none(); 348 return tipc_cfg_reply_none();
349} 349}
350 350
351static struct sk_buff *cfg_set_max_ports(void) 351static struct sk_buff *cfg_set_max_ports(void)
@@ -354,31 +354,31 @@ static struct sk_buff *cfg_set_max_ports(void)
354 u32 value; 354 u32 value;
355 355
356 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) 356 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
357 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 357 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
358 value = *(u32 *)TLV_DATA(req_tlv_area); 358 value = *(u32 *)TLV_DATA(req_tlv_area);
359 value = ntohl(value); 359 value = ntohl(value);
360 if (value != delimit(value, 127, 65535)) 360 if (value != delimit(value, 127, 65535))
361 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 361 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
362 " (max ports must be 127-65535)"); 362 " (max ports must be 127-65535)");
363 363
364 if (value == tipc_max_ports) 364 if (value == tipc_max_ports)
365 return cfg_reply_none(); 365 return tipc_cfg_reply_none();
366 366
367 if (atomic_read(&tipc_user_count) > 2) 367 if (atomic_read(&tipc_user_count) > 2)
368 return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 368 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
369 " (cannot change max ports while TIPC users exist)"); 369 " (cannot change max ports while TIPC users exist)");
370 370
371 spin_unlock_bh(&config_lock); 371 spin_unlock_bh(&config_lock);
372 orig_mode = tipc_get_mode(); 372 orig_mode = tipc_get_mode();
373 if (orig_mode == TIPC_NET_MODE) 373 if (orig_mode == TIPC_NET_MODE)
374 stop_net(); 374 tipc_core_stop_net();
375 stop_core(); 375 tipc_core_stop();
376 tipc_max_ports = value; 376 tipc_max_ports = value;
377 start_core(); 377 tipc_core_start();
378 if (orig_mode == TIPC_NET_MODE) 378 if (orig_mode == TIPC_NET_MODE)
379 start_net(); 379 tipc_core_start_net();
380 spin_lock_bh(&config_lock); 380 spin_lock_bh(&config_lock);
381 return cfg_reply_none(); 381 return tipc_cfg_reply_none();
382} 382}
383 383
384static struct sk_buff *set_net_max(int value, int *parameter) 384static struct sk_buff *set_net_max(int value, int *parameter)
@@ -388,13 +388,13 @@ static struct sk_buff *set_net_max(int value, int *parameter)
388 if (value != *parameter) { 388 if (value != *parameter) {
389 orig_mode = tipc_get_mode(); 389 orig_mode = tipc_get_mode();
390 if (orig_mode == TIPC_NET_MODE) 390 if (orig_mode == TIPC_NET_MODE)
391 stop_net(); 391 tipc_core_stop_net();
392 *parameter = value; 392 *parameter = value;
393 if (orig_mode == TIPC_NET_MODE) 393 if (orig_mode == TIPC_NET_MODE)
394 start_net(); 394 tipc_core_start_net();
395 } 395 }
396 396
397 return cfg_reply_none(); 397 return tipc_cfg_reply_none();
398} 398}
399 399
400static struct sk_buff *cfg_set_max_zones(void) 400static struct sk_buff *cfg_set_max_zones(void)
@@ -402,12 +402,12 @@ static struct sk_buff *cfg_set_max_zones(void)
402 u32 value; 402 u32 value;
403 403
404 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) 404 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
405 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 405 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
406 value = *(u32 *)TLV_DATA(req_tlv_area); 406 value = *(u32 *)TLV_DATA(req_tlv_area);
407 value = ntohl(value); 407 value = ntohl(value);
408 if (value != delimit(value, 1, 255)) 408 if (value != delimit(value, 1, 255))
409 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 409 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
410 " (max zones must be 1-255)"); 410 " (max zones must be 1-255)");
411 return set_net_max(value, &tipc_max_zones); 411 return set_net_max(value, &tipc_max_zones);
412} 412}
413 413
@@ -416,13 +416,13 @@ static struct sk_buff *cfg_set_max_clusters(void)
416 u32 value; 416 u32 value;
417 417
418 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) 418 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
419 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 419 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
420 value = *(u32 *)TLV_DATA(req_tlv_area); 420 value = *(u32 *)TLV_DATA(req_tlv_area);
421 value = ntohl(value); 421 value = ntohl(value);
422 if (value != 1) 422 if (value != 1)
423 return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 423 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
424 " (max clusters fixed at 1)"); 424 " (max clusters fixed at 1)");
425 return cfg_reply_none(); 425 return tipc_cfg_reply_none();
426} 426}
427 427
428static struct sk_buff *cfg_set_max_nodes(void) 428static struct sk_buff *cfg_set_max_nodes(void)
@@ -430,12 +430,12 @@ static struct sk_buff *cfg_set_max_nodes(void)
430 u32 value; 430 u32 value;
431 431
432 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) 432 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
433 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 433 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
434 value = *(u32 *)TLV_DATA(req_tlv_area); 434 value = *(u32 *)TLV_DATA(req_tlv_area);
435 value = ntohl(value); 435 value = ntohl(value);
436 if (value != delimit(value, 8, 2047)) 436 if (value != delimit(value, 8, 2047))
437 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 437 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
438 " (max nodes must be 8-2047)"); 438 " (max nodes must be 8-2047)");
439 return set_net_max(value, &tipc_max_nodes); 439 return set_net_max(value, &tipc_max_nodes);
440} 440}
441 441
@@ -444,13 +444,13 @@ static struct sk_buff *cfg_set_max_slaves(void)
444 u32 value; 444 u32 value;
445 445
446 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) 446 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
447 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 447 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
448 value = *(u32 *)TLV_DATA(req_tlv_area); 448 value = *(u32 *)TLV_DATA(req_tlv_area);
449 value = ntohl(value); 449 value = ntohl(value);
450 if (value != 0) 450 if (value != 0)
451 return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 451 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
452 " (max secondary nodes fixed at 0)"); 452 " (max secondary nodes fixed at 0)");
453 return cfg_reply_none(); 453 return tipc_cfg_reply_none();
454} 454}
455 455
456static struct sk_buff *cfg_set_netid(void) 456static struct sk_buff *cfg_set_netid(void)
@@ -458,22 +458,22 @@ static struct sk_buff *cfg_set_netid(void)
458 u32 value; 458 u32 value;
459 459
460 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) 460 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
461 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 461 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
462 value = *(u32 *)TLV_DATA(req_tlv_area); 462 value = *(u32 *)TLV_DATA(req_tlv_area);
463 value = ntohl(value); 463 value = ntohl(value);
464 if (value != delimit(value, 1, 9999)) 464 if (value != delimit(value, 1, 9999))
465 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 465 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
466 " (network id must be 1-9999)"); 466 " (network id must be 1-9999)");
467 467
468 if (tipc_own_addr) 468 if (tipc_own_addr)
469 return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 469 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
470 " (cannot change network id once part of network)"); 470 " (cannot change network id once part of network)");
471 471
472 return set_net_max(value, &tipc_net_id); 472 return set_net_max(value, &tipc_net_id);
473} 473}
474 474
475struct sk_buff *cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area, 475struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
476 int request_space, int reply_headroom) 476 int request_space, int reply_headroom)
477{ 477{
478 struct sk_buff *rep_tlv_buf; 478 struct sk_buff *rep_tlv_buf;
479 479
@@ -490,19 +490,19 @@ struct sk_buff *cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
490 if (likely(orig_node == tipc_own_addr)) { 490 if (likely(orig_node == tipc_own_addr)) {
491 /* command is permitted */ 491 /* command is permitted */
492 } else if (cmd >= 0x8000) { 492 } else if (cmd >= 0x8000) {
493 rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 493 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
494 " (cannot be done remotely)"); 494 " (cannot be done remotely)");
495 goto exit; 495 goto exit;
496 } else if (!tipc_remote_management) { 496 } else if (!tipc_remote_management) {
497 rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NO_REMOTE); 497 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NO_REMOTE);
498 goto exit; 498 goto exit;
499 } 499 }
500 else if (cmd >= 0x4000) { 500 else if (cmd >= 0x4000) {
501 u32 domain = 0; 501 u32 domain = 0;
502 502
503 if ((nametbl_translate(TIPC_ZM_SRV, 0, &domain) == 0) || 503 if ((tipc_nametbl_translate(TIPC_ZM_SRV, 0, &domain) == 0) ||
504 (domain != orig_node)) { 504 (domain != orig_node)) {
505 rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NOT_ZONE_MSTR); 505 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_ZONE_MSTR);
506 goto exit; 506 goto exit;
507 } 507 }
508 } 508 }
@@ -511,50 +511,50 @@ struct sk_buff *cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
511 511
512 switch (cmd) { 512 switch (cmd) {
513 case TIPC_CMD_NOOP: 513 case TIPC_CMD_NOOP:
514 rep_tlv_buf = cfg_reply_none(); 514 rep_tlv_buf = tipc_cfg_reply_none();
515 break; 515 break;
516 case TIPC_CMD_GET_NODES: 516 case TIPC_CMD_GET_NODES:
517 rep_tlv_buf = node_get_nodes(req_tlv_area, req_tlv_space); 517 rep_tlv_buf = tipc_node_get_nodes(req_tlv_area, req_tlv_space);
518 break; 518 break;
519 case TIPC_CMD_GET_LINKS: 519 case TIPC_CMD_GET_LINKS:
520 rep_tlv_buf = node_get_links(req_tlv_area, req_tlv_space); 520 rep_tlv_buf = tipc_node_get_links(req_tlv_area, req_tlv_space);
521 break; 521 break;
522 case TIPC_CMD_SHOW_LINK_STATS: 522 case TIPC_CMD_SHOW_LINK_STATS:
523 rep_tlv_buf = link_cmd_show_stats(req_tlv_area, req_tlv_space); 523 rep_tlv_buf = tipc_link_cmd_show_stats(req_tlv_area, req_tlv_space);
524 break; 524 break;
525 case TIPC_CMD_RESET_LINK_STATS: 525 case TIPC_CMD_RESET_LINK_STATS:
526 rep_tlv_buf = link_cmd_reset_stats(req_tlv_area, req_tlv_space); 526 rep_tlv_buf = tipc_link_cmd_reset_stats(req_tlv_area, req_tlv_space);
527 break; 527 break;
528 case TIPC_CMD_SHOW_NAME_TABLE: 528 case TIPC_CMD_SHOW_NAME_TABLE:
529 rep_tlv_buf = nametbl_get(req_tlv_area, req_tlv_space); 529 rep_tlv_buf = tipc_nametbl_get(req_tlv_area, req_tlv_space);
530 break; 530 break;
531 case TIPC_CMD_GET_BEARER_NAMES: 531 case TIPC_CMD_GET_BEARER_NAMES:
532 rep_tlv_buf = bearer_get_names(); 532 rep_tlv_buf = tipc_bearer_get_names();
533 break; 533 break;
534 case TIPC_CMD_GET_MEDIA_NAMES: 534 case TIPC_CMD_GET_MEDIA_NAMES:
535 rep_tlv_buf = media_get_names(); 535 rep_tlv_buf = tipc_media_get_names();
536 break; 536 break;
537 case TIPC_CMD_SHOW_PORTS: 537 case TIPC_CMD_SHOW_PORTS:
538 rep_tlv_buf = port_get_ports(); 538 rep_tlv_buf = tipc_port_get_ports();
539 break; 539 break;
540#if 0 540#if 0
541 case TIPC_CMD_SHOW_PORT_STATS: 541 case TIPC_CMD_SHOW_PORT_STATS:
542 rep_tlv_buf = port_show_stats(req_tlv_area, req_tlv_space); 542 rep_tlv_buf = port_show_stats(req_tlv_area, req_tlv_space);
543 break; 543 break;
544 case TIPC_CMD_RESET_PORT_STATS: 544 case TIPC_CMD_RESET_PORT_STATS:
545 rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED); 545 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED);
546 break; 546 break;
547#endif 547#endif
548 case TIPC_CMD_SET_LOG_SIZE: 548 case TIPC_CMD_SET_LOG_SIZE:
549 rep_tlv_buf = log_resize(req_tlv_area, req_tlv_space); 549 rep_tlv_buf = tipc_log_resize(req_tlv_area, req_tlv_space);
550 break; 550 break;
551 case TIPC_CMD_DUMP_LOG: 551 case TIPC_CMD_DUMP_LOG:
552 rep_tlv_buf = log_dump(); 552 rep_tlv_buf = tipc_log_dump();
553 break; 553 break;
554 case TIPC_CMD_SET_LINK_TOL: 554 case TIPC_CMD_SET_LINK_TOL:
555 case TIPC_CMD_SET_LINK_PRI: 555 case TIPC_CMD_SET_LINK_PRI:
556 case TIPC_CMD_SET_LINK_WINDOW: 556 case TIPC_CMD_SET_LINK_WINDOW:
557 rep_tlv_buf = link_cmd_config(req_tlv_area, req_tlv_space, cmd); 557 rep_tlv_buf = tipc_link_cmd_config(req_tlv_area, req_tlv_space, cmd);
558 break; 558 break;
559 case TIPC_CMD_ENABLE_BEARER: 559 case TIPC_CMD_ENABLE_BEARER:
560 rep_tlv_buf = cfg_enable_bearer(); 560 rep_tlv_buf = cfg_enable_bearer();
@@ -593,31 +593,31 @@ struct sk_buff *cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
593 rep_tlv_buf = cfg_set_netid(); 593 rep_tlv_buf = cfg_set_netid();
594 break; 594 break;
595 case TIPC_CMD_GET_REMOTE_MNG: 595 case TIPC_CMD_GET_REMOTE_MNG:
596 rep_tlv_buf = cfg_reply_unsigned(tipc_remote_management); 596 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_remote_management);
597 break; 597 break;
598 case TIPC_CMD_GET_MAX_PORTS: 598 case TIPC_CMD_GET_MAX_PORTS:
599 rep_tlv_buf = cfg_reply_unsigned(tipc_max_ports); 599 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_ports);
600 break; 600 break;
601 case TIPC_CMD_GET_MAX_PUBL: 601 case TIPC_CMD_GET_MAX_PUBL:
602 rep_tlv_buf = cfg_reply_unsigned(tipc_max_publications); 602 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_publications);
603 break; 603 break;
604 case TIPC_CMD_GET_MAX_SUBSCR: 604 case TIPC_CMD_GET_MAX_SUBSCR:
605 rep_tlv_buf = cfg_reply_unsigned(tipc_max_subscriptions); 605 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_subscriptions);
606 break; 606 break;
607 case TIPC_CMD_GET_MAX_ZONES: 607 case TIPC_CMD_GET_MAX_ZONES:
608 rep_tlv_buf = cfg_reply_unsigned(tipc_max_zones); 608 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_zones);
609 break; 609 break;
610 case TIPC_CMD_GET_MAX_CLUSTERS: 610 case TIPC_CMD_GET_MAX_CLUSTERS:
611 rep_tlv_buf = cfg_reply_unsigned(tipc_max_clusters); 611 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_clusters);
612 break; 612 break;
613 case TIPC_CMD_GET_MAX_NODES: 613 case TIPC_CMD_GET_MAX_NODES:
614 rep_tlv_buf = cfg_reply_unsigned(tipc_max_nodes); 614 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_nodes);
615 break; 615 break;
616 case TIPC_CMD_GET_MAX_SLAVES: 616 case TIPC_CMD_GET_MAX_SLAVES:
617 rep_tlv_buf = cfg_reply_unsigned(tipc_max_slaves); 617 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_slaves);
618 break; 618 break;
619 case TIPC_CMD_GET_NETID: 619 case TIPC_CMD_GET_NETID:
620 rep_tlv_buf = cfg_reply_unsigned(tipc_net_id); 620 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id);
621 break; 621 break;
622 default: 622 default:
623 rep_tlv_buf = NULL; 623 rep_tlv_buf = NULL;
@@ -655,11 +655,11 @@ static void cfg_named_msg_event(void *userdata,
655 655
656 /* Generate reply for request (if can't, return request) */ 656 /* Generate reply for request (if can't, return request) */
657 657
658 rep_buf = cfg_do_cmd(orig->node, 658 rep_buf = tipc_cfg_do_cmd(orig->node,
659 ntohs(req_hdr->tcm_type), 659 ntohs(req_hdr->tcm_type),
660 msg + sizeof(*req_hdr), 660 msg + sizeof(*req_hdr),
661 size - sizeof(*req_hdr), 661 size - sizeof(*req_hdr),
662 BUF_HEADROOM + MAX_H_SIZE + sizeof(*rep_hdr)); 662 BUF_HEADROOM + MAX_H_SIZE + sizeof(*rep_hdr));
663 if (rep_buf) { 663 if (rep_buf) {
664 skb_push(rep_buf, sizeof(*rep_hdr)); 664 skb_push(rep_buf, sizeof(*rep_hdr));
665 rep_hdr = (struct tipc_cfg_msg_hdr *)rep_buf->data; 665 rep_hdr = (struct tipc_cfg_msg_hdr *)rep_buf->data;
@@ -675,7 +675,7 @@ static void cfg_named_msg_event(void *userdata,
675 tipc_send_buf2port(port_ref, orig, rep_buf, rep_buf->len); 675 tipc_send_buf2port(port_ref, orig, rep_buf, rep_buf->len);
676} 676}
677 677
678int cfg_init(void) 678int tipc_cfg_init(void)
679{ 679{
680 struct tipc_name_seq seq; 680 struct tipc_name_seq seq;
681 int res; 681 int res;
@@ -696,7 +696,7 @@ int cfg_init(void)
696 696
697 seq.type = TIPC_CFG_SRV; 697 seq.type = TIPC_CFG_SRV;
698 seq.lower = seq.upper = tipc_own_addr; 698 seq.lower = seq.upper = tipc_own_addr;
699 res = nametbl_publish_rsv(mng.port_ref, TIPC_ZONE_SCOPE, &seq); 699 res = tipc_nametbl_publish_rsv(mng.port_ref, TIPC_ZONE_SCOPE, &seq);
700 if (res) 700 if (res)
701 goto failed; 701 goto failed;
702 702
@@ -709,7 +709,7 @@ failed:
709 return res; 709 return res;
710} 710}
711 711
712void cfg_stop(void) 712void tipc_cfg_stop(void)
713{ 713{
714 if (mng.user_ref) { 714 if (mng.user_ref) {
715 tipc_detach(mng.user_ref); 715 tipc_detach(mng.user_ref);
diff --git a/net/tipc/config.h b/net/tipc/config.h
index e74d94f753c9..7a728f954d84 100644
--- a/net/tipc/config.h
+++ b/net/tipc/config.h
@@ -42,38 +42,38 @@
42#include "core.h" 42#include "core.h"
43#include "link.h" 43#include "link.h"
44 44
45struct sk_buff *cfg_reply_alloc(int payload_size); 45struct sk_buff *tipc_cfg_reply_alloc(int payload_size);
46int cfg_append_tlv(struct sk_buff *buf, int tlv_type, 46int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type,
47 void *tlv_data, int tlv_data_size); 47 void *tlv_data, int tlv_data_size);
48struct sk_buff *cfg_reply_unsigned_type(u16 tlv_type, u32 value); 48struct sk_buff *tipc_cfg_reply_unsigned_type(u16 tlv_type, u32 value);
49struct sk_buff *cfg_reply_string_type(u16 tlv_type, char *string); 49struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string);
50 50
51static inline struct sk_buff *cfg_reply_none(void) 51static inline struct sk_buff *tipc_cfg_reply_none(void)
52{ 52{
53 return cfg_reply_alloc(0); 53 return tipc_cfg_reply_alloc(0);
54} 54}
55 55
56static inline struct sk_buff *cfg_reply_unsigned(u32 value) 56static inline struct sk_buff *tipc_cfg_reply_unsigned(u32 value)
57{ 57{
58 return cfg_reply_unsigned_type(TIPC_TLV_UNSIGNED, value); 58 return tipc_cfg_reply_unsigned_type(TIPC_TLV_UNSIGNED, value);
59} 59}
60 60
61static inline struct sk_buff *cfg_reply_error_string(char *string) 61static inline struct sk_buff *tipc_cfg_reply_error_string(char *string)
62{ 62{
63 return cfg_reply_string_type(TIPC_TLV_ERROR_STRING, string); 63 return tipc_cfg_reply_string_type(TIPC_TLV_ERROR_STRING, string);
64} 64}
65 65
66static inline struct sk_buff *cfg_reply_ultra_string(char *string) 66static inline struct sk_buff *tipc_cfg_reply_ultra_string(char *string)
67{ 67{
68 return cfg_reply_string_type(TIPC_TLV_ULTRA_STRING, string); 68 return tipc_cfg_reply_string_type(TIPC_TLV_ULTRA_STRING, string);
69} 69}
70 70
71struct sk_buff *cfg_do_cmd(u32 orig_node, u16 cmd, 71struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd,
72 const void *req_tlv_area, int req_tlv_space, 72 const void *req_tlv_area, int req_tlv_space,
73 int headroom); 73 int headroom);
74 74
75void cfg_link_event(u32 addr, char *name, int up); 75void tipc_cfg_link_event(u32 addr, char *name, int up);
76int cfg_init(void); 76int tipc_cfg_init(void);
77void cfg_stop(void); 77void tipc_cfg_stop(void);
78 78
79#endif 79#endif
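The reply helpers above all reduce to the same operation: allocate a reply buffer, then append one type/length/value descriptor per answer, as tipc_cfg_append_tlv() does in config.c (check the remaining tailroom, write a header, copy the value). The following is a rough userspace sketch of that append step over a plain byte array; struct tlv_hdr, its field layout, and the 4-byte alignment are illustrative assumptions, not the actual TIPC wire format defined by the kernel's TLV_* macros.

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Hypothetical TLV header; the real layout is in the kernel's TIPC headers. */
struct tlv_hdr {
	uint16_t tlv_len;	/* header + value length, network byte order */
	uint16_t tlv_type;	/* TLV type code, network byte order */
};

#define TLV_ALIGN(x)	(((x) + 3u) & ~3u)	/* assumed 4-byte alignment */

/*
 * Append one TLV to buf (capacity 'cap', current fill '*used').
 * Returns 1 on success, 0 if there is not enough room, mirroring the
 * 0/1 convention of tipc_cfg_append_tlv() above.
 */
static int append_tlv(uint8_t *buf, size_t cap, size_t *used,
		      uint16_t type, const void *val, uint16_t val_len)
{
	size_t space = TLV_ALIGN(sizeof(struct tlv_hdr) + val_len);
	struct tlv_hdr hdr;

	if (*used > cap || cap - *used < space)
		return 0;
	hdr.tlv_len = htons((uint16_t)(sizeof(hdr) + val_len));
	hdr.tlv_type = htons(type);
	memcpy(buf + *used, &hdr, sizeof(hdr));
	memcpy(buf + *used + sizeof(hdr), val, val_len);
	*used += space;
	return 1;
}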
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 9a1ab178b446..3d0a8ee4e1d3 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -48,14 +48,14 @@
48#include "subscr.h" 48#include "subscr.h"
49#include "config.h" 49#include "config.h"
50 50
51int eth_media_start(void); 51int tipc_eth_media_start(void);
52void eth_media_stop(void); 52void tipc_eth_media_stop(void);
53int handler_start(void); 53int tipc_handler_start(void);
54void handler_stop(void); 54void tipc_handler_stop(void);
55int socket_init(void); 55int tipc_socket_init(void);
56void socket_stop(void); 56void tipc_socket_stop(void);
57int netlink_start(void); 57int tipc_netlink_start(void);
58void netlink_stop(void); 58void tipc_netlink_stop(void);
59 59
60#define MOD_NAME "tipc_start: " 60#define MOD_NAME "tipc_start: "
61 61
@@ -112,56 +112,56 @@ int tipc_get_mode(void)
112} 112}
113 113
114/** 114/**
115 * stop_net - shut down TIPC networking sub-systems 115 * tipc_core_stop_net - shut down TIPC networking sub-systems
116 */ 116 */
117 117
118void stop_net(void) 118void tipc_core_stop_net(void)
119{ 119{
120 eth_media_stop(); 120 tipc_eth_media_stop();
121 tipc_stop_net(); 121 tipc_net_stop();
122} 122}
123 123
124/** 124/**
125 * start_net - start TIPC networking sub-systems 125 * tipc_core_start_net - start TIPC networking sub-systems
126 */ 126 */
127 127
128int start_net(void) 128int tipc_core_start_net(void)
129{ 129{
130 int res; 130 int res;
131 131
132 if ((res = tipc_start_net()) || 132 if ((res = tipc_net_start()) ||
133 (res = eth_media_start())) { 133 (res = tipc_eth_media_start())) {
134 stop_net(); 134 tipc_core_stop_net();
135 } 135 }
136 return res; 136 return res;
137} 137}
138 138
139/** 139/**
140 * stop_core - switch TIPC from SINGLE NODE to NOT RUNNING mode 140 * tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode
141 */ 141 */
142 142
143void stop_core(void) 143void tipc_core_stop(void)
144{ 144{
145 if (tipc_mode != TIPC_NODE_MODE) 145 if (tipc_mode != TIPC_NODE_MODE)
146 return; 146 return;
147 147
148 tipc_mode = TIPC_NOT_RUNNING; 148 tipc_mode = TIPC_NOT_RUNNING;
149 149
150 netlink_stop(); 150 tipc_netlink_stop();
151 handler_stop(); 151 tipc_handler_stop();
152 cfg_stop(); 152 tipc_cfg_stop();
153 subscr_stop(); 153 tipc_subscr_stop();
154 reg_stop(); 154 tipc_reg_stop();
155 nametbl_stop(); 155 tipc_nametbl_stop();
156 ref_table_stop(); 156 tipc_ref_table_stop();
157 socket_stop(); 157 tipc_socket_stop();
158} 158}
159 159
160/** 160/**
161 * start_core - switch TIPC from NOT RUNNING to SINGLE NODE mode 161 * tipc_core_start - switch TIPC from NOT RUNNING to SINGLE NODE mode
162 */ 162 */
163 163
164int start_core(void) 164int tipc_core_start(void)
165{ 165{
166 int res; 166 int res;
167 167
@@ -171,16 +171,16 @@ int start_core(void)
171 get_random_bytes(&tipc_random, sizeof(tipc_random)); 171 get_random_bytes(&tipc_random, sizeof(tipc_random));
172 tipc_mode = TIPC_NODE_MODE; 172 tipc_mode = TIPC_NODE_MODE;
173 173
174 if ((res = handler_start()) || 174 if ((res = tipc_handler_start()) ||
175 (res = ref_table_init(tipc_max_ports + tipc_max_subscriptions, 175 (res = tipc_ref_table_init(tipc_max_ports + tipc_max_subscriptions,
176 tipc_random)) || 176 tipc_random)) ||
177 (res = reg_start()) || 177 (res = tipc_reg_start()) ||
178 (res = nametbl_init()) || 178 (res = tipc_nametbl_init()) ||
179 (res = k_signal((Handler)subscr_start, 0)) || 179 (res = tipc_k_signal((Handler)tipc_subscr_start, 0)) ||
180 (res = k_signal((Handler)cfg_init, 0)) || 180 (res = tipc_k_signal((Handler)tipc_cfg_init, 0)) ||
181 (res = netlink_start()) || 181 (res = tipc_netlink_start()) ||
182 (res = socket_init())) { 182 (res = tipc_socket_init())) {
183 stop_core(); 183 tipc_core_stop();
184 } 184 }
185 return res; 185 return res;
186} 186}
@@ -190,7 +190,7 @@ static int __init tipc_init(void)
190{ 190{
191 int res; 191 int res;
192 192
193 log_reinit(CONFIG_TIPC_LOG); 193 tipc_log_reinit(CONFIG_TIPC_LOG);
194 info("Activated (compiled " __DATE__ " " __TIME__ ")\n"); 194 info("Activated (compiled " __DATE__ " " __TIME__ ")\n");
195 195
196 tipc_own_addr = 0; 196 tipc_own_addr = 0;
@@ -204,7 +204,7 @@ static int __init tipc_init(void)
204 tipc_max_slaves = delimit(CONFIG_TIPC_SLAVE_NODES, 0, 2047); 204 tipc_max_slaves = delimit(CONFIG_TIPC_SLAVE_NODES, 0, 2047);
205 tipc_net_id = 4711; 205 tipc_net_id = 4711;
206 206
207 if ((res = start_core())) 207 if ((res = tipc_core_start()))
208 err("Unable to start in single node mode\n"); 208 err("Unable to start in single node mode\n");
209 else 209 else
210 info("Started in single node mode\n"); 210 info("Started in single node mode\n");
@@ -213,10 +213,10 @@ static int __init tipc_init(void)
213 213
214static void __exit tipc_exit(void) 214static void __exit tipc_exit(void)
215{ 215{
216 stop_net(); 216 tipc_core_stop_net();
217 stop_core(); 217 tipc_core_stop();
218 info("Deactivated\n"); 218 info("Deactivated\n");
219 log_stop(); 219 tipc_log_stop();
220} 220}
221 221
222module_init(tipc_init); 222module_init(tipc_init);
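tipc_core_start() above chains the subsystem initialisers with ||, so the first non-zero return code short-circuits the chain, and the single tipc_core_stop() call in the error branch tears down whatever did manage to start (each *_stop() routine tolerates a partially initialised state). Here is a stripped-down illustration of that idiom with dummy subsystems standing in for the real TIPC ones:

#include <stdio.h>

static int started_a, started_b;

static int start_a(void) { started_a = 1; return 0; }
static int start_b(void) { return -1; /* simulate an init failure */ }

static void stop_all(void)
{
	/* Each step checks its own flag, so it is safe after a partial start. */
	if (started_b) { started_b = 0; puts("stopped b"); }
	if (started_a) { started_a = 0; puts("stopped a"); }
}

static int start_all(void)
{
	int res;

	/* First failing initialiser short-circuits the chain... */
	if ((res = start_a()) ||
	    (res = start_b())) {
		/* ...and one rollback call undoes whatever was started. */
		stop_all();
	}
	return res;
}

int main(void)
{
	printf("start_all -> %d\n", start_all());
	return 0;
}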
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 1a34cc95b9e0..1f2e8b27a13f 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -65,9 +65,9 @@
65#define assert(i) BUG_ON(!(i)) 65#define assert(i) BUG_ON(!(i))
66 66
67struct tipc_msg; 67struct tipc_msg;
68extern struct print_buf *CONS, *LOG; 68extern struct print_buf *TIPC_CONS, *TIPC_LOG;
69extern struct print_buf *TEE(struct print_buf *, struct print_buf *); 69extern struct print_buf *TIPC_TEE(struct print_buf *, struct print_buf *);
70void msg_print(struct print_buf*,struct tipc_msg *,const char*); 70void tipc_msg_print(struct print_buf*,struct tipc_msg *,const char*);
71void tipc_printf(struct print_buf *, const char *fmt, ...); 71void tipc_printf(struct print_buf *, const char *fmt, ...);
72void tipc_dump(struct print_buf*,const char *fmt, ...); 72void tipc_dump(struct print_buf*,const char *fmt, ...);
73 73
@@ -84,7 +84,7 @@ void tipc_dump(struct print_buf*,const char *fmt, ...);
84#define info(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_NOTICE "TIPC: " fmt, ## arg) 84#define info(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_NOTICE "TIPC: " fmt, ## arg)
85 85
86#define dbg(fmt, arg...) do {if (DBG_OUTPUT) tipc_printf(DBG_OUTPUT, fmt, ## arg);} while(0) 86#define dbg(fmt, arg...) do {if (DBG_OUTPUT) tipc_printf(DBG_OUTPUT, fmt, ## arg);} while(0)
87#define msg_dbg(msg, txt) do {if (DBG_OUTPUT) msg_print(DBG_OUTPUT, msg, txt);} while(0) 87#define msg_dbg(msg, txt) do {if (DBG_OUTPUT) tipc_msg_print(DBG_OUTPUT, msg, txt);} while(0)
88#define dump(fmt, arg...) do {if (DBG_OUTPUT) tipc_dump(DBG_OUTPUT, fmt, ##arg);} while(0) 88#define dump(fmt, arg...) do {if (DBG_OUTPUT) tipc_dump(DBG_OUTPUT, fmt, ##arg);} while(0)
89 89
90 90
@@ -94,15 +94,15 @@ void tipc_dump(struct print_buf*,const char *fmt, ...);
94 * here, or on a per .c file basis, by redefining these symbols. The following 94 * here, or on a per .c file basis, by redefining these symbols. The following
95 * print buffer options are available: 95 * print buffer options are available:
96 * 96 *
97 * NULL : Output to null print buffer (i.e. print nowhere) 97 * NULL : Output to null print buffer (i.e. print nowhere)
98 * CONS : Output to system console 98 * TIPC_CONS : Output to system console
99 * LOG : Output to TIPC log buffer 99 * TIPC_LOG : Output to TIPC log buffer
100 * &buf : Output to user-defined buffer (struct print_buf *) 100 * &buf : Output to user-defined buffer (struct print_buf *)
 101 * TEE(&buf_a,&buf_b) : Output to two print buffers (e.g. TEE(CONS,LOG) ) 101 * TIPC_TEE(&buf_a,&buf_b) : Output to two print buffers (e.g. TIPC_TEE(TIPC_CONS,TIPC_LOG) )
102 */ 102 */
103 103
104#ifndef TIPC_OUTPUT 104#ifndef TIPC_OUTPUT
105#define TIPC_OUTPUT TEE(CONS,LOG) 105#define TIPC_OUTPUT TIPC_TEE(TIPC_CONS,TIPC_LOG)
106#endif 106#endif
107 107
108#ifndef DBG_OUTPUT 108#ifndef DBG_OUTPUT
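
The comment block above lists the sinks a source file may select: NULL, TIPC_CONS, TIPC_LOG, a private buffer, or TIPC_TEE() of two of them. As a sketch of the per-file override the comment describes (it assumes the TIPC source tree after this patch and is not standalone code), a file could pick its own sinks before including core.h:

/* Sketch only: per-file output selection, as the comment above describes. */
#define TIPC_OUTPUT  TIPC_TEE(TIPC_CONS, TIPC_LOG)  /* normal messages to console and log */
#define DBG_OUTPUT   TIPC_LOG                       /* debug output to the log only */
#include "core.h"
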
@@ -167,10 +167,10 @@ extern atomic_t tipc_user_count;
167 * Routines available to privileged subsystems 167 * Routines available to privileged subsystems
168 */ 168 */
169 169
170extern int start_core(void); 170extern int tipc_core_start(void);
171extern void stop_core(void); 171extern void tipc_core_stop(void);
172extern int start_net(void); 172extern int tipc_core_start_net(void);
173extern void stop_net(void); 173extern void tipc_core_stop_net(void);
174 174
175static inline int delimit(int val, int min, int max) 175static inline int delimit(int val, int min, int max)
176{ 176{
@@ -188,7 +188,7 @@ static inline int delimit(int val, int min, int max)
188 188
189typedef void (*Handler) (unsigned long); 189typedef void (*Handler) (unsigned long);
190 190
191u32 k_signal(Handler routine, unsigned long argument); 191u32 tipc_k_signal(Handler routine, unsigned long argument);
192 192
193/** 193/**
194 * k_init_timer - initialize a timer 194 * k_init_timer - initialize a timer
diff --git a/net/tipc/dbg.c b/net/tipc/dbg.c
index 7ed60a1cfbb8..4f4beefa7830 100644
--- a/net/tipc/dbg.c
+++ b/net/tipc/dbg.c
@@ -44,10 +44,10 @@ static char print_string[MAX_STRING];
44static spinlock_t print_lock = SPIN_LOCK_UNLOCKED; 44static spinlock_t print_lock = SPIN_LOCK_UNLOCKED;
45 45
46static struct print_buf cons_buf = { NULL, 0, NULL, NULL }; 46static struct print_buf cons_buf = { NULL, 0, NULL, NULL };
47struct print_buf *CONS = &cons_buf; 47struct print_buf *TIPC_CONS = &cons_buf;
48 48
49static struct print_buf log_buf = { NULL, 0, NULL, NULL }; 49static struct print_buf log_buf = { NULL, 0, NULL, NULL };
50struct print_buf *LOG = &log_buf; 50struct print_buf *TIPC_LOG = &log_buf;
51 51
52 52
53#define FORMAT(PTR,LEN,FMT) \ 53#define FORMAT(PTR,LEN,FMT) \
@@ -66,15 +66,15 @@ struct print_buf *LOG = &log_buf;
66 * simultaneous use of the print buffer(s) being manipulated. 66 * simultaneous use of the print buffer(s) being manipulated.
67 * 2) tipc_printf() uses 'print_lock' to prevent simultaneous use of 67 * 2) tipc_printf() uses 'print_lock' to prevent simultaneous use of
68 * 'print_string' and to protect its print buffer(s). 68 * 'print_string' and to protect its print buffer(s).
69 * 3) TEE() uses 'print_lock' to protect its print buffer(s). 69 * 3) TIPC_TEE() uses 'print_lock' to protect its print buffer(s).
 70 * 4) Routines of the form log_XXX() use 'print_lock' to protect LOG. 70 * 4) Routines of the form log_XXX() use 'print_lock' to protect TIPC_LOG.
71 */ 71 */
72 72
73/** 73/**
74 * printbuf_init - initialize print buffer to empty 74 * tipc_printbuf_init - initialize print buffer to empty
75 */ 75 */
76 76
77void printbuf_init(struct print_buf *pb, char *raw, u32 sz) 77void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 sz)
78{ 78{
79 if (!pb || !raw || (sz < (MAX_STRING + 1))) 79 if (!pb || !raw || (sz < (MAX_STRING + 1)))
80 return; 80 return;
@@ -87,26 +87,26 @@ void printbuf_init(struct print_buf *pb, char *raw, u32 sz)
87} 87}
88 88
89/** 89/**
90 * printbuf_reset - reinitialize print buffer to empty state 90 * tipc_printbuf_reset - reinitialize print buffer to empty state
91 */ 91 */
92 92
93void printbuf_reset(struct print_buf *pb) 93void tipc_printbuf_reset(struct print_buf *pb)
94{ 94{
95 if (pb && pb->buf) 95 if (pb && pb->buf)
96 printbuf_init(pb, pb->buf, pb->size); 96 tipc_printbuf_init(pb, pb->buf, pb->size);
97} 97}
98 98
99/** 99/**
100 * printbuf_empty - test if print buffer is in empty state 100 * tipc_printbuf_empty - test if print buffer is in empty state
101 */ 101 */
102 102
103int printbuf_empty(struct print_buf *pb) 103int tipc_printbuf_empty(struct print_buf *pb)
104{ 104{
105 return (!pb || !pb->buf || (pb->crs == pb->buf)); 105 return (!pb || !pb->buf || (pb->crs == pb->buf));
106} 106}
107 107
108/** 108/**
109 * printbuf_validate - check for print buffer overflow 109 * tipc_printbuf_validate - check for print buffer overflow
110 * 110 *
111 * Verifies that a print buffer has captured all data written to it. 111 * Verifies that a print buffer has captured all data written to it.
112 * If data has been lost, linearize buffer and prepend an error message 112 * If data has been lost, linearize buffer and prepend an error message
@@ -114,7 +114,7 @@ int printbuf_empty(struct print_buf *pb)
114 * Returns length of print buffer data string (including trailing NULL) 114 * Returns length of print buffer data string (including trailing NULL)
115 */ 115 */
116 116
117int printbuf_validate(struct print_buf *pb) 117int tipc_printbuf_validate(struct print_buf *pb)
118{ 118{
119 char *err = " *** PRINT BUFFER WRAPPED AROUND ***\n"; 119 char *err = " *** PRINT BUFFER WRAPPED AROUND ***\n";
120 char *cp_buf; 120 char *cp_buf;
@@ -126,13 +126,13 @@ int printbuf_validate(struct print_buf *pb)
126 if (pb->buf[pb->size - 1] == '\0') { 126 if (pb->buf[pb->size - 1] == '\0') {
127 cp_buf = kmalloc(pb->size, GFP_ATOMIC); 127 cp_buf = kmalloc(pb->size, GFP_ATOMIC);
128 if (cp_buf != NULL){ 128 if (cp_buf != NULL){
129 printbuf_init(&cb, cp_buf, pb->size); 129 tipc_printbuf_init(&cb, cp_buf, pb->size);
130 printbuf_move(&cb, pb); 130 tipc_printbuf_move(&cb, pb);
131 printbuf_move(pb, &cb); 131 tipc_printbuf_move(pb, &cb);
132 kfree(cp_buf); 132 kfree(cp_buf);
133 memcpy(pb->buf, err, strlen(err)); 133 memcpy(pb->buf, err, strlen(err));
134 } else { 134 } else {
135 printbuf_reset(pb); 135 tipc_printbuf_reset(pb);
136 tipc_printf(pb, err); 136 tipc_printf(pb, err);
137 } 137 }
138 } 138 }
@@ -140,13 +140,13 @@ int printbuf_validate(struct print_buf *pb)
140} 140}
141 141
142/** 142/**
143 * printbuf_move - move print buffer contents to another print buffer 143 * tipc_printbuf_move - move print buffer contents to another print buffer
144 * 144 *
145 * Current contents of destination print buffer (if any) are discarded. 145 * Current contents of destination print buffer (if any) are discarded.
146 * Source print buffer becomes empty if a successful move occurs. 146 * Source print buffer becomes empty if a successful move occurs.
147 */ 147 */
148 148
149void printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from) 149void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from)
150{ 150{
151 int len; 151 int len;
152 152
@@ -156,12 +156,12 @@ void printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from)
156 return; 156 return;
157 157
158 if (!pb_from || !pb_from->buf) { 158 if (!pb_from || !pb_from->buf) {
159 printbuf_reset(pb_to); 159 tipc_printbuf_reset(pb_to);
160 return; 160 return;
161 } 161 }
162 162
163 if (pb_to->size < pb_from->size) { 163 if (pb_to->size < pb_from->size) {
164 printbuf_reset(pb_to); 164 tipc_printbuf_reset(pb_to);
165 tipc_printf(pb_to, "*** PRINT BUFFER OVERFLOW ***"); 165 tipc_printf(pb_to, "*** PRINT BUFFER OVERFLOW ***");
166 return; 166 return;
167 } 167 }
@@ -179,7 +179,7 @@ void printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from)
179 strcpy(pb_to->crs, pb_from->buf); 179 strcpy(pb_to->crs, pb_from->buf);
180 pb_to->crs += len; 180 pb_to->crs += len;
181 181
182 printbuf_reset(pb_from); 182 tipc_printbuf_reset(pb_from);
183} 183}
184 184
185/** 185/**
@@ -199,7 +199,7 @@ void tipc_printf(struct print_buf *pb, const char *fmt, ...)
199 strcpy(print_string, "*** STRING TOO LONG ***"); 199 strcpy(print_string, "*** STRING TOO LONG ***");
200 200
201 while (pb) { 201 while (pb) {
202 if (pb == CONS) 202 if (pb == TIPC_CONS)
203 printk(print_string); 203 printk(print_string);
204 else if (pb->buf) { 204 else if (pb->buf) {
205 chars_left = pb->buf + pb->size - pb->crs - 1; 205 chars_left = pb->buf + pb->size - pb->crs - 1;
@@ -223,10 +223,10 @@ void tipc_printf(struct print_buf *pb, const char *fmt, ...)
223} 223}
224 224
225/** 225/**
226 * TEE - perform next output operation on both print buffers 226 * TIPC_TEE - perform next output operation on both print buffers
227 */ 227 */
228 228
229struct print_buf *TEE(struct print_buf *b0, struct print_buf *b1) 229struct print_buf *TIPC_TEE(struct print_buf *b0, struct print_buf *b1)
230{ 230{
231 struct print_buf *pb = b0; 231 struct print_buf *pb = b0;
232 232
@@ -294,96 +294,96 @@ void tipc_dump(struct print_buf *pb, const char *fmt, ...)
294 int len; 294 int len;
295 295
296 spin_lock_bh(&print_lock); 296 spin_lock_bh(&print_lock);
297 FORMAT(CONS->buf, len, fmt); 297 FORMAT(TIPC_CONS->buf, len, fmt);
298 printk(CONS->buf); 298 printk(TIPC_CONS->buf);
299 299
300 for (; pb; pb = pb->next) { 300 for (; pb; pb = pb->next) {
301 if (pb == CONS) 301 if (pb == TIPC_CONS)
302 continue; 302 continue;
303 printk("\n---- Start of dump,%s log ----\n\n", 303 printk("\n---- Start of dump,%s log ----\n\n",
304 (pb == LOG) ? "global" : "local"); 304 (pb == TIPC_LOG) ? "global" : "local");
305 printbuf_dump(pb); 305 printbuf_dump(pb);
306 printbuf_reset(pb); 306 tipc_printbuf_reset(pb);
307 printk("\n-------- End of dump --------\n"); 307 printk("\n-------- End of dump --------\n");
308 } 308 }
309 spin_unlock_bh(&print_lock); 309 spin_unlock_bh(&print_lock);
310} 310}
311 311
312/** 312/**
313 * log_stop - free up TIPC log print buffer 313 * tipc_log_stop - free up TIPC log print buffer
314 */ 314 */
315 315
316void log_stop(void) 316void tipc_log_stop(void)
317{ 317{
318 spin_lock_bh(&print_lock); 318 spin_lock_bh(&print_lock);
319 if (LOG->buf) { 319 if (TIPC_LOG->buf) {
320 kfree(LOG->buf); 320 kfree(TIPC_LOG->buf);
321 LOG->buf = NULL; 321 TIPC_LOG->buf = NULL;
322 } 322 }
323 spin_unlock_bh(&print_lock); 323 spin_unlock_bh(&print_lock);
324} 324}
325 325
326/** 326/**
327 * log_reinit - set TIPC log print buffer to specified size 327 * tipc_log_reinit - set TIPC log print buffer to specified size
328 */ 328 */
329 329
330void log_reinit(int log_size) 330void tipc_log_reinit(int log_size)
331{ 331{
332 log_stop(); 332 tipc_log_stop();
333 333
334 if (log_size) { 334 if (log_size) {
335 if (log_size <= MAX_STRING) 335 if (log_size <= MAX_STRING)
336 log_size = MAX_STRING + 1; 336 log_size = MAX_STRING + 1;
337 spin_lock_bh(&print_lock); 337 spin_lock_bh(&print_lock);
338 printbuf_init(LOG, kmalloc(log_size, GFP_ATOMIC), log_size); 338 tipc_printbuf_init(TIPC_LOG, kmalloc(log_size, GFP_ATOMIC), log_size);
339 spin_unlock_bh(&print_lock); 339 spin_unlock_bh(&print_lock);
340 } 340 }
341} 341}
342 342
343/** 343/**
344 * log_resize - reconfigure size of TIPC log buffer 344 * tipc_log_resize - reconfigure size of TIPC log buffer
345 */ 345 */
346 346
347struct sk_buff *log_resize(const void *req_tlv_area, int req_tlv_space) 347struct sk_buff *tipc_log_resize(const void *req_tlv_area, int req_tlv_space)
348{ 348{
349 u32 value; 349 u32 value;
350 350
351 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) 351 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
352 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 352 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
353 353
354 value = *(u32 *)TLV_DATA(req_tlv_area); 354 value = *(u32 *)TLV_DATA(req_tlv_area);
355 value = ntohl(value); 355 value = ntohl(value);
356 if (value != delimit(value, 0, 32768)) 356 if (value != delimit(value, 0, 32768))
357 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 357 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
358 " (log size must be 0-32768)"); 358 " (log size must be 0-32768)");
359 log_reinit(value); 359 tipc_log_reinit(value);
360 return cfg_reply_none(); 360 return tipc_cfg_reply_none();
361} 361}
362 362
363/** 363/**
364 * log_dump - capture TIPC log buffer contents in configuration message 364 * tipc_log_dump - capture TIPC log buffer contents in configuration message
365 */ 365 */
366 366
367struct sk_buff *log_dump(void) 367struct sk_buff *tipc_log_dump(void)
368{ 368{
369 struct sk_buff *reply; 369 struct sk_buff *reply;
370 370
371 spin_lock_bh(&print_lock); 371 spin_lock_bh(&print_lock);
372 if (!LOG->buf) 372 if (!TIPC_LOG->buf)
373 reply = cfg_reply_ultra_string("log not activated\n"); 373 reply = tipc_cfg_reply_ultra_string("log not activated\n");
374 else if (printbuf_empty(LOG)) 374 else if (tipc_printbuf_empty(TIPC_LOG))
375 reply = cfg_reply_ultra_string("log is empty\n"); 375 reply = tipc_cfg_reply_ultra_string("log is empty\n");
376 else { 376 else {
377 struct tlv_desc *rep_tlv; 377 struct tlv_desc *rep_tlv;
378 struct print_buf pb; 378 struct print_buf pb;
379 int str_len; 379 int str_len;
380 380
381 str_len = min(LOG->size, 32768u); 381 str_len = min(TIPC_LOG->size, 32768u);
382 reply = cfg_reply_alloc(TLV_SPACE(str_len)); 382 reply = tipc_cfg_reply_alloc(TLV_SPACE(str_len));
383 if (reply) { 383 if (reply) {
384 rep_tlv = (struct tlv_desc *)reply->data; 384 rep_tlv = (struct tlv_desc *)reply->data;
385 printbuf_init(&pb, TLV_DATA(rep_tlv), str_len); 385 tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), str_len);
386 printbuf_move(&pb, LOG); 386 tipc_printbuf_move(&pb, TIPC_LOG);
387 str_len = strlen(TLV_DATA(rep_tlv)) + 1; 387 str_len = strlen(TLV_DATA(rep_tlv)) + 1;
388 skb_put(reply, TLV_SPACE(str_len)); 388 skb_put(reply, TLV_SPACE(str_len));
389 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); 389 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
diff --git a/net/tipc/dbg.h b/net/tipc/dbg.h
index c6b2a64c224f..227f050d2a52 100644
--- a/net/tipc/dbg.h
+++ b/net/tipc/dbg.h
@@ -44,16 +44,16 @@ struct print_buf {
44 struct print_buf *next; 44 struct print_buf *next;
45}; 45};
46 46
47void printbuf_init(struct print_buf *pb, char *buf, u32 sz); 47void tipc_printbuf_init(struct print_buf *pb, char *buf, u32 sz);
48void printbuf_reset(struct print_buf *pb); 48void tipc_printbuf_reset(struct print_buf *pb);
49int printbuf_empty(struct print_buf *pb); 49int tipc_printbuf_empty(struct print_buf *pb);
50int printbuf_validate(struct print_buf *pb); 50int tipc_printbuf_validate(struct print_buf *pb);
51void printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from); 51void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from);
52 52
53void log_reinit(int log_size); 53void tipc_log_reinit(int log_size);
54void log_stop(void); 54void tipc_log_stop(void);
55 55
56struct sk_buff *log_resize(const void *req_tlv_area, int req_tlv_space); 56struct sk_buff *tipc_log_resize(const void *req_tlv_area, int req_tlv_space);
57struct sk_buff *log_dump(void); 57struct sk_buff *tipc_log_dump(void);
58 58
59#endif 59#endif
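
Taken together, the dbg.c routines above define a small contract: tipc_printbuf_init() starts an empty buffer over caller-supplied storage, tipc_printbuf_move() replaces the destination's contents and empties the source (or flags an overflow if the destination is too small), and tipc_printbuf_empty() reports whether anything has been written. The following is a minimal user-space model of that contract, for illustration only; it uses simplified types, no locking, and no 'next' chaining, and is not the kernel code.

#include <stdio.h>
#include <string.h>

struct print_buf {              /* simplified: dbg.h also chains buffers via 'next' */
	char *crs;              /* write cursor */
	unsigned int size;
	char *buf;
};

static void printbuf_init(struct print_buf *pb, char *raw, unsigned int sz)
{
	pb->buf = raw;
	pb->size = sz;
	pb->crs = raw;
	raw[0] = '\0';
}

static int printbuf_empty(struct print_buf *pb)
{
	return pb->crs == pb->buf;
}

/* Destination contents are discarded; source becomes empty after a move. */
static void printbuf_move(struct print_buf *to, struct print_buf *from)
{
	size_t len = strlen(from->buf);

	if (len + 1 > to->size) {
		printbuf_init(to, to->buf, to->size);
		strcpy(to->buf, "*** PRINT BUFFER OVERFLOW ***");
		to->crs = to->buf + strlen(to->buf);
		return;
	}
	strcpy(to->buf, from->buf);
	to->crs = to->buf + len;
	printbuf_init(from, from->buf, from->size);
}

int main(void)
{
	char log_store[64], reply_store[64];
	struct print_buf log, reply;

	printbuf_init(&log, log_store, sizeof(log_store));
	printbuf_init(&reply, reply_store, sizeof(reply_store));

	strcpy(log.buf, "link <eth0> reset\n");   /* stand-in for tipc_printf() */
	log.crs = log.buf + strlen(log.buf);

	printbuf_move(&reply, &log);
	printf("reply: %s", reply.buf);
	printf("log now empty: %d\n", printbuf_empty(&log));
	return 0;
}
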
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index b106ef1621cc..53ba4630c10d 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -93,7 +93,7 @@ int disc_create_link(const struct tipc_link_create *argv)
93 * disc_lost_link(): A link has lost contact 93 * disc_lost_link(): A link has lost contact
94 */ 94 */
95 95
96void disc_link_event(u32 addr, char *name, int up) 96void tipc_disc_link_event(u32 addr, char *name, int up)
97{ 97{
98 if (in_own_cluster(addr)) 98 if (in_own_cluster(addr))
99 return; 99 return;
@@ -103,17 +103,17 @@ void disc_link_event(u32 addr, char *name, int up)
103} 103}
104 104
105/** 105/**
106 * disc_init_msg - initialize a link setup message 106 * tipc_disc_init_msg - initialize a link setup message
107 * @type: message type (request or response) 107 * @type: message type (request or response)
108 * @req_links: number of links associated with message 108 * @req_links: number of links associated with message
109 * @dest_domain: network domain of node(s) which should respond to message 109 * @dest_domain: network domain of node(s) which should respond to message
110 * @b_ptr: ptr to bearer issuing message 110 * @b_ptr: ptr to bearer issuing message
111 */ 111 */
112 112
113struct sk_buff *disc_init_msg(u32 type, 113struct sk_buff *tipc_disc_init_msg(u32 type,
114 u32 req_links, 114 u32 req_links,
115 u32 dest_domain, 115 u32 dest_domain,
116 struct bearer *b_ptr) 116 struct bearer *b_ptr)
117{ 117{
118 struct sk_buff *buf = buf_acquire(DSC_H_SIZE); 118 struct sk_buff *buf = buf_acquire(DSC_H_SIZE);
119 struct tipc_msg *msg; 119 struct tipc_msg *msg;
@@ -132,11 +132,11 @@ struct sk_buff *disc_init_msg(u32 type,
132} 132}
133 133
134/** 134/**
135 * disc_recv_msg - handle incoming link setup message (request or response) 135 * tipc_disc_recv_msg - handle incoming link setup message (request or response)
136 * @buf: buffer containing message 136 * @buf: buffer containing message
137 */ 137 */
138 138
139void disc_recv_msg(struct sk_buff *buf) 139void tipc_disc_recv_msg(struct sk_buff *buf)
140{ 140{
141 struct bearer *b_ptr = (struct bearer *)TIPC_SKB_CB(buf)->handle; 141 struct bearer *b_ptr = (struct bearer *)TIPC_SKB_CB(buf)->handle;
142 struct link *link; 142 struct link *link;
@@ -153,9 +153,9 @@ void disc_recv_msg(struct sk_buff *buf)
153 153
154 if (net_id != tipc_net_id) 154 if (net_id != tipc_net_id)
155 return; 155 return;
156 if (!addr_domain_valid(dest)) 156 if (!tipc_addr_domain_valid(dest))
157 return; 157 return;
158 if (!addr_node_valid(orig)) 158 if (!tipc_addr_node_valid(orig))
159 return; 159 return;
160 if (orig == tipc_own_addr) 160 if (orig == tipc_own_addr)
161 return; 161 return;
@@ -169,11 +169,11 @@ void disc_recv_msg(struct sk_buff *buf)
169 /* Always accept link here */ 169 /* Always accept link here */
170 struct sk_buff *rbuf; 170 struct sk_buff *rbuf;
171 struct tipc_media_addr *addr; 171 struct tipc_media_addr *addr;
172 struct node *n_ptr = node_find(orig); 172 struct node *n_ptr = tipc_node_find(orig);
173 int link_up; 173 int link_up;
174 dbg(" in own cluster\n"); 174 dbg(" in own cluster\n");
175 if (n_ptr == NULL) { 175 if (n_ptr == NULL) {
176 n_ptr = node_create(orig); 176 n_ptr = tipc_node_create(orig);
177 } 177 }
178 if (n_ptr == NULL) { 178 if (n_ptr == NULL) {
179 warn("Memory squeeze; Failed to create node\n"); 179 warn("Memory squeeze; Failed to create node\n");
@@ -183,7 +183,7 @@ void disc_recv_msg(struct sk_buff *buf)
183 link = n_ptr->links[b_ptr->identity]; 183 link = n_ptr->links[b_ptr->identity];
184 if (!link) { 184 if (!link) {
185 dbg("creating link\n"); 185 dbg("creating link\n");
186 link = link_create(b_ptr, orig, &media_addr); 186 link = tipc_link_create(b_ptr, orig, &media_addr);
187 if (!link) { 187 if (!link) {
188 spin_unlock_bh(&n_ptr->lock); 188 spin_unlock_bh(&n_ptr->lock);
189 return; 189 return;
@@ -196,13 +196,13 @@ void disc_recv_msg(struct sk_buff *buf)
196 warn("New bearer address for %s\n", 196 warn("New bearer address for %s\n",
197 addr_string_fill(addr_string, orig)); 197 addr_string_fill(addr_string, orig));
198 memcpy(addr, &media_addr, sizeof(*addr)); 198 memcpy(addr, &media_addr, sizeof(*addr));
199 link_reset(link); 199 tipc_link_reset(link);
200 } 200 }
201 link_up = link_is_up(link); 201 link_up = tipc_link_is_up(link);
202 spin_unlock_bh(&n_ptr->lock); 202 spin_unlock_bh(&n_ptr->lock);
203 if ((type == DSC_RESP_MSG) || link_up) 203 if ((type == DSC_RESP_MSG) || link_up)
204 return; 204 return;
205 rbuf = disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr); 205 rbuf = tipc_disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr);
206 if (rbuf != NULL) { 206 if (rbuf != NULL) {
207 msg_dbg(buf_msg(rbuf),"SEND:"); 207 msg_dbg(buf_msg(rbuf),"SEND:");
208 b_ptr->media->send_msg(rbuf, &b_ptr->publ, &media_addr); 208 b_ptr->media->send_msg(rbuf, &b_ptr->publ, &media_addr);
@@ -212,11 +212,11 @@ void disc_recv_msg(struct sk_buff *buf)
212} 212}
213 213
214/** 214/**
215 * disc_stop_link_req - stop sending periodic link setup requests 215 * tipc_disc_stop_link_req - stop sending periodic link setup requests
216 * @req: ptr to link request structure 216 * @req: ptr to link request structure
217 */ 217 */
218 218
219void disc_stop_link_req(struct link_req *req) 219void tipc_disc_stop_link_req(struct link_req *req)
220{ 220{
221 if (!req) 221 if (!req)
222 return; 222 return;
@@ -228,11 +228,11 @@ void disc_stop_link_req(struct link_req *req)
228} 228}
229 229
230/** 230/**
231 * disc_update_link_req - update frequency of periodic link setup requests 231 * tipc_disc_update_link_req - update frequency of periodic link setup requests
232 * @req: ptr to link request structure 232 * @req: ptr to link request structure
233 */ 233 */
234 234
235void disc_update_link_req(struct link_req *req) 235void tipc_disc_update_link_req(struct link_req *req)
236{ 236{
237 if (!req) 237 if (!req)
238 return; 238 return;
@@ -282,7 +282,7 @@ static void disc_timeout(struct link_req *req)
282} 282}
283 283
284/** 284/**
285 * disc_init_link_req - start sending periodic link setup requests 285 * tipc_disc_init_link_req - start sending periodic link setup requests
286 * @b_ptr: ptr to bearer issuing requests 286 * @b_ptr: ptr to bearer issuing requests
287 * @dest: destination address for request messages 287 * @dest: destination address for request messages
288 * @dest_domain: network domain of node(s) which should respond to message 288 * @dest_domain: network domain of node(s) which should respond to message
@@ -291,10 +291,10 @@ static void disc_timeout(struct link_req *req)
291 * Returns pointer to link request structure, or NULL if unable to create. 291 * Returns pointer to link request structure, or NULL if unable to create.
292 */ 292 */
293 293
294struct link_req *disc_init_link_req(struct bearer *b_ptr, 294struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
295 const struct tipc_media_addr *dest, 295 const struct tipc_media_addr *dest,
296 u32 dest_domain, 296 u32 dest_domain,
297 u32 req_links) 297 u32 req_links)
298{ 298{
299 struct link_req *req; 299 struct link_req *req;
300 300
@@ -302,7 +302,7 @@ struct link_req *disc_init_link_req(struct bearer *b_ptr,
302 if (!req) 302 if (!req)
303 return NULL; 303 return NULL;
304 304
305 req->buf = disc_init_msg(DSC_REQ_MSG, req_links, dest_domain, b_ptr); 305 req->buf = tipc_disc_init_msg(DSC_REQ_MSG, req_links, dest_domain, b_ptr);
306 if (!req->buf) { 306 if (!req->buf) {
307 kfree(req); 307 kfree(req);
308 return NULL; 308 return NULL;
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
index f4acb360d6c3..0454fd1ae7f3 100644
--- a/net/tipc/discover.h
+++ b/net/tipc/discover.h
@@ -41,16 +41,16 @@
41 41
42struct link_req; 42struct link_req;
43 43
44struct link_req *disc_init_link_req(struct bearer *b_ptr, 44struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
45 const struct tipc_media_addr *dest, 45 const struct tipc_media_addr *dest,
46 u32 dest_domain, 46 u32 dest_domain,
47 u32 req_links); 47 u32 req_links);
48void disc_update_link_req(struct link_req *req); 48void tipc_disc_update_link_req(struct link_req *req);
49void disc_stop_link_req(struct link_req *req); 49void tipc_disc_stop_link_req(struct link_req *req);
50 50
51void disc_recv_msg(struct sk_buff *buf); 51void tipc_disc_recv_msg(struct sk_buff *buf);
52 52
53void disc_link_event(u32 addr, char *name, int up); 53void tipc_disc_link_event(u32 addr, char *name, int up);
54#if 0 54#if 0
55int disc_create_link(const struct tipc_link_create *argv); 55int disc_create_link(const struct tipc_link_create *argv);
56#endif 56#endif
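
As a summary of the receive-side checks in tipc_disc_recv_msg() above: the function ignores messages from another TIPC network, with an invalid destination domain or origin node, or originated by the receiving node itself, and, for peers in its own cluster, it only answers when the incoming message is a request and the link is not already up. The sketch below models just that filtering order in user space; the address helpers are illustrative stand-ins, not the kernel routines.

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define DSC_REQ_MSG	0
#define DSC_RESP_MSG	1

/* Illustrative stand-ins for tipc_addr_domain_valid()/tipc_addr_node_valid(). */
static bool domain_valid(uint32_t dest) { return dest != 0xffffffff; }
static bool node_valid(uint32_t orig)   { return orig != 0; }

/* Returns true when a DSC_RESP_MSG should be sent back to the originator. */
static bool disc_should_respond(uint32_t type, uint32_t net_id, uint32_t own_net_id,
				uint32_t dest, uint32_t orig, uint32_t own_addr,
				bool link_up)
{
	if (net_id != own_net_id)
		return false;				/* different TIPC network */
	if (!domain_valid(dest) || !node_valid(orig))
		return false;				/* malformed addresses */
	if (orig == own_addr)
		return false;				/* our own message echoed back */
	if (type == DSC_RESP_MSG || link_up)
		return false;				/* nothing to answer */
	return true;					/* request from a new peer */
}

int main(void)
{
	printf("respond: %d\n",
	       disc_should_respond(DSC_REQ_MSG, 4711, 4711,
				   0x01001000, 0x01001002, 0x01001001, false));
	return 0;
}
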
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index cb507c4b7e6e..1f8d83b9c8b4 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -238,13 +238,13 @@ static char *eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size
238} 238}
239 239
240/** 240/**
241 * eth_media_start - activate Ethernet bearer support 241 * tipc_eth_media_start - activate Ethernet bearer support
242 * 242 *
243 * Register Ethernet media type with TIPC bearer code. Also register 243 * Register Ethernet media type with TIPC bearer code. Also register
244 * with OS for notifications about device state changes. 244 * with OS for notifications about device state changes.
245 */ 245 */
246 246
247int eth_media_start(void) 247int tipc_eth_media_start(void)
248{ 248{
249 struct tipc_media_addr bcast_addr; 249 struct tipc_media_addr bcast_addr;
250 int res; 250 int res;
@@ -271,10 +271,10 @@ int eth_media_start(void)
271} 271}
272 272
273/** 273/**
274 * eth_media_stop - deactivate Ethernet bearer support 274 * tipc_eth_media_stop - deactivate Ethernet bearer support
275 */ 275 */
276 276
277void eth_media_stop(void) 277void tipc_eth_media_stop(void)
278{ 278{
279 int i; 279 int i;
280 280
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index f320010f8a65..966f70a1b608 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -52,7 +52,7 @@ static void process_signal_queue(unsigned long dummy);
52static DECLARE_TASKLET_DISABLED(tipc_tasklet, process_signal_queue, 0); 52static DECLARE_TASKLET_DISABLED(tipc_tasklet, process_signal_queue, 0);
53 53
54 54
55unsigned int k_signal(Handler routine, unsigned long argument) 55unsigned int tipc_k_signal(Handler routine, unsigned long argument)
56{ 56{
57 struct queue_item *item; 57 struct queue_item *item;
58 58
@@ -93,7 +93,7 @@ static void process_signal_queue(unsigned long dummy)
93 spin_unlock_bh(&qitem_lock); 93 spin_unlock_bh(&qitem_lock);
94} 94}
95 95
96int handler_start(void) 96int tipc_handler_start(void)
97{ 97{
98 tipc_queue_item_cache = 98 tipc_queue_item_cache =
99 kmem_cache_create("tipc_queue_items", sizeof(struct queue_item), 99 kmem_cache_create("tipc_queue_items", sizeof(struct queue_item),
@@ -107,7 +107,7 @@ int handler_start(void)
107 return 0; 107 return 0;
108} 108}
109 109
110void handler_stop(void) 110void tipc_handler_stop(void)
111{ 111{
112 struct list_head *l, *n; 112 struct list_head *l, *n;
113 struct queue_item *item; 113 struct queue_item *item;
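
tipc_k_signal() in handler.c above queues a handler/argument pair and lets process_signal_queue() run the calls later from a tasklet, which is why code such as tipc_link_create() hands off tipc_link_start() via tipc_k_signal() instead of calling it directly. Below is a self-contained user-space analogue of that deferral pattern; a plain linked-list queue and an explicit drain step stand in for the kmem cache and the tasklet, so it is a sketch of the idea rather than the kernel code.

#include <stdio.h>
#include <stdlib.h>

typedef void (*Handler)(unsigned long);

struct queue_item {
	Handler handler;
	unsigned long data;
	struct queue_item *next;
};

static struct queue_item *head, *tail;

/* Queue a deferred call, in the spirit of tipc_k_signal(); 0 on success. */
static int k_signal(Handler routine, unsigned long argument)
{
	struct queue_item *item = malloc(sizeof(*item));

	if (!item)
		return -1;
	item->handler = routine;
	item->data = argument;
	item->next = NULL;
	if (tail)
		tail->next = item;
	else
		head = item;
	tail = item;
	return 0;
}

/* Drain the queue, in the spirit of process_signal_queue() in the tasklet. */
static void process_signal_queue(void)
{
	while (head) {
		struct queue_item *item = head;

		head = item->next;
		if (!head)
			tail = NULL;
		item->handler(item->data);
		free(item);
	}
}

static void start_link(unsigned long arg)
{
	printf("starting link %lu\n", arg);
}

int main(void)
{
	k_signal(start_link, 7);
	process_signal_queue();
	return 0;
}
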
diff --git a/net/tipc/link.c b/net/tipc/link.c
index d1e1ae66464a..511872afa459 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -148,12 +148,12 @@ static void link_print(struct link *l_ptr, struct print_buf *buf,
148#define LINK_LOG_BUF_SIZE 0 148#define LINK_LOG_BUF_SIZE 0
149 149
150#define dbg_link(fmt, arg...) do {if (LINK_LOG_BUF_SIZE) tipc_printf(&l_ptr->print_buf, fmt, ## arg); } while(0) 150#define dbg_link(fmt, arg...) do {if (LINK_LOG_BUF_SIZE) tipc_printf(&l_ptr->print_buf, fmt, ## arg); } while(0)
151#define dbg_link_msg(msg, txt) do {if (LINK_LOG_BUF_SIZE) msg_print(&l_ptr->print_buf, msg, txt); } while(0) 151#define dbg_link_msg(msg, txt) do {if (LINK_LOG_BUF_SIZE) tipc_msg_print(&l_ptr->print_buf, msg, txt); } while(0)
152#define dbg_link_state(txt) do {if (LINK_LOG_BUF_SIZE) link_print(l_ptr, &l_ptr->print_buf, txt); } while(0) 152#define dbg_link_state(txt) do {if (LINK_LOG_BUF_SIZE) link_print(l_ptr, &l_ptr->print_buf, txt); } while(0)
153#define dbg_link_dump() do { \ 153#define dbg_link_dump() do { \
154 if (LINK_LOG_BUF_SIZE) { \ 154 if (LINK_LOG_BUF_SIZE) { \
155 tipc_printf(LOG, "\n\nDumping link <%s>:\n", l_ptr->name); \ 155 tipc_printf(LOG, "\n\nDumping link <%s>:\n", l_ptr->name); \
156 printbuf_move(LOG, &l_ptr->print_buf); \ 156 tipc_printbuf_move(LOG, &l_ptr->print_buf); \
157 } \ 157 } \
158} while (0) 158} while (0)
159 159
@@ -252,14 +252,14 @@ static inline u32 link_last_sent(struct link *l_ptr)
252 * Simple non-inlined link routines (i.e. referenced outside this file) 252 * Simple non-inlined link routines (i.e. referenced outside this file)
253 */ 253 */
254 254
255int link_is_up(struct link *l_ptr) 255int tipc_link_is_up(struct link *l_ptr)
256{ 256{
257 if (!l_ptr) 257 if (!l_ptr)
258 return 0; 258 return 0;
259 return (link_working_working(l_ptr) || link_working_unknown(l_ptr)); 259 return (link_working_working(l_ptr) || link_working_unknown(l_ptr));
260} 260}
261 261
262int link_is_active(struct link *l_ptr) 262int tipc_link_is_active(struct link *l_ptr)
263{ 263{
264 return ((l_ptr->owner->active_links[0] == l_ptr) || 264 return ((l_ptr->owner->active_links[0] == l_ptr) ||
265 (l_ptr->owner->active_links[1] == l_ptr)); 265 (l_ptr->owner->active_links[1] == l_ptr));
@@ -338,15 +338,15 @@ static int link_name_validate(const char *name, struct link_name *name_parts)
338 * link_timeout - handle expiration of link timer 338 * link_timeout - handle expiration of link timer
339 * @l_ptr: pointer to link 339 * @l_ptr: pointer to link
340 * 340 *
341 * This routine must not grab "net_lock" to avoid a potential deadlock conflict 341 * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict
342 * with link_delete(). (There is no risk that the node will be deleted by 342 * with tipc_link_delete(). (There is no risk that the node will be deleted by
343 * another thread because link_delete() always cancels the link timer before 343 * another thread because tipc_link_delete() always cancels the link timer before
344 * node_delete() is called.) 344 * tipc_node_delete() is called.)
345 */ 345 */
346 346
347static void link_timeout(struct link *l_ptr) 347static void link_timeout(struct link *l_ptr)
348{ 348{
349 node_lock(l_ptr->owner); 349 tipc_node_lock(l_ptr->owner);
350 350
351 /* update counters used in statistical profiling of send traffic */ 351 /* update counters used in statistical profiling of send traffic */
352 352
@@ -391,9 +391,9 @@ static void link_timeout(struct link *l_ptr)
391 link_state_event(l_ptr, TIMEOUT_EVT); 391 link_state_event(l_ptr, TIMEOUT_EVT);
392 392
393 if (l_ptr->next_out) 393 if (l_ptr->next_out)
394 link_push_queue(l_ptr); 394 tipc_link_push_queue(l_ptr);
395 395
396 node_unlock(l_ptr->owner); 396 tipc_node_unlock(l_ptr->owner);
397} 397}
398 398
399static inline void link_set_timer(struct link *l_ptr, u32 time) 399static inline void link_set_timer(struct link *l_ptr, u32 time)
@@ -402,7 +402,7 @@ static inline void link_set_timer(struct link *l_ptr, u32 time)
402} 402}
403 403
404/** 404/**
405 * link_create - create a new link 405 * tipc_link_create - create a new link
406 * @b_ptr: pointer to associated bearer 406 * @b_ptr: pointer to associated bearer
407 * @peer: network address of node at other end of link 407 * @peer: network address of node at other end of link
408 * @media_addr: media address to use when sending messages over link 408 * @media_addr: media address to use when sending messages over link
@@ -410,8 +410,8 @@ static inline void link_set_timer(struct link *l_ptr, u32 time)
410 * Returns pointer to link. 410 * Returns pointer to link.
411 */ 411 */
412 412
413struct link *link_create(struct bearer *b_ptr, const u32 peer, 413struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
414 const struct tipc_media_addr *media_addr) 414 const struct tipc_media_addr *media_addr)
415{ 415{
416 struct link *l_ptr; 416 struct link *l_ptr;
417 struct tipc_msg *msg; 417 struct tipc_msg *msg;
@@ -449,7 +449,7 @@ struct link *link_create(struct bearer *b_ptr, const u32 peer,
449 strcpy((char *)msg_data(msg), if_name); 449 strcpy((char *)msg_data(msg), if_name);
450 450
451 l_ptr->priority = b_ptr->priority; 451 l_ptr->priority = b_ptr->priority;
452 link_set_queue_limits(l_ptr, b_ptr->media->window); 452 tipc_link_set_queue_limits(l_ptr, b_ptr->media->window);
453 453
454 link_init_max_pkt(l_ptr); 454 link_init_max_pkt(l_ptr);
455 455
@@ -458,7 +458,7 @@ struct link *link_create(struct bearer *b_ptr, const u32 peer,
458 458
459 link_reset_statistics(l_ptr); 459 link_reset_statistics(l_ptr);
460 460
461 l_ptr->owner = node_attach_link(l_ptr); 461 l_ptr->owner = tipc_node_attach_link(l_ptr);
462 if (!l_ptr->owner) { 462 if (!l_ptr->owner) {
463 kfree(l_ptr); 463 kfree(l_ptr);
464 return NULL; 464 return NULL;
@@ -472,52 +472,52 @@ struct link *link_create(struct bearer *b_ptr, const u32 peer,
472 warn("Memory squeeze; Failed to create link\n"); 472 warn("Memory squeeze; Failed to create link\n");
473 return NULL; 473 return NULL;
474 } 474 }
475 printbuf_init(&l_ptr->print_buf, pb, LINK_LOG_BUF_SIZE); 475 tipc_printbuf_init(&l_ptr->print_buf, pb, LINK_LOG_BUF_SIZE);
476 } 476 }
477 477
478 k_signal((Handler)link_start, (unsigned long)l_ptr); 478 tipc_k_signal((Handler)tipc_link_start, (unsigned long)l_ptr);
479 479
480 dbg("link_create(): tolerance = %u,cont intv = %u, abort_limit = %u\n", 480 dbg("tipc_link_create(): tolerance = %u,cont intv = %u, abort_limit = %u\n",
481 l_ptr->tolerance, l_ptr->continuity_interval, l_ptr->abort_limit); 481 l_ptr->tolerance, l_ptr->continuity_interval, l_ptr->abort_limit);
482 482
483 return l_ptr; 483 return l_ptr;
484} 484}
485 485
486/** 486/**
487 * link_delete - delete a link 487 * tipc_link_delete - delete a link
488 * @l_ptr: pointer to link 488 * @l_ptr: pointer to link
489 * 489 *
490 * Note: 'net_lock' is write_locked, bearer is locked. 490 * Note: 'tipc_net_lock' is write_locked, bearer is locked.
491 * This routine must not grab the node lock until after link timer cancellation 491 * This routine must not grab the node lock until after link timer cancellation
492 * to avoid a potential deadlock situation. 492 * to avoid a potential deadlock situation.
493 */ 493 */
494 494
495void link_delete(struct link *l_ptr) 495void tipc_link_delete(struct link *l_ptr)
496{ 496{
497 if (!l_ptr) { 497 if (!l_ptr) {
498 err("Attempt to delete non-existent link\n"); 498 err("Attempt to delete non-existent link\n");
499 return; 499 return;
500 } 500 }
501 501
502 dbg("link_delete()\n"); 502 dbg("tipc_link_delete()\n");
503 503
504 k_cancel_timer(&l_ptr->timer); 504 k_cancel_timer(&l_ptr->timer);
505 505
506 node_lock(l_ptr->owner); 506 tipc_node_lock(l_ptr->owner);
507 link_reset(l_ptr); 507 tipc_link_reset(l_ptr);
508 node_detach_link(l_ptr->owner, l_ptr); 508 tipc_node_detach_link(l_ptr->owner, l_ptr);
509 link_stop(l_ptr); 509 tipc_link_stop(l_ptr);
510 list_del_init(&l_ptr->link_list); 510 list_del_init(&l_ptr->link_list);
511 if (LINK_LOG_BUF_SIZE) 511 if (LINK_LOG_BUF_SIZE)
512 kfree(l_ptr->print_buf.buf); 512 kfree(l_ptr->print_buf.buf);
513 node_unlock(l_ptr->owner); 513 tipc_node_unlock(l_ptr->owner);
514 k_term_timer(&l_ptr->timer); 514 k_term_timer(&l_ptr->timer);
515 kfree(l_ptr); 515 kfree(l_ptr);
516} 516}
517 517
518void link_start(struct link *l_ptr) 518void tipc_link_start(struct link *l_ptr)
519{ 519{
520 dbg("link_start %x\n", l_ptr); 520 dbg("tipc_link_start %x\n", l_ptr);
521 link_state_event(l_ptr, STARTING_EVT); 521 link_state_event(l_ptr, STARTING_EVT);
522} 522}
523 523
@@ -535,8 +535,8 @@ static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
535{ 535{
536 struct port *p_ptr; 536 struct port *p_ptr;
537 537
538 spin_lock_bh(&port_list_lock); 538 spin_lock_bh(&tipc_port_list_lock);
539 p_ptr = port_lock(origport); 539 p_ptr = tipc_port_lock(origport);
540 if (p_ptr) { 540 if (p_ptr) {
541 if (!p_ptr->wakeup) 541 if (!p_ptr->wakeup)
542 goto exit; 542 goto exit;
@@ -548,13 +548,13 @@ static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
548 list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports); 548 list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
549 l_ptr->stats.link_congs++; 549 l_ptr->stats.link_congs++;
550exit: 550exit:
551 port_unlock(p_ptr); 551 tipc_port_unlock(p_ptr);
552 } 552 }
553 spin_unlock_bh(&port_list_lock); 553 spin_unlock_bh(&tipc_port_list_lock);
554 return -ELINKCONG; 554 return -ELINKCONG;
555} 555}
556 556
557void link_wakeup_ports(struct link *l_ptr, int all) 557void tipc_link_wakeup_ports(struct link *l_ptr, int all)
558{ 558{
559 struct port *p_ptr; 559 struct port *p_ptr;
560 struct port *temp_p_ptr; 560 struct port *temp_p_ptr;
@@ -564,7 +564,7 @@ void link_wakeup_ports(struct link *l_ptr, int all)
564 win = 100000; 564 win = 100000;
565 if (win <= 0) 565 if (win <= 0)
566 return; 566 return;
567 if (!spin_trylock_bh(&port_list_lock)) 567 if (!spin_trylock_bh(&tipc_port_list_lock))
568 return; 568 return;
569 if (link_congested(l_ptr)) 569 if (link_congested(l_ptr))
570 goto exit; 570 goto exit;
@@ -583,7 +583,7 @@ void link_wakeup_ports(struct link *l_ptr, int all)
583 } 583 }
584 584
585exit: 585exit:
586 spin_unlock_bh(&port_list_lock); 586 spin_unlock_bh(&tipc_port_list_lock);
587} 587}
588 588
589/** 589/**
@@ -606,11 +606,11 @@ static void link_release_outqueue(struct link *l_ptr)
606} 606}
607 607
608/** 608/**
609 * link_reset_fragments - purge link's inbound message fragments queue 609 * tipc_link_reset_fragments - purge link's inbound message fragments queue
610 * @l_ptr: pointer to link 610 * @l_ptr: pointer to link
611 */ 611 */
612 612
613void link_reset_fragments(struct link *l_ptr) 613void tipc_link_reset_fragments(struct link *l_ptr)
614{ 614{
615 struct sk_buff *buf = l_ptr->defragm_buf; 615 struct sk_buff *buf = l_ptr->defragm_buf;
616 struct sk_buff *next; 616 struct sk_buff *next;
@@ -624,11 +624,11 @@ void link_reset_fragments(struct link *l_ptr)
624} 624}
625 625
626/** 626/**
627 * link_stop - purge all inbound and outbound messages associated with link 627 * tipc_link_stop - purge all inbound and outbound messages associated with link
628 * @l_ptr: pointer to link 628 * @l_ptr: pointer to link
629 */ 629 */
630 630
631void link_stop(struct link *l_ptr) 631void tipc_link_stop(struct link *l_ptr)
632{ 632{
633 struct sk_buff *buf; 633 struct sk_buff *buf;
634 struct sk_buff *next; 634 struct sk_buff *next;
@@ -647,7 +647,7 @@ void link_stop(struct link *l_ptr)
647 buf = next; 647 buf = next;
648 } 648 }
649 649
650 link_reset_fragments(l_ptr); 650 tipc_link_reset_fragments(l_ptr);
651 651
652 buf_discard(l_ptr->proto_msg_queue); 652 buf_discard(l_ptr->proto_msg_queue);
653 l_ptr->proto_msg_queue = NULL; 653 l_ptr->proto_msg_queue = NULL;
@@ -677,7 +677,7 @@ static void link_send_event(void (*fcn)(u32 a, char *n, int up),
677 ev->up = up; 677 ev->up = up;
678 ev->fcn = fcn; 678 ev->fcn = fcn;
679 memcpy(ev->name, l_ptr->name, TIPC_MAX_LINK_NAME); 679 memcpy(ev->name, l_ptr->name, TIPC_MAX_LINK_NAME);
680 k_signal((Handler)link_recv_event, (unsigned long)ev); 680 tipc_k_signal((Handler)link_recv_event, (unsigned long)ev);
681} 681}
682 682
683#else 683#else
@@ -686,7 +686,7 @@ static void link_send_event(void (*fcn)(u32 a, char *n, int up),
686 686
687#endif 687#endif
688 688
689void link_reset(struct link *l_ptr) 689void tipc_link_reset(struct link *l_ptr)
690{ 690{
691 struct sk_buff *buf; 691 struct sk_buff *buf;
692 u32 prev_state = l_ptr->state; 692 u32 prev_state = l_ptr->state;
@@ -706,13 +706,13 @@ void link_reset(struct link *l_ptr)
706 if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET)) 706 if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
707 return; 707 return;
708 708
709 node_link_down(l_ptr->owner, l_ptr); 709 tipc_node_link_down(l_ptr->owner, l_ptr);
710 bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr); 710 tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
711#if 0 711#if 0
712 tipc_printf(CONS, "\nReset link <%s>\n", l_ptr->name); 712 tipc_printf(TIPC_CONS, "\nReset link <%s>\n", l_ptr->name);
713 dbg_link_dump(); 713 dbg_link_dump();
714#endif 714#endif
715 if (node_has_active_links(l_ptr->owner) && 715 if (tipc_node_has_active_links(l_ptr->owner) &&
716 l_ptr->owner->permit_changeover) { 716 l_ptr->owner->permit_changeover) {
717 l_ptr->reset_checkpoint = checkpoint; 717 l_ptr->reset_checkpoint = checkpoint;
718 l_ptr->exp_msg_count = START_CHANGEOVER; 718 l_ptr->exp_msg_count = START_CHANGEOVER;
@@ -730,7 +730,7 @@ void link_reset(struct link *l_ptr)
730 buf = next; 730 buf = next;
731 } 731 }
732 if (!list_empty(&l_ptr->waiting_ports)) 732 if (!list_empty(&l_ptr->waiting_ports))
733 link_wakeup_ports(l_ptr, 1); 733 tipc_link_wakeup_ports(l_ptr, 1);
734 734
735 l_ptr->retransm_queue_head = 0; 735 l_ptr->retransm_queue_head = 0;
736 l_ptr->retransm_queue_size = 0; 736 l_ptr->retransm_queue_size = 0;
@@ -747,20 +747,20 @@ void link_reset(struct link *l_ptr)
747 l_ptr->stale_count = 0; 747 l_ptr->stale_count = 0;
748 link_reset_statistics(l_ptr); 748 link_reset_statistics(l_ptr);
749 749
750 link_send_event(cfg_link_event, l_ptr, 0); 750 link_send_event(tipc_cfg_link_event, l_ptr, 0);
751 if (!in_own_cluster(l_ptr->addr)) 751 if (!in_own_cluster(l_ptr->addr))
752 link_send_event(disc_link_event, l_ptr, 0); 752 link_send_event(tipc_disc_link_event, l_ptr, 0);
753} 753}
754 754
755 755
756static void link_activate(struct link *l_ptr) 756static void link_activate(struct link *l_ptr)
757{ 757{
758 l_ptr->next_in_no = 1; 758 l_ptr->next_in_no = 1;
759 node_link_up(l_ptr->owner, l_ptr); 759 tipc_node_link_up(l_ptr->owner, l_ptr);
760 bearer_add_dest(l_ptr->b_ptr, l_ptr->addr); 760 tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
761 link_send_event(cfg_link_event, l_ptr, 1); 761 link_send_event(tipc_cfg_link_event, l_ptr, 1);
762 if (!in_own_cluster(l_ptr->addr)) 762 if (!in_own_cluster(l_ptr->addr))
763 link_send_event(disc_link_event, l_ptr, 1); 763 link_send_event(tipc_disc_link_event, l_ptr, 1);
764} 764}
765 765
766/** 766/**
@@ -799,13 +799,13 @@ static void link_state_event(struct link *l_ptr, unsigned event)
799 dbg_link("TIM "); 799 dbg_link("TIM ");
800 if (l_ptr->next_in_no != l_ptr->checkpoint) { 800 if (l_ptr->next_in_no != l_ptr->checkpoint) {
801 l_ptr->checkpoint = l_ptr->next_in_no; 801 l_ptr->checkpoint = l_ptr->next_in_no;
802 if (bclink_acks_missing(l_ptr->owner)) { 802 if (tipc_bclink_acks_missing(l_ptr->owner)) {
803 link_send_proto_msg(l_ptr, STATE_MSG, 803 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
804 0, 0, 0, 0, 0); 804 0, 0, 0, 0, 0);
805 l_ptr->fsm_msg_cnt++; 805 l_ptr->fsm_msg_cnt++;
806 } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) { 806 } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
807 link_send_proto_msg(l_ptr, STATE_MSG, 807 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
808 1, 0, 0, 0, 0); 808 1, 0, 0, 0, 0);
809 l_ptr->fsm_msg_cnt++; 809 l_ptr->fsm_msg_cnt++;
810 } 810 }
811 link_set_timer(l_ptr, cont_intv); 811 link_set_timer(l_ptr, cont_intv);
@@ -814,16 +814,16 @@ static void link_state_event(struct link *l_ptr, unsigned event)
814 dbg_link(" -> WU\n"); 814 dbg_link(" -> WU\n");
815 l_ptr->state = WORKING_UNKNOWN; 815 l_ptr->state = WORKING_UNKNOWN;
816 l_ptr->fsm_msg_cnt = 0; 816 l_ptr->fsm_msg_cnt = 0;
817 link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); 817 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
818 l_ptr->fsm_msg_cnt++; 818 l_ptr->fsm_msg_cnt++;
819 link_set_timer(l_ptr, cont_intv / 4); 819 link_set_timer(l_ptr, cont_intv / 4);
820 break; 820 break;
821 case RESET_MSG: 821 case RESET_MSG:
822 dbg_link("RES -> RR\n"); 822 dbg_link("RES -> RR\n");
823 link_reset(l_ptr); 823 tipc_link_reset(l_ptr);
824 l_ptr->state = RESET_RESET; 824 l_ptr->state = RESET_RESET;
825 l_ptr->fsm_msg_cnt = 0; 825 l_ptr->fsm_msg_cnt = 0;
826 link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0); 826 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
827 l_ptr->fsm_msg_cnt++; 827 l_ptr->fsm_msg_cnt++;
828 link_set_timer(l_ptr, cont_intv); 828 link_set_timer(l_ptr, cont_intv);
829 break; 829 break;
@@ -844,10 +844,10 @@ static void link_state_event(struct link *l_ptr, unsigned event)
844 break; 844 break;
845 case RESET_MSG: 845 case RESET_MSG:
846 dbg_link("RES -> RR\n"); 846 dbg_link("RES -> RR\n");
847 link_reset(l_ptr); 847 tipc_link_reset(l_ptr);
848 l_ptr->state = RESET_RESET; 848 l_ptr->state = RESET_RESET;
849 l_ptr->fsm_msg_cnt = 0; 849 l_ptr->fsm_msg_cnt = 0;
850 link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0); 850 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
851 l_ptr->fsm_msg_cnt++; 851 l_ptr->fsm_msg_cnt++;
852 link_set_timer(l_ptr, cont_intv); 852 link_set_timer(l_ptr, cont_intv);
853 break; 853 break;
@@ -858,9 +858,9 @@ static void link_state_event(struct link *l_ptr, unsigned event)
858 l_ptr->state = WORKING_WORKING; 858 l_ptr->state = WORKING_WORKING;
859 l_ptr->fsm_msg_cnt = 0; 859 l_ptr->fsm_msg_cnt = 0;
860 l_ptr->checkpoint = l_ptr->next_in_no; 860 l_ptr->checkpoint = l_ptr->next_in_no;
861 if (bclink_acks_missing(l_ptr->owner)) { 861 if (tipc_bclink_acks_missing(l_ptr->owner)) {
862 link_send_proto_msg(l_ptr, STATE_MSG, 862 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
863 0, 0, 0, 0, 0); 863 0, 0, 0, 0, 0);
864 l_ptr->fsm_msg_cnt++; 864 l_ptr->fsm_msg_cnt++;
865 } 865 }
866 link_set_timer(l_ptr, cont_intv); 866 link_set_timer(l_ptr, cont_intv);
@@ -868,18 +868,18 @@ static void link_state_event(struct link *l_ptr, unsigned event)
868 dbg_link("Probing %u/%u,timer = %u ms)\n", 868 dbg_link("Probing %u/%u,timer = %u ms)\n",
869 l_ptr->fsm_msg_cnt, l_ptr->abort_limit, 869 l_ptr->fsm_msg_cnt, l_ptr->abort_limit,
870 cont_intv / 4); 870 cont_intv / 4);
871 link_send_proto_msg(l_ptr, STATE_MSG, 871 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
872 1, 0, 0, 0, 0); 872 1, 0, 0, 0, 0);
873 l_ptr->fsm_msg_cnt++; 873 l_ptr->fsm_msg_cnt++;
874 link_set_timer(l_ptr, cont_intv / 4); 874 link_set_timer(l_ptr, cont_intv / 4);
875 } else { /* Link has failed */ 875 } else { /* Link has failed */
876 dbg_link("-> RU (%u probes unanswered)\n", 876 dbg_link("-> RU (%u probes unanswered)\n",
877 l_ptr->fsm_msg_cnt); 877 l_ptr->fsm_msg_cnt);
878 link_reset(l_ptr); 878 tipc_link_reset(l_ptr);
879 l_ptr->state = RESET_UNKNOWN; 879 l_ptr->state = RESET_UNKNOWN;
880 l_ptr->fsm_msg_cnt = 0; 880 l_ptr->fsm_msg_cnt = 0;
881 link_send_proto_msg(l_ptr, RESET_MSG, 881 tipc_link_send_proto_msg(l_ptr, RESET_MSG,
882 0, 0, 0, 0, 0); 882 0, 0, 0, 0, 0);
883 l_ptr->fsm_msg_cnt++; 883 l_ptr->fsm_msg_cnt++;
884 link_set_timer(l_ptr, cont_intv); 884 link_set_timer(l_ptr, cont_intv);
885 } 885 }
@@ -904,7 +904,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
904 l_ptr->state = WORKING_WORKING; 904 l_ptr->state = WORKING_WORKING;
905 l_ptr->fsm_msg_cnt = 0; 905 l_ptr->fsm_msg_cnt = 0;
906 link_activate(l_ptr); 906 link_activate(l_ptr);
907 link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); 907 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
908 l_ptr->fsm_msg_cnt++; 908 l_ptr->fsm_msg_cnt++;
909 link_set_timer(l_ptr, cont_intv); 909 link_set_timer(l_ptr, cont_intv);
910 break; 910 break;
@@ -913,7 +913,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
913 dbg_link(" -> RR\n"); 913 dbg_link(" -> RR\n");
914 l_ptr->state = RESET_RESET; 914 l_ptr->state = RESET_RESET;
915 l_ptr->fsm_msg_cnt = 0; 915 l_ptr->fsm_msg_cnt = 0;
916 link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0); 916 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
917 l_ptr->fsm_msg_cnt++; 917 l_ptr->fsm_msg_cnt++;
918 link_set_timer(l_ptr, cont_intv); 918 link_set_timer(l_ptr, cont_intv);
919 break; 919 break;
@@ -923,7 +923,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
923 /* fall through */ 923 /* fall through */
924 case TIMEOUT_EVT: 924 case TIMEOUT_EVT:
925 dbg_link("TIM \n"); 925 dbg_link("TIM \n");
926 link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0); 926 tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
927 l_ptr->fsm_msg_cnt++; 927 l_ptr->fsm_msg_cnt++;
928 link_set_timer(l_ptr, cont_intv); 928 link_set_timer(l_ptr, cont_intv);
929 break; 929 break;
@@ -947,7 +947,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
947 l_ptr->state = WORKING_WORKING; 947 l_ptr->state = WORKING_WORKING;
948 l_ptr->fsm_msg_cnt = 0; 948 l_ptr->fsm_msg_cnt = 0;
949 link_activate(l_ptr); 949 link_activate(l_ptr);
950 link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0); 950 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
951 l_ptr->fsm_msg_cnt++; 951 l_ptr->fsm_msg_cnt++;
952 link_set_timer(l_ptr, cont_intv); 952 link_set_timer(l_ptr, cont_intv);
953 break; 953 break;
@@ -956,7 +956,7 @@ static void link_state_event(struct link *l_ptr, unsigned event)
956 break; 956 break;
957 case TIMEOUT_EVT: 957 case TIMEOUT_EVT:
958 dbg_link("TIM\n"); 958 dbg_link("TIM\n");
959 link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0); 959 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
960 l_ptr->fsm_msg_cnt++; 960 l_ptr->fsm_msg_cnt++;
961 link_set_timer(l_ptr, cont_intv); 961 link_set_timer(l_ptr, cont_intv);
962 dbg_link("fsm_msg_cnt %u\n", l_ptr->fsm_msg_cnt); 962 dbg_link("fsm_msg_cnt %u\n", l_ptr->fsm_msg_cnt);
@@ -1023,12 +1023,12 @@ static inline void link_add_to_outqueue(struct link *l_ptr,
1023} 1023}
1024 1024
1025/* 1025/*
1026 * link_send_buf() is the 'full path' for messages, called from 1026 * tipc_link_send_buf() is the 'full path' for messages, called from
1027 * inside TIPC when the 'fast path' in tipc_send_buf 1027 * inside TIPC when the 'fast path' in tipc_send_buf
1028 * has failed, and from link_send() 1028 * has failed, and from link_send()
1029 */ 1029 */
1030 1030
1031int link_send_buf(struct link *l_ptr, struct sk_buff *buf) 1031int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
1032{ 1032{
1033 struct tipc_msg *msg = buf_msg(buf); 1033 struct tipc_msg *msg = buf_msg(buf);
1034 u32 size = msg_size(msg); 1034 u32 size = msg_size(msg);
@@ -1051,7 +1051,7 @@ int link_send_buf(struct link *l_ptr, struct sk_buff *buf)
1051 buf_discard(buf); 1051 buf_discard(buf);
1052 if (imp > CONN_MANAGER) { 1052 if (imp > CONN_MANAGER) {
1053 warn("Resetting <%s>, send queue full", l_ptr->name); 1053 warn("Resetting <%s>, send queue full", l_ptr->name);
1054 link_reset(l_ptr); 1054 tipc_link_reset(l_ptr);
1055 } 1055 }
1056 return dsz; 1056 return dsz;
1057 } 1057 }
@@ -1059,21 +1059,21 @@ int link_send_buf(struct link *l_ptr, struct sk_buff *buf)
1059 /* Fragmentation needed ? */ 1059 /* Fragmentation needed ? */
1060 1060
1061 if (size > max_packet) 1061 if (size > max_packet)
1062 return link_send_long_buf(l_ptr, buf); 1062 return tipc_link_send_long_buf(l_ptr, buf);
1063 1063
1064 /* Packet can be queued or sent: */ 1064 /* Packet can be queued or sent: */
1065 1065
1066 if (queue_size > l_ptr->stats.max_queue_sz) 1066 if (queue_size > l_ptr->stats.max_queue_sz)
1067 l_ptr->stats.max_queue_sz = queue_size; 1067 l_ptr->stats.max_queue_sz = queue_size;
1068 1068
1069 if (likely(!bearer_congested(l_ptr->b_ptr, l_ptr) && 1069 if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) &&
1070 !link_congested(l_ptr))) { 1070 !link_congested(l_ptr))) {
1071 link_add_to_outqueue(l_ptr, buf, msg); 1071 link_add_to_outqueue(l_ptr, buf, msg);
1072 1072
1073 if (likely(bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) { 1073 if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
1074 l_ptr->unacked_window = 0; 1074 l_ptr->unacked_window = 0;
1075 } else { 1075 } else {
1076 bearer_schedule(l_ptr->b_ptr, l_ptr); 1076 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1077 l_ptr->stats.bearer_congs++; 1077 l_ptr->stats.bearer_congs++;
1078 l_ptr->next_out = buf; 1078 l_ptr->next_out = buf;
1079 } 1079 }
@@ -1088,7 +1088,7 @@ int link_send_buf(struct link *l_ptr, struct sk_buff *buf)
1088 1088
1089 if (l_ptr->next_out && 1089 if (l_ptr->next_out &&
1090 link_bundle_buf(l_ptr, l_ptr->last_out, buf)) { 1090 link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
1091 bearer_resolve_congestion(l_ptr->b_ptr, l_ptr); 1091 tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
1092 return dsz; 1092 return dsz;
1093 } 1093 }
1094 1094
@@ -1114,38 +1114,38 @@ int link_send_buf(struct link *l_ptr, struct sk_buff *buf)
1114 if (!l_ptr->next_out) 1114 if (!l_ptr->next_out)
1115 l_ptr->next_out = buf; 1115 l_ptr->next_out = buf;
1116 link_add_to_outqueue(l_ptr, buf, msg); 1116 link_add_to_outqueue(l_ptr, buf, msg);
1117 bearer_resolve_congestion(l_ptr->b_ptr, l_ptr); 1117 tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
1118 return dsz; 1118 return dsz;
1119} 1119}
1120 1120
1121/* 1121/*
1122 * link_send(): same as link_send_buf(), but the link to use has 1122 * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has
1123 * not been selected yet, and the owner node is not locked 1123 * not been selected yet, and the owner node is not locked
1124 * Called by TIPC internal users, e.g. the name distributor 1124 * Called by TIPC internal users, e.g. the name distributor
1125 */ 1125 */
1126 1126
1127int link_send(struct sk_buff *buf, u32 dest, u32 selector) 1127int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
1128{ 1128{
1129 struct link *l_ptr; 1129 struct link *l_ptr;
1130 struct node *n_ptr; 1130 struct node *n_ptr;
1131 int res = -ELINKCONG; 1131 int res = -ELINKCONG;
1132 1132
1133 read_lock_bh(&net_lock); 1133 read_lock_bh(&tipc_net_lock);
1134 n_ptr = node_select(dest, selector); 1134 n_ptr = tipc_node_select(dest, selector);
1135 if (n_ptr) { 1135 if (n_ptr) {
1136 node_lock(n_ptr); 1136 tipc_node_lock(n_ptr);
1137 l_ptr = n_ptr->active_links[selector & 1]; 1137 l_ptr = n_ptr->active_links[selector & 1];
1138 dbg("link_send: found link %x for dest %x\n", l_ptr, dest); 1138 dbg("tipc_link_send: found link %x for dest %x\n", l_ptr, dest);
1139 if (l_ptr) { 1139 if (l_ptr) {
1140 res = link_send_buf(l_ptr, buf); 1140 res = tipc_link_send_buf(l_ptr, buf);
1141 } 1141 }
1142 node_unlock(n_ptr); 1142 tipc_node_unlock(n_ptr);
1143 } else { 1143 } else {
1144 dbg("Attempt to send msg to unknown node:\n"); 1144 dbg("Attempt to send msg to unknown node:\n");
1145 msg_dbg(buf_msg(buf),">>>"); 1145 msg_dbg(buf_msg(buf),">>>");
1146 buf_discard(buf); 1146 buf_discard(buf);
1147 } 1147 }
1148 read_unlock_bh(&net_lock); 1148 read_unlock_bh(&tipc_net_lock);
1149 return res; 1149 return res;
1150} 1150}
1151 1151
@@ -1166,14 +1166,14 @@ static inline int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
1166 if (likely(msg_size(msg) <= link_max_pkt(l_ptr))) { 1166 if (likely(msg_size(msg) <= link_max_pkt(l_ptr))) {
1167 if (likely(list_empty(&l_ptr->b_ptr->cong_links))) { 1167 if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
1168 link_add_to_outqueue(l_ptr, buf, msg); 1168 link_add_to_outqueue(l_ptr, buf, msg);
1169 if (likely(bearer_send(l_ptr->b_ptr, buf, 1169 if (likely(tipc_bearer_send(l_ptr->b_ptr, buf,
1170 &l_ptr->media_addr))) { 1170 &l_ptr->media_addr))) {
1171 l_ptr->unacked_window = 0; 1171 l_ptr->unacked_window = 0;
1172 msg_dbg(msg,"SENT_FAST:"); 1172 msg_dbg(msg,"SENT_FAST:");
1173 return res; 1173 return res;
1174 } 1174 }
1175 dbg("failed sent fast...\n"); 1175 dbg("failed sent fast...\n");
1176 bearer_schedule(l_ptr->b_ptr, l_ptr); 1176 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1177 l_ptr->stats.bearer_congs++; 1177 l_ptr->stats.bearer_congs++;
1178 l_ptr->next_out = buf; 1178 l_ptr->next_out = buf;
1179 return res; 1179 return res;
@@ -1182,7 +1182,7 @@ static inline int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
1182 else 1182 else
1183 *used_max_pkt = link_max_pkt(l_ptr); 1183 *used_max_pkt = link_max_pkt(l_ptr);
1184 } 1184 }
1185 return link_send_buf(l_ptr, buf); /* All other cases */ 1185 return tipc_link_send_buf(l_ptr, buf); /* All other cases */
1186} 1186}
1187 1187
1188/* 1188/*
@@ -1200,24 +1200,24 @@ int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
1200 u32 dummy; 1200 u32 dummy;
1201 1201
1202 if (destnode == tipc_own_addr) 1202 if (destnode == tipc_own_addr)
1203 return port_recv_msg(buf); 1203 return tipc_port_recv_msg(buf);
1204 1204
1205 read_lock_bh(&net_lock); 1205 read_lock_bh(&tipc_net_lock);
1206 n_ptr = node_select(destnode, selector); 1206 n_ptr = tipc_node_select(destnode, selector);
1207 if (likely(n_ptr)) { 1207 if (likely(n_ptr)) {
1208 node_lock(n_ptr); 1208 tipc_node_lock(n_ptr);
1209 l_ptr = n_ptr->active_links[selector]; 1209 l_ptr = n_ptr->active_links[selector];
1210 dbg("send_fast: buf %x selected %x, destnode = %x\n", 1210 dbg("send_fast: buf %x selected %x, destnode = %x\n",
1211 buf, l_ptr, destnode); 1211 buf, l_ptr, destnode);
1212 if (likely(l_ptr)) { 1212 if (likely(l_ptr)) {
1213 res = link_send_buf_fast(l_ptr, buf, &dummy); 1213 res = link_send_buf_fast(l_ptr, buf, &dummy);
1214 node_unlock(n_ptr); 1214 tipc_node_unlock(n_ptr);
1215 read_unlock_bh(&net_lock); 1215 read_unlock_bh(&tipc_net_lock);
1216 return res; 1216 return res;
1217 } 1217 }
1218 node_unlock(n_ptr); 1218 tipc_node_unlock(n_ptr);
1219 } 1219 }
1220 read_unlock_bh(&net_lock); 1220 read_unlock_bh(&tipc_net_lock);
1221 res = msg_data_sz(buf_msg(buf)); 1221 res = msg_data_sz(buf_msg(buf));
1222 tipc_reject_msg(buf, TIPC_ERR_NO_NODE); 1222 tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1223 return res; 1223 return res;
@@ -1225,15 +1225,15 @@ int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
1225 1225
1226 1226
1227/* 1227/*
1228 * link_send_sections_fast: Entry for messages where the 1228 * tipc_link_send_sections_fast: Entry for messages where the
1229 * destination processor is known and the header is complete, 1229 * destination processor is known and the header is complete,
1230 * except for total message length. 1230 * except for total message length.
1231 * Returns user data length or errno. 1231 * Returns user data length or errno.
1232 */ 1232 */
1233int link_send_sections_fast(struct port *sender, 1233int tipc_link_send_sections_fast(struct port *sender,
1234 struct iovec const *msg_sect, 1234 struct iovec const *msg_sect,
1235 const u32 num_sect, 1235 const u32 num_sect,
1236 u32 destaddr) 1236 u32 destaddr)
1237{ 1237{
1238 struct tipc_msg *hdr = &sender->publ.phdr; 1238 struct tipc_msg *hdr = &sender->publ.phdr;
1239 struct link *l_ptr; 1239 struct link *l_ptr;
@@ -1253,10 +1253,10 @@ again:
1253 res = msg_build(hdr, msg_sect, num_sect, sender->max_pkt, 1253 res = msg_build(hdr, msg_sect, num_sect, sender->max_pkt,
1254 !sender->user_port, &buf); 1254 !sender->user_port, &buf);
1255 1255
1256 read_lock_bh(&net_lock); 1256 read_lock_bh(&tipc_net_lock);
1257 node = node_select(destaddr, selector); 1257 node = tipc_node_select(destaddr, selector);
1258 if (likely(node)) { 1258 if (likely(node)) {
1259 node_lock(node); 1259 tipc_node_lock(node);
1260 l_ptr = node->active_links[selector]; 1260 l_ptr = node->active_links[selector];
1261 if (likely(l_ptr)) { 1261 if (likely(l_ptr)) {
1262 if (likely(buf)) { 1262 if (likely(buf)) {
@@ -1265,8 +1265,8 @@ again:
1265 if (unlikely(res < 0)) 1265 if (unlikely(res < 0))
1266 buf_discard(buf); 1266 buf_discard(buf);
1267exit: 1267exit:
1268 node_unlock(node); 1268 tipc_node_unlock(node);
1269 read_unlock_bh(&net_lock); 1269 read_unlock_bh(&tipc_net_lock);
1270 return res; 1270 return res;
1271 } 1271 }
1272 1272
@@ -1290,8 +1290,8 @@ exit:
1290 */ 1290 */
1291 1291
1292 sender->max_pkt = link_max_pkt(l_ptr); 1292 sender->max_pkt = link_max_pkt(l_ptr);
1293 node_unlock(node); 1293 tipc_node_unlock(node);
1294 read_unlock_bh(&net_lock); 1294 read_unlock_bh(&tipc_net_lock);
1295 1295
1296 1296
1297 if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt) 1297 if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
@@ -1300,17 +1300,17 @@ exit:
1300 return link_send_sections_long(sender, msg_sect, 1300 return link_send_sections_long(sender, msg_sect,
1301 num_sect, destaddr); 1301 num_sect, destaddr);
1302 } 1302 }
1303 node_unlock(node); 1303 tipc_node_unlock(node);
1304 } 1304 }
1305 read_unlock_bh(&net_lock); 1305 read_unlock_bh(&tipc_net_lock);
1306 1306
1307 /* Couldn't find a link to the destination node */ 1307 /* Couldn't find a link to the destination node */
1308 1308
1309 if (buf) 1309 if (buf)
1310 return tipc_reject_msg(buf, TIPC_ERR_NO_NODE); 1310 return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1311 if (res >= 0) 1311 if (res >= 0)
1312 return port_reject_sections(sender, hdr, msg_sect, num_sect, 1312 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
1313 TIPC_ERR_NO_NODE); 1313 TIPC_ERR_NO_NODE);
1314 return res; 1314 return res;
1315} 1315}
1316 1316
@@ -1444,17 +1444,17 @@ error:
1444 * Now we have a buffer chain. Select a link and check 1444 * Now we have a buffer chain. Select a link and check
1445 * that packet size is still OK 1445 * that packet size is still OK
1446 */ 1446 */
1447 node = node_select(destaddr, sender->publ.ref & 1); 1447 node = tipc_node_select(destaddr, sender->publ.ref & 1);
1448 if (likely(node)) { 1448 if (likely(node)) {
1449 node_lock(node); 1449 tipc_node_lock(node);
1450 l_ptr = node->active_links[sender->publ.ref & 1]; 1450 l_ptr = node->active_links[sender->publ.ref & 1];
1451 if (!l_ptr) { 1451 if (!l_ptr) {
1452 node_unlock(node); 1452 tipc_node_unlock(node);
1453 goto reject; 1453 goto reject;
1454 } 1454 }
1455 if (link_max_pkt(l_ptr) < max_pkt) { 1455 if (link_max_pkt(l_ptr) < max_pkt) {
1456 sender->max_pkt = link_max_pkt(l_ptr); 1456 sender->max_pkt = link_max_pkt(l_ptr);
1457 node_unlock(node); 1457 tipc_node_unlock(node);
1458 for (; buf_chain; buf_chain = buf) { 1458 for (; buf_chain; buf_chain = buf) {
1459 buf = buf_chain->next; 1459 buf = buf_chain->next;
1460 buf_discard(buf_chain); 1460 buf_discard(buf_chain);
@@ -1467,8 +1467,8 @@ reject:
1467 buf = buf_chain->next; 1467 buf = buf_chain->next;
1468 buf_discard(buf_chain); 1468 buf_discard(buf_chain);
1469 } 1469 }
1470 return port_reject_sections(sender, hdr, msg_sect, num_sect, 1470 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
1471 TIPC_ERR_NO_NODE); 1471 TIPC_ERR_NO_NODE);
1472 } 1472 }
1473 1473
1474 /* Append whole chain to send queue: */ 1474 /* Append whole chain to send queue: */
@@ -1491,15 +1491,15 @@ reject:
1491 1491
1492 /* Send it, if possible: */ 1492 /* Send it, if possible: */
1493 1493
1494 link_push_queue(l_ptr); 1494 tipc_link_push_queue(l_ptr);
1495 node_unlock(node); 1495 tipc_node_unlock(node);
1496 return dsz; 1496 return dsz;
1497} 1497}
1498 1498
1499/* 1499/*
1500 * link_push_packet: Push one unsent packet to the media 1500 * tipc_link_push_packet: Push one unsent packet to the media
1501 */ 1501 */
1502u32 link_push_packet(struct link *l_ptr) 1502u32 tipc_link_push_packet(struct link *l_ptr)
1503{ 1503{
1504 struct sk_buff *buf = l_ptr->first_out; 1504 struct sk_buff *buf = l_ptr->first_out;
1505 u32 r_q_size = l_ptr->retransm_queue_size; 1505 u32 r_q_size = l_ptr->retransm_queue_size;
@@ -1526,7 +1526,7 @@ u32 link_push_packet(struct link *l_ptr)
1526 if (r_q_size && buf && !skb_cloned(buf)) { 1526 if (r_q_size && buf && !skb_cloned(buf)) {
1527 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); 1527 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1528 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 1528 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1529 if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 1529 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1530 msg_dbg(buf_msg(buf), ">DEF-RETR>"); 1530 msg_dbg(buf_msg(buf), ">DEF-RETR>");
1531 l_ptr->retransm_queue_head = mod(++r_q_head); 1531 l_ptr->retransm_queue_head = mod(++r_q_head);
1532 l_ptr->retransm_queue_size = --r_q_size; 1532 l_ptr->retransm_queue_size = --r_q_size;
@@ -1545,7 +1545,7 @@ u32 link_push_packet(struct link *l_ptr)
1545 if (buf) { 1545 if (buf) {
1546 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); 1546 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1547 msg_set_bcast_ack(buf_msg(buf),l_ptr->owner->bclink.last_in); 1547 msg_set_bcast_ack(buf_msg(buf),l_ptr->owner->bclink.last_in);
1548 if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 1548 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1549 msg_dbg(buf_msg(buf), ">DEF-PROT>"); 1549 msg_dbg(buf_msg(buf), ">DEF-PROT>");
1550 l_ptr->unacked_window = 0; 1550 l_ptr->unacked_window = 0;
1551 buf_discard(buf); 1551 buf_discard(buf);
@@ -1569,7 +1569,7 @@ u32 link_push_packet(struct link *l_ptr)
1569 if (mod(next - first) < l_ptr->queue_limit[0]) { 1569 if (mod(next - first) < l_ptr->queue_limit[0]) {
1570 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 1570 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1571 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1571 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1572 if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 1572 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1573 if (msg_user(msg) == MSG_BUNDLER) 1573 if (msg_user(msg) == MSG_BUNDLER)
1574 msg_set_type(msg, CLOSED_MSG); 1574 msg_set_type(msg, CLOSED_MSG);
1575 msg_dbg(msg, ">PUSH-DATA>"); 1575 msg_dbg(msg, ">PUSH-DATA>");
@@ -1589,29 +1589,29 @@ u32 link_push_packet(struct link *l_ptr)
1589 * push_queue(): push out the unsent messages of a link where 1589 * push_queue(): push out the unsent messages of a link where
1590 * congestion has abated. Node is locked 1590 * congestion has abated. Node is locked
1591 */ 1591 */
1592void link_push_queue(struct link *l_ptr) 1592void tipc_link_push_queue(struct link *l_ptr)
1593{ 1593{
1594 u32 res; 1594 u32 res;
1595 1595
1596 if (bearer_congested(l_ptr->b_ptr, l_ptr)) 1596 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr))
1597 return; 1597 return;
1598 1598
1599 do { 1599 do {
1600 res = link_push_packet(l_ptr); 1600 res = tipc_link_push_packet(l_ptr);
1601 } 1601 }
1602 while (res == TIPC_OK); 1602 while (res == TIPC_OK);
1603 if (res == PUSH_FAILED) 1603 if (res == PUSH_FAILED)
1604 bearer_schedule(l_ptr->b_ptr, l_ptr); 1604 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1605} 1605}
1606 1606
1607void link_retransmit(struct link *l_ptr, struct sk_buff *buf, 1607void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1608 u32 retransmits) 1608 u32 retransmits)
1609{ 1609{
1610 struct tipc_msg *msg; 1610 struct tipc_msg *msg;
1611 1611
1612 dbg("Retransmitting %u in link %x\n", retransmits, l_ptr); 1612 dbg("Retransmitting %u in link %x\n", retransmits, l_ptr);
1613 1613
1614 if (bearer_congested(l_ptr->b_ptr, l_ptr) && buf && !skb_cloned(buf)) { 1614 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr) && buf && !skb_cloned(buf)) {
1615 msg_dbg(buf_msg(buf), ">NO_RETR->BCONG>"); 1615 msg_dbg(buf_msg(buf), ">NO_RETR->BCONG>");
1616 dbg_print_link(l_ptr, " "); 1616 dbg_print_link(l_ptr, " ");
1617 l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf)); 1617 l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
@@ -1622,15 +1622,15 @@ void link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1622 msg = buf_msg(buf); 1622 msg = buf_msg(buf);
1623 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 1623 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1624 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1624 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1625 if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 1625 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1626 /* Catch if retransmissions fail repeatedly: */ 1626 /* Catch if retransmissions fail repeatedly: */
1627 if (l_ptr->last_retransmitted == msg_seqno(msg)) { 1627 if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1628 if (++l_ptr->stale_count > 100) { 1628 if (++l_ptr->stale_count > 100) {
1629 msg_print(CONS, buf_msg(buf), ">RETR>"); 1629 tipc_msg_print(TIPC_CONS, buf_msg(buf), ">RETR>");
1630 info("...Retransmitted %u times\n", 1630 info("...Retransmitted %u times\n",
1631 l_ptr->stale_count); 1631 l_ptr->stale_count);
1632 link_print(l_ptr, CONS, "Resetting Link\n");; 1632 link_print(l_ptr, TIPC_CONS, "Resetting Link\n");;
1633 link_reset(l_ptr); 1633 tipc_link_reset(l_ptr);
1634 break; 1634 break;
1635 } 1635 }
1636 } else { 1636 } else {
@@ -1643,7 +1643,7 @@ void link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1643 retransmits--; 1643 retransmits--;
1644 l_ptr->stats.retransmitted++; 1644 l_ptr->stats.retransmitted++;
1645 } else { 1645 } else {
1646 bearer_schedule(l_ptr->b_ptr, l_ptr); 1646 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1647 l_ptr->stats.bearer_congs++; 1647 l_ptr->stats.bearer_congs++;
1648 l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf)); 1648 l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
1649 l_ptr->retransm_queue_size = retransmits; 1649 l_ptr->retransm_queue_size = retransmits;
@@ -1663,9 +1663,9 @@ static void link_recv_non_seq(struct sk_buff *buf)
1663 struct tipc_msg *msg = buf_msg(buf); 1663 struct tipc_msg *msg = buf_msg(buf);
1664 1664
1665 if (msg_user(msg) == LINK_CONFIG) 1665 if (msg_user(msg) == LINK_CONFIG)
1666 disc_recv_msg(buf); 1666 tipc_disc_recv_msg(buf);
1667 else 1667 else
1668 bclink_recv_pkt(buf); 1668 tipc_bclink_recv_pkt(buf);
1669} 1669}
1670 1670
1671/** 1671/**
@@ -1692,7 +1692,7 @@ static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr,
1692 1692
1693void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr) 1693void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1694{ 1694{
1695 read_lock_bh(&net_lock); 1695 read_lock_bh(&tipc_net_lock);
1696 while (head) { 1696 while (head) {
1697 struct bearer *b_ptr; 1697 struct bearer *b_ptr;
1698 struct node *n_ptr; 1698 struct node *n_ptr;
@@ -1720,22 +1720,22 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1720 link_recv_non_seq(buf); 1720 link_recv_non_seq(buf);
1721 continue; 1721 continue;
1722 } 1722 }
1723 n_ptr = node_find(msg_prevnode(msg)); 1723 n_ptr = tipc_node_find(msg_prevnode(msg));
1724 if (unlikely(!n_ptr)) 1724 if (unlikely(!n_ptr))
1725 goto cont; 1725 goto cont;
1726 1726
1727 node_lock(n_ptr); 1727 tipc_node_lock(n_ptr);
1728 l_ptr = n_ptr->links[b_ptr->identity]; 1728 l_ptr = n_ptr->links[b_ptr->identity];
1729 if (unlikely(!l_ptr)) { 1729 if (unlikely(!l_ptr)) {
1730 node_unlock(n_ptr); 1730 tipc_node_unlock(n_ptr);
1731 goto cont; 1731 goto cont;
1732 } 1732 }
1733 /* 1733 /*
1734 * Release acked messages 1734 * Release acked messages
1735 */ 1735 */
1736 if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) { 1736 if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) {
1737 if (node_is_up(n_ptr) && n_ptr->bclink.supported) 1737 if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported)
1738 bclink_acknowledge(n_ptr, msg_bcast_ack(msg)); 1738 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
1739 } 1739 }
1740 1740
1741 crs = l_ptr->first_out; 1741 crs = l_ptr->first_out;
@@ -1752,12 +1752,12 @@ void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1752 l_ptr->out_queue_size -= released; 1752 l_ptr->out_queue_size -= released;
1753 } 1753 }
1754 if (unlikely(l_ptr->next_out)) 1754 if (unlikely(l_ptr->next_out))
1755 link_push_queue(l_ptr); 1755 tipc_link_push_queue(l_ptr);
1756 if (unlikely(!list_empty(&l_ptr->waiting_ports))) 1756 if (unlikely(!list_empty(&l_ptr->waiting_ports)))
1757 link_wakeup_ports(l_ptr, 0); 1757 tipc_link_wakeup_ports(l_ptr, 0);
1758 if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) { 1758 if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
1759 l_ptr->stats.sent_acks++; 1759 l_ptr->stats.sent_acks++;
1760 link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); 1760 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1761 } 1761 }
1762 1762
1763protocol_check: 1763protocol_check:
@@ -1770,8 +1770,8 @@ protocol_check:
1770 if (likely(msg_is_dest(msg, tipc_own_addr))) { 1770 if (likely(msg_is_dest(msg, tipc_own_addr))) {
1771deliver: 1771deliver:
1772 if (likely(msg_isdata(msg))) { 1772 if (likely(msg_isdata(msg))) {
1773 node_unlock(n_ptr); 1773 tipc_node_unlock(n_ptr);
1774 port_recv_msg(buf); 1774 tipc_port_recv_msg(buf);
1775 continue; 1775 continue;
1776 } 1776 }
1777 switch (msg_user(msg)) { 1777 switch (msg_user(msg)) {
@@ -1779,34 +1779,32 @@ deliver:
1779 l_ptr->stats.recv_bundles++; 1779 l_ptr->stats.recv_bundles++;
1780 l_ptr->stats.recv_bundled += 1780 l_ptr->stats.recv_bundled +=
1781 msg_msgcnt(msg); 1781 msg_msgcnt(msg);
1782 node_unlock(n_ptr); 1782 tipc_node_unlock(n_ptr);
1783 link_recv_bundle(buf); 1783 tipc_link_recv_bundle(buf);
1784 continue; 1784 continue;
1785 case ROUTE_DISTRIBUTOR: 1785 case ROUTE_DISTRIBUTOR:
1786 node_unlock(n_ptr); 1786 tipc_node_unlock(n_ptr);
1787 cluster_recv_routing_table(buf); 1787 tipc_cltr_recv_routing_table(buf);
1788 continue; 1788 continue;
1789 case NAME_DISTRIBUTOR: 1789 case NAME_DISTRIBUTOR:
1790 node_unlock(n_ptr); 1790 tipc_node_unlock(n_ptr);
1791 named_recv(buf); 1791 tipc_named_recv(buf);
1792 continue; 1792 continue;
1793 case CONN_MANAGER: 1793 case CONN_MANAGER:
1794 node_unlock(n_ptr); 1794 tipc_node_unlock(n_ptr);
1795 port_recv_proto_msg(buf); 1795 tipc_port_recv_proto_msg(buf);
1796 continue; 1796 continue;
1797 case MSG_FRAGMENTER: 1797 case MSG_FRAGMENTER:
1798 l_ptr->stats.recv_fragments++; 1798 l_ptr->stats.recv_fragments++;
1799 if (link_recv_fragment( 1799 if (tipc_link_recv_fragment(&l_ptr->defragm_buf,
1800 &l_ptr->defragm_buf, 1800 &buf, &msg)) {
1801 &buf, &msg)) {
1802 l_ptr->stats.recv_fragmented++; 1801 l_ptr->stats.recv_fragmented++;
1803 goto deliver; 1802 goto deliver;
1804 } 1803 }
1805 break; 1804 break;
1806 case CHANGEOVER_PROTOCOL: 1805 case CHANGEOVER_PROTOCOL:
1807 type = msg_type(msg); 1806 type = msg_type(msg);
1808 if (link_recv_changeover_msg( 1807 if (link_recv_changeover_msg(&l_ptr, &buf)) {
1809 &l_ptr, &buf)) {
1810 msg = buf_msg(buf); 1808 msg = buf_msg(buf);
1811 seq_no = msg_seqno(msg); 1809 seq_no = msg_seqno(msg);
1812 TIPC_SKB_CB(buf)->handle 1810 TIPC_SKB_CB(buf)->handle
@@ -1818,20 +1816,20 @@ deliver:
1818 break; 1816 break;
1819 } 1817 }
1820 } 1818 }
1821 node_unlock(n_ptr); 1819 tipc_node_unlock(n_ptr);
1822 net_route_msg(buf); 1820 tipc_net_route_msg(buf);
1823 continue; 1821 continue;
1824 } 1822 }
1825 link_handle_out_of_seq_msg(l_ptr, buf); 1823 link_handle_out_of_seq_msg(l_ptr, buf);
1826 head = link_insert_deferred_queue(l_ptr, head); 1824 head = link_insert_deferred_queue(l_ptr, head);
1827 node_unlock(n_ptr); 1825 tipc_node_unlock(n_ptr);
1828 continue; 1826 continue;
1829 } 1827 }
1830 1828
1831 if (msg_user(msg) == LINK_PROTOCOL) { 1829 if (msg_user(msg) == LINK_PROTOCOL) {
1832 link_recv_proto_msg(l_ptr, buf); 1830 link_recv_proto_msg(l_ptr, buf);
1833 head = link_insert_deferred_queue(l_ptr, head); 1831 head = link_insert_deferred_queue(l_ptr, head);
1834 node_unlock(n_ptr); 1832 tipc_node_unlock(n_ptr);
1835 continue; 1833 continue;
1836 } 1834 }
1837 msg_dbg(msg,"NSEQ<REC<"); 1835 msg_dbg(msg,"NSEQ<REC<");
@@ -1842,14 +1840,14 @@ deliver:
1842 msg_dbg(msg,"RECV-REINS:"); 1840 msg_dbg(msg,"RECV-REINS:");
1843 buf->next = head; 1841 buf->next = head;
1844 head = buf; 1842 head = buf;
1845 node_unlock(n_ptr); 1843 tipc_node_unlock(n_ptr);
1846 continue; 1844 continue;
1847 } 1845 }
1848 node_unlock(n_ptr); 1846 tipc_node_unlock(n_ptr);
1849cont: 1847cont:
1850 buf_discard(buf); 1848 buf_discard(buf);
1851 } 1849 }
1852 read_unlock_bh(&net_lock); 1850 read_unlock_bh(&tipc_net_lock);
1853} 1851}
1854 1852
1855/* 1853/*
@@ -1858,9 +1856,9 @@ cont:
1858 * Returns the increase of the queue length,i.e. 0 or 1 1856 * Returns the increase of the queue length,i.e. 0 or 1
1859 */ 1857 */
1860 1858
1861u32 link_defer_pkt(struct sk_buff **head, 1859u32 tipc_link_defer_pkt(struct sk_buff **head,
1862 struct sk_buff **tail, 1860 struct sk_buff **tail,
1863 struct sk_buff *buf) 1861 struct sk_buff *buf)
1864{ 1862{
1865 struct sk_buff *prev = 0; 1863 struct sk_buff *prev = 0;
1866 struct sk_buff *crs = *head; 1864 struct sk_buff *crs = *head;
@@ -1939,12 +1937,12 @@ static void link_handle_out_of_seq_msg(struct link *l_ptr,
1939 return; 1937 return;
1940 } 1938 }
1941 1939
1942 if (link_defer_pkt(&l_ptr->oldest_deferred_in, 1940 if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
1943 &l_ptr->newest_deferred_in, buf)) { 1941 &l_ptr->newest_deferred_in, buf)) {
1944 l_ptr->deferred_inqueue_sz++; 1942 l_ptr->deferred_inqueue_sz++;
1945 l_ptr->stats.deferred_recv++; 1943 l_ptr->stats.deferred_recv++;
1946 if ((l_ptr->deferred_inqueue_sz % 16) == 1) 1944 if ((l_ptr->deferred_inqueue_sz % 16) == 1)
1947 link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); 1945 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1948 } else 1946 } else
1949 l_ptr->stats.duplicates++; 1947 l_ptr->stats.duplicates++;
1950} 1948}
@@ -1952,8 +1950,8 @@ static void link_handle_out_of_seq_msg(struct link *l_ptr,
1952/* 1950/*
1953 * Send protocol message to the other endpoint. 1951 * Send protocol message to the other endpoint.
1954 */ 1952 */
1955void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg, 1953void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
1956 u32 gap, u32 tolerance, u32 priority, u32 ack_mtu) 1954 u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
1957{ 1955{
1958 struct sk_buff *buf = 0; 1956 struct sk_buff *buf = 0;
1959 struct tipc_msg *msg = l_ptr->pmsg; 1957 struct tipc_msg *msg = l_ptr->pmsg;
@@ -1964,12 +1962,12 @@ void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
1964 msg_set_type(msg, msg_typ); 1962 msg_set_type(msg, msg_typ);
1965 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane); 1963 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
1966 msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in)); 1964 msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in));
1967 msg_set_last_bcast(msg, bclink_get_last_sent()); 1965 msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
1968 1966
1969 if (msg_typ == STATE_MSG) { 1967 if (msg_typ == STATE_MSG) {
1970 u32 next_sent = mod(l_ptr->next_out_no); 1968 u32 next_sent = mod(l_ptr->next_out_no);
1971 1969
1972 if (!link_is_up(l_ptr)) 1970 if (!tipc_link_is_up(l_ptr))
1973 return; 1971 return;
1974 if (l_ptr->next_out) 1972 if (l_ptr->next_out)
1975 next_sent = msg_seqno(buf_msg(l_ptr->next_out)); 1973 next_sent = msg_seqno(buf_msg(l_ptr->next_out));
@@ -2013,7 +2011,7 @@ void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
2013 msg_set_max_pkt(msg, l_ptr->max_pkt_target); 2011 msg_set_max_pkt(msg, l_ptr->max_pkt_target);
2014 } 2012 }
2015 2013
2016 if (node_has_redundant_links(l_ptr->owner)) { 2014 if (tipc_node_has_redundant_links(l_ptr->owner)) {
2017 msg_set_redundant_link(msg); 2015 msg_set_redundant_link(msg);
2018 } else { 2016 } else {
2019 msg_clear_redundant_link(msg); 2017 msg_clear_redundant_link(msg);
@@ -2026,7 +2024,7 @@ void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
2026 2024
2027 /* Congestion? */ 2025 /* Congestion? */
2028 2026
2029 if (bearer_congested(l_ptr->b_ptr, l_ptr)) { 2027 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
2030 if (!l_ptr->proto_msg_queue) { 2028 if (!l_ptr->proto_msg_queue) {
2031 l_ptr->proto_msg_queue = 2029 l_ptr->proto_msg_queue =
2032 buf_acquire(sizeof(l_ptr->proto_msg)); 2030 buf_acquire(sizeof(l_ptr->proto_msg));
@@ -2050,14 +2048,14 @@ void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
2050 memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg)); 2048 memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg));
2051 msg_set_size(buf_msg(buf), msg_size); 2049 msg_set_size(buf_msg(buf), msg_size);
2052 2050
2053 if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) { 2051 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
2054 l_ptr->unacked_window = 0; 2052 l_ptr->unacked_window = 0;
2055 buf_discard(buf); 2053 buf_discard(buf);
2056 return; 2054 return;
2057 } 2055 }
2058 2056
2059 /* New congestion */ 2057 /* New congestion */
2060 bearer_schedule(l_ptr->b_ptr, l_ptr); 2058 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
2061 l_ptr->proto_msg_queue = buf; 2059 l_ptr->proto_msg_queue = buf;
2062 l_ptr->stats.bearer_congs++; 2060 l_ptr->stats.bearer_congs++;
2063} 2061}
@@ -2131,7 +2129,7 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
2131 l_ptr->peer_bearer_id = msg_bearer_id(msg); 2129 l_ptr->peer_bearer_id = msg_bearer_id(msg);
2132 2130
2133 /* Synchronize broadcast sequence numbers */ 2131 /* Synchronize broadcast sequence numbers */
2134 if (!node_has_redundant_links(l_ptr->owner)) { 2132 if (!tipc_node_has_redundant_links(l_ptr->owner)) {
2135 l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg)); 2133 l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg));
2136 } 2134 }
2137 break; 2135 break;
@@ -2145,7 +2143,7 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
2145 warn("Changing prio <%s>: %u->%u\n", 2143 warn("Changing prio <%s>: %u->%u\n",
2146 l_ptr->name, l_ptr->priority, msg_linkprio(msg)); 2144 l_ptr->name, l_ptr->priority, msg_linkprio(msg));
2147 l_ptr->priority = msg_linkprio(msg); 2145 l_ptr->priority = msg_linkprio(msg);
2148 link_reset(l_ptr); /* Enforce change to take effect */ 2146 tipc_link_reset(l_ptr); /* Enforce change to take effect */
2149 break; 2147 break;
2150 } 2148 }
2151 link_state_event(l_ptr, TRAFFIC_MSG_EVT); 2149 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
@@ -2176,17 +2174,17 @@ static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
2176 2174
2177 /* Protocol message before retransmits, reduce loss risk */ 2175 /* Protocol message before retransmits, reduce loss risk */
2178 2176
2179 bclink_check_gap(l_ptr->owner, msg_last_bcast(msg)); 2177 tipc_bclink_check_gap(l_ptr->owner, msg_last_bcast(msg));
2180 2178
2181 if (rec_gap || (msg_probe(msg))) { 2179 if (rec_gap || (msg_probe(msg))) {
2182 link_send_proto_msg(l_ptr, STATE_MSG, 2180 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2183 0, rec_gap, 0, 0, max_pkt_ack); 2181 0, rec_gap, 0, 0, max_pkt_ack);
2184 } 2182 }
2185 if (msg_seq_gap(msg)) { 2183 if (msg_seq_gap(msg)) {
2186 msg_dbg(msg, "With Gap:"); 2184 msg_dbg(msg, "With Gap:");
2187 l_ptr->stats.recv_nacks++; 2185 l_ptr->stats.recv_nacks++;
2188 link_retransmit(l_ptr, l_ptr->first_out, 2186 tipc_link_retransmit(l_ptr, l_ptr->first_out,
2189 msg_seq_gap(msg)); 2187 msg_seq_gap(msg));
2190 } 2188 }
2191 break; 2189 break;
2192 default: 2190 default:
@@ -2198,20 +2196,20 @@ exit:
2198 2196
2199 2197
2200/* 2198/*
2201 * link_tunnel(): Send one message via a link belonging to 2199 * tipc_link_tunnel(): Send one message via a link belonging to
2202 * another bearer. Owner node is locked. 2200 * another bearer. Owner node is locked.
2203 */ 2201 */
2204void link_tunnel(struct link *l_ptr, 2202void tipc_link_tunnel(struct link *l_ptr,
2205 struct tipc_msg *tunnel_hdr, 2203 struct tipc_msg *tunnel_hdr,
2206 struct tipc_msg *msg, 2204 struct tipc_msg *msg,
2207 u32 selector) 2205 u32 selector)
2208{ 2206{
2209 struct link *tunnel; 2207 struct link *tunnel;
2210 struct sk_buff *buf; 2208 struct sk_buff *buf;
2211 u32 length = msg_size(msg); 2209 u32 length = msg_size(msg);
2212 2210
2213 tunnel = l_ptr->owner->active_links[selector & 1]; 2211 tunnel = l_ptr->owner->active_links[selector & 1];
2214 if (!link_is_up(tunnel)) 2212 if (!tipc_link_is_up(tunnel))
2215 return; 2213 return;
2216 msg_set_size(tunnel_hdr, length + INT_H_SIZE); 2214 msg_set_size(tunnel_hdr, length + INT_H_SIZE);
2217 buf = buf_acquire(length + INT_H_SIZE); 2215 buf = buf_acquire(length + INT_H_SIZE);
@@ -2222,7 +2220,7 @@ void link_tunnel(struct link *l_ptr,
2222 dbg("%c->%c:", l_ptr->b_ptr->net_plane, tunnel->b_ptr->net_plane); 2220 dbg("%c->%c:", l_ptr->b_ptr->net_plane, tunnel->b_ptr->net_plane);
2223 msg_dbg(buf_msg(buf), ">SEND>"); 2221 msg_dbg(buf_msg(buf), ">SEND>");
2224 assert(tunnel); 2222 assert(tunnel);
2225 link_send_buf(tunnel, buf); 2223 tipc_link_send_buf(tunnel, buf);
2226} 2224}
2227 2225
2228 2226
@@ -2232,12 +2230,12 @@ void link_tunnel(struct link *l_ptr,
2232 * Owner node is locked. 2230 * Owner node is locked.
2233 */ 2231 */
2234 2232
2235void link_changeover(struct link *l_ptr) 2233void tipc_link_changeover(struct link *l_ptr)
2236{ 2234{
2237 u32 msgcount = l_ptr->out_queue_size; 2235 u32 msgcount = l_ptr->out_queue_size;
2238 struct sk_buff *crs = l_ptr->first_out; 2236 struct sk_buff *crs = l_ptr->first_out;
2239 struct link *tunnel = l_ptr->owner->active_links[0]; 2237 struct link *tunnel = l_ptr->owner->active_links[0];
2240 int split_bundles = node_has_redundant_links(l_ptr->owner); 2238 int split_bundles = tipc_node_has_redundant_links(l_ptr->owner);
2241 struct tipc_msg tunnel_hdr; 2239 struct tipc_msg tunnel_hdr;
2242 2240
2243 if (!tunnel) 2241 if (!tunnel)
@@ -2261,7 +2259,7 @@ void link_changeover(struct link *l_ptr)
2261 dbg("%c->%c:", l_ptr->b_ptr->net_plane, 2259 dbg("%c->%c:", l_ptr->b_ptr->net_plane,
2262 tunnel->b_ptr->net_plane); 2260 tunnel->b_ptr->net_plane);
2263 msg_dbg(&tunnel_hdr, "EMPTY>SEND>"); 2261 msg_dbg(&tunnel_hdr, "EMPTY>SEND>");
2264 link_send_buf(tunnel, buf); 2262 tipc_link_send_buf(tunnel, buf);
2265 } else { 2263 } else {
2266 warn("Memory squeeze; link changeover failed\n"); 2264 warn("Memory squeeze; link changeover failed\n");
2267 } 2265 }
@@ -2277,20 +2275,20 @@ void link_changeover(struct link *l_ptr)
2277 2275
2278 while (msgcount--) { 2276 while (msgcount--) {
2279 msg_set_seqno(m,msg_seqno(msg)); 2277 msg_set_seqno(m,msg_seqno(msg));
2280 link_tunnel(l_ptr, &tunnel_hdr, m, 2278 tipc_link_tunnel(l_ptr, &tunnel_hdr, m,
2281 msg_link_selector(m)); 2279 msg_link_selector(m));
2282 pos += align(msg_size(m)); 2280 pos += align(msg_size(m));
2283 m = (struct tipc_msg *)pos; 2281 m = (struct tipc_msg *)pos;
2284 } 2282 }
2285 } else { 2283 } else {
2286 link_tunnel(l_ptr, &tunnel_hdr, msg, 2284 tipc_link_tunnel(l_ptr, &tunnel_hdr, msg,
2287 msg_link_selector(msg)); 2285 msg_link_selector(msg));
2288 } 2286 }
2289 crs = crs->next; 2287 crs = crs->next;
2290 } 2288 }
2291} 2289}
2292 2290
2293void link_send_duplicate(struct link *l_ptr, struct link *tunnel) 2291void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
2294{ 2292{
2295 struct sk_buff *iter; 2293 struct sk_buff *iter;
2296 struct tipc_msg tunnel_hdr; 2294 struct tipc_msg tunnel_hdr;
@@ -2320,8 +2318,8 @@ void link_send_duplicate(struct link *l_ptr, struct link *tunnel)
2320 dbg("%c->%c:", l_ptr->b_ptr->net_plane, 2318 dbg("%c->%c:", l_ptr->b_ptr->net_plane,
2321 tunnel->b_ptr->net_plane); 2319 tunnel->b_ptr->net_plane);
2322 msg_dbg(buf_msg(outbuf), ">SEND>"); 2320 msg_dbg(buf_msg(outbuf), ">SEND>");
2323 link_send_buf(tunnel, outbuf); 2321 tipc_link_send_buf(tunnel, outbuf);
2324 if (!link_is_up(l_ptr)) 2322 if (!tipc_link_is_up(l_ptr))
2325 return; 2323 return;
2326 iter = iter->next; 2324 iter = iter->next;
2327 } 2325 }
@@ -2393,9 +2391,9 @@ static int link_recv_changeover_msg(struct link **l_ptr,
2393 2391
2394 /* First original message ?: */ 2392 /* First original message ?: */
2395 2393
2396 if (link_is_up(dest_link)) { 2394 if (tipc_link_is_up(dest_link)) {
2397 msg_dbg(tunnel_msg, "UP/FIRST/<REC<"); 2395 msg_dbg(tunnel_msg, "UP/FIRST/<REC<");
2398 link_reset(dest_link); 2396 tipc_link_reset(dest_link);
2399 dest_link->exp_msg_count = msg_count; 2397 dest_link->exp_msg_count = msg_count;
2400 if (!msg_count) 2398 if (!msg_count)
2401 goto exit; 2399 goto exit;
@@ -2436,7 +2434,7 @@ exit:
2436/* 2434/*
2437 * Bundler functionality: 2435 * Bundler functionality:
2438 */ 2436 */
2439void link_recv_bundle(struct sk_buff *buf) 2437void tipc_link_recv_bundle(struct sk_buff *buf)
2440{ 2438{
2441 u32 msgcount = msg_msgcnt(buf_msg(buf)); 2439 u32 msgcount = msg_msgcnt(buf_msg(buf));
2442 u32 pos = INT_H_SIZE; 2440 u32 pos = INT_H_SIZE;
@@ -2456,7 +2454,7 @@ void link_recv_bundle(struct sk_buff *buf)
2456 }; 2454 };
2457 pos += align(msg_size(buf_msg(obuf))); 2455 pos += align(msg_size(buf_msg(obuf)));
2458 msg_dbg(buf_msg(obuf), " /"); 2456 msg_dbg(buf_msg(obuf), " /");
2459 net_route_msg(obuf); 2457 tipc_net_route_msg(obuf);
2460 } 2458 }
2461 buf_discard(buf); 2459 buf_discard(buf);
2462} 2460}
@@ -2467,11 +2465,11 @@ void link_recv_bundle(struct sk_buff *buf)
2467 2465
2468 2466
2469/* 2467/*
2470 * link_send_long_buf: Entry for buffers needing fragmentation. 2468 * tipc_link_send_long_buf: Entry for buffers needing fragmentation.
2471 * The buffer is complete, inclusive total message length. 2469 * The buffer is complete, inclusive total message length.
2472 * Returns user data length. 2470 * Returns user data length.
2473 */ 2471 */
2474int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf) 2472int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
2475{ 2473{
2476 struct tipc_msg *inmsg = buf_msg(buf); 2474 struct tipc_msg *inmsg = buf_msg(buf);
2477 struct tipc_msg fragm_hdr; 2475 struct tipc_msg fragm_hdr;
@@ -2521,8 +2519,8 @@ int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
2521 /* Send queued messages first, if any: */ 2519 /* Send queued messages first, if any: */
2522 2520
2523 l_ptr->stats.sent_fragments++; 2521 l_ptr->stats.sent_fragments++;
2524 link_send_buf(l_ptr, fragm); 2522 tipc_link_send_buf(l_ptr, fragm);
2525 if (!link_is_up(l_ptr)) 2523 if (!tipc_link_is_up(l_ptr))
2526 return dsz; 2524 return dsz;
2527 msg_set_fragm_no(&fragm_hdr, ++fragm_no); 2525 msg_set_fragm_no(&fragm_hdr, ++fragm_no);
2528 rest -= fragm_sz; 2526 rest -= fragm_sz;
@@ -2582,11 +2580,11 @@ static inline void incr_timer_cnt(struct sk_buff *buf)
2582} 2580}
2583 2581
2584/* 2582/*
2585 * link_recv_fragment(): Called with node lock on. Returns 2583 * tipc_link_recv_fragment(): Called with node lock on. Returns
2586 * the reassembled buffer if message is complete. 2584 * the reassembled buffer if message is complete.
2587 */ 2585 */
2588int link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb, 2586int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2589 struct tipc_msg **m) 2587 struct tipc_msg **m)
2590{ 2588{
2591 struct sk_buff *prev = 0; 2589 struct sk_buff *prev = 0;
2592 struct sk_buff *fbuf = *fb; 2590 struct sk_buff *fbuf = *fb;
@@ -2714,7 +2712,7 @@ static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
2714} 2712}
2715 2713
2716 2714
2717void link_set_queue_limits(struct link *l_ptr, u32 window) 2715void tipc_link_set_queue_limits(struct link *l_ptr, u32 window)
2718{ 2716{
2719 /* Data messages from this node, inclusive FIRST_FRAGM */ 2717 /* Data messages from this node, inclusive FIRST_FRAGM */
2720 l_ptr->queue_limit[DATA_LOW] = window; 2718 l_ptr->queue_limit[DATA_LOW] = window;
@@ -2739,7 +2737,7 @@ void link_set_queue_limits(struct link *l_ptr, u32 window)
2739 * @name - ptr to link name string 2737 * @name - ptr to link name string
2740 * @node - ptr to area to be filled with ptr to associated node 2738 * @node - ptr to area to be filled with ptr to associated node
2741 * 2739 *
2742 * Caller must hold 'net_lock' to ensure node and bearer are not deleted; 2740 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
2743 * this also prevents link deletion. 2741 * this also prevents link deletion.
2744 * 2742 *
2745 * Returns pointer to link (or 0 if invalid link name). 2743 * Returns pointer to link (or 0 if invalid link name).
@@ -2754,11 +2752,11 @@ static struct link *link_find_link(const char *name, struct node **node)
2754 if (!link_name_validate(name, &link_name_parts)) 2752 if (!link_name_validate(name, &link_name_parts))
2755 return 0; 2753 return 0;
2756 2754
2757 b_ptr = bearer_find_interface(link_name_parts.if_local); 2755 b_ptr = tipc_bearer_find_interface(link_name_parts.if_local);
2758 if (!b_ptr) 2756 if (!b_ptr)
2759 return 0; 2757 return 0;
2760 2758
2761 *node = node_find(link_name_parts.addr_peer); 2759 *node = tipc_node_find(link_name_parts.addr_peer);
2762 if (!*node) 2760 if (!*node)
2763 return 0; 2761 return 0;
2764 2762
@@ -2769,8 +2767,8 @@ static struct link *link_find_link(const char *name, struct node **node)
2769 return l_ptr; 2767 return l_ptr;
2770} 2768}
2771 2769
2772struct sk_buff *link_cmd_config(const void *req_tlv_area, int req_tlv_space, 2770struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
2773 u16 cmd) 2771 u16 cmd)
2774{ 2772{
2775 struct tipc_link_config *args; 2773 struct tipc_link_config *args;
2776 u32 new_value; 2774 u32 new_value;
@@ -2779,35 +2777,35 @@ struct sk_buff *link_cmd_config(const void *req_tlv_area, int req_tlv_space,
2779 int res; 2777 int res;
2780 2778
2781 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG)) 2779 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
2782 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2780 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2783 2781
2784 args = (struct tipc_link_config *)TLV_DATA(req_tlv_area); 2782 args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
2785 new_value = ntohl(args->value); 2783 new_value = ntohl(args->value);
2786 2784
2787 if (!strcmp(args->name, bc_link_name)) { 2785 if (!strcmp(args->name, tipc_bclink_name)) {
2788 if ((cmd == TIPC_CMD_SET_LINK_WINDOW) && 2786 if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
2789 (bclink_set_queue_limits(new_value) == 0)) 2787 (tipc_bclink_set_queue_limits(new_value) == 0))
2790 return cfg_reply_none(); 2788 return tipc_cfg_reply_none();
2791 return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 2789 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
2792 " (cannot change setting on broadcast link)"); 2790 " (cannot change setting on broadcast link)");
2793 } 2791 }
2794 2792
2795 read_lock_bh(&net_lock); 2793 read_lock_bh(&tipc_net_lock);
2796 l_ptr = link_find_link(args->name, &node); 2794 l_ptr = link_find_link(args->name, &node);
2797 if (!l_ptr) { 2795 if (!l_ptr) {
2798 read_unlock_bh(&net_lock); 2796 read_unlock_bh(&tipc_net_lock);
2799 return cfg_reply_error_string("link not found"); 2797 return tipc_cfg_reply_error_string("link not found");
2800 } 2798 }
2801 2799
2802 node_lock(node); 2800 tipc_node_lock(node);
2803 res = -EINVAL; 2801 res = -EINVAL;
2804 switch (cmd) { 2802 switch (cmd) {
2805 case TIPC_CMD_SET_LINK_TOL: 2803 case TIPC_CMD_SET_LINK_TOL:
2806 if ((new_value >= TIPC_MIN_LINK_TOL) && 2804 if ((new_value >= TIPC_MIN_LINK_TOL) &&
2807 (new_value <= TIPC_MAX_LINK_TOL)) { 2805 (new_value <= TIPC_MAX_LINK_TOL)) {
2808 link_set_supervision_props(l_ptr, new_value); 2806 link_set_supervision_props(l_ptr, new_value);
2809 link_send_proto_msg(l_ptr, STATE_MSG, 2807 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2810 0, 0, new_value, 0, 0); 2808 0, 0, new_value, 0, 0);
2811 res = TIPC_OK; 2809 res = TIPC_OK;
2812 } 2810 }
2813 break; 2811 break;
@@ -2815,26 +2813,26 @@ struct sk_buff *link_cmd_config(const void *req_tlv_area, int req_tlv_space,
2815 if ((new_value >= TIPC_MIN_LINK_PRI) && 2813 if ((new_value >= TIPC_MIN_LINK_PRI) &&
2816 (new_value <= TIPC_MAX_LINK_PRI)) { 2814 (new_value <= TIPC_MAX_LINK_PRI)) {
2817 l_ptr->priority = new_value; 2815 l_ptr->priority = new_value;
2818 link_send_proto_msg(l_ptr, STATE_MSG, 2816 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2819 0, 0, 0, new_value, 0); 2817 0, 0, 0, new_value, 0);
2820 res = TIPC_OK; 2818 res = TIPC_OK;
2821 } 2819 }
2822 break; 2820 break;
2823 case TIPC_CMD_SET_LINK_WINDOW: 2821 case TIPC_CMD_SET_LINK_WINDOW:
2824 if ((new_value >= TIPC_MIN_LINK_WIN) && 2822 if ((new_value >= TIPC_MIN_LINK_WIN) &&
2825 (new_value <= TIPC_MAX_LINK_WIN)) { 2823 (new_value <= TIPC_MAX_LINK_WIN)) {
2826 link_set_queue_limits(l_ptr, new_value); 2824 tipc_link_set_queue_limits(l_ptr, new_value);
2827 res = TIPC_OK; 2825 res = TIPC_OK;
2828 } 2826 }
2829 break; 2827 break;
2830 } 2828 }
2831 node_unlock(node); 2829 tipc_node_unlock(node);
2832 2830
2833 read_unlock_bh(&net_lock); 2831 read_unlock_bh(&tipc_net_lock);
2834 if (res) 2832 if (res)
2835 return cfg_reply_error_string("cannot change link setting"); 2833 return tipc_cfg_reply_error_string("cannot change link setting");
2836 2834
2837 return cfg_reply_none(); 2835 return tipc_cfg_reply_none();
2838} 2836}
2839 2837
2840/** 2838/**
@@ -2849,34 +2847,34 @@ static void link_reset_statistics(struct link *l_ptr)
2849 l_ptr->stats.recv_info = l_ptr->next_in_no; 2847 l_ptr->stats.recv_info = l_ptr->next_in_no;
2850} 2848}
2851 2849
2852struct sk_buff *link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space) 2850struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
2853{ 2851{
2854 char *link_name; 2852 char *link_name;
2855 struct link *l_ptr; 2853 struct link *l_ptr;
2856 struct node *node; 2854 struct node *node;
2857 2855
2858 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME)) 2856 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2859 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2857 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2860 2858
2861 link_name = (char *)TLV_DATA(req_tlv_area); 2859 link_name = (char *)TLV_DATA(req_tlv_area);
2862 if (!strcmp(link_name, bc_link_name)) { 2860 if (!strcmp(link_name, tipc_bclink_name)) {
2863 if (bclink_reset_stats()) 2861 if (tipc_bclink_reset_stats())
2864 return cfg_reply_error_string("link not found"); 2862 return tipc_cfg_reply_error_string("link not found");
2865 return cfg_reply_none(); 2863 return tipc_cfg_reply_none();
2866 } 2864 }
2867 2865
2868 read_lock_bh(&net_lock); 2866 read_lock_bh(&tipc_net_lock);
2869 l_ptr = link_find_link(link_name, &node); 2867 l_ptr = link_find_link(link_name, &node);
2870 if (!l_ptr) { 2868 if (!l_ptr) {
2871 read_unlock_bh(&net_lock); 2869 read_unlock_bh(&tipc_net_lock);
2872 return cfg_reply_error_string("link not found"); 2870 return tipc_cfg_reply_error_string("link not found");
2873 } 2871 }
2874 2872
2875 node_lock(node); 2873 tipc_node_lock(node);
2876 link_reset_statistics(l_ptr); 2874 link_reset_statistics(l_ptr);
2877 node_unlock(node); 2875 tipc_node_unlock(node);
2878 read_unlock_bh(&net_lock); 2876 read_unlock_bh(&tipc_net_lock);
2879 return cfg_reply_none(); 2877 return tipc_cfg_reply_none();
2880} 2878}
2881 2879
2882/** 2880/**
@@ -2889,7 +2887,7 @@ static u32 percent(u32 count, u32 total)
2889} 2887}
2890 2888
2891/** 2889/**
2892 * link_stats - print link statistics 2890 * tipc_link_stats - print link statistics
2893 * @name: link name 2891 * @name: link name
2894 * @buf: print buffer area 2892 * @buf: print buffer area
2895 * @buf_size: size of print buffer area 2893 * @buf_size: size of print buffer area
@@ -2897,7 +2895,7 @@ static u32 percent(u32 count, u32 total)
2897 * Returns length of print buffer data string (or 0 if error) 2895 * Returns length of print buffer data string (or 0 if error)
2898 */ 2896 */
2899 2897
2900static int link_stats(const char *name, char *buf, const u32 buf_size) 2898static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
2901{ 2899{
2902 struct print_buf pb; 2900 struct print_buf pb;
2903 struct link *l_ptr; 2901 struct link *l_ptr;
@@ -2905,22 +2903,22 @@ static int link_stats(const char *name, char *buf, const u32 buf_size)
2905 char *status; 2903 char *status;
2906 u32 profile_total = 0; 2904 u32 profile_total = 0;
2907 2905
2908 if (!strcmp(name, bc_link_name)) 2906 if (!strcmp(name, tipc_bclink_name))
2909 return bclink_stats(buf, buf_size); 2907 return tipc_bclink_stats(buf, buf_size);
2910 2908
2911 printbuf_init(&pb, buf, buf_size); 2909 tipc_printbuf_init(&pb, buf, buf_size);
2912 2910
2913 read_lock_bh(&net_lock); 2911 read_lock_bh(&tipc_net_lock);
2914 l_ptr = link_find_link(name, &node); 2912 l_ptr = link_find_link(name, &node);
2915 if (!l_ptr) { 2913 if (!l_ptr) {
2916 read_unlock_bh(&net_lock); 2914 read_unlock_bh(&tipc_net_lock);
2917 return 0; 2915 return 0;
2918 } 2916 }
2919 node_lock(node); 2917 tipc_node_lock(node);
2920 2918
2921 if (link_is_active(l_ptr)) 2919 if (tipc_link_is_active(l_ptr))
2922 status = "ACTIVE"; 2920 status = "ACTIVE";
2923 else if (link_is_up(l_ptr)) 2921 else if (tipc_link_is_up(l_ptr))
2924 status = "STANDBY"; 2922 status = "STANDBY";
2925 else 2923 else
2926 status = "DEFUNCT"; 2924 status = "DEFUNCT";
@@ -2976,33 +2974,33 @@ static int link_stats(const char *name, char *buf, const u32 buf_size)
2976 ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts) 2974 ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts)
2977 : 0); 2975 : 0);
2978 2976
2979 node_unlock(node); 2977 tipc_node_unlock(node);
2980 read_unlock_bh(&net_lock); 2978 read_unlock_bh(&tipc_net_lock);
2981 return printbuf_validate(&pb); 2979 return tipc_printbuf_validate(&pb);
2982} 2980}
2983 2981
2984#define MAX_LINK_STATS_INFO 2000 2982#define MAX_LINK_STATS_INFO 2000
2985 2983
2986struct sk_buff *link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space) 2984struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
2987{ 2985{
2988 struct sk_buff *buf; 2986 struct sk_buff *buf;
2989 struct tlv_desc *rep_tlv; 2987 struct tlv_desc *rep_tlv;
2990 int str_len; 2988 int str_len;
2991 2989
2992 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME)) 2990 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2993 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 2991 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2994 2992
2995 buf = cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO)); 2993 buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO));
2996 if (!buf) 2994 if (!buf)
2997 return NULL; 2995 return NULL;
2998 2996
2999 rep_tlv = (struct tlv_desc *)buf->data; 2997 rep_tlv = (struct tlv_desc *)buf->data;
3000 2998
3001 str_len = link_stats((char *)TLV_DATA(req_tlv_area), 2999 str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
3002 (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO); 3000 (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO);
3003 if (!str_len) { 3001 if (!str_len) {
3004 buf_discard(buf); 3002 buf_discard(buf);
3005 return cfg_reply_error_string("link not found"); 3003 return tipc_cfg_reply_error_string("link not found");
3006 } 3004 }
3007 3005
3008 skb_put(buf, TLV_SPACE(str_len)); 3006 skb_put(buf, TLV_SPACE(str_len));
@@ -3021,20 +3019,20 @@ int link_control(const char *name, u32 op, u32 val)
3021 u32 a; 3019 u32 a;
3022 3020
3023 a = link_name2addr(name, &bearer_id); 3021 a = link_name2addr(name, &bearer_id);
3024 read_lock_bh(&net_lock); 3022 read_lock_bh(&tipc_net_lock);
3025 node = node_find(a); 3023 node = tipc_node_find(a);
3026 if (node) { 3024 if (node) {
3027 node_lock(node); 3025 tipc_node_lock(node);
3028 l_ptr = node->links[bearer_id]; 3026 l_ptr = node->links[bearer_id];
3029 if (l_ptr) { 3027 if (l_ptr) {
3030 if (op == TIPC_REMOVE_LINK) { 3028 if (op == TIPC_REMOVE_LINK) {
3031 struct bearer *b_ptr = l_ptr->b_ptr; 3029 struct bearer *b_ptr = l_ptr->b_ptr;
3032 spin_lock_bh(&b_ptr->publ.lock); 3030 spin_lock_bh(&b_ptr->publ.lock);
3033 link_delete(l_ptr); 3031 tipc_link_delete(l_ptr);
3034 spin_unlock_bh(&b_ptr->publ.lock); 3032 spin_unlock_bh(&b_ptr->publ.lock);
3035 } 3033 }
3036 if (op == TIPC_CMD_BLOCK_LINK) { 3034 if (op == TIPC_CMD_BLOCK_LINK) {
3037 link_reset(l_ptr); 3035 tipc_link_reset(l_ptr);
3038 l_ptr->blocked = 1; 3036 l_ptr->blocked = 1;
3039 } 3037 }
3040 if (op == TIPC_CMD_UNBLOCK_LINK) { 3038 if (op == TIPC_CMD_UNBLOCK_LINK) {
@@ -3042,22 +3040,22 @@ int link_control(const char *name, u32 op, u32 val)
3042 } 3040 }
3043 res = TIPC_OK; 3041 res = TIPC_OK;
3044 } 3042 }
3045 node_unlock(node); 3043 tipc_node_unlock(node);
3046 } 3044 }
3047 read_unlock_bh(&net_lock); 3045 read_unlock_bh(&tipc_net_lock);
3048 return res; 3046 return res;
3049} 3047}
3050#endif 3048#endif
3051 3049
3052/** 3050/**
3053 * link_get_max_pkt - get maximum packet size to use when sending to destination 3051 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
3054 * @dest: network address of destination node 3052 * @dest: network address of destination node
3055 * @selector: used to select from set of active links 3053 * @selector: used to select from set of active links
3056 * 3054 *
3057 * If no active link can be found, uses default maximum packet size. 3055 * If no active link can be found, uses default maximum packet size.
3058 */ 3056 */
3059 3057
3060u32 link_get_max_pkt(u32 dest, u32 selector) 3058u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
3061{ 3059{
3062 struct node *n_ptr; 3060 struct node *n_ptr;
3063 struct link *l_ptr; 3061 struct link *l_ptr;
@@ -3066,16 +3064,16 @@ u32 link_get_max_pkt(u32 dest, u32 selector)
3066 if (dest == tipc_own_addr) 3064 if (dest == tipc_own_addr)
3067 return MAX_MSG_SIZE; 3065 return MAX_MSG_SIZE;
3068 3066
3069 read_lock_bh(&net_lock); 3067 read_lock_bh(&tipc_net_lock);
3070 n_ptr = node_select(dest, selector); 3068 n_ptr = tipc_node_select(dest, selector);
3071 if (n_ptr) { 3069 if (n_ptr) {
3072 node_lock(n_ptr); 3070 tipc_node_lock(n_ptr);
3073 l_ptr = n_ptr->active_links[selector & 1]; 3071 l_ptr = n_ptr->active_links[selector & 1];
3074 if (l_ptr) 3072 if (l_ptr)
3075 res = link_max_pkt(l_ptr); 3073 res = link_max_pkt(l_ptr);
3076 node_unlock(n_ptr); 3074 tipc_node_unlock(n_ptr);
3077 } 3075 }
3078 read_unlock_bh(&net_lock); 3076 read_unlock_bh(&tipc_net_lock);
3079 return res; 3077 return res;
3080} 3078}
3081 3079
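Throughout the link.c hunks above, every path that touches a link follows the same two-level locking pattern: take tipc_net_lock for reading, then the owning node's lock (tipc_node_lock), and release them in reverse order. A minimal user-space sketch of that nesting, using pthread primitives as stand-ins for the kernel's rwlock and node lock (names and fields here are illustrative, not the TIPC structures):

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for tipc_net_lock and a per-node lock (illustrative only). */
static pthread_rwlock_t net_lock = PTHREAD_RWLOCK_INITIALIZER;

struct node {
	pthread_mutex_t lock;
	int max_pkt;			/* pretend per-link property */
};

/* Mirrors the shape of tipc_link_get_max_pkt(): read-lock the table,
 * lock the node, read the value, then unlock in reverse order. */
static int get_max_pkt(struct node *n)
{
	int res;

	pthread_rwlock_rdlock(&net_lock);	/* like read_lock_bh(&tipc_net_lock) */
	pthread_mutex_lock(&n->lock);		/* like tipc_node_lock(n_ptr) */
	res = n->max_pkt;
	pthread_mutex_unlock(&n->lock);		/* like tipc_node_unlock(n_ptr) */
	pthread_rwlock_unlock(&net_lock);	/* like read_unlock_bh(&tipc_net_lock) */
	return res;
}

int main(void)
{
	struct node n = { PTHREAD_MUTEX_INITIALIZER, 1500 };
	printf("max_pkt = %d\n", get_max_pkt(&n));
	return 0;
}
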
diff --git a/net/tipc/link.h b/net/tipc/link.h
index c2553f073757..2d3c157f707d 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -221,44 +221,43 @@ struct link {
221 221
222struct port; 222struct port;
223 223
224struct link *link_create(struct bearer *b_ptr, const u32 peer, 224struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
225 const struct tipc_media_addr *media_addr); 225 const struct tipc_media_addr *media_addr);
226void link_delete(struct link *l_ptr); 226void tipc_link_delete(struct link *l_ptr);
227void link_changeover(struct link *l_ptr); 227void tipc_link_changeover(struct link *l_ptr);
228void link_send_duplicate(struct link *l_ptr, struct link *dest); 228void tipc_link_send_duplicate(struct link *l_ptr, struct link *dest);
229void link_reset_fragments(struct link *l_ptr); 229void tipc_link_reset_fragments(struct link *l_ptr);
230int link_is_up(struct link *l_ptr); 230int tipc_link_is_up(struct link *l_ptr);
231int link_is_active(struct link *l_ptr); 231int tipc_link_is_active(struct link *l_ptr);
232void link_start(struct link *l_ptr); 232void tipc_link_start(struct link *l_ptr);
233u32 link_push_packet(struct link *l_ptr); 233u32 tipc_link_push_packet(struct link *l_ptr);
234void link_stop(struct link *l_ptr); 234void tipc_link_stop(struct link *l_ptr);
235struct sk_buff *link_cmd_config(const void *req_tlv_area, int req_tlv_space, u16 cmd); 235struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space, u16 cmd);
236struct sk_buff *link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space); 236struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space);
237struct sk_buff *link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space); 237struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space);
238void link_reset(struct link *l_ptr); 238void tipc_link_reset(struct link *l_ptr);
239int link_send(struct sk_buff *buf, u32 dest, u32 selector); 239int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector);
240int link_send_buf(struct link *l_ptr, struct sk_buff *buf); 240int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf);
241u32 link_get_max_pkt(u32 dest,u32 selector); 241u32 tipc_link_get_max_pkt(u32 dest,u32 selector);
242int link_send_sections_fast(struct port* sender, 242int tipc_link_send_sections_fast(struct port* sender,
243 struct iovec const *msg_sect, 243 struct iovec const *msg_sect,
244 const u32 num_sect, 244 const u32 num_sect,
245 u32 destnode); 245 u32 destnode);
246 246int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf);
247int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf); 247void tipc_link_tunnel(struct link *l_ptr, struct tipc_msg *tnl_hdr,
248void link_tunnel(struct link *l_ptr, struct tipc_msg *tnl_hdr, 248 struct tipc_msg *msg, u32 selector);
249 struct tipc_msg *msg, u32 selector); 249void tipc_link_recv_bundle(struct sk_buff *buf);
250void link_recv_bundle(struct sk_buff *buf); 250int tipc_link_recv_fragment(struct sk_buff **pending,
251int link_recv_fragment(struct sk_buff **pending, 251 struct sk_buff **fb,
252 struct sk_buff **fb, 252 struct tipc_msg **msg);
253 struct tipc_msg **msg); 253void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int prob, u32 gap,
254void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int prob, u32 gap, 254 u32 tolerance, u32 priority, u32 acked_mtu);
255 u32 tolerance, u32 priority, u32 acked_mtu); 255void tipc_link_push_queue(struct link *l_ptr);
256void link_push_queue(struct link *l_ptr); 256u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
257u32 link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
258 struct sk_buff *buf); 257 struct sk_buff *buf);
259void link_wakeup_ports(struct link *l_ptr, int all); 258void tipc_link_wakeup_ports(struct link *l_ptr, int all);
260void link_set_queue_limits(struct link *l_ptr, u32 window); 259void tipc_link_set_queue_limits(struct link *l_ptr, u32 window);
261void link_retransmit(struct link *l_ptr, struct sk_buff *start, u32 retransmits); 260void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *start, u32 retransmits);
262 261
263/* 262/*
264 * Link sequence number manipulation routines (uses modulo 2**16 arithmetic) 263 * Link sequence number manipulation routines (uses modulo 2**16 arithmetic)
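The link.h hunk shows the pattern the whole patch applies: every symbol exported from link.c gains a tipc_ prefix so it cannot collide with other code in the kernel's single global namespace, while file-local helpers (link_find_link, link_recv_proto_msg, link_stats, ...) stay static and keep their short names. A minimal sketch of the same convention, with made-up names rather than the TIPC API:

/* Sketch of the naming convention applied by this patch (made-up names). */

/* Would live in the header: the exported API carries the subsystem prefix. */
int mymod_link_send(int dest);

/* File-local helper: stays static and keeps its short, unprefixed name. */
static int link_select(int dest)
{
	return dest & 1;
}

/* The only symbol visible to the rest of the kernel. */
int mymod_link_send(int dest)
{
	return link_select(dest);
}
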
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 03dbc55cb04c..3bd345a344e5 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -41,18 +41,7 @@
41#include "bearer.h" 41#include "bearer.h"
42 42
43 43
44void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr *a) 44void tipc_msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str)
45{
46 memcpy(&((int *)m)[5], a, sizeof(*a));
47}
48
49void msg_get_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
50{
51 memcpy(a, &((int*)m)[5], sizeof(*a));
52}
53
54
55void msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str)
56{ 45{
57 u32 usr = msg_user(msg); 46 u32 usr = msg_user(msg);
58 tipc_printf(buf, str); 47 tipc_printf(buf, str);
@@ -318,7 +307,7 @@ void msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str)
318 tipc_printf(buf, ":REQL(%u):", msg_req_links(msg)); 307 tipc_printf(buf, ":REQL(%u):", msg_req_links(msg));
319 tipc_printf(buf, ":DDOM(%x):", msg_dest_domain(msg)); 308 tipc_printf(buf, ":DDOM(%x):", msg_dest_domain(msg));
320 tipc_printf(buf, ":NETID(%u):", msg_bc_netid(msg)); 309 tipc_printf(buf, ":NETID(%u):", msg_bc_netid(msg));
321 media_addr_printf(buf, orig); 310 tipc_media_addr_printf(buf, orig);
322 } 311 }
323 if (msg_user(msg) == BCAST_PROTOCOL) { 312 if (msg_user(msg) == BCAST_PROTOCOL) {
324 tipc_printf(buf, "BCNACK:AFTER(%u):", msg_bcgap_after(msg)); 313 tipc_printf(buf, "BCNACK:AFTER(%u):", msg_bcgap_after(msg));
@@ -326,9 +315,9 @@ void msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str)
326 } 315 }
327 tipc_printf(buf, "\n"); 316 tipc_printf(buf, "\n");
328 if ((usr == CHANGEOVER_PROTOCOL) && (msg_msgcnt(msg))) { 317 if ((usr == CHANGEOVER_PROTOCOL) && (msg_msgcnt(msg))) {
329 msg_print(buf,msg_get_wrapped(msg)," /"); 318 tipc_msg_print(buf,msg_get_wrapped(msg)," /");
330 } 319 }
331 if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT)) { 320 if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT)) {
332 msg_print(buf,msg_get_wrapped(msg)," /"); 321 tipc_msg_print(buf,msg_get_wrapped(msg)," /");
333 } 322 }
334} 323}
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 6574aab11fa4..6699aaf7bd4c 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -805,14 +805,14 @@ static inline int msg_build(struct tipc_msg *hdr,
805 return -EFAULT; 805 return -EFAULT;
806} 806}
807 807
808static inline void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
809{
810 memcpy(&((int *)m)[5], a, sizeof(*a));
811}
808 812
809struct tipc_media_addr; 813static inline void msg_get_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
810 814{
811extern void msg_set_media_addr(struct tipc_msg *m, 815 memcpy(a, &((int*)m)[5], sizeof(*a));
812 struct tipc_media_addr *a); 816}
813
814extern void msg_get_media_addr(struct tipc_msg *m,
815 struct tipc_media_addr *a);
816
817 817
818#endif 818#endif
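The msg.c and msg.h hunks also move msg_set_media_addr() and msg_get_media_addr() out of msg.c and turn them into static inline functions in msg.h, so the extern declarations disappear and callers get the copy inlined. The helpers simply copy the media address into and out of the message header starting at 32-bit word 5. A self-contained sketch of that layout (the structure sizes below are simplified stand-ins, not the real TIPC definitions):

#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the TIPC types (sizes are illustrative). */
struct tipc_media_addr { unsigned char value[20]; };
struct tipc_msg { unsigned int hdr[15]; };	/* >= 5 words plus room for the address */

/* Same shape as the inlines added to msg.h: the media address occupies
 * the message header starting at 32-bit word 5. */
static inline void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
{
	memcpy(&((int *)m)[5], a, sizeof(*a));
}

static inline void msg_get_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
{
	memcpy(a, &((int *)m)[5], sizeof(*a));
}

int main(void)
{
	struct tipc_msg m = { {0} };
	struct tipc_media_addr in = { "01:02:03" }, out;

	msg_set_media_addr(&m, &in);
	msg_get_media_addr(&m, &out);
	printf("%s\n", out.value);	/* prints 01:02:03 */
	return 0;
}
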
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index 41cbaf1a4a73..830f90999041 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -114,10 +114,10 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
114} 114}
115 115
116/** 116/**
117 * named_publish - tell other nodes about a new publication by this node 117 * tipc_named_publish - tell other nodes about a new publication by this node
118 */ 118 */
119 119
120void named_publish(struct publication *publ) 120void tipc_named_publish(struct publication *publ)
121{ 121{
122 struct sk_buff *buf; 122 struct sk_buff *buf;
123 struct distr_item *item; 123 struct distr_item *item;
@@ -133,15 +133,15 @@ void named_publish(struct publication *publ)
133 133
134 item = (struct distr_item *)msg_data(buf_msg(buf)); 134 item = (struct distr_item *)msg_data(buf_msg(buf));
135 publ_to_item(item, publ); 135 publ_to_item(item, publ);
136 dbg("named_withdraw: broadcasting publish msg\n"); 136 dbg("tipc_named_withdraw: broadcasting publish msg\n");
137 cluster_broadcast(buf); 137 tipc_cltr_broadcast(buf);
138} 138}
139 139
140/** 140/**
141 * named_withdraw - tell other nodes about a withdrawn publication by this node 141 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
142 */ 142 */
143 143
144void named_withdraw(struct publication *publ) 144void tipc_named_withdraw(struct publication *publ)
145{ 145{
146 struct sk_buff *buf; 146 struct sk_buff *buf;
147 struct distr_item *item; 147 struct distr_item *item;
@@ -157,15 +157,15 @@ void named_withdraw(struct publication *publ)
157 157
158 item = (struct distr_item *)msg_data(buf_msg(buf)); 158 item = (struct distr_item *)msg_data(buf_msg(buf));
159 publ_to_item(item, publ); 159 publ_to_item(item, publ);
160 dbg("named_withdraw: broadcasting withdraw msg\n"); 160 dbg("tipc_named_withdraw: broadcasting withdraw msg\n");
161 cluster_broadcast(buf); 161 tipc_cltr_broadcast(buf);
162} 162}
163 163
164/** 164/**
165 * named_node_up - tell specified node about all publications by this node 165 * tipc_named_node_up - tell specified node about all publications by this node
166 */ 166 */
167 167
168void named_node_up(unsigned long node) 168void tipc_named_node_up(unsigned long node)
169{ 169{
170 struct publication *publ; 170 struct publication *publ;
171 struct distr_item *item = 0; 171 struct distr_item *item = 0;
@@ -175,7 +175,7 @@ void named_node_up(unsigned long node)
175 u32 max_item_buf; 175 u32 max_item_buf;
176 176
177 assert(in_own_cluster(node)); 177 assert(in_own_cluster(node));
178 read_lock_bh(&nametbl_lock); 178 read_lock_bh(&tipc_nametbl_lock);
179 max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE; 179 max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE;
180 max_item_buf *= ITEM_SIZE; 180 max_item_buf *= ITEM_SIZE;
181 rest = publ_cnt * ITEM_SIZE; 181 rest = publ_cnt * ITEM_SIZE;
@@ -196,15 +196,15 @@ void named_node_up(unsigned long node)
196 left -= ITEM_SIZE; 196 left -= ITEM_SIZE;
197 if (!left) { 197 if (!left) {
198 msg_set_link_selector(buf_msg(buf), node); 198 msg_set_link_selector(buf_msg(buf), node);
199 dbg("named_node_up: sending publish msg to " 199 dbg("tipc_named_node_up: sending publish msg to "
200 "<%u.%u.%u>\n", tipc_zone(node), 200 "<%u.%u.%u>\n", tipc_zone(node),
201 tipc_cluster(node), tipc_node(node)); 201 tipc_cluster(node), tipc_node(node));
202 link_send(buf, node, node); 202 tipc_link_send(buf, node, node);
203 buf = 0; 203 buf = 0;
204 } 204 }
205 } 205 }
206exit: 206exit:
207 read_unlock_bh(&nametbl_lock); 207 read_unlock_bh(&tipc_nametbl_lock);
208} 208}
209 209
210/** 210/**
@@ -221,73 +221,73 @@ exit:
221static void node_is_down(struct publication *publ) 221static void node_is_down(struct publication *publ)
222{ 222{
223 struct publication *p; 223 struct publication *p;
224 write_lock_bh(&nametbl_lock); 224 write_lock_bh(&tipc_nametbl_lock);
225 dbg("node_is_down: withdrawing %u, %u, %u\n", 225 dbg("node_is_down: withdrawing %u, %u, %u\n",
226 publ->type, publ->lower, publ->upper); 226 publ->type, publ->lower, publ->upper);
227 publ->key += 1222345; 227 publ->key += 1222345;
228 p = nametbl_remove_publ(publ->type, publ->lower, 228 p = tipc_nametbl_remove_publ(publ->type, publ->lower,
229 publ->node, publ->ref, publ->key); 229 publ->node, publ->ref, publ->key);
230 assert(p == publ); 230 assert(p == publ);
231 write_unlock_bh(&nametbl_lock); 231 write_unlock_bh(&tipc_nametbl_lock);
232 if (publ) 232 if (publ)
233 kfree(publ); 233 kfree(publ);
234} 234}
235 235
236/** 236/**
237 * named_recv - process name table update message sent by another node 237 * tipc_named_recv - process name table update message sent by another node
238 */ 238 */
239 239
240void named_recv(struct sk_buff *buf) 240void tipc_named_recv(struct sk_buff *buf)
241{ 241{
242 struct publication *publ; 242 struct publication *publ;
243 struct tipc_msg *msg = buf_msg(buf); 243 struct tipc_msg *msg = buf_msg(buf);
244 struct distr_item *item = (struct distr_item *)msg_data(msg); 244 struct distr_item *item = (struct distr_item *)msg_data(msg);
245 u32 count = msg_data_sz(msg) / ITEM_SIZE; 245 u32 count = msg_data_sz(msg) / ITEM_SIZE;
246 246
247 write_lock_bh(&nametbl_lock); 247 write_lock_bh(&tipc_nametbl_lock);
248 while (count--) { 248 while (count--) {
249 if (msg_type(msg) == PUBLICATION) { 249 if (msg_type(msg) == PUBLICATION) {
250 dbg("named_recv: got publication for %u, %u, %u\n", 250 dbg("tipc_named_recv: got publication for %u, %u, %u\n",
251 ntohl(item->type), ntohl(item->lower), 251 ntohl(item->type), ntohl(item->lower),
252 ntohl(item->upper)); 252 ntohl(item->upper));
253 publ = nametbl_insert_publ(ntohl(item->type), 253 publ = tipc_nametbl_insert_publ(ntohl(item->type),
254 ntohl(item->lower), 254 ntohl(item->lower),
255 ntohl(item->upper), 255 ntohl(item->upper),
256 TIPC_CLUSTER_SCOPE, 256 TIPC_CLUSTER_SCOPE,
257 msg_orignode(msg), 257 msg_orignode(msg),
258 ntohl(item->ref), 258 ntohl(item->ref),
259 ntohl(item->key)); 259 ntohl(item->key));
260 if (publ) { 260 if (publ) {
261 nodesub_subscribe(&publ->subscr, 261 tipc_nodesub_subscribe(&publ->subscr,
262 msg_orignode(msg), 262 msg_orignode(msg),
263 publ, 263 publ,
264 (net_ev_handler)node_is_down); 264 (net_ev_handler)node_is_down);
265 } 265 }
266 } else if (msg_type(msg) == WITHDRAWAL) { 266 } else if (msg_type(msg) == WITHDRAWAL) {
267 dbg("named_recv: got withdrawl for %u, %u, %u\n", 267 dbg("tipc_named_recv: got withdrawl for %u, %u, %u\n",
268 ntohl(item->type), ntohl(item->lower), 268 ntohl(item->type), ntohl(item->lower),
269 ntohl(item->upper)); 269 ntohl(item->upper));
270 publ = nametbl_remove_publ(ntohl(item->type), 270 publ = tipc_nametbl_remove_publ(ntohl(item->type),
271 ntohl(item->lower), 271 ntohl(item->lower),
272 msg_orignode(msg), 272 msg_orignode(msg),
273 ntohl(item->ref), 273 ntohl(item->ref),
274 ntohl(item->key)); 274 ntohl(item->key));
275 275
276 if (publ) { 276 if (publ) {
277 nodesub_unsubscribe(&publ->subscr); 277 tipc_nodesub_unsubscribe(&publ->subscr);
278 kfree(publ); 278 kfree(publ);
279 } 279 }
280 } else { 280 } else {
281 warn("named_recv: unknown msg\n"); 281 warn("tipc_named_recv: unknown msg\n");
282 } 282 }
283 item++; 283 item++;
284 } 284 }
285 write_unlock_bh(&nametbl_lock); 285 write_unlock_bh(&tipc_nametbl_lock);
286 buf_discard(buf); 286 buf_discard(buf);
287} 287}
288 288
289/** 289/**
290 * named_reinit - re-initialize local publication list 290 * tipc_named_reinit - re-initialize local publication list
291 * 291 *
292 * This routine is called whenever TIPC networking is (re)enabled. 292 * This routine is called whenever TIPC networking is (re)enabled.
293 * All existing publications by this node that have "cluster" or "zone" scope 293 * All existing publications by this node that have "cluster" or "zone" scope
@@ -295,15 +295,15 @@ void named_recv(struct sk_buff *buf)
295 * (If the node's address is unchanged, the update loop terminates immediately.) 295 * (If the node's address is unchanged, the update loop terminates immediately.)
296 */ 296 */
297 297
298void named_reinit(void) 298void tipc_named_reinit(void)
299{ 299{
300 struct publication *publ; 300 struct publication *publ;
301 301
302 write_lock_bh(&nametbl_lock); 302 write_lock_bh(&tipc_nametbl_lock);
303 list_for_each_entry(publ, &publ_root, local_list) { 303 list_for_each_entry(publ, &publ_root, local_list) {
304 if (publ->node == tipc_own_addr) 304 if (publ->node == tipc_own_addr)
305 break; 305 break;
306 publ->node = tipc_own_addr; 306 publ->node = tipc_own_addr;
307 } 307 }
308 write_unlock_bh(&nametbl_lock); 308 write_unlock_bh(&tipc_nametbl_lock);
309} 309}
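tipc_named_recv() above unpacks every distr_item field with ntohl(), which implies that the sending side (publ_to_item(), called by tipc_named_publish() and tipc_named_withdraw() but not shown in this hunk) stores them in network byte order. A hedged sketch of that packing, with the distr_item field names inferred from the receive path rather than confirmed by the patch:

static void example_publ_to_item(struct distr_item *i, struct publication *p)
{
	/* counterparts of the ntohl() reads in tipc_named_recv() */
	i->type  = htonl(p->type);
	i->lower = htonl(p->lower);
	i->upper = htonl(p->upper);
	i->ref   = htonl(p->ref);
	i->key   = htonl(p->key);
}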
diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h
index a04bdeac84ea..843da0172f4e 100644
--- a/net/tipc/name_distr.h
+++ b/net/tipc/name_distr.h
@@ -39,10 +39,10 @@
39 39
40#include "name_table.h" 40#include "name_table.h"
41 41
42void named_publish(struct publication *publ); 42void tipc_named_publish(struct publication *publ);
43void named_withdraw(struct publication *publ); 43void tipc_named_withdraw(struct publication *publ);
44void named_node_up(unsigned long node); 44void tipc_named_node_up(unsigned long node);
45void named_recv(struct sk_buff *buf); 45void tipc_named_recv(struct sk_buff *buf);
46void named_reinit(void); 46void tipc_named_reinit(void);
47 47
48#endif 48#endif
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 972c83eb83b4..3f4b23bd08f7 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -99,9 +99,9 @@ struct name_table {
99 u32 local_publ_count; 99 u32 local_publ_count;
100}; 100};
101 101
102struct name_table table = { NULL } ; 102static struct name_table table = { NULL } ;
103static atomic_t rsv_publ_ok = ATOMIC_INIT(0); 103static atomic_t rsv_publ_ok = ATOMIC_INIT(0);
104rwlock_t nametbl_lock = RW_LOCK_UNLOCKED; 104rwlock_t tipc_nametbl_lock = RW_LOCK_UNLOCKED;
105 105
106 106
107static inline int hash(int x) 107static inline int hash(int x)
@@ -139,10 +139,10 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
139} 139}
140 140
141/** 141/**
142 * subseq_alloc - allocate a specified number of sub-sequence structures 142 * tipc_subseq_alloc - allocate a specified number of sub-sequence structures
143 */ 143 */
144 144
145struct sub_seq *subseq_alloc(u32 cnt) 145struct sub_seq *tipc_subseq_alloc(u32 cnt)
146{ 146{
147 u32 sz = cnt * sizeof(struct sub_seq); 147 u32 sz = cnt * sizeof(struct sub_seq);
148 struct sub_seq *sseq = (struct sub_seq *)kmalloc(sz, GFP_ATOMIC); 148 struct sub_seq *sseq = (struct sub_seq *)kmalloc(sz, GFP_ATOMIC);
@@ -153,16 +153,16 @@ struct sub_seq *subseq_alloc(u32 cnt)
153} 153}
154 154
155/** 155/**
156 * nameseq_create - create a name sequence structure for the specified 'type' 156 * tipc_nameseq_create - create a name sequence structure for the specified 'type'
157 * 157 *
158 * Allocates a single sub-sequence structure and sets it to all 0's. 158 * Allocates a single sub-sequence structure and sets it to all 0's.
159 */ 159 */
160 160
161struct name_seq *nameseq_create(u32 type, struct hlist_head *seq_head) 161struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head)
162{ 162{
163 struct name_seq *nseq = 163 struct name_seq *nseq =
164 (struct name_seq *)kmalloc(sizeof(*nseq), GFP_ATOMIC); 164 (struct name_seq *)kmalloc(sizeof(*nseq), GFP_ATOMIC);
165 struct sub_seq *sseq = subseq_alloc(1); 165 struct sub_seq *sseq = tipc_subseq_alloc(1);
166 166
167 if (!nseq || !sseq) { 167 if (!nseq || !sseq) {
168 warn("Memory squeeze; failed to create name sequence\n"); 168 warn("Memory squeeze; failed to create name sequence\n");
@@ -175,7 +175,7 @@ struct name_seq *nameseq_create(u32 type, struct hlist_head *seq_head)
175 nseq->lock = SPIN_LOCK_UNLOCKED; 175 nseq->lock = SPIN_LOCK_UNLOCKED;
176 nseq->type = type; 176 nseq->type = type;
177 nseq->sseqs = sseq; 177 nseq->sseqs = sseq;
178 dbg("nameseq_create() nseq = %x type %u, ssseqs %x, ff: %u\n", 178 dbg("tipc_nameseq_create() nseq = %x type %u, ssseqs %x, ff: %u\n",
179 nseq, type, nseq->sseqs, nseq->first_free); 179 nseq, type, nseq->sseqs, nseq->first_free);
180 nseq->alloc = 1; 180 nseq->alloc = 1;
181 INIT_HLIST_NODE(&nseq->ns_list); 181 INIT_HLIST_NODE(&nseq->ns_list);
@@ -240,10 +240,10 @@ static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
240} 240}
241 241
242/** 242/**
243 * nameseq_insert_publ - 243 * tipc_nameseq_insert_publ -
244 */ 244 */
245 245
246struct publication *nameseq_insert_publ(struct name_seq *nseq, 246struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
247 u32 type, u32 lower, u32 upper, 247 u32 type, u32 lower, u32 upper,
248 u32 scope, u32 node, u32 port, u32 key) 248 u32 scope, u32 node, u32 port, u32 key)
249{ 249{
@@ -285,7 +285,7 @@ struct publication *nameseq_insert_publ(struct name_seq *nseq,
285 285
286 if (nseq->first_free == nseq->alloc) { 286 if (nseq->first_free == nseq->alloc) {
287 struct sub_seq *sseqs = nseq->sseqs; 287 struct sub_seq *sseqs = nseq->sseqs;
288 nseq->sseqs = subseq_alloc(nseq->alloc * 2); 288 nseq->sseqs = tipc_subseq_alloc(nseq->alloc * 2);
289 if (nseq->sseqs != NULL) { 289 if (nseq->sseqs != NULL) {
290 memcpy(nseq->sseqs, sseqs, 290 memcpy(nseq->sseqs, sseqs,
291 nseq->alloc * sizeof (struct sub_seq)); 291 nseq->alloc * sizeof (struct sub_seq));
@@ -354,23 +354,23 @@ struct publication *nameseq_insert_publ(struct name_seq *nseq,
354 */ 354 */
355 list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) { 355 list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
356 dbg("calling report_overlap()\n"); 356 dbg("calling report_overlap()\n");
357 subscr_report_overlap(s, 357 tipc_subscr_report_overlap(s,
358 publ->lower, 358 publ->lower,
359 publ->upper, 359 publ->upper,
360 TIPC_PUBLISHED, 360 TIPC_PUBLISHED,
361 publ->ref, 361 publ->ref,
362 publ->node, 362 publ->node,
363 created_subseq); 363 created_subseq);
364 } 364 }
365 return publ; 365 return publ;
366} 366}
367 367
368/** 368/**
369 * nameseq_remove_publ - 369 * tipc_nameseq_remove_publ -
370 */ 370 */
371 371
372struct publication *nameseq_remove_publ(struct name_seq *nseq, u32 inst, 372struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst,
373 u32 node, u32 ref, u32 key) 373 u32 node, u32 ref, u32 key)
374{ 374{
375 struct publication *publ; 375 struct publication *publ;
376 struct publication *prev; 376 struct publication *prev;
@@ -470,24 +470,24 @@ struct publication *nameseq_remove_publ(struct name_seq *nseq, u32 inst,
470 * Any subscriptions waiting ? 470 * Any subscriptions waiting ?
471 */ 471 */
472 list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) { 472 list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
473 subscr_report_overlap(s, 473 tipc_subscr_report_overlap(s,
474 publ->lower, 474 publ->lower,
475 publ->upper, 475 publ->upper,
476 TIPC_WITHDRAWN, 476 TIPC_WITHDRAWN,
477 publ->ref, 477 publ->ref,
478 publ->node, 478 publ->node,
479 removed_subseq); 479 removed_subseq);
480 } 480 }
481 return publ; 481 return publ;
482} 482}
483 483
484/** 484/**
485 * nameseq_subscribe: attach a subscription, and issue 485 * tipc_nameseq_subscribe: attach a subscription, and issue
486 * the prescribed number of events if there is any sub- 486 * the prescribed number of events if there is any sub-
487 * sequence overlapping with the requested sequence 487 * sequence overlapping with the requested sequence
488 */ 488 */
489 489
490void nameseq_subscribe(struct name_seq *nseq, struct subscription *s) 490void tipc_nameseq_subscribe(struct name_seq *nseq, struct subscription *s)
491{ 491{
492 struct sub_seq *sseq = nseq->sseqs; 492 struct sub_seq *sseq = nseq->sseqs;
493 493
@@ -498,18 +498,18 @@ void nameseq_subscribe(struct name_seq *nseq, struct subscription *s)
498 498
499 while (sseq != &nseq->sseqs[nseq->first_free]) { 499 while (sseq != &nseq->sseqs[nseq->first_free]) {
500 struct publication *zl = sseq->zone_list; 500 struct publication *zl = sseq->zone_list;
501 if (zl && subscr_overlap(s,sseq->lower,sseq->upper)) { 501 if (zl && tipc_subscr_overlap(s,sseq->lower,sseq->upper)) {
502 struct publication *crs = zl; 502 struct publication *crs = zl;
503 int must_report = 1; 503 int must_report = 1;
504 504
505 do { 505 do {
506 subscr_report_overlap(s, 506 tipc_subscr_report_overlap(s,
507 sseq->lower, 507 sseq->lower,
508 sseq->upper, 508 sseq->upper,
509 TIPC_PUBLISHED, 509 TIPC_PUBLISHED,
510 crs->ref, 510 crs->ref,
511 crs->node, 511 crs->node,
512 must_report); 512 must_report);
513 must_report = 0; 513 must_report = 0;
514 crs = crs->zone_list_next; 514 crs = crs->zone_list_next;
515 } while (crs != zl); 515 } while (crs != zl);
@@ -538,8 +538,8 @@ static struct name_seq *nametbl_find_seq(u32 type)
538 return 0; 538 return 0;
539}; 539};
540 540
541struct publication *nametbl_insert_publ(u32 type, u32 lower, u32 upper, 541struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
542 u32 scope, u32 node, u32 port, u32 key) 542 u32 scope, u32 node, u32 port, u32 key)
543{ 543{
544 struct name_seq *seq = nametbl_find_seq(type); 544 struct name_seq *seq = nametbl_find_seq(type);
545 545
@@ -552,19 +552,19 @@ struct publication *nametbl_insert_publ(u32 type, u32 lower, u32 upper,
552 552
553 dbg("Publishing <%u,%u,%u> from %x\n", type, lower, upper, node); 553 dbg("Publishing <%u,%u,%u> from %x\n", type, lower, upper, node);
554 if (!seq) { 554 if (!seq) {
555 seq = nameseq_create(type, &table.types[hash(type)]); 555 seq = tipc_nameseq_create(type, &table.types[hash(type)]);
556 dbg("nametbl_insert_publ: created %x\n", seq); 556 dbg("tipc_nametbl_insert_publ: created %x\n", seq);
557 } 557 }
558 if (!seq) 558 if (!seq)
559 return 0; 559 return 0;
560 560
561 assert(seq->type == type); 561 assert(seq->type == type);
562 return nameseq_insert_publ(seq, type, lower, upper, 562 return tipc_nameseq_insert_publ(seq, type, lower, upper,
563 scope, node, port, key); 563 scope, node, port, key);
564} 564}
565 565
566struct publication *nametbl_remove_publ(u32 type, u32 lower, 566struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
567 u32 node, u32 ref, u32 key) 567 u32 node, u32 ref, u32 key)
568{ 568{
569 struct publication *publ; 569 struct publication *publ;
570 struct name_seq *seq = nametbl_find_seq(type); 570 struct name_seq *seq = nametbl_find_seq(type);
@@ -573,7 +573,7 @@ struct publication *nametbl_remove_publ(u32 type, u32 lower,
573 return 0; 573 return 0;
574 574
575 dbg("Withdrawing <%u,%u> from %x\n", type, lower, node); 575 dbg("Withdrawing <%u,%u> from %x\n", type, lower, node);
576 publ = nameseq_remove_publ(seq, lower, node, ref, key); 576 publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key);
577 577
578 if (!seq->first_free && list_empty(&seq->subscriptions)) { 578 if (!seq->first_free && list_empty(&seq->subscriptions)) {
579 hlist_del_init(&seq->ns_list); 579 hlist_del_init(&seq->ns_list);
@@ -584,14 +584,14 @@ struct publication *nametbl_remove_publ(u32 type, u32 lower,
584} 584}
585 585
586/* 586/*
587 * nametbl_translate(): Translate tipc_name -> tipc_portid. 587 * tipc_nametbl_translate(): Translate tipc_name -> tipc_portid.
588 * Very time-critical. 588 * Very time-critical.
589 * 589 *
590 * Note: on entry 'destnode' is the search domain used during translation; 590 * Note: on entry 'destnode' is the search domain used during translation;
591 * on exit it passes back the node address of the matching port (if any) 591 * on exit it passes back the node address of the matching port (if any)
592 */ 592 */
593 593
594u32 nametbl_translate(u32 type, u32 instance, u32 *destnode) 594u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
595{ 595{
596 struct sub_seq *sseq; 596 struct sub_seq *sseq;
597 struct publication *publ = 0; 597 struct publication *publ = 0;
@@ -601,7 +601,7 @@ u32 nametbl_translate(u32 type, u32 instance, u32 *destnode)
601 if (!in_scope(*destnode, tipc_own_addr)) 601 if (!in_scope(*destnode, tipc_own_addr))
602 return 0; 602 return 0;
603 603
604 read_lock_bh(&nametbl_lock); 604 read_lock_bh(&tipc_nametbl_lock);
605 seq = nametbl_find_seq(type); 605 seq = nametbl_find_seq(type);
606 if (unlikely(!seq)) 606 if (unlikely(!seq))
607 goto not_found; 607 goto not_found;
@@ -619,7 +619,7 @@ found:
619 ref = publ->ref; 619 ref = publ->ref;
620 *destnode = publ->node; 620 *destnode = publ->node;
621 spin_unlock_bh(&seq->lock); 621 spin_unlock_bh(&seq->lock);
622 read_unlock_bh(&nametbl_lock); 622 read_unlock_bh(&tipc_nametbl_lock);
623 return ref; 623 return ref;
624 } 624 }
625 publ = sseq->cluster_list; 625 publ = sseq->cluster_list;
@@ -657,12 +657,12 @@ found:
657 spin_unlock_bh(&seq->lock); 657 spin_unlock_bh(&seq->lock);
658not_found: 658not_found:
659 *destnode = 0; 659 *destnode = 0;
660 read_unlock_bh(&nametbl_lock); 660 read_unlock_bh(&tipc_nametbl_lock);
661 return 0; 661 return 0;
662} 662}
663 663
664/** 664/**
665 * nametbl_mc_translate - find multicast destinations 665 * tipc_nametbl_mc_translate - find multicast destinations
666 * 666 *
667 * Creates list of all local ports that overlap the given multicast address; 667 * Creates list of all local ports that overlap the given multicast address;
668 * also determines if any off-node ports overlap. 668 * also determines if any off-node ports overlap.
@@ -674,15 +674,15 @@ not_found:
674 * Returns non-zero if any off-node ports overlap 674 * Returns non-zero if any off-node ports overlap
675 */ 675 */
676 676
677int nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, 677int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
678 struct port_list *dports) 678 struct port_list *dports)
679{ 679{
680 struct name_seq *seq; 680 struct name_seq *seq;
681 struct sub_seq *sseq; 681 struct sub_seq *sseq;
682 struct sub_seq *sseq_stop; 682 struct sub_seq *sseq_stop;
683 int res = 0; 683 int res = 0;
684 684
685 read_lock_bh(&nametbl_lock); 685 read_lock_bh(&tipc_nametbl_lock);
686 seq = nametbl_find_seq(type); 686 seq = nametbl_find_seq(type);
687 if (!seq) 687 if (!seq)
688 goto exit; 688 goto exit;
@@ -700,7 +700,7 @@ int nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
700 if (publ && (publ->scope <= limit)) 700 if (publ && (publ->scope <= limit))
701 do { 701 do {
702 if (publ->node == tipc_own_addr) 702 if (publ->node == tipc_own_addr)
703 port_list_add(dports, publ->ref); 703 tipc_port_list_add(dports, publ->ref);
704 else 704 else
705 res = 1; 705 res = 1;
706 publ = publ->cluster_list_next; 706 publ = publ->cluster_list_next;
@@ -709,15 +709,15 @@ int nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
709 709
710 spin_unlock_bh(&seq->lock); 710 spin_unlock_bh(&seq->lock);
711exit: 711exit:
712 read_unlock_bh(&nametbl_lock); 712 read_unlock_bh(&tipc_nametbl_lock);
713 return res; 713 return res;
714} 714}
715 715
716/** 716/**
717 * nametbl_publish_rsv - publish port name using a reserved name type 717 * tipc_nametbl_publish_rsv - publish port name using a reserved name type
718 */ 718 */
719 719
720int nametbl_publish_rsv(u32 ref, unsigned int scope, 720int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope,
721 struct tipc_name_seq const *seq) 721 struct tipc_name_seq const *seq)
722{ 722{
723 int res; 723 int res;
@@ -729,10 +729,10 @@ int nametbl_publish_rsv(u32 ref, unsigned int scope,
729} 729}
730 730
731/** 731/**
732 * nametbl_publish - add name publication to network name tables 732 * tipc_nametbl_publish - add name publication to network name tables
733 */ 733 */
734 734
735struct publication *nametbl_publish(u32 type, u32 lower, u32 upper, 735struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
736 u32 scope, u32 port_ref, u32 key) 736 u32 scope, u32 port_ref, u32 key)
737{ 737{
738 struct publication *publ; 738 struct publication *publ;
@@ -748,77 +748,77 @@ struct publication *nametbl_publish(u32 type, u32 lower, u32 upper,
748 return 0; 748 return 0;
749 } 749 }
750 750
751 write_lock_bh(&nametbl_lock); 751 write_lock_bh(&tipc_nametbl_lock);
752 table.local_publ_count++; 752 table.local_publ_count++;
753 publ = nametbl_insert_publ(type, lower, upper, scope, 753 publ = tipc_nametbl_insert_publ(type, lower, upper, scope,
754 tipc_own_addr, port_ref, key); 754 tipc_own_addr, port_ref, key);
755 if (publ && (scope != TIPC_NODE_SCOPE)) { 755 if (publ && (scope != TIPC_NODE_SCOPE)) {
756 named_publish(publ); 756 tipc_named_publish(publ);
757 } 757 }
758 write_unlock_bh(&nametbl_lock); 758 write_unlock_bh(&tipc_nametbl_lock);
759 return publ; 759 return publ;
760} 760}
761 761
762/** 762/**
763 * nametbl_withdraw - withdraw name publication from network name tables 763 * tipc_nametbl_withdraw - withdraw name publication from network name tables
764 */ 764 */
765 765
766int nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key) 766int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
767{ 767{
768 struct publication *publ; 768 struct publication *publ;
769 769
770 dbg("nametbl_withdraw:<%d,%d,%d>\n", type, lower, key); 770 dbg("tipc_nametbl_withdraw:<%d,%d,%d>\n", type, lower, key);
771 write_lock_bh(&nametbl_lock); 771 write_lock_bh(&tipc_nametbl_lock);
772 publ = nametbl_remove_publ(type, lower, tipc_own_addr, ref, key); 772 publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
773 if (publ) { 773 if (publ) {
774 table.local_publ_count--; 774 table.local_publ_count--;
775 if (publ->scope != TIPC_NODE_SCOPE) 775 if (publ->scope != TIPC_NODE_SCOPE)
776 named_withdraw(publ); 776 tipc_named_withdraw(publ);
777 write_unlock_bh(&nametbl_lock); 777 write_unlock_bh(&tipc_nametbl_lock);
778 list_del_init(&publ->pport_list); 778 list_del_init(&publ->pport_list);
779 kfree(publ); 779 kfree(publ);
780 return 1; 780 return 1;
781 } 781 }
782 write_unlock_bh(&nametbl_lock); 782 write_unlock_bh(&tipc_nametbl_lock);
783 return 0; 783 return 0;
784} 784}
785 785
786/** 786/**
787 * nametbl_subscribe - add a subscription object to the name table 787 * tipc_nametbl_subscribe - add a subscription object to the name table
788 */ 788 */
789 789
790void 790void
791nametbl_subscribe(struct subscription *s) 791tipc_nametbl_subscribe(struct subscription *s)
792{ 792{
793 u32 type = s->seq.type; 793 u32 type = s->seq.type;
794 struct name_seq *seq; 794 struct name_seq *seq;
795 795
796 write_lock_bh(&nametbl_lock); 796 write_lock_bh(&tipc_nametbl_lock);
797 seq = nametbl_find_seq(type); 797 seq = nametbl_find_seq(type);
798 if (!seq) { 798 if (!seq) {
799 seq = nameseq_create(type, &table.types[hash(type)]); 799 seq = tipc_nameseq_create(type, &table.types[hash(type)]);
800 } 800 }
801 if (seq){ 801 if (seq){
802 spin_lock_bh(&seq->lock); 802 spin_lock_bh(&seq->lock);
803 dbg("nametbl_subscribe:found %x for <%u,%u,%u>\n", 803 dbg("tipc_nametbl_subscribe:found %x for <%u,%u,%u>\n",
804 seq, type, s->seq.lower, s->seq.upper); 804 seq, type, s->seq.lower, s->seq.upper);
805 assert(seq->type == type); 805 assert(seq->type == type);
806 nameseq_subscribe(seq, s); 806 tipc_nameseq_subscribe(seq, s);
807 spin_unlock_bh(&seq->lock); 807 spin_unlock_bh(&seq->lock);
808 } 808 }
809 write_unlock_bh(&nametbl_lock); 809 write_unlock_bh(&tipc_nametbl_lock);
810} 810}
811 811
812/** 812/**
813 * nametbl_unsubscribe - remove a subscription object from name table 813 * tipc_nametbl_unsubscribe - remove a subscription object from name table
814 */ 814 */
815 815
816void 816void
817nametbl_unsubscribe(struct subscription *s) 817tipc_nametbl_unsubscribe(struct subscription *s)
818{ 818{
819 struct name_seq *seq; 819 struct name_seq *seq;
820 820
821 write_lock_bh(&nametbl_lock); 821 write_lock_bh(&tipc_nametbl_lock);
822 seq = nametbl_find_seq(s->seq.type); 822 seq = nametbl_find_seq(s->seq.type);
823 if (seq != NULL){ 823 if (seq != NULL){
824 spin_lock_bh(&seq->lock); 824 spin_lock_bh(&seq->lock);
@@ -830,7 +830,7 @@ nametbl_unsubscribe(struct subscription *s)
830 kfree(seq); 830 kfree(seq);
831 } 831 }
832 } 832 }
833 write_unlock_bh(&nametbl_lock); 833 write_unlock_bh(&tipc_nametbl_lock);
834} 834}
835 835
836 836
@@ -983,17 +983,17 @@ static void nametbl_list(struct print_buf *buf, u32 depth_info,
983 } 983 }
984} 984}
985 985
986void nametbl_print(struct print_buf *buf, const char *str) 986void tipc_nametbl_print(struct print_buf *buf, const char *str)
987{ 987{
988 tipc_printf(buf, str); 988 tipc_printf(buf, str);
989 read_lock_bh(&nametbl_lock); 989 read_lock_bh(&tipc_nametbl_lock);
990 nametbl_list(buf, 0, 0, 0, 0); 990 nametbl_list(buf, 0, 0, 0, 0);
991 read_unlock_bh(&nametbl_lock); 991 read_unlock_bh(&tipc_nametbl_lock);
992} 992}
993 993
994#define MAX_NAME_TBL_QUERY 32768 994#define MAX_NAME_TBL_QUERY 32768
995 995
996struct sk_buff *nametbl_get(const void *req_tlv_area, int req_tlv_space) 996struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space)
997{ 997{
998 struct sk_buff *buf; 998 struct sk_buff *buf;
999 struct tipc_name_table_query *argv; 999 struct tipc_name_table_query *argv;
@@ -1002,20 +1002,20 @@ struct sk_buff *nametbl_get(const void *req_tlv_area, int req_tlv_space)
1002 int str_len; 1002 int str_len;
1003 1003
1004 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NAME_TBL_QUERY)) 1004 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NAME_TBL_QUERY))
1005 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 1005 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
1006 1006
1007 buf = cfg_reply_alloc(TLV_SPACE(MAX_NAME_TBL_QUERY)); 1007 buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_NAME_TBL_QUERY));
1008 if (!buf) 1008 if (!buf)
1009 return NULL; 1009 return NULL;
1010 1010
1011 rep_tlv = (struct tlv_desc *)buf->data; 1011 rep_tlv = (struct tlv_desc *)buf->data;
1012 printbuf_init(&b, TLV_DATA(rep_tlv), MAX_NAME_TBL_QUERY); 1012 tipc_printbuf_init(&b, TLV_DATA(rep_tlv), MAX_NAME_TBL_QUERY);
1013 argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area); 1013 argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area);
1014 read_lock_bh(&nametbl_lock); 1014 read_lock_bh(&tipc_nametbl_lock);
1015 nametbl_list(&b, ntohl(argv->depth), ntohl(argv->type), 1015 nametbl_list(&b, ntohl(argv->depth), ntohl(argv->type),
1016 ntohl(argv->lowbound), ntohl(argv->upbound)); 1016 ntohl(argv->lowbound), ntohl(argv->upbound));
1017 read_unlock_bh(&nametbl_lock); 1017 read_unlock_bh(&tipc_nametbl_lock);
1018 str_len = printbuf_validate(&b); 1018 str_len = tipc_printbuf_validate(&b);
1019 1019
1020 skb_put(buf, TLV_SPACE(str_len)); 1020 skb_put(buf, TLV_SPACE(str_len));
1021 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); 1021 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -1023,12 +1023,12 @@ struct sk_buff *nametbl_get(const void *req_tlv_area, int req_tlv_space)
1023 return buf; 1023 return buf;
1024} 1024}
1025 1025
1026void nametbl_dump(void) 1026void tipc_nametbl_dump(void)
1027{ 1027{
1028 nametbl_list(CONS, 0, 0, 0, 0); 1028 nametbl_list(TIPC_CONS, 0, 0, 0, 0);
1029} 1029}
1030 1030
1031int nametbl_init(void) 1031int tipc_nametbl_init(void)
1032{ 1032{
1033 int array_size = sizeof(struct hlist_head) * tipc_nametbl_size; 1033 int array_size = sizeof(struct hlist_head) * tipc_nametbl_size;
1034 1034
@@ -1036,14 +1036,14 @@ int nametbl_init(void)
1036 if (!table.types) 1036 if (!table.types)
1037 return -ENOMEM; 1037 return -ENOMEM;
1038 1038
1039 write_lock_bh(&nametbl_lock); 1039 write_lock_bh(&tipc_nametbl_lock);
1040 memset(table.types, 0, array_size); 1040 memset(table.types, 0, array_size);
1041 table.local_publ_count = 0; 1041 table.local_publ_count = 0;
1042 write_unlock_bh(&nametbl_lock); 1042 write_unlock_bh(&tipc_nametbl_lock);
1043 return 0; 1043 return 0;
1044} 1044}
1045 1045
1046void nametbl_stop(void) 1046void tipc_nametbl_stop(void)
1047{ 1047{
1048 struct hlist_head *seq_head; 1048 struct hlist_head *seq_head;
1049 struct hlist_node *seq_node; 1049 struct hlist_node *seq_node;
@@ -1054,7 +1054,7 @@ void nametbl_stop(void)
1054 if (!table.types) 1054 if (!table.types)
1055 return; 1055 return;
1056 1056
1057 write_lock_bh(&nametbl_lock); 1057 write_lock_bh(&tipc_nametbl_lock);
1058 for (i = 0; i < tipc_nametbl_size; i++) { 1058 for (i = 0; i < tipc_nametbl_size; i++) {
1059 seq_head = &table.types[i]; 1059 seq_head = &table.types[i];
1060 hlist_for_each_entry_safe(seq, seq_node, tmp, seq_head, ns_list) { 1060 hlist_for_each_entry_safe(seq, seq_node, tmp, seq_head, ns_list) {
@@ -1075,5 +1075,5 @@ void nametbl_stop(void)
1075 } 1075 }
1076 kfree(table.types); 1076 kfree(table.types);
1077 table.types = NULL; 1077 table.types = NULL;
1078 write_unlock_bh(&nametbl_lock); 1078 write_unlock_bh(&tipc_nametbl_lock);
1079} 1079}
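As the comment above tipc_nametbl_translate() notes, 'destnode' is the search domain on entry and carries back the owning node of the matching port on exit. A short usage sketch under that reading (the caller and its variables are hypothetical):

static u32 example_lookup(u32 type, u32 instance, u32 domain)
{
	u32 destnode = domain;		/* search domain in, owning node out */
	u32 port = tipc_nametbl_translate(type, instance, &destnode);

	if (!port)
		return 0;		/* no matching publication in scope */
	/* a message would now be directed to <destnode, port> */
	return port;
}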
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index f82693384f60..e8a3d71763ce 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -85,24 +85,24 @@ struct publication {
85}; 85};
86 86
87 87
88extern rwlock_t nametbl_lock; 88extern rwlock_t tipc_nametbl_lock;
89 89
90struct sk_buff *nametbl_get(const void *req_tlv_area, int req_tlv_space); 90struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space);
91u32 nametbl_translate(u32 type, u32 instance, u32 *node); 91u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node);
92int nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, 92int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
93 struct port_list *dports); 93 struct port_list *dports);
94int nametbl_publish_rsv(u32 ref, unsigned int scope, 94int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope,
95 struct tipc_name_seq const *seq); 95 struct tipc_name_seq const *seq);
96struct publication *nametbl_publish(u32 type, u32 lower, u32 upper, 96struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
97 u32 scope, u32 port_ref, u32 key); 97 u32 scope, u32 port_ref, u32 key);
98int nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key); 98int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key);
99struct publication *nametbl_insert_publ(u32 type, u32 lower, u32 upper, 99struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
100 u32 scope, u32 node, u32 ref, u32 key); 100 u32 scope, u32 node, u32 ref, u32 key);
101struct publication *nametbl_remove_publ(u32 type, u32 lower, 101struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
102 u32 node, u32 ref, u32 key); 102 u32 node, u32 ref, u32 key);
103void nametbl_subscribe(struct subscription *s); 103void tipc_nametbl_subscribe(struct subscription *s);
104void nametbl_unsubscribe(struct subscription *s); 104void tipc_nametbl_unsubscribe(struct subscription *s);
105int nametbl_init(void); 105int tipc_nametbl_init(void);
106void nametbl_stop(void); 106void tipc_nametbl_stop(void);
107 107
108#endif 108#endif
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 6826b493c1d6..074891ad4f09 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -58,25 +58,25 @@
58 * 1: The routing hierarchy. 58 * 1: The routing hierarchy.
59 * Comprises the structures 'zone', 'cluster', 'node', 'link' 59 * Comprises the structures 'zone', 'cluster', 'node', 'link'
60 * and 'bearer'. The whole hierarchy is protected by a big 60 * and 'bearer'. The whole hierarchy is protected by a big
61 * read/write lock, net_lock, to ensure that nothing is added 61 * read/write lock, tipc_net_lock, to ensure that nothing is added
62 * or removed while code is accessing any of these structures. 62 * or removed while code is accessing any of these structures.
63 * This layer must not be called from the two others while they 63 * This layer must not be called from the two others while they
64 * hold any of their own locks. 64 * hold any of their own locks.
65 * Neither must it itself do any upcalls to the other two before 65 * Neither must it itself do any upcalls to the other two before
66 * it has released net_lock and other protective locks. 66 * it has released tipc_net_lock and other protective locks.
67 * 67 *
68 * Within the net_lock domain there are two sub-domains; 'node' and 68 * Within the tipc_net_lock domain there are two sub-domains; 'node' and
69 * 'bearer', where local write operations are permitted, 69 * 'bearer', where local write operations are permitted,
70 * provided that those are protected by individual spin_locks 70 * provided that those are protected by individual spin_locks
71 * per instance. Code holding net_lock(read) and a node spin_lock 71 * per instance. Code holding tipc_net_lock(read) and a node spin_lock
72 * is permitted to poke around in both the node itself and its 72 * is permitted to poke around in both the node itself and its
73 * subordinate links. I.e, it can update link counters and queues, 73 * subordinate links. I.e, it can update link counters and queues,
74 * change link state, send protocol messages, and alter the 74 * change link state, send protocol messages, and alter the
75 * "active_links" array in the node; but it can _not_ remove a link 75 * "active_links" array in the node; but it can _not_ remove a link
76 * or a node from the overall structure. 76 * or a node from the overall structure.
77 * Correspondingly, individual bearers may change status within a 77 * Correspondingly, individual bearers may change status within a
78 * net_lock(read), protected by an individual spin_lock per bearer 78 * tipc_net_lock(read), protected by an individual spin_lock per bearer
79 * instance, but it needs net_lock(write) to remove/add any bearers. 79 * instance, but it needs tipc_net_lock(write) to remove/add any bearers.
80 * 80 *
81 * 81 *
82 * 2: The transport level of the protocol. 82 * 2: The transport level of the protocol.
@@ -97,91 +97,91 @@
97 * (Nobody is using read-only access to this, so it can just as 97 * (Nobody is using read-only access to this, so it can just as
98 * well be changed to a spin_lock) 98 * well be changed to a spin_lock)
99 * - A spin lock to protect the registry of kernel/driver users (reg.c) 99 * - A spin lock to protect the registry of kernel/driver users (reg.c)
100 * - A global spin_lock (port_lock), whose only task is to ensure 100 * - A global spin_lock (tipc_port_lock), whose only task is to ensure
101 * consistency where more than one port is involved in an operation, 101 * consistency where more than one port is involved in an operation,
102 * i.e., when a port is part of a linked list of ports. 102 * i.e., when a port is part of a linked list of ports.
103 * There are two such lists; 'port_list', which is used for management, 103 * There are two such lists; 'port_list', which is used for management,
104 * and 'wait_list', which is used to queue ports during congestion. 104 * and 'wait_list', which is used to queue ports during congestion.
105 * 105 *
106 * 3: The name table (name_table.c, name_distr.c, subscription.c) 106 * 3: The name table (name_table.c, name_distr.c, subscription.c)
107 * - There is one big read/write-lock (nametbl_lock) protecting the 107 * - There is one big read/write-lock (tipc_nametbl_lock) protecting the
108 * overall name table structure. Nothing must be added/removed to 108 * overall name table structure. Nothing must be added/removed to
109 * this structure without holding write access to it. 109 * this structure without holding write access to it.
110 * - There is one local spin_lock per sub_sequence, which can be seen 110 * - There is one local spin_lock per sub_sequence, which can be seen
111 * as a sub-domain to the nametbl_lock domain. It is used only 111 * as a sub-domain to the tipc_nametbl_lock domain. It is used only
112 * for translation operations, and is needed because a translation 112 * for translation operations, and is needed because a translation
113 * steps the root of the 'publication' linked list between each lookup. 113 * steps the root of the 'publication' linked list between each lookup.
114 * This is always used within the scope of a nametbl_lock(read). 114 * This is always used within the scope of a tipc_nametbl_lock(read).
115 * - A local spin_lock protecting the queue of subscriber events. 115 * - A local spin_lock protecting the queue of subscriber events.
116*/ 116*/
117 117
118rwlock_t net_lock = RW_LOCK_UNLOCKED; 118rwlock_t tipc_net_lock = RW_LOCK_UNLOCKED;
119struct network net = { 0 }; 119struct network tipc_net = { 0 };
120 120
121struct node *net_select_remote_node(u32 addr, u32 ref) 121struct node *tipc_net_select_remote_node(u32 addr, u32 ref)
122{ 122{
123 return zone_select_remote_node(net.zones[tipc_zone(addr)], addr, ref); 123 return tipc_zone_select_remote_node(tipc_net.zones[tipc_zone(addr)], addr, ref);
124} 124}
125 125
126u32 net_select_router(u32 addr, u32 ref) 126u32 tipc_net_select_router(u32 addr, u32 ref)
127{ 127{
128 return zone_select_router(net.zones[tipc_zone(addr)], addr, ref); 128 return tipc_zone_select_router(tipc_net.zones[tipc_zone(addr)], addr, ref);
129} 129}
130 130
131 131
132u32 net_next_node(u32 a) 132u32 tipc_net_next_node(u32 a)
133{ 133{
134 if (net.zones[tipc_zone(a)]) 134 if (tipc_net.zones[tipc_zone(a)])
135 return zone_next_node(a); 135 return tipc_zone_next_node(a);
136 return 0; 136 return 0;
137} 137}
138 138
139void net_remove_as_router(u32 router) 139void tipc_net_remove_as_router(u32 router)
140{ 140{
141 u32 z_num; 141 u32 z_num;
142 142
143 for (z_num = 1; z_num <= tipc_max_zones; z_num++) { 143 for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
144 if (!net.zones[z_num]) 144 if (!tipc_net.zones[z_num])
145 continue; 145 continue;
146 zone_remove_as_router(net.zones[z_num], router); 146 tipc_zone_remove_as_router(tipc_net.zones[z_num], router);
147 } 147 }
148} 148}
149 149
150void net_send_external_routes(u32 dest) 150void tipc_net_send_external_routes(u32 dest)
151{ 151{
152 u32 z_num; 152 u32 z_num;
153 153
154 for (z_num = 1; z_num <= tipc_max_zones; z_num++) { 154 for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
155 if (net.zones[z_num]) 155 if (tipc_net.zones[z_num])
156 zone_send_external_routes(net.zones[z_num], dest); 156 tipc_zone_send_external_routes(tipc_net.zones[z_num], dest);
157 } 157 }
158} 158}
159 159
160int net_init(void) 160static int net_init(void)
161{ 161{
162 u32 sz = sizeof(struct _zone *) * (tipc_max_zones + 1); 162 u32 sz = sizeof(struct _zone *) * (tipc_max_zones + 1);
163 163
164 memset(&net, 0, sizeof(net)); 164 memset(&tipc_net, 0, sizeof(tipc_net));
165 net.zones = (struct _zone **)kmalloc(sz, GFP_ATOMIC); 165 tipc_net.zones = (struct _zone **)kmalloc(sz, GFP_ATOMIC);
166 if (!net.zones) { 166 if (!tipc_net.zones) {
167 return -ENOMEM; 167 return -ENOMEM;
168 } 168 }
169 memset(net.zones, 0, sz); 169 memset(tipc_net.zones, 0, sz);
170 return TIPC_OK; 170 return TIPC_OK;
171} 171}
172 172
173void net_stop(void) 173static void net_stop(void)
174{ 174{
175 u32 z_num; 175 u32 z_num;
176 176
177 if (!net.zones) 177 if (!tipc_net.zones)
178 return; 178 return;
179 179
180 for (z_num = 1; z_num <= tipc_max_zones; z_num++) { 180 for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
181 zone_delete(net.zones[z_num]); 181 tipc_zone_delete(tipc_net.zones[z_num]);
182 } 182 }
183 kfree(net.zones); 183 kfree(tipc_net.zones);
184 net.zones = 0; 184 tipc_net.zones = 0;
185} 185}
186 186
187static void net_route_named_msg(struct sk_buff *buf) 187static void net_route_named_msg(struct sk_buff *buf)
@@ -191,26 +191,26 @@ static void net_route_named_msg(struct sk_buff *buf)
191 u32 dport; 191 u32 dport;
192 192
193 if (!msg_named(msg)) { 193 if (!msg_named(msg)) {
194 msg_dbg(msg, "net->drop_nam:"); 194 msg_dbg(msg, "tipc_net->drop_nam:");
195 buf_discard(buf); 195 buf_discard(buf);
196 return; 196 return;
197 } 197 }
198 198
199 dnode = addr_domain(msg_lookup_scope(msg)); 199 dnode = addr_domain(msg_lookup_scope(msg));
200 dport = nametbl_translate(msg_nametype(msg), msg_nameinst(msg), &dnode); 200 dport = tipc_nametbl_translate(msg_nametype(msg), msg_nameinst(msg), &dnode);
201 dbg("net->lookup<%u,%u>-><%u,%x>\n", 201 dbg("tipc_net->lookup<%u,%u>-><%u,%x>\n",
202 msg_nametype(msg), msg_nameinst(msg), dport, dnode); 202 msg_nametype(msg), msg_nameinst(msg), dport, dnode);
203 if (dport) { 203 if (dport) {
204 msg_set_destnode(msg, dnode); 204 msg_set_destnode(msg, dnode);
205 msg_set_destport(msg, dport); 205 msg_set_destport(msg, dport);
206 net_route_msg(buf); 206 tipc_net_route_msg(buf);
207 return; 207 return;
208 } 208 }
209 msg_dbg(msg, "net->rej:NO NAME: "); 209 msg_dbg(msg, "tipc_net->rej:NO NAME: ");
210 tipc_reject_msg(buf, TIPC_ERR_NO_NAME); 210 tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
211} 211}
212 212
213void net_route_msg(struct sk_buff *buf) 213void tipc_net_route_msg(struct sk_buff *buf)
214{ 214{
215 struct tipc_msg *msg; 215 struct tipc_msg *msg;
216 u32 dnode; 216 u32 dnode;
@@ -232,29 +232,29 @@ void net_route_msg(struct sk_buff *buf)
232 return; 232 return;
233 } 233 }
234 234
235 msg_dbg(msg, "net->rout: "); 235 msg_dbg(msg, "tipc_net->rout: ");
236 236
237 /* Handle message for this node */ 237 /* Handle message for this node */
238 dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg); 238 dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
239 if (in_scope(dnode, tipc_own_addr)) { 239 if (in_scope(dnode, tipc_own_addr)) {
240 if (msg_isdata(msg)) { 240 if (msg_isdata(msg)) {
241 if (msg_mcast(msg)) 241 if (msg_mcast(msg))
242 port_recv_mcast(buf, NULL); 242 tipc_port_recv_mcast(buf, NULL);
243 else if (msg_destport(msg)) 243 else if (msg_destport(msg))
244 port_recv_msg(buf); 244 tipc_port_recv_msg(buf);
245 else 245 else
246 net_route_named_msg(buf); 246 net_route_named_msg(buf);
247 return; 247 return;
248 } 248 }
249 switch (msg_user(msg)) { 249 switch (msg_user(msg)) {
250 case ROUTE_DISTRIBUTOR: 250 case ROUTE_DISTRIBUTOR:
251 cluster_recv_routing_table(buf); 251 tipc_cltr_recv_routing_table(buf);
252 break; 252 break;
253 case NAME_DISTRIBUTOR: 253 case NAME_DISTRIBUTOR:
254 named_recv(buf); 254 tipc_named_recv(buf);
255 break; 255 break;
256 case CONN_MANAGER: 256 case CONN_MANAGER:
257 port_recv_proto_msg(buf); 257 tipc_port_recv_proto_msg(buf);
258 break; 258 break;
259 default: 259 default:
260 msg_dbg(msg,"DROP/NET/<REC<"); 260 msg_dbg(msg,"DROP/NET/<REC<");
@@ -265,10 +265,10 @@ void net_route_msg(struct sk_buff *buf)
265 265
266 /* Handle message for another node */ 266 /* Handle message for another node */
267 msg_dbg(msg, "NET>SEND>: "); 267 msg_dbg(msg, "NET>SEND>: ");
268 link_send(buf, dnode, msg_link_selector(msg)); 268 tipc_link_send(buf, dnode, msg_link_selector(msg));
269} 269}
270 270
271int tipc_start_net(void) 271int tipc_net_start(void)
272{ 272{
273 char addr_string[16]; 273 char addr_string[16];
274 int res; 274 int res;
@@ -277,35 +277,35 @@ int tipc_start_net(void)
277 return -ENOPROTOOPT; 277 return -ENOPROTOOPT;
278 278
279 tipc_mode = TIPC_NET_MODE; 279 tipc_mode = TIPC_NET_MODE;
280 named_reinit(); 280 tipc_named_reinit();
281 port_reinit(); 281 tipc_port_reinit();
282 282
283 if ((res = bearer_init()) || 283 if ((res = tipc_bearer_init()) ||
284 (res = net_init()) || 284 (res = net_init()) ||
285 (res = cluster_init()) || 285 (res = tipc_cltr_init()) ||
286 (res = bclink_init())) { 286 (res = tipc_bclink_init())) {
287 return res; 287 return res;
288 } 288 }
289 subscr_stop(); 289 tipc_subscr_stop();
290 cfg_stop(); 290 tipc_cfg_stop();
291 k_signal((Handler)subscr_start, 0); 291 tipc_k_signal((Handler)tipc_subscr_start, 0);
292 k_signal((Handler)cfg_init, 0); 292 tipc_k_signal((Handler)tipc_cfg_init, 0);
293 info("Started in network mode\n"); 293 info("Started in network mode\n");
294 info("Own node address %s, network identity %u\n", 294 info("Own node address %s, network identity %u\n",
295 addr_string_fill(addr_string, tipc_own_addr), tipc_net_id); 295 addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
296 return TIPC_OK; 296 return TIPC_OK;
297} 297}
298 298
299void tipc_stop_net(void) 299void tipc_net_stop(void)
300{ 300{
301 if (tipc_mode != TIPC_NET_MODE) 301 if (tipc_mode != TIPC_NET_MODE)
302 return; 302 return;
303 write_lock_bh(&net_lock); 303 write_lock_bh(&tipc_net_lock);
304 bearer_stop(); 304 tipc_bearer_stop();
305 tipc_mode = TIPC_NODE_MODE; 305 tipc_mode = TIPC_NODE_MODE;
306 bclink_stop(); 306 tipc_bclink_stop();
307 net_stop(); 307 net_stop();
308 write_unlock_bh(&net_lock); 308 write_unlock_bh(&tipc_net_lock);
309 info("Left network mode \n"); 309 info("Left network mode \n");
310} 310}
311 311
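The renaming does not change the locking policy spelled out in the comment block at the top of net.c: code first takes tipc_net_lock for read, then the per-instance spin lock, before touching a node or bearer. A minimal sketch of that ordering, assuming a caller that only needs to update one node (the function and 'addr' are hypothetical; error handling omitted):

static void example_node_update(u32 addr)
{
	struct node *n_ptr;

	read_lock_bh(&tipc_net_lock);		/* hierarchy cannot change underneath us */
	n_ptr = tipc_node_find(addr);
	if (n_ptr) {
		spin_lock_bh(&n_ptr->lock);	/* per-node sub-domain */
		/* safe here: update link counters, queues, active_links[] */
		spin_unlock_bh(&n_ptr->lock);
	}
	read_unlock_bh(&tipc_net_lock);
}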
diff --git a/net/tipc/net.h b/net/tipc/net.h
index 948c6d42102c..f3e0b85e6475 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -49,18 +49,16 @@ struct network {
49}; 49};
50 50
51 51
52extern struct network net; 52extern struct network tipc_net;
53extern rwlock_t net_lock; 53extern rwlock_t tipc_net_lock;
54 54
55int net_init(void); 55void tipc_net_remove_as_router(u32 router);
56void net_stop(void); 56void tipc_net_send_external_routes(u32 dest);
57void net_remove_as_router(u32 router); 57void tipc_net_route_msg(struct sk_buff *buf);
58void net_send_external_routes(u32 dest); 58struct node *tipc_net_select_remote_node(u32 addr, u32 ref);
59void net_route_msg(struct sk_buff *buf); 59u32 tipc_net_select_router(u32 addr, u32 ref);
60struct node *net_select_remote_node(u32 addr, u32 ref);
61u32 net_select_router(u32 addr, u32 ref);
62 60
63int tipc_start_net(void); 61int tipc_net_start(void);
64void tipc_stop_net(void); 62void tipc_net_stop(void);
65 63
66#endif 64#endif
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 19b3f4022532..eb1bb4dce7af 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -47,13 +47,13 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
47 int hdr_space = NLMSG_SPACE(GENL_HDRLEN + TIPC_GENL_HDRLEN); 47 int hdr_space = NLMSG_SPACE(GENL_HDRLEN + TIPC_GENL_HDRLEN);
48 48
49 if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN))) 49 if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN)))
50 rep_buf = cfg_reply_error_string(TIPC_CFG_NOT_NET_ADMIN); 50 rep_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_NET_ADMIN);
51 else 51 else
52 rep_buf = cfg_do_cmd(req_userhdr->dest, 52 rep_buf = tipc_cfg_do_cmd(req_userhdr->dest,
53 req_userhdr->cmd, 53 req_userhdr->cmd,
54 NLMSG_DATA(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN, 54 NLMSG_DATA(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN,
55 NLMSG_PAYLOAD(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN), 55 NLMSG_PAYLOAD(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN),
56 hdr_space); 56 hdr_space);
57 57
58 if (rep_buf) { 58 if (rep_buf) {
59 skb_push(rep_buf, hdr_space); 59 skb_push(rep_buf, hdr_space);
@@ -81,7 +81,7 @@ static struct genl_ops ops = {
81 81
82static int family_registered = 0; 82static int family_registered = 0;
83 83
84int netlink_start(void) 84int tipc_netlink_start(void)
85{ 85{
86 86
87 87
@@ -103,7 +103,7 @@ int netlink_start(void)
103 return -EFAULT; 103 return -EFAULT;
104} 104}
105 105
106void netlink_stop(void) 106void tipc_netlink_stop(void)
107{ 107{
108 if (family_registered) { 108 if (family_registered) {
109 genl_unregister_family(&family); 109 genl_unregister_family(&family);
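handle_cmd() above only lets commands with any of the 0xC000 bits set through to tipc_cfg_do_cmd() when the caller holds CAP_NET_ADMIN. A trivial sketch of that classification (the helper and the u16 command type are assumptions, not part of the patch):

static int example_cmd_is_privileged(u16 cmd)
{
	/* commands with any 0xC000 bit set require CAP_NET_ADMIN */
	return (cmd & 0xC000) != 0;
}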
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 05688d01138b..6d65010e5fa1 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -45,17 +45,16 @@
45#include "port.h" 45#include "port.h"
46#include "bearer.h" 46#include "bearer.h"
47#include "name_distr.h" 47#include "name_distr.h"
48#include "net.h"
49 48
50void node_print(struct print_buf *buf, struct node *n_ptr, char *str); 49void node_print(struct print_buf *buf, struct node *n_ptr, char *str);
51static void node_lost_contact(struct node *n_ptr); 50static void node_lost_contact(struct node *n_ptr);
52static void node_established_contact(struct node *n_ptr); 51static void node_established_contact(struct node *n_ptr);
53 52
54struct node *nodes = NULL; /* sorted list of nodes within cluster */ 53struct node *tipc_nodes = NULL; /* sorted list of nodes within cluster */
55 54
56u32 tipc_own_tag = 0; 55u32 tipc_own_tag = 0;
57 56
58struct node *node_create(u32 addr) 57struct node *tipc_node_create(u32 addr)
59{ 58{
60 struct cluster *c_ptr; 59 struct cluster *c_ptr;
61 struct node *n_ptr; 60 struct node *n_ptr;
@@ -68,16 +67,16 @@ struct node *node_create(u32 addr)
68 n_ptr->lock = SPIN_LOCK_UNLOCKED; 67 n_ptr->lock = SPIN_LOCK_UNLOCKED;
69 INIT_LIST_HEAD(&n_ptr->nsub); 68 INIT_LIST_HEAD(&n_ptr->nsub);
70 69
71 c_ptr = cluster_find(addr); 70 c_ptr = tipc_cltr_find(addr);
72 if (c_ptr == NULL) 71 if (c_ptr == NULL)
73 c_ptr = cluster_create(addr); 72 c_ptr = tipc_cltr_create(addr);
74 if (c_ptr != NULL) { 73 if (c_ptr != NULL) {
75 n_ptr->owner = c_ptr; 74 n_ptr->owner = c_ptr;
76 cluster_attach_node(c_ptr, n_ptr); 75 tipc_cltr_attach_node(c_ptr, n_ptr);
77 n_ptr->last_router = -1; 76 n_ptr->last_router = -1;
78 77
79 /* Insert node into ordered list */ 78 /* Insert node into ordered list */
80 for (curr_node = &nodes; *curr_node; 79 for (curr_node = &tipc_nodes; *curr_node;
81 curr_node = &(*curr_node)->next) { 80 curr_node = &(*curr_node)->next) {
82 if (addr < (*curr_node)->addr) { 81 if (addr < (*curr_node)->addr) {
83 n_ptr->next = *curr_node; 82 n_ptr->next = *curr_node;
@@ -93,13 +92,13 @@ struct node *node_create(u32 addr)
93 return n_ptr; 92 return n_ptr;
94} 93}
95 94
96void node_delete(struct node *n_ptr) 95void tipc_node_delete(struct node *n_ptr)
97{ 96{
98 if (!n_ptr) 97 if (!n_ptr)
99 return; 98 return;
100 99
101#if 0 100#if 0
102 /* Not needed because links are already deleted via bearer_stop() */ 101 /* Not needed because links are already deleted via tipc_bearer_stop() */
103 102
104 u32 l_num; 103 u32 l_num;
105 104
@@ -114,12 +113,12 @@ void node_delete(struct node *n_ptr)
114 113
115 114
116/** 115/**
117 * node_link_up - handle addition of link 116 * tipc_node_link_up - handle addition of link
118 * 117 *
119 * Link becomes active (alone or shared) or standby, depending on its priority. 118 * Link becomes active (alone or shared) or standby, depending on its priority.
120 */ 119 */
121 120
122void node_link_up(struct node *n_ptr, struct link *l_ptr) 121void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr)
123{ 122{
124 struct link **active = &n_ptr->active_links[0]; 123 struct link **active = &n_ptr->active_links[0];
125 124
@@ -136,7 +135,7 @@ void node_link_up(struct node *n_ptr, struct link *l_ptr)
136 info("Link is standby\n"); 135 info("Link is standby\n");
137 return; 136 return;
138 } 137 }
139 link_send_duplicate(active[0], l_ptr); 138 tipc_link_send_duplicate(active[0], l_ptr);
140 if (l_ptr->priority == active[0]->priority) { 139 if (l_ptr->priority == active[0]->priority) {
141 active[0] = l_ptr; 140 active[0] = l_ptr;
142 return; 141 return;
@@ -161,7 +160,7 @@ static void node_select_active_links(struct node *n_ptr)
161 for (i = 0; i < MAX_BEARERS; i++) { 160 for (i = 0; i < MAX_BEARERS; i++) {
162 struct link *l_ptr = n_ptr->links[i]; 161 struct link *l_ptr = n_ptr->links[i];
163 162
164 if (!l_ptr || !link_is_up(l_ptr) || 163 if (!l_ptr || !tipc_link_is_up(l_ptr) ||
165 (l_ptr->priority < highest_prio)) 164 (l_ptr->priority < highest_prio))
166 continue; 165 continue;
167 166
@@ -175,14 +174,14 @@ static void node_select_active_links(struct node *n_ptr)
175} 174}
176 175
177/** 176/**
178 * node_link_down - handle loss of link 177 * tipc_node_link_down - handle loss of link
179 */ 178 */
180 179
181void node_link_down(struct node *n_ptr, struct link *l_ptr) 180void tipc_node_link_down(struct node *n_ptr, struct link *l_ptr)
182{ 181{
183 struct link **active; 182 struct link **active;
184 183
185 if (!link_is_active(l_ptr)) { 184 if (!tipc_link_is_active(l_ptr)) {
186 info("Lost standby link <%s> on network plane %c\n", 185 info("Lost standby link <%s> on network plane %c\n",
187 l_ptr->name, l_ptr->b_ptr->net_plane); 186 l_ptr->name, l_ptr->b_ptr->net_plane);
188 return; 187 return;
@@ -197,40 +196,40 @@ void node_link_down(struct node *n_ptr, struct link *l_ptr)
197 active[1] = active[0]; 196 active[1] = active[0];
198 if (active[0] == l_ptr) 197 if (active[0] == l_ptr)
199 node_select_active_links(n_ptr); 198 node_select_active_links(n_ptr);
200 if (node_is_up(n_ptr)) 199 if (tipc_node_is_up(n_ptr))
201 link_changeover(l_ptr); 200 tipc_link_changeover(l_ptr);
202 else 201 else
203 node_lost_contact(n_ptr); 202 node_lost_contact(n_ptr);
204} 203}
205 204
206int node_has_active_links(struct node *n_ptr) 205int tipc_node_has_active_links(struct node *n_ptr)
207{ 206{
208 return (n_ptr && 207 return (n_ptr &&
209 ((n_ptr->active_links[0]) || (n_ptr->active_links[1]))); 208 ((n_ptr->active_links[0]) || (n_ptr->active_links[1])));
210} 209}
211 210
212int node_has_redundant_links(struct node *n_ptr) 211int tipc_node_has_redundant_links(struct node *n_ptr)
213{ 212{
214 return (node_has_active_links(n_ptr) && 213 return (tipc_node_has_active_links(n_ptr) &&
215 (n_ptr->active_links[0] != n_ptr->active_links[1])); 214 (n_ptr->active_links[0] != n_ptr->active_links[1]));
216} 215}
217 216
218int node_has_active_routes(struct node *n_ptr) 217int tipc_node_has_active_routes(struct node *n_ptr)
219{ 218{
220 return (n_ptr && (n_ptr->last_router >= 0)); 219 return (n_ptr && (n_ptr->last_router >= 0));
221} 220}
222 221
223int node_is_up(struct node *n_ptr) 222int tipc_node_is_up(struct node *n_ptr)
224{ 223{
225 return (node_has_active_links(n_ptr) || node_has_active_routes(n_ptr)); 224 return (tipc_node_has_active_links(n_ptr) || tipc_node_has_active_routes(n_ptr));
226} 225}
227 226
228struct node *node_attach_link(struct link *l_ptr) 227struct node *tipc_node_attach_link(struct link *l_ptr)
229{ 228{
230 struct node *n_ptr = node_find(l_ptr->addr); 229 struct node *n_ptr = tipc_node_find(l_ptr->addr);
231 230
232 if (!n_ptr) 231 if (!n_ptr)
233 n_ptr = node_create(l_ptr->addr); 232 n_ptr = tipc_node_create(l_ptr->addr);
234 if (n_ptr) { 233 if (n_ptr) {
235 u32 bearer_id = l_ptr->b_ptr->identity; 234 u32 bearer_id = l_ptr->b_ptr->identity;
236 char addr_string[16]; 235 char addr_string[16];
@@ -246,7 +245,7 @@ struct node *node_attach_link(struct link *l_ptr)
246 245
247 if (!n_ptr->links[bearer_id]) { 246 if (!n_ptr->links[bearer_id]) {
248 n_ptr->links[bearer_id] = l_ptr; 247 n_ptr->links[bearer_id] = l_ptr;
249 net.zones[tipc_zone(l_ptr->addr)]->links++; 248 tipc_net.zones[tipc_zone(l_ptr->addr)]->links++;
250 n_ptr->link_cnt++; 249 n_ptr->link_cnt++;
251 return n_ptr; 250 return n_ptr;
252 } 251 }
@@ -257,10 +256,10 @@ struct node *node_attach_link(struct link *l_ptr)
257 return 0; 256 return 0;
258} 257}
259 258
260void node_detach_link(struct node *n_ptr, struct link *l_ptr) 259void tipc_node_detach_link(struct node *n_ptr, struct link *l_ptr)
261{ 260{
262 n_ptr->links[l_ptr->b_ptr->identity] = 0; 261 n_ptr->links[l_ptr->b_ptr->identity] = 0;
263 net.zones[tipc_zone(l_ptr->addr)]->links--; 262 tipc_net.zones[tipc_zone(l_ptr->addr)]->links--;
264 n_ptr->link_cnt--; 263 n_ptr->link_cnt--;
265} 264}
266 265
@@ -315,45 +314,45 @@ static void node_established_contact(struct node *n_ptr)
315 struct cluster *c_ptr; 314 struct cluster *c_ptr;
316 315
317 dbg("node_established_contact:-> %x\n", n_ptr->addr); 316 dbg("node_established_contact:-> %x\n", n_ptr->addr);
318 if (!node_has_active_routes(n_ptr)) { 317 if (!tipc_node_has_active_routes(n_ptr)) {
319 k_signal((Handler)named_node_up, n_ptr->addr); 318 tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
320 } 319 }
321 320
322 /* Synchronize broadcast acks */ 321 /* Synchronize broadcast acks */
323 n_ptr->bclink.acked = bclink_get_last_sent(); 322 n_ptr->bclink.acked = tipc_bclink_get_last_sent();
324 323
325 if (is_slave(tipc_own_addr)) 324 if (is_slave(tipc_own_addr))
326 return; 325 return;
327 if (!in_own_cluster(n_ptr->addr)) { 326 if (!in_own_cluster(n_ptr->addr)) {
328 /* Usage case 1 (see above) */ 327 /* Usage case 1 (see above) */
329 c_ptr = cluster_find(tipc_own_addr); 328 c_ptr = tipc_cltr_find(tipc_own_addr);
330 if (!c_ptr) 329 if (!c_ptr)
331 c_ptr = cluster_create(tipc_own_addr); 330 c_ptr = tipc_cltr_create(tipc_own_addr);
332 if (c_ptr) 331 if (c_ptr)
333 cluster_bcast_new_route(c_ptr, n_ptr->addr, 1, 332 tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, 1,
334 tipc_max_nodes); 333 tipc_max_nodes);
335 return; 334 return;
336 } 335 }
337 336
338 c_ptr = n_ptr->owner; 337 c_ptr = n_ptr->owner;
339 if (is_slave(n_ptr->addr)) { 338 if (is_slave(n_ptr->addr)) {
340 /* Usage case 2 (see above) */ 339 /* Usage case 2 (see above) */
341 cluster_bcast_new_route(c_ptr, n_ptr->addr, 1, tipc_max_nodes); 340 tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, 1, tipc_max_nodes);
342 cluster_send_local_routes(c_ptr, n_ptr->addr); 341 tipc_cltr_send_local_routes(c_ptr, n_ptr->addr);
343 return; 342 return;
344 } 343 }
345 344
346 if (n_ptr->bclink.supported) { 345 if (n_ptr->bclink.supported) {
347 nmap_add(&cluster_bcast_nodes, n_ptr->addr); 346 tipc_nmap_add(&tipc_cltr_bcast_nodes, n_ptr->addr);
348 if (n_ptr->addr < tipc_own_addr) 347 if (n_ptr->addr < tipc_own_addr)
349 tipc_own_tag++; 348 tipc_own_tag++;
350 } 349 }
351 350
352 /* Case 3 (see above) */ 351 /* Case 3 (see above) */
353 net_send_external_routes(n_ptr->addr); 352 tipc_net_send_external_routes(n_ptr->addr);
354 cluster_send_slave_routes(c_ptr, n_ptr->addr); 353 tipc_cltr_send_slave_routes(c_ptr, n_ptr->addr);
355 cluster_bcast_new_route(c_ptr, n_ptr->addr, LOWEST_SLAVE, 354 tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, LOWEST_SLAVE,
356 highest_allowed_slave); 355 tipc_highest_allowed_slave);
357} 356}
358 357
359static void node_lost_contact(struct node *n_ptr) 358static void node_lost_contact(struct node *n_ptr)
@@ -375,39 +374,39 @@ static void node_lost_contact(struct node *n_ptr)
375 n_ptr->bclink.defragm = NULL; 374 n_ptr->bclink.defragm = NULL;
376 } 375 }
377 if (in_own_cluster(n_ptr->addr) && n_ptr->bclink.supported) { 376 if (in_own_cluster(n_ptr->addr) && n_ptr->bclink.supported) {
378 bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000)); 377 tipc_bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000));
379 } 378 }
380 379
381 /* Update routing tables */ 380 /* Update routing tables */
382 if (is_slave(tipc_own_addr)) { 381 if (is_slave(tipc_own_addr)) {
383 net_remove_as_router(n_ptr->addr); 382 tipc_net_remove_as_router(n_ptr->addr);
384 } else { 383 } else {
385 if (!in_own_cluster(n_ptr->addr)) { 384 if (!in_own_cluster(n_ptr->addr)) {
386 /* Case 4 (see above) */ 385 /* Case 4 (see above) */
387 c_ptr = cluster_find(tipc_own_addr); 386 c_ptr = tipc_cltr_find(tipc_own_addr);
388 cluster_bcast_lost_route(c_ptr, n_ptr->addr, 1, 387 tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr, 1,
389 tipc_max_nodes); 388 tipc_max_nodes);
390 } else { 389 } else {
391 /* Case 5 (see above) */ 390 /* Case 5 (see above) */
392 c_ptr = cluster_find(n_ptr->addr); 391 c_ptr = tipc_cltr_find(n_ptr->addr);
393 if (is_slave(n_ptr->addr)) { 392 if (is_slave(n_ptr->addr)) {
394 cluster_bcast_lost_route(c_ptr, n_ptr->addr, 1, 393 tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr, 1,
395 tipc_max_nodes); 394 tipc_max_nodes);
396 } else { 395 } else {
397 if (n_ptr->bclink.supported) { 396 if (n_ptr->bclink.supported) {
398 nmap_remove(&cluster_bcast_nodes, 397 tipc_nmap_remove(&tipc_cltr_bcast_nodes,
399 n_ptr->addr); 398 n_ptr->addr);
400 if (n_ptr->addr < tipc_own_addr) 399 if (n_ptr->addr < tipc_own_addr)
401 tipc_own_tag--; 400 tipc_own_tag--;
402 } 401 }
403 net_remove_as_router(n_ptr->addr); 402 tipc_net_remove_as_router(n_ptr->addr);
404 cluster_bcast_lost_route(c_ptr, n_ptr->addr, 403 tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr,
405 LOWEST_SLAVE, 404 LOWEST_SLAVE,
406 highest_allowed_slave); 405 tipc_highest_allowed_slave);
407 } 406 }
408 } 407 }
409 } 408 }
410 if (node_has_active_routes(n_ptr)) 409 if (tipc_node_has_active_routes(n_ptr))
411 return; 410 return;
412 411
413 info("Lost contact with %s\n", 412 info("Lost contact with %s\n",
@@ -420,35 +419,35 @@ static void node_lost_contact(struct node *n_ptr)
420 continue; 419 continue;
421 l_ptr->reset_checkpoint = l_ptr->next_in_no; 420 l_ptr->reset_checkpoint = l_ptr->next_in_no;
422 l_ptr->exp_msg_count = 0; 421 l_ptr->exp_msg_count = 0;
423 link_reset_fragments(l_ptr); 422 tipc_link_reset_fragments(l_ptr);
424 } 423 }
425 424
426 /* Notify subscribers */ 425 /* Notify subscribers */
427 list_for_each_entry_safe(ns, tns, &n_ptr->nsub, nodesub_list) { 426 list_for_each_entry_safe(ns, tns, &n_ptr->nsub, nodesub_list) {
428 ns->node = 0; 427 ns->node = 0;
429 list_del_init(&ns->nodesub_list); 428 list_del_init(&ns->nodesub_list);
430 k_signal((Handler)ns->handle_node_down, 429 tipc_k_signal((Handler)ns->handle_node_down,
431 (unsigned long)ns->usr_handle); 430 (unsigned long)ns->usr_handle);
432 } 431 }
433} 432}
434 433
435/** 434/**
436 * node_select_next_hop - find the next-hop node for a message 435 * tipc_node_select_next_hop - find the next-hop node for a message
437 * 436 *
438 * Called when cluster local lookup has failed. 437 * Called when cluster local lookup has failed.
439 */ 438 */
440 439
441struct node *node_select_next_hop(u32 addr, u32 selector) 440struct node *tipc_node_select_next_hop(u32 addr, u32 selector)
442{ 441{
443 struct node *n_ptr; 442 struct node *n_ptr;
444 u32 router_addr; 443 u32 router_addr;
445 444
446 if (!addr_domain_valid(addr)) 445 if (!tipc_addr_domain_valid(addr))
447 return 0; 446 return 0;
448 447
449 /* Look for direct link to destination processor */ 448 /* Look for direct link to destination processor */
450 n_ptr = node_find(addr); 449 n_ptr = tipc_node_find(addr);
451 if (n_ptr && node_has_active_links(n_ptr)) 450 if (n_ptr && tipc_node_has_active_links(n_ptr))
452 return n_ptr; 451 return n_ptr;
453 452
454 /* Cluster local system nodes *must* have direct links */ 453 /* Cluster local system nodes *must* have direct links */
@@ -456,9 +455,9 @@ struct node *node_select_next_hop(u32 addr, u32 selector)
456 return 0; 455 return 0;
457 456
458 /* Look for cluster local router with direct link to node */ 457 /* Look for cluster local router with direct link to node */
459 router_addr = node_select_router(n_ptr, selector); 458 router_addr = tipc_node_select_router(n_ptr, selector);
460 if (router_addr) 459 if (router_addr)
461 return node_select(router_addr, selector); 460 return tipc_node_select(router_addr, selector);
462 461
463 /* Slave nodes can only be accessed within own cluster via a 462 /* Slave nodes can only be accessed within own cluster via a
464 known router with direct link -- if no router was found, give up */ 463 known router with direct link -- if no router was found, give up */
@@ -467,25 +466,25 @@ struct node *node_select_next_hop(u32 addr, u32 selector)
467 466
468 /* Inter zone/cluster -- find any direct link to remote cluster */ 467 /* Inter zone/cluster -- find any direct link to remote cluster */
469 addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0); 468 addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
470 n_ptr = net_select_remote_node(addr, selector); 469 n_ptr = tipc_net_select_remote_node(addr, selector);
471 if (n_ptr && node_has_active_links(n_ptr)) 470 if (n_ptr && tipc_node_has_active_links(n_ptr))
472 return n_ptr; 471 return n_ptr;
473 472
474 /* Last resort -- look for any router to anywhere in remote zone */ 473 /* Last resort -- look for any router to anywhere in remote zone */
475 router_addr = net_select_router(addr, selector); 474 router_addr = tipc_net_select_router(addr, selector);
476 if (router_addr) 475 if (router_addr)
477 return node_select(router_addr, selector); 476 return tipc_node_select(router_addr, selector);
478 477
479 return 0; 478 return 0;
480} 479}
481 480
482/** 481/**
483 * node_select_router - select router to reach specified node 482 * tipc_node_select_router - select router to reach specified node
484 * 483 *
485 * Uses a deterministic and fair algorithm for selecting router node. 484 * Uses a deterministic and fair algorithm for selecting router node.
486 */ 485 */
487 486
488u32 node_select_router(struct node *n_ptr, u32 ref) 487u32 tipc_node_select_router(struct node *n_ptr, u32 ref)
489{ 488{
490 u32 ulim; 489 u32 ulim;
491 u32 mask; 490 u32 mask;
@@ -523,7 +522,7 @@ u32 node_select_router(struct node *n_ptr, u32 ref)
523 return tipc_addr(own_zone(), own_cluster(), r); 522 return tipc_addr(own_zone(), own_cluster(), r);
524} 523}
525 524
526void node_add_router(struct node *n_ptr, u32 router) 525void tipc_node_add_router(struct node *n_ptr, u32 router)
527{ 526{
528 u32 r_num = tipc_node(router); 527 u32 r_num = tipc_node(router);
529 528
@@ -534,7 +533,7 @@ void node_add_router(struct node *n_ptr, u32 router)
534 !n_ptr->routers[n_ptr->last_router]); 533 !n_ptr->routers[n_ptr->last_router]);
535} 534}
536 535
537void node_remove_router(struct node *n_ptr, u32 router) 536void tipc_node_remove_router(struct node *n_ptr, u32 router)
538{ 537{
539 u32 r_num = tipc_node(router); 538 u32 r_num = tipc_node(router);
540 539
@@ -547,7 +546,7 @@ void node_remove_router(struct node *n_ptr, u32 router)
547 while ((--n_ptr->last_router >= 0) && 546 while ((--n_ptr->last_router >= 0) &&
548 !n_ptr->routers[n_ptr->last_router]); 547 !n_ptr->routers[n_ptr->last_router]);
549 548
550 if (!node_is_up(n_ptr)) 549 if (!tipc_node_is_up(n_ptr))
551 node_lost_contact(n_ptr); 550 node_lost_contact(n_ptr);
552} 551}
553 552
@@ -572,16 +571,16 @@ u32 tipc_available_nodes(const u32 domain)
572 struct node *n_ptr; 571 struct node *n_ptr;
573 u32 cnt = 0; 572 u32 cnt = 0;
574 573
575 for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) { 574 for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
576 if (!in_scope(domain, n_ptr->addr)) 575 if (!in_scope(domain, n_ptr->addr))
577 continue; 576 continue;
578 if (node_is_up(n_ptr)) 577 if (tipc_node_is_up(n_ptr))
579 cnt++; 578 cnt++;
580 } 579 }
581 return cnt; 580 return cnt;
582} 581}
583 582
584struct sk_buff *node_get_nodes(const void *req_tlv_area, int req_tlv_space) 583struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
585{ 584{
586 u32 domain; 585 u32 domain;
587 struct sk_buff *buf; 586 struct sk_buff *buf;
@@ -589,40 +588,40 @@ struct sk_buff *node_get_nodes(const void *req_tlv_area, int req_tlv_space)
589 struct tipc_node_info node_info; 588 struct tipc_node_info node_info;
590 589
591 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) 590 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
592 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 591 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
593 592
594 domain = *(u32 *)TLV_DATA(req_tlv_area); 593 domain = *(u32 *)TLV_DATA(req_tlv_area);
595 domain = ntohl(domain); 594 domain = ntohl(domain);
596 if (!addr_domain_valid(domain)) 595 if (!tipc_addr_domain_valid(domain))
597 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 596 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
598 " (network address)"); 597 " (network address)");
599 598
600 if (!nodes) 599 if (!tipc_nodes)
601 return cfg_reply_none(); 600 return tipc_cfg_reply_none();
602 601
603 /* For now, get space for all other nodes 602 /* For now, get space for all other nodes
604 (will need to modify this when slave nodes are supported) */ 603 (will need to modify this when slave nodes are supported) */
605 604
606 buf = cfg_reply_alloc(TLV_SPACE(sizeof(node_info)) * 605 buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(node_info)) *
607 (tipc_max_nodes - 1)); 606 (tipc_max_nodes - 1));
608 if (!buf) 607 if (!buf)
609 return NULL; 608 return NULL;
610 609
611 /* Add TLVs for all nodes in scope */ 610 /* Add TLVs for all nodes in scope */
612 611
613 for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) { 612 for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
614 if (!in_scope(domain, n_ptr->addr)) 613 if (!in_scope(domain, n_ptr->addr))
615 continue; 614 continue;
616 node_info.addr = htonl(n_ptr->addr); 615 node_info.addr = htonl(n_ptr->addr);
617 node_info.up = htonl(node_is_up(n_ptr)); 616 node_info.up = htonl(tipc_node_is_up(n_ptr));
618 cfg_append_tlv(buf, TIPC_TLV_NODE_INFO, 617 tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
619 &node_info, sizeof(node_info)); 618 &node_info, sizeof(node_info));
620 } 619 }
621 620
622 return buf; 621 return buf;
623} 622}
624 623
625struct sk_buff *node_get_links(const void *req_tlv_area, int req_tlv_space) 624struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
626{ 625{
627 u32 domain; 626 u32 domain;
628 struct sk_buff *buf; 627 struct sk_buff *buf;
@@ -630,22 +629,22 @@ struct sk_buff *node_get_links(const void *req_tlv_area, int req_tlv_space)
630 struct tipc_link_info link_info; 629 struct tipc_link_info link_info;
631 630
632 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) 631 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
633 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); 632 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
634 633
635 domain = *(u32 *)TLV_DATA(req_tlv_area); 634 domain = *(u32 *)TLV_DATA(req_tlv_area);
636 domain = ntohl(domain); 635 domain = ntohl(domain);
637 if (!addr_domain_valid(domain)) 636 if (!tipc_addr_domain_valid(domain))
638 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 637 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
639 " (network address)"); 638 " (network address)");
640 639
641 if (!nodes) 640 if (!tipc_nodes)
642 return cfg_reply_none(); 641 return tipc_cfg_reply_none();
643 642
644 /* For now, get space for 2 links to all other nodes + bcast link 643 /* For now, get space for 2 links to all other nodes + bcast link
645 (will need to modify this when slave nodes are supported) */ 644 (will need to modify this when slave nodes are supported) */
646 645
647 buf = cfg_reply_alloc(TLV_SPACE(sizeof(link_info)) * 646 buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(link_info)) *
648 (2 * (tipc_max_nodes - 1) + 1)); 647 (2 * (tipc_max_nodes - 1) + 1));
649 if (!buf) 648 if (!buf)
650 return NULL; 649 return NULL;
651 650
@@ -654,12 +653,12 @@ struct sk_buff *node_get_links(const void *req_tlv_area, int req_tlv_space)
654 link_info.dest = tipc_own_addr & 0xfffff00; 653 link_info.dest = tipc_own_addr & 0xfffff00;
655 link_info.dest = htonl(link_info.dest); 654 link_info.dest = htonl(link_info.dest);
656 link_info.up = htonl(1); 655 link_info.up = htonl(1);
657 sprintf(link_info.str, bc_link_name); 656 sprintf(link_info.str, tipc_bclink_name);
658 cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info)); 657 tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
659 658
660 /* Add TLVs for any other links in scope */ 659 /* Add TLVs for any other links in scope */
661 660
662 for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) { 661 for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
663 u32 i; 662 u32 i;
664 663
665 if (!in_scope(domain, n_ptr->addr)) 664 if (!in_scope(domain, n_ptr->addr))
@@ -668,10 +667,10 @@ struct sk_buff *node_get_links(const void *req_tlv_area, int req_tlv_space)
668 if (!n_ptr->links[i]) 667 if (!n_ptr->links[i])
669 continue; 668 continue;
670 link_info.dest = htonl(n_ptr->addr); 669 link_info.dest = htonl(n_ptr->addr);
671 link_info.up = htonl(link_is_up(n_ptr->links[i])); 670 link_info.up = htonl(tipc_link_is_up(n_ptr->links[i]));
672 strcpy(link_info.str, n_ptr->links[i]->name); 671 strcpy(link_info.str, n_ptr->links[i]->name);
673 cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, 672 tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO,
674 &link_info, sizeof(link_info)); 673 &link_info, sizeof(link_info));
675 } 674 }
676 } 675 }
677 676
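
For orientation, the renamed node.c helpers form one lookup chain on the send path: tipc_node_select() resolves in-cluster destinations directly, while tipc_node_select_next_hop() falls back to a router picked by tipc_node_select_router(). A minimal sketch of a caller using that chain, assuming the TIPC-internal headers ("node.h") are included; the function name and error handling are illustrative only, not part of the patch:

/* Illustrative only: resolve a destination address to a reachable node.
 * tipc_node_select() returns the local node table entry for in-cluster
 * addresses and otherwise defers to tipc_node_select_next_hop(), which
 * may return a router node rather than the final destination.
 */
static int example_resolve_dest(u32 dest, u32 selector)
{
        struct node *n_ptr;

        n_ptr = tipc_node_select(dest, selector);
        if (!n_ptr)
                return -EHOSTUNREACH;   /* no direct link and no usable router */

        tipc_node_lock(n_ptr);
        if (!tipc_node_has_active_links(n_ptr)) {
                tipc_node_unlock(n_ptr);
                return -EHOSTUNREACH;
        }
        /* ... queue the message on one of n_ptr->active_links[] ... */
        tipc_node_unlock(n_ptr);
        return 0;
}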
diff --git a/net/tipc/node.h b/net/tipc/node.h
index b39442badccf..29f7ae6992d4 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -92,31 +92,31 @@ struct node {
92 } bclink; 92 } bclink;
93}; 93};
94 94
95extern struct node *nodes; 95extern struct node *tipc_nodes;
96extern u32 tipc_own_tag; 96extern u32 tipc_own_tag;
97 97
98struct node *node_create(u32 addr); 98struct node *tipc_node_create(u32 addr);
99void node_delete(struct node *n_ptr); 99void tipc_node_delete(struct node *n_ptr);
100struct node *node_attach_link(struct link *l_ptr); 100struct node *tipc_node_attach_link(struct link *l_ptr);
101void node_detach_link(struct node *n_ptr, struct link *l_ptr); 101void tipc_node_detach_link(struct node *n_ptr, struct link *l_ptr);
102void node_link_down(struct node *n_ptr, struct link *l_ptr); 102void tipc_node_link_down(struct node *n_ptr, struct link *l_ptr);
103void node_link_up(struct node *n_ptr, struct link *l_ptr); 103void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr);
104int node_has_active_links(struct node *n_ptr); 104int tipc_node_has_active_links(struct node *n_ptr);
105int node_has_redundant_links(struct node *n_ptr); 105int tipc_node_has_redundant_links(struct node *n_ptr);
106u32 node_select_router(struct node *n_ptr, u32 ref); 106u32 tipc_node_select_router(struct node *n_ptr, u32 ref);
107struct node *node_select_next_hop(u32 addr, u32 selector); 107struct node *tipc_node_select_next_hop(u32 addr, u32 selector);
108int node_is_up(struct node *n_ptr); 108int tipc_node_is_up(struct node *n_ptr);
109void node_add_router(struct node *n_ptr, u32 router); 109void tipc_node_add_router(struct node *n_ptr, u32 router);
110void node_remove_router(struct node *n_ptr, u32 router); 110void tipc_node_remove_router(struct node *n_ptr, u32 router);
111struct sk_buff *node_get_links(const void *req_tlv_area, int req_tlv_space); 111struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space);
112struct sk_buff *node_get_nodes(const void *req_tlv_area, int req_tlv_space); 112struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space);
113 113
114static inline struct node *node_find(u32 addr) 114static inline struct node *tipc_node_find(u32 addr)
115{ 115{
116 if (likely(in_own_cluster(addr))) 116 if (likely(in_own_cluster(addr)))
117 return local_nodes[tipc_node(addr)]; 117 return tipc_local_nodes[tipc_node(addr)];
118 else if (addr_domain_valid(addr)) { 118 else if (tipc_addr_domain_valid(addr)) {
119 struct cluster *c_ptr = cluster_find(addr); 119 struct cluster *c_ptr = tipc_cltr_find(addr);
120 120
121 if (c_ptr) 121 if (c_ptr)
122 return c_ptr->nodes[tipc_node(addr)]; 122 return c_ptr->nodes[tipc_node(addr)];
@@ -124,19 +124,19 @@ static inline struct node *node_find(u32 addr)
124 return 0; 124 return 0;
125} 125}
126 126
127static inline struct node *node_select(u32 addr, u32 selector) 127static inline struct node *tipc_node_select(u32 addr, u32 selector)
128{ 128{
129 if (likely(in_own_cluster(addr))) 129 if (likely(in_own_cluster(addr)))
130 return local_nodes[tipc_node(addr)]; 130 return tipc_local_nodes[tipc_node(addr)];
131 return node_select_next_hop(addr, selector); 131 return tipc_node_select_next_hop(addr, selector);
132} 132}
133 133
134static inline void node_lock(struct node *n_ptr) 134static inline void tipc_node_lock(struct node *n_ptr)
135{ 135{
136 spin_lock_bh(&n_ptr->lock); 136 spin_lock_bh(&n_ptr->lock);
137} 137}
138 138
139static inline void node_unlock(struct node *n_ptr) 139static inline void tipc_node_unlock(struct node *n_ptr)
140{ 140{
141 spin_unlock_bh(&n_ptr->lock); 141 spin_unlock_bh(&n_ptr->lock);
142} 142}
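
The inline helpers above keep the find/lock/inspect/unlock pattern used throughout the rest of the patch in one place. A hedged sketch of that pattern, with the wrapper name invented for illustration:

/* Illustrative wrapper (not in the patch): report whether a peer node
 * currently has two distinct active links, reading the state under the
 * per-node lock.
 */
static int example_peer_is_redundant(u32 peer_addr)
{
        struct node *n_ptr = tipc_node_find(peer_addr);
        int redundant = 0;

        if (n_ptr) {
                tipc_node_lock(n_ptr);
                redundant = tipc_node_has_redundant_links(n_ptr);
                tipc_node_unlock(n_ptr);
        }
        return redundant;
}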
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
index 79375927916f..afeea121d8be 100644
--- a/net/tipc/node_subscr.c
+++ b/net/tipc/node_subscr.c
@@ -41,39 +41,39 @@
41#include "addr.h" 41#include "addr.h"
42 42
43/** 43/**
44 * nodesub_subscribe - create "node down" subscription for specified node 44 * tipc_nodesub_subscribe - create "node down" subscription for specified node
45 */ 45 */
46 46
47void nodesub_subscribe(struct node_subscr *node_sub, u32 addr, 47void tipc_nodesub_subscribe(struct node_subscr *node_sub, u32 addr,
48 void *usr_handle, net_ev_handler handle_down) 48 void *usr_handle, net_ev_handler handle_down)
49{ 49{
50 node_sub->node = 0; 50 node_sub->node = 0;
51 if (addr == tipc_own_addr) 51 if (addr == tipc_own_addr)
52 return; 52 return;
53 if (!addr_node_valid(addr)) { 53 if (!tipc_addr_node_valid(addr)) {
54 warn("node_subscr with illegal %x\n", addr); 54 warn("node_subscr with illegal %x\n", addr);
55 return; 55 return;
56 } 56 }
57 57
58 node_sub->handle_node_down = handle_down; 58 node_sub->handle_node_down = handle_down;
59 node_sub->usr_handle = usr_handle; 59 node_sub->usr_handle = usr_handle;
60 node_sub->node = node_find(addr); 60 node_sub->node = tipc_node_find(addr);
61 assert(node_sub->node); 61 assert(node_sub->node);
62 node_lock(node_sub->node); 62 tipc_node_lock(node_sub->node);
63 list_add_tail(&node_sub->nodesub_list, &node_sub->node->nsub); 63 list_add_tail(&node_sub->nodesub_list, &node_sub->node->nsub);
64 node_unlock(node_sub->node); 64 tipc_node_unlock(node_sub->node);
65} 65}
66 66
67/** 67/**
68 * nodesub_unsubscribe - cancel "node down" subscription (if any) 68 * tipc_nodesub_unsubscribe - cancel "node down" subscription (if any)
69 */ 69 */
70 70
71void nodesub_unsubscribe(struct node_subscr *node_sub) 71void tipc_nodesub_unsubscribe(struct node_subscr *node_sub)
72{ 72{
73 if (!node_sub->node) 73 if (!node_sub->node)
74 return; 74 return;
75 75
76 node_lock(node_sub->node); 76 tipc_node_lock(node_sub->node);
77 list_del_init(&node_sub->nodesub_list); 77 list_del_init(&node_sub->nodesub_list);
78 node_unlock(node_sub->node); 78 tipc_node_unlock(node_sub->node);
79} 79}
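
Users of this API embed a struct node_subscr in their own state and register a callback that runs (via tipc_k_signal) when the peer node loses contact; tipc_connect2port() in port.c is the in-tree user. A minimal sketch of the pattern, with the owner struct and callback names invented for illustration:

struct example_owner {
        struct node_subscr peer_sub;    /* embedded subscription state */
        u32 peer_node;
};

/* Runs from tipc_k_signal() context once the peer node is lost. */
static void example_peer_down(unsigned long handle)
{
        struct example_owner *owner = (struct example_owner *)handle;

        /* tear down owner state tied to owner->peer_node here */
}

static void example_watch_peer(struct example_owner *owner)
{
        tipc_nodesub_subscribe(&owner->peer_sub, owner->peer_node,
                               owner, (net_ev_handler)example_peer_down);
        /* pair with tipc_nodesub_unsubscribe(&owner->peer_sub) on teardown */
}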
diff --git a/net/tipc/node_subscr.h b/net/tipc/node_subscr.h
index a3b87ac4859b..01751c4fbb43 100644
--- a/net/tipc/node_subscr.h
+++ b/net/tipc/node_subscr.h
@@ -56,8 +56,8 @@ struct node_subscr {
56 struct list_head nodesub_list; 56 struct list_head nodesub_list;
57}; 57};
58 58
59void nodesub_subscribe(struct node_subscr *node_sub, u32 addr, 59void tipc_nodesub_subscribe(struct node_subscr *node_sub, u32 addr,
60 void *usr_handle, net_ev_handler handle_down); 60 void *usr_handle, net_ev_handler handle_down);
61void nodesub_unsubscribe(struct node_subscr *node_sub); 61void tipc_nodesub_unsubscribe(struct node_subscr *node_sub);
62 62
63#endif 63#endif
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 66caca7abe92..72aae52bfec1 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -57,10 +57,10 @@
57static struct sk_buff *msg_queue_head = 0; 57static struct sk_buff *msg_queue_head = 0;
58static struct sk_buff *msg_queue_tail = 0; 58static struct sk_buff *msg_queue_tail = 0;
59 59
60spinlock_t port_list_lock = SPIN_LOCK_UNLOCKED; 60spinlock_t tipc_port_list_lock = SPIN_LOCK_UNLOCKED;
61static spinlock_t queue_lock = SPIN_LOCK_UNLOCKED; 61static spinlock_t queue_lock = SPIN_LOCK_UNLOCKED;
62 62
63LIST_HEAD(ports); 63static LIST_HEAD(ports);
64static void port_handle_node_down(unsigned long ref); 64static void port_handle_node_down(unsigned long ref);
65static struct sk_buff* port_build_self_abort_msg(struct port *,u32 err); 65static struct sk_buff* port_build_self_abort_msg(struct port *,u32 err);
66static struct sk_buff* port_build_peer_abort_msg(struct port *,u32 err); 66static struct sk_buff* port_build_peer_abort_msg(struct port *,u32 err);
@@ -107,7 +107,7 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
107 struct sk_buff *buf; 107 struct sk_buff *buf;
108 struct sk_buff *ibuf = NULL; 108 struct sk_buff *ibuf = NULL;
109 struct port_list dports = {0, NULL, }; 109 struct port_list dports = {0, NULL, };
110 struct port *oport = port_deref(ref); 110 struct port *oport = tipc_port_deref(ref);
111 int ext_targets; 111 int ext_targets;
112 int res; 112 int res;
113 113
@@ -129,8 +129,8 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
129 129
130 /* Figure out where to send multicast message */ 130 /* Figure out where to send multicast message */
131 131
132 ext_targets = nametbl_mc_translate(seq->type, seq->lower, seq->upper, 132 ext_targets = tipc_nametbl_mc_translate(seq->type, seq->lower, seq->upper,
133 TIPC_NODE_SCOPE, &dports); 133 TIPC_NODE_SCOPE, &dports);
134 134
135 /* Send message to destinations (duplicate it only if necessary) */ 135 /* Send message to destinations (duplicate it only if necessary) */
136 136
@@ -138,12 +138,12 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
138 if (dports.count != 0) { 138 if (dports.count != 0) {
139 ibuf = skb_copy(buf, GFP_ATOMIC); 139 ibuf = skb_copy(buf, GFP_ATOMIC);
140 if (ibuf == NULL) { 140 if (ibuf == NULL) {
141 port_list_free(&dports); 141 tipc_port_list_free(&dports);
142 buf_discard(buf); 142 buf_discard(buf);
143 return -ENOMEM; 143 return -ENOMEM;
144 } 144 }
145 } 145 }
146 res = bclink_send_msg(buf); 146 res = tipc_bclink_send_msg(buf);
147 if ((res < 0) && (dports.count != 0)) { 147 if ((res < 0) && (dports.count != 0)) {
148 buf_discard(ibuf); 148 buf_discard(ibuf);
149 } 149 }
@@ -153,20 +153,20 @@ int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
153 153
154 if (res >= 0) { 154 if (res >= 0) {
155 if (ibuf) 155 if (ibuf)
156 port_recv_mcast(ibuf, &dports); 156 tipc_port_recv_mcast(ibuf, &dports);
157 } else { 157 } else {
158 port_list_free(&dports); 158 tipc_port_list_free(&dports);
159 } 159 }
160 return res; 160 return res;
161} 161}
162 162
163/** 163/**
164 * port_recv_mcast - deliver multicast message to all destination ports 164 * tipc_port_recv_mcast - deliver multicast message to all destination ports
165 * 165 *
166 * If there is no port list, perform a lookup to create one 166 * If there is no port list, perform a lookup to create one
167 */ 167 */
168 168
169void port_recv_mcast(struct sk_buff *buf, struct port_list *dp) 169void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
170{ 170{
171 struct tipc_msg* msg; 171 struct tipc_msg* msg;
172 struct port_list dports = {0, NULL, }; 172 struct port_list dports = {0, NULL, };
@@ -179,7 +179,7 @@ void port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
179 /* Create destination port list, if one wasn't supplied */ 179 /* Create destination port list, if one wasn't supplied */
180 180
181 if (dp == NULL) { 181 if (dp == NULL) {
182 nametbl_mc_translate(msg_nametype(msg), 182 tipc_nametbl_mc_translate(msg_nametype(msg),
183 msg_namelower(msg), 183 msg_namelower(msg),
184 msg_nameupper(msg), 184 msg_nameupper(msg),
185 TIPC_CLUSTER_SCOPE, 185 TIPC_CLUSTER_SCOPE,
@@ -192,8 +192,8 @@ void port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
192 if (dp->count != 0) { 192 if (dp->count != 0) {
193 if (dp->count == 1) { 193 if (dp->count == 1) {
194 msg_set_destport(msg, dp->ports[0]); 194 msg_set_destport(msg, dp->ports[0]);
195 port_recv_msg(buf); 195 tipc_port_recv_msg(buf);
196 port_list_free(dp); 196 tipc_port_list_free(dp);
197 return; 197 return;
198 } 198 }
199 for (; cnt < dp->count; cnt++) { 199 for (; cnt < dp->count; cnt++) {
@@ -209,12 +209,12 @@ void port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
209 item = item->next; 209 item = item->next;
210 } 210 }
211 msg_set_destport(buf_msg(b),item->ports[index]); 211 msg_set_destport(buf_msg(b),item->ports[index]);
212 port_recv_msg(b); 212 tipc_port_recv_msg(b);
213 } 213 }
214 } 214 }
215exit: 215exit:
216 buf_discard(buf); 216 buf_discard(buf);
217 port_list_free(dp); 217 tipc_port_list_free(dp);
218} 218}
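
Taken together, the multicast hunks preserve a copy-only-when-needed rule: the original buffer is handed to the broadcast link, and a clone is made only if the name-table lookup also found local destination ports. A consolidated sketch of that ownership flow, using the renamed calls above; the wrapper function itself is illustrative:

/* Illustrative: send one multicast buffer, duplicating it only when it
 * must reach both the broadcast link and local destination ports.
 */
static int example_mcast_send(struct sk_buff *buf, struct port_list *dports)
{
        struct sk_buff *ibuf = NULL;
        int res;

        if (dports->count != 0) {
                ibuf = skb_copy(buf, GFP_ATOMIC);       /* copy for local delivery */
                if (ibuf == NULL) {
                        tipc_port_list_free(dports);
                        buf_discard(buf);
                        return -ENOMEM;
                }
        }

        res = tipc_bclink_send_msg(buf);                /* consumes buf */
        if ((res < 0) && (dports->count != 0))
                buf_discard(ibuf);

        if (res >= 0) {
                if (ibuf)
                        tipc_port_recv_mcast(ibuf, dports); /* consumes ibuf, frees dports */
        } else {
                tipc_port_list_free(dports);
        }
        return res;
}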
219 219
220/** 220/**
@@ -238,14 +238,14 @@ u32 tipc_createport_raw(void *usr_handle,
238 return 0; 238 return 0;
239 } 239 }
240 memset(p_ptr, 0, sizeof(*p_ptr)); 240 memset(p_ptr, 0, sizeof(*p_ptr));
241 ref = ref_acquire(p_ptr, &p_ptr->publ.lock); 241 ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock);
242 if (!ref) { 242 if (!ref) {
243 warn("Reference Table Exhausted\n"); 243 warn("Reference Table Exhausted\n");
244 kfree(p_ptr); 244 kfree(p_ptr);
245 return 0; 245 return 0;
246 } 246 }
247 247
248 port_lock(ref); 248 tipc_port_lock(ref);
249 p_ptr->publ.ref = ref; 249 p_ptr->publ.ref = ref;
250 msg = &p_ptr->publ.phdr; 250 msg = &p_ptr->publ.phdr;
251 msg_init(msg, DATA_LOW, TIPC_NAMED_MSG, TIPC_OK, LONG_H_SIZE, 0); 251 msg_init(msg, DATA_LOW, TIPC_NAMED_MSG, TIPC_OK, LONG_H_SIZE, 0);
@@ -264,12 +264,12 @@ u32 tipc_createport_raw(void *usr_handle,
264 p_ptr->wakeup = wakeup; 264 p_ptr->wakeup = wakeup;
265 p_ptr->user_port = 0; 265 p_ptr->user_port = 0;
266 k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref); 266 k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref);
267 spin_lock_bh(&port_list_lock); 267 spin_lock_bh(&tipc_port_list_lock);
268 INIT_LIST_HEAD(&p_ptr->publications); 268 INIT_LIST_HEAD(&p_ptr->publications);
269 INIT_LIST_HEAD(&p_ptr->port_list); 269 INIT_LIST_HEAD(&p_ptr->port_list);
270 list_add_tail(&p_ptr->port_list, &ports); 270 list_add_tail(&p_ptr->port_list, &ports);
271 spin_unlock_bh(&port_list_lock); 271 spin_unlock_bh(&tipc_port_list_lock);
272 port_unlock(p_ptr); 272 tipc_port_unlock(p_ptr);
273 return ref; 273 return ref;
274} 274}
275 275
@@ -279,31 +279,31 @@ int tipc_deleteport(u32 ref)
279 struct sk_buff *buf = 0; 279 struct sk_buff *buf = 0;
280 280
281 tipc_withdraw(ref, 0, 0); 281 tipc_withdraw(ref, 0, 0);
282 p_ptr = port_lock(ref); 282 p_ptr = tipc_port_lock(ref);
283 if (!p_ptr) 283 if (!p_ptr)
284 return -EINVAL; 284 return -EINVAL;
285 285
286 ref_discard(ref); 286 tipc_ref_discard(ref);
287 port_unlock(p_ptr); 287 tipc_port_unlock(p_ptr);
288 288
289 k_cancel_timer(&p_ptr->timer); 289 k_cancel_timer(&p_ptr->timer);
290 if (p_ptr->publ.connected) { 290 if (p_ptr->publ.connected) {
291 buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT); 291 buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
292 nodesub_unsubscribe(&p_ptr->subscription); 292 tipc_nodesub_unsubscribe(&p_ptr->subscription);
293 } 293 }
294 if (p_ptr->user_port) { 294 if (p_ptr->user_port) {
295 reg_remove_port(p_ptr->user_port); 295 tipc_reg_remove_port(p_ptr->user_port);
296 kfree(p_ptr->user_port); 296 kfree(p_ptr->user_port);
297 } 297 }
298 298
299 spin_lock_bh(&port_list_lock); 299 spin_lock_bh(&tipc_port_list_lock);
300 list_del(&p_ptr->port_list); 300 list_del(&p_ptr->port_list);
301 list_del(&p_ptr->wait_list); 301 list_del(&p_ptr->wait_list);
302 spin_unlock_bh(&port_list_lock); 302 spin_unlock_bh(&tipc_port_list_lock);
303 k_term_timer(&p_ptr->timer); 303 k_term_timer(&p_ptr->timer);
304 kfree(p_ptr); 304 kfree(p_ptr);
305 dbg("Deleted port %u\n", ref); 305 dbg("Deleted port %u\n", ref);
306 net_route_msg(buf); 306 tipc_net_route_msg(buf);
307 return TIPC_OK; 307 return TIPC_OK;
308} 308}
309 309
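
tipc_createport_raw() returns a reference that later calls revalidate through tipc_port_lock()/tipc_port_deref(), and tipc_deleteport() is the matching teardown (it discards the reference and cancels any node-down subscription). A hedged sketch of that lifecycle; the two callbacks and the chosen importance level are illustrative:

/* Illustrative receive callback: a real user would consume the message. */
static u32 example_dispatcher(struct tipc_port *unused, struct sk_buff *buf)
{
        buf_discard(buf);
        return TIPC_OK;
}

/* Illustrative wakeup callback: invoked once port congestion clears. */
static void example_wakeup(struct tipc_port *p_ptr)
{
}

static int example_port_lifecycle(void)
{
        u32 ref;

        ref = tipc_createport_raw(NULL, example_dispatcher, example_wakeup,
                                  TIPC_LOW_IMPORTANCE);
        if (!ref)
                return -ENOMEM;  /* reference table exhausted or allocation failed */

        /* ... connect, publish, or send using ref ... */

        return tipc_deleteport(ref);
}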
@@ -315,7 +315,7 @@ int tipc_deleteport(u32 ref)
315 315
316struct tipc_port *tipc_get_port(const u32 ref) 316struct tipc_port *tipc_get_port(const u32 ref)
317{ 317{
318 return (struct tipc_port *)ref_deref(ref); 318 return (struct tipc_port *)tipc_ref_deref(ref);
319} 319}
320 320
321/** 321/**
@@ -327,11 +327,11 @@ void *tipc_get_handle(const u32 ref)
327 struct port *p_ptr; 327 struct port *p_ptr;
328 void * handle; 328 void * handle;
329 329
330 p_ptr = port_lock(ref); 330 p_ptr = tipc_port_lock(ref);
331 if (!p_ptr) 331 if (!p_ptr)
332 return 0; 332 return 0;
333 handle = p_ptr->publ.usr_handle; 333 handle = p_ptr->publ.usr_handle;
334 port_unlock(p_ptr); 334 tipc_port_unlock(p_ptr);
335 return handle; 335 return handle;
336} 336}
337 337
@@ -344,7 +344,7 @@ int tipc_portunreliable(u32 ref, unsigned int *isunreliable)
344{ 344{
345 struct port *p_ptr; 345 struct port *p_ptr;
346 346
347 p_ptr = port_lock(ref); 347 p_ptr = tipc_port_lock(ref);
348 if (!p_ptr) 348 if (!p_ptr)
349 return -EINVAL; 349 return -EINVAL;
350 *isunreliable = port_unreliable(p_ptr); 350 *isunreliable = port_unreliable(p_ptr);
@@ -356,11 +356,11 @@ int tipc_set_portunreliable(u32 ref, unsigned int isunreliable)
356{ 356{
357 struct port *p_ptr; 357 struct port *p_ptr;
358 358
359 p_ptr = port_lock(ref); 359 p_ptr = tipc_port_lock(ref);
360 if (!p_ptr) 360 if (!p_ptr)
361 return -EINVAL; 361 return -EINVAL;
362 msg_set_src_droppable(&p_ptr->publ.phdr, (isunreliable != 0)); 362 msg_set_src_droppable(&p_ptr->publ.phdr, (isunreliable != 0));
363 port_unlock(p_ptr); 363 tipc_port_unlock(p_ptr);
364 return TIPC_OK; 364 return TIPC_OK;
365} 365}
366 366
@@ -373,7 +373,7 @@ int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable)
373{ 373{
374 struct port *p_ptr; 374 struct port *p_ptr;
375 375
376 p_ptr = port_lock(ref); 376 p_ptr = tipc_port_lock(ref);
377 if (!p_ptr) 377 if (!p_ptr)
378 return -EINVAL; 378 return -EINVAL;
379 *isunrejectable = port_unreturnable(p_ptr); 379 *isunrejectable = port_unreturnable(p_ptr);
@@ -385,11 +385,11 @@ int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
385{ 385{
386 struct port *p_ptr; 386 struct port *p_ptr;
387 387
388 p_ptr = port_lock(ref); 388 p_ptr = tipc_port_lock(ref);
389 if (!p_ptr) 389 if (!p_ptr)
390 return -EINVAL; 390 return -EINVAL;
391 msg_set_dest_droppable(&p_ptr->publ.phdr, (isunrejectable != 0)); 391 msg_set_dest_droppable(&p_ptr->publ.phdr, (isunrejectable != 0));
392 port_unlock(p_ptr); 392 tipc_port_unlock(p_ptr);
393 return TIPC_OK; 393 return TIPC_OK;
394} 394}
395 395
@@ -476,25 +476,25 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
476 /* send self-abort message when rejecting on a connected port */ 476 /* send self-abort message when rejecting on a connected port */
477 if (msg_connected(msg)) { 477 if (msg_connected(msg)) {
478 struct sk_buff *abuf = 0; 478 struct sk_buff *abuf = 0;
479 struct port *p_ptr = port_lock(msg_destport(msg)); 479 struct port *p_ptr = tipc_port_lock(msg_destport(msg));
480 480
481 if (p_ptr) { 481 if (p_ptr) {
482 if (p_ptr->publ.connected) 482 if (p_ptr->publ.connected)
483 abuf = port_build_self_abort_msg(p_ptr, err); 483 abuf = port_build_self_abort_msg(p_ptr, err);
484 port_unlock(p_ptr); 484 tipc_port_unlock(p_ptr);
485 } 485 }
486 net_route_msg(abuf); 486 tipc_net_route_msg(abuf);
487 } 487 }
488 488
489 /* send rejected message */ 489 /* send rejected message */
490 buf_discard(buf); 490 buf_discard(buf);
491 net_route_msg(rbuf); 491 tipc_net_route_msg(rbuf);
492 return data_sz; 492 return data_sz;
493} 493}
494 494
495int port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr, 495int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
496 struct iovec const *msg_sect, u32 num_sect, 496 struct iovec const *msg_sect, u32 num_sect,
497 int err) 497 int err)
498{ 498{
499 struct sk_buff *buf; 499 struct sk_buff *buf;
500 int res; 500 int res;
@@ -509,7 +509,7 @@ int port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
509 509
510static void port_timeout(unsigned long ref) 510static void port_timeout(unsigned long ref)
511{ 511{
512 struct port *p_ptr = port_lock(ref); 512 struct port *p_ptr = tipc_port_lock(ref);
513 struct sk_buff *buf = 0; 513 struct sk_buff *buf = 0;
514 514
515 if (!p_ptr || !p_ptr->publ.connected) 515 if (!p_ptr || !p_ptr->publ.connected)
@@ -532,21 +532,21 @@ static void port_timeout(unsigned long ref)
532 p_ptr->probing_state = PROBING; 532 p_ptr->probing_state = PROBING;
533 k_start_timer(&p_ptr->timer, p_ptr->probing_interval); 533 k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
534 } 534 }
535 port_unlock(p_ptr); 535 tipc_port_unlock(p_ptr);
536 net_route_msg(buf); 536 tipc_net_route_msg(buf);
537} 537}
538 538
539 539
540static void port_handle_node_down(unsigned long ref) 540static void port_handle_node_down(unsigned long ref)
541{ 541{
542 struct port *p_ptr = port_lock(ref); 542 struct port *p_ptr = tipc_port_lock(ref);
543 struct sk_buff* buf = 0; 543 struct sk_buff* buf = 0;
544 544
545 if (!p_ptr) 545 if (!p_ptr)
546 return; 546 return;
547 buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_NODE); 547 buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_NODE);
548 port_unlock(p_ptr); 548 tipc_port_unlock(p_ptr);
549 net_route_msg(buf); 549 tipc_net_route_msg(buf);
550} 550}
551 551
552 552
@@ -589,10 +589,10 @@ static struct sk_buff *port_build_peer_abort_msg(struct port *p_ptr, u32 err)
589 0); 589 0);
590} 590}
591 591
592void port_recv_proto_msg(struct sk_buff *buf) 592void tipc_port_recv_proto_msg(struct sk_buff *buf)
593{ 593{
594 struct tipc_msg *msg = buf_msg(buf); 594 struct tipc_msg *msg = buf_msg(buf);
595 struct port *p_ptr = port_lock(msg_destport(msg)); 595 struct port *p_ptr = tipc_port_lock(msg_destport(msg));
596 u32 err = TIPC_OK; 596 u32 err = TIPC_OK;
597 struct sk_buff *r_buf = 0; 597 struct sk_buff *r_buf = 0;
598 struct sk_buff *abort_buf = 0; 598 struct sk_buff *abort_buf = 0;
@@ -615,11 +615,11 @@ void port_recv_proto_msg(struct sk_buff *buf)
615 } 615 }
616 } 616 }
617 if (msg_type(msg) == CONN_ACK) { 617 if (msg_type(msg) == CONN_ACK) {
618 int wakeup = port_congested(p_ptr) && 618 int wakeup = tipc_port_congested(p_ptr) &&
619 p_ptr->publ.congested && 619 p_ptr->publ.congested &&
620 p_ptr->wakeup; 620 p_ptr->wakeup;
621 p_ptr->acked += msg_msgcnt(msg); 621 p_ptr->acked += msg_msgcnt(msg);
622 if (port_congested(p_ptr)) 622 if (tipc_port_congested(p_ptr))
623 goto exit; 623 goto exit;
624 p_ptr->publ.congested = 0; 624 p_ptr->publ.congested = 0;
625 if (!wakeup) 625 if (!wakeup)
@@ -659,9 +659,9 @@ void port_recv_proto_msg(struct sk_buff *buf)
659 port_incr_out_seqno(p_ptr); 659 port_incr_out_seqno(p_ptr);
660exit: 660exit:
661 if (p_ptr) 661 if (p_ptr)
662 port_unlock(p_ptr); 662 tipc_port_unlock(p_ptr);
663 net_route_msg(r_buf); 663 tipc_net_route_msg(r_buf);
664 net_route_msg(abort_buf); 664 tipc_net_route_msg(abort_buf);
665 buf_discard(buf); 665 buf_discard(buf);
666} 666}
667 667
@@ -704,7 +704,7 @@ static void port_print(struct port *p_ptr, struct print_buf *buf, int full_id)
704 704
705#define MAX_PORT_QUERY 32768 705#define MAX_PORT_QUERY 32768
706 706
707struct sk_buff *port_get_ports(void) 707struct sk_buff *tipc_port_get_ports(void)
708{ 708{
709 struct sk_buff *buf; 709 struct sk_buff *buf;
710 struct tlv_desc *rep_tlv; 710 struct tlv_desc *rep_tlv;
@@ -712,20 +712,20 @@ struct sk_buff *port_get_ports(void)
712 struct port *p_ptr; 712 struct port *p_ptr;
713 int str_len; 713 int str_len;
714 714
715 buf = cfg_reply_alloc(TLV_SPACE(MAX_PORT_QUERY)); 715 buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_PORT_QUERY));
716 if (!buf) 716 if (!buf)
717 return NULL; 717 return NULL;
718 rep_tlv = (struct tlv_desc *)buf->data; 718 rep_tlv = (struct tlv_desc *)buf->data;
719 719
720 printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_QUERY); 720 tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_QUERY);
721 spin_lock_bh(&port_list_lock); 721 spin_lock_bh(&tipc_port_list_lock);
722 list_for_each_entry(p_ptr, &ports, port_list) { 722 list_for_each_entry(p_ptr, &ports, port_list) {
723 spin_lock_bh(p_ptr->publ.lock); 723 spin_lock_bh(p_ptr->publ.lock);
724 port_print(p_ptr, &pb, 0); 724 port_print(p_ptr, &pb, 0);
725 spin_unlock_bh(p_ptr->publ.lock); 725 spin_unlock_bh(p_ptr->publ.lock);
726 } 726 }
727 spin_unlock_bh(&port_list_lock); 727 spin_unlock_bh(&tipc_port_list_lock);
728 str_len = printbuf_validate(&pb); 728 str_len = tipc_printbuf_validate(&pb);
729 729
730 skb_put(buf, TLV_SPACE(str_len)); 730 skb_put(buf, TLV_SPACE(str_len));
731 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); 731 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -752,22 +752,22 @@ struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space)
752 ref = *(u32 *)TLV_DATA(req_tlv_area); 752 ref = *(u32 *)TLV_DATA(req_tlv_area);
753 ref = ntohl(ref); 753 ref = ntohl(ref);
754 754
755 p_ptr = port_lock(ref); 755 p_ptr = tipc_port_lock(ref);
756 if (!p_ptr) 756 if (!p_ptr)
757 return cfg_reply_error_string("port not found"); 757 return cfg_reply_error_string("port not found");
758 758
759 buf = cfg_reply_alloc(TLV_SPACE(MAX_PORT_STATS)); 759 buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_PORT_STATS));
760 if (!buf) { 760 if (!buf) {
761 port_unlock(p_ptr); 761 tipc_port_unlock(p_ptr);
762 return NULL; 762 return NULL;
763 } 763 }
764 rep_tlv = (struct tlv_desc *)buf->data; 764 rep_tlv = (struct tlv_desc *)buf->data;
765 765
766 printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_STATS); 766 tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_STATS);
767 port_print(p_ptr, &pb, 1); 767 port_print(p_ptr, &pb, 1);
768 /* NEED TO FILL IN ADDITIONAL PORT STATISTICS HERE */ 768 /* NEED TO FILL IN ADDITIONAL PORT STATISTICS HERE */
769 port_unlock(p_ptr); 769 tipc_port_unlock(p_ptr);
770 str_len = printbuf_validate(&pb); 770 str_len = tipc_printbuf_validate(&pb);
771 771
772 skb_put(buf, TLV_SPACE(str_len)); 772 skb_put(buf, TLV_SPACE(str_len));
773 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); 773 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -777,19 +777,19 @@ struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space)
777 777
778#endif 778#endif
779 779
780void port_reinit(void) 780void tipc_port_reinit(void)
781{ 781{
782 struct port *p_ptr; 782 struct port *p_ptr;
783 struct tipc_msg *msg; 783 struct tipc_msg *msg;
784 784
785 spin_lock_bh(&port_list_lock); 785 spin_lock_bh(&tipc_port_list_lock);
786 list_for_each_entry(p_ptr, &ports, port_list) { 786 list_for_each_entry(p_ptr, &ports, port_list) {
787 msg = &p_ptr->publ.phdr; 787 msg = &p_ptr->publ.phdr;
788 if (msg_orignode(msg) == tipc_own_addr) 788 if (msg_orignode(msg) == tipc_own_addr)
789 break; 789 break;
790 msg_set_orignode(msg, tipc_own_addr); 790 msg_set_orignode(msg, tipc_own_addr);
791 } 791 }
792 spin_unlock_bh(&port_list_lock); 792 spin_unlock_bh(&tipc_port_list_lock);
793} 793}
794 794
795 795
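
Both tipc_port_reinit() and tipc_port_get_ports() walk the port list the same way: tipc_port_list_lock guards the (now file-local) list, and each port's publ.lock guards per-port state. A sketch of that two-level pattern as it would appear inside port.c; the wrapper below is illustrative:

/* Illustrative, port.c-internal: visit every port while holding the
 * list lock, taking each port's own lock around its state.
 */
static void example_for_each_port(void (*fn)(struct port *p_ptr))
{
        struct port *p_ptr;

        spin_lock_bh(&tipc_port_list_lock);
        list_for_each_entry(p_ptr, &ports, port_list) {
                spin_lock_bh(p_ptr->publ.lock);
                fn(p_ptr);
                spin_unlock_bh(p_ptr->publ.lock);
        }
        spin_unlock_bh(&tipc_port_list_lock);
}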
@@ -820,7 +820,7 @@ static void port_dispatcher_sigh(void *dummy)
820 struct tipc_msg *msg = buf_msg(buf); 820 struct tipc_msg *msg = buf_msg(buf);
821 u32 dref = msg_destport(msg); 821 u32 dref = msg_destport(msg);
822 822
823 p_ptr = port_lock(dref); 823 p_ptr = tipc_port_lock(dref);
824 if (!p_ptr) { 824 if (!p_ptr) {
825 /* Port deleted while msg in queue */ 825 /* Port deleted while msg in queue */
826 tipc_reject_msg(buf, TIPC_ERR_NO_PORT); 826 tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
@@ -976,7 +976,7 @@ static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
976 msg_queue_tail = buf; 976 msg_queue_tail = buf;
977 } else { 977 } else {
978 msg_queue_tail = msg_queue_head = buf; 978 msg_queue_tail = msg_queue_head = buf;
979 k_signal((Handler)port_dispatcher_sigh, 0); 979 tipc_k_signal((Handler)port_dispatcher_sigh, 0);
980 } 980 }
981 spin_unlock_bh(&queue_lock); 981 spin_unlock_bh(&queue_lock);
982 return TIPC_OK; 982 return TIPC_OK;
@@ -994,14 +994,14 @@ static void port_wakeup_sh(unsigned long ref)
994 tipc_continue_event cb = 0; 994 tipc_continue_event cb = 0;
995 void *uh = 0; 995 void *uh = 0;
996 996
997 p_ptr = port_lock(ref); 997 p_ptr = tipc_port_lock(ref);
998 if (p_ptr) { 998 if (p_ptr) {
999 up_ptr = p_ptr->user_port; 999 up_ptr = p_ptr->user_port;
1000 if (up_ptr) { 1000 if (up_ptr) {
1001 cb = up_ptr->continue_event_cb; 1001 cb = up_ptr->continue_event_cb;
1002 uh = up_ptr->usr_handle; 1002 uh = up_ptr->usr_handle;
1003 } 1003 }
1004 port_unlock(p_ptr); 1004 tipc_port_unlock(p_ptr);
1005 } 1005 }
1006 if (cb) 1006 if (cb)
1007 cb(uh, ref); 1007 cb(uh, ref);
@@ -1010,7 +1010,7 @@ static void port_wakeup_sh(unsigned long ref)
1010 1010
1011static void port_wakeup(struct tipc_port *p_ptr) 1011static void port_wakeup(struct tipc_port *p_ptr)
1012{ 1012{
1013 k_signal((Handler)port_wakeup_sh, p_ptr->ref); 1013 tipc_k_signal((Handler)port_wakeup_sh, p_ptr->ref);
1014} 1014}
1015 1015
1016void tipc_acknowledge(u32 ref, u32 ack) 1016void tipc_acknowledge(u32 ref, u32 ack)
@@ -1018,7 +1018,7 @@ void tipc_acknowledge(u32 ref, u32 ack)
1018 struct port *p_ptr; 1018 struct port *p_ptr;
1019 struct sk_buff *buf = 0; 1019 struct sk_buff *buf = 0;
1020 1020
1021 p_ptr = port_lock(ref); 1021 p_ptr = tipc_port_lock(ref);
1022 if (!p_ptr) 1022 if (!p_ptr)
1023 return; 1023 return;
1024 if (p_ptr->publ.connected) { 1024 if (p_ptr->publ.connected) {
@@ -1033,8 +1033,8 @@ void tipc_acknowledge(u32 ref, u32 ack)
1033 port_out_seqno(p_ptr), 1033 port_out_seqno(p_ptr),
1034 ack); 1034 ack);
1035 } 1035 }
1036 port_unlock(p_ptr); 1036 tipc_port_unlock(p_ptr);
1037 net_route_msg(buf); 1037 tipc_net_route_msg(buf);
1038} 1038}
1039 1039
1040/* 1040/*
@@ -1063,7 +1063,7 @@ int tipc_createport(u32 user_ref,
1063 return -ENOMEM; 1063 return -ENOMEM;
1064 } 1064 }
1065 ref = tipc_createport_raw(0, port_dispatcher, port_wakeup, importance); 1065 ref = tipc_createport_raw(0, port_dispatcher, port_wakeup, importance);
1066 p_ptr = port_lock(ref); 1066 p_ptr = tipc_port_lock(ref);
1067 if (!p_ptr) { 1067 if (!p_ptr) {
1068 kfree(up_ptr); 1068 kfree(up_ptr);
1069 return -ENOMEM; 1069 return -ENOMEM;
@@ -1081,10 +1081,10 @@ int tipc_createport(u32 user_ref,
1081 up_ptr->conn_msg_cb = conn_msg_cb; 1081 up_ptr->conn_msg_cb = conn_msg_cb;
1082 up_ptr->continue_event_cb = continue_event_cb; 1082 up_ptr->continue_event_cb = continue_event_cb;
1083 INIT_LIST_HEAD(&up_ptr->uport_list); 1083 INIT_LIST_HEAD(&up_ptr->uport_list);
1084 reg_add_port(up_ptr); 1084 tipc_reg_add_port(up_ptr);
1085 *portref = p_ptr->publ.ref; 1085 *portref = p_ptr->publ.ref;
1086 dbg(" tipc_createport: %x with ref %u\n", p_ptr, p_ptr->publ.ref); 1086 dbg(" tipc_createport: %x with ref %u\n", p_ptr, p_ptr->publ.ref);
1087 port_unlock(p_ptr); 1087 tipc_port_unlock(p_ptr);
1088 return TIPC_OK; 1088 return TIPC_OK;
1089} 1089}
1090 1090
@@ -1099,7 +1099,7 @@ int tipc_portimportance(u32 ref, unsigned int *importance)
1099{ 1099{
1100 struct port *p_ptr; 1100 struct port *p_ptr;
1101 1101
1102 p_ptr = port_lock(ref); 1102 p_ptr = tipc_port_lock(ref);
1103 if (!p_ptr) 1103 if (!p_ptr)
1104 return -EINVAL; 1104 return -EINVAL;
1105 *importance = (unsigned int)msg_importance(&p_ptr->publ.phdr); 1105 *importance = (unsigned int)msg_importance(&p_ptr->publ.phdr);
@@ -1114,7 +1114,7 @@ int tipc_set_portimportance(u32 ref, unsigned int imp)
1114 if (imp > TIPC_CRITICAL_IMPORTANCE) 1114 if (imp > TIPC_CRITICAL_IMPORTANCE)
1115 return -EINVAL; 1115 return -EINVAL;
1116 1116
1117 p_ptr = port_lock(ref); 1117 p_ptr = tipc_port_lock(ref);
1118 if (!p_ptr) 1118 if (!p_ptr)
1119 return -EINVAL; 1119 return -EINVAL;
1120 msg_set_importance(&p_ptr->publ.phdr, (u32)imp); 1120 msg_set_importance(&p_ptr->publ.phdr, (u32)imp);
@@ -1130,7 +1130,7 @@ int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1130 u32 key; 1130 u32 key;
1131 int res = -EINVAL; 1131 int res = -EINVAL;
1132 1132
1133 p_ptr = port_lock(ref); 1133 p_ptr = tipc_port_lock(ref);
1134 dbg("tipc_publ %u, p_ptr = %x, conn = %x, scope = %x, " 1134 dbg("tipc_publ %u, p_ptr = %x, conn = %x, scope = %x, "
1135 "lower = %u, upper = %u\n", 1135 "lower = %u, upper = %u\n",
1136 ref, p_ptr, p_ptr->publ.connected, scope, seq->lower, seq->upper); 1136 ref, p_ptr, p_ptr->publ.connected, scope, seq->lower, seq->upper);
@@ -1147,8 +1147,8 @@ int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1147 res = -EADDRINUSE; 1147 res = -EADDRINUSE;
1148 goto exit; 1148 goto exit;
1149 } 1149 }
1150 publ = nametbl_publish(seq->type, seq->lower, seq->upper, 1150 publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
1151 scope, p_ptr->publ.ref, key); 1151 scope, p_ptr->publ.ref, key);
1152 if (publ) { 1152 if (publ) {
1153 list_add(&publ->pport_list, &p_ptr->publications); 1153 list_add(&publ->pport_list, &p_ptr->publications);
1154 p_ptr->pub_count++; 1154 p_ptr->pub_count++;
@@ -1156,7 +1156,7 @@ int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1156 res = TIPC_OK; 1156 res = TIPC_OK;
1157 } 1157 }
1158exit: 1158exit:
1159 port_unlock(p_ptr); 1159 tipc_port_unlock(p_ptr);
1160 return res; 1160 return res;
1161} 1161}
1162 1162
@@ -1167,7 +1167,7 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1167 struct publication *tpubl; 1167 struct publication *tpubl;
1168 int res = -EINVAL; 1168 int res = -EINVAL;
1169 1169
1170 p_ptr = port_lock(ref); 1170 p_ptr = tipc_port_lock(ref);
1171 if (!p_ptr) 1171 if (!p_ptr)
1172 return -EINVAL; 1172 return -EINVAL;
1173 if (!p_ptr->publ.published) 1173 if (!p_ptr->publ.published)
@@ -1175,8 +1175,8 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1175 if (!seq) { 1175 if (!seq) {
1176 list_for_each_entry_safe(publ, tpubl, 1176 list_for_each_entry_safe(publ, tpubl,
1177 &p_ptr->publications, pport_list) { 1177 &p_ptr->publications, pport_list) {
1178 nametbl_withdraw(publ->type, publ->lower, 1178 tipc_nametbl_withdraw(publ->type, publ->lower,
1179 publ->ref, publ->key); 1179 publ->ref, publ->key);
1180 } 1180 }
1181 res = TIPC_OK; 1181 res = TIPC_OK;
1182 } else { 1182 } else {
@@ -1190,8 +1190,8 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1190 continue; 1190 continue;
1191 if (publ->upper != seq->upper) 1191 if (publ->upper != seq->upper)
1192 break; 1192 break;
1193 nametbl_withdraw(publ->type, publ->lower, 1193 tipc_nametbl_withdraw(publ->type, publ->lower,
1194 publ->ref, publ->key); 1194 publ->ref, publ->key);
1195 res = TIPC_OK; 1195 res = TIPC_OK;
1196 break; 1196 break;
1197 } 1197 }
@@ -1199,7 +1199,7 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1199 if (list_empty(&p_ptr->publications)) 1199 if (list_empty(&p_ptr->publications))
1200 p_ptr->publ.published = 0; 1200 p_ptr->publ.published = 0;
1201exit: 1201exit:
1202 port_unlock(p_ptr); 1202 tipc_port_unlock(p_ptr);
1203 return res; 1203 return res;
1204} 1204}
1205 1205
@@ -1209,7 +1209,7 @@ int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
1209 struct tipc_msg *msg; 1209 struct tipc_msg *msg;
1210 int res = -EINVAL; 1210 int res = -EINVAL;
1211 1211
1212 p_ptr = port_lock(ref); 1212 p_ptr = tipc_port_lock(ref);
1213 if (!p_ptr) 1213 if (!p_ptr)
1214 return -EINVAL; 1214 return -EINVAL;
1215 if (p_ptr->publ.published || p_ptr->publ.connected) 1215 if (p_ptr->publ.published || p_ptr->publ.connected)
@@ -1234,13 +1234,13 @@ int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
1234 p_ptr->publ.connected = 1; 1234 p_ptr->publ.connected = 1;
1235 k_start_timer(&p_ptr->timer, p_ptr->probing_interval); 1235 k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
1236 1236
1237 nodesub_subscribe(&p_ptr->subscription,peer->node, 1237 tipc_nodesub_subscribe(&p_ptr->subscription,peer->node,
1238 (void *)(unsigned long)ref, 1238 (void *)(unsigned long)ref,
1239 (net_ev_handler)port_handle_node_down); 1239 (net_ev_handler)port_handle_node_down);
1240 res = TIPC_OK; 1240 res = TIPC_OK;
1241exit: 1241exit:
1242 port_unlock(p_ptr); 1242 tipc_port_unlock(p_ptr);
1243 p_ptr->max_pkt = link_get_max_pkt(peer->node, ref); 1243 p_ptr->max_pkt = tipc_link_get_max_pkt(peer->node, ref);
1244 return res; 1244 return res;
1245} 1245}
1246 1246
@@ -1254,16 +1254,16 @@ int tipc_disconnect(u32 ref)
1254 struct port *p_ptr; 1254 struct port *p_ptr;
1255 int res = -ENOTCONN; 1255 int res = -ENOTCONN;
1256 1256
1257 p_ptr = port_lock(ref); 1257 p_ptr = tipc_port_lock(ref);
1258 if (!p_ptr) 1258 if (!p_ptr)
1259 return -EINVAL; 1259 return -EINVAL;
1260 if (p_ptr->publ.connected) { 1260 if (p_ptr->publ.connected) {
1261 p_ptr->publ.connected = 0; 1261 p_ptr->publ.connected = 0;
1262 /* let timer expire on its own to avoid deadlock! */ 1262 /* let timer expire on its own to avoid deadlock! */
1263 nodesub_unsubscribe(&p_ptr->subscription); 1263 tipc_nodesub_unsubscribe(&p_ptr->subscription);
1264 res = TIPC_OK; 1264 res = TIPC_OK;
1265 } 1265 }
1266 port_unlock(p_ptr); 1266 tipc_port_unlock(p_ptr);
1267 return res; 1267 return res;
1268} 1268}
1269 1269
@@ -1275,7 +1275,7 @@ int tipc_shutdown(u32 ref)
1275 struct port *p_ptr; 1275 struct port *p_ptr;
1276 struct sk_buff *buf = 0; 1276 struct sk_buff *buf = 0;
1277 1277
1278 p_ptr = port_lock(ref); 1278 p_ptr = tipc_port_lock(ref);
1279 if (!p_ptr) 1279 if (!p_ptr)
1280 return -EINVAL; 1280 return -EINVAL;
1281 1281
@@ -1293,8 +1293,8 @@ int tipc_shutdown(u32 ref)
1293 port_out_seqno(p_ptr), 1293 port_out_seqno(p_ptr),
1294 0); 1294 0);
1295 } 1295 }
1296 port_unlock(p_ptr); 1296 tipc_port_unlock(p_ptr);
1297 net_route_msg(buf); 1297 tipc_net_route_msg(buf);
1298 return tipc_disconnect(ref); 1298 return tipc_disconnect(ref);
1299} 1299}
1300 1300
@@ -1302,11 +1302,11 @@ int tipc_isconnected(u32 ref, int *isconnected)
1302{ 1302{
1303 struct port *p_ptr; 1303 struct port *p_ptr;
1304 1304
1305 p_ptr = port_lock(ref); 1305 p_ptr = tipc_port_lock(ref);
1306 if (!p_ptr) 1306 if (!p_ptr)
1307 return -EINVAL; 1307 return -EINVAL;
1308 *isconnected = p_ptr->publ.connected; 1308 *isconnected = p_ptr->publ.connected;
1309 port_unlock(p_ptr); 1309 tipc_port_unlock(p_ptr);
1310 return TIPC_OK; 1310 return TIPC_OK;
1311} 1311}
1312 1312
@@ -1315,7 +1315,7 @@ int tipc_peer(u32 ref, struct tipc_portid *peer)
1315 struct port *p_ptr; 1315 struct port *p_ptr;
1316 int res; 1316 int res;
1317 1317
1318 p_ptr = port_lock(ref); 1318 p_ptr = tipc_port_lock(ref);
1319 if (!p_ptr) 1319 if (!p_ptr)
1320 return -EINVAL; 1320 return -EINVAL;
1321 if (p_ptr->publ.connected) { 1321 if (p_ptr->publ.connected) {
@@ -1324,23 +1324,23 @@ int tipc_peer(u32 ref, struct tipc_portid *peer)
1324 res = TIPC_OK; 1324 res = TIPC_OK;
1325 } else 1325 } else
1326 res = -ENOTCONN; 1326 res = -ENOTCONN;
1327 port_unlock(p_ptr); 1327 tipc_port_unlock(p_ptr);
1328 return res; 1328 return res;
1329} 1329}
1330 1330
1331int tipc_ref_valid(u32 ref) 1331int tipc_ref_valid(u32 ref)
1332{ 1332{
1333 /* Works irrespective of type */ 1333 /* Works irrespective of type */
1334 return !!ref_deref(ref); 1334 return !!tipc_ref_deref(ref);
1335} 1335}
1336 1336
1337 1337
1338/* 1338/*
1339 * port_recv_sections(): Concatenate and deliver sectioned 1339 * tipc_port_recv_sections(): Concatenate and deliver sectioned
1340 * message for this node. 1340 * message for this node.
1341 */ 1341 */
1342 1342
1343int port_recv_sections(struct port *sender, unsigned int num_sect, 1343int tipc_port_recv_sections(struct port *sender, unsigned int num_sect,
1344 struct iovec const *msg_sect) 1344 struct iovec const *msg_sect)
1345{ 1345{
1346 struct sk_buff *buf; 1346 struct sk_buff *buf;
@@ -1349,7 +1349,7 @@ int port_recv_sections(struct port *sender, unsigned int num_sect,
1349 res = msg_build(&sender->publ.phdr, msg_sect, num_sect, 1349 res = msg_build(&sender->publ.phdr, msg_sect, num_sect,
1350 MAX_MSG_SIZE, !sender->user_port, &buf); 1350 MAX_MSG_SIZE, !sender->user_port, &buf);
1351 if (likely(buf)) 1351 if (likely(buf))
1352 port_recv_msg(buf); 1352 tipc_port_recv_msg(buf);
1353 return res; 1353 return res;
1354} 1354}
1355 1355
@@ -1363,18 +1363,18 @@ int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
1363 u32 destnode; 1363 u32 destnode;
1364 int res; 1364 int res;
1365 1365
1366 p_ptr = port_deref(ref); 1366 p_ptr = tipc_port_deref(ref);
1367 if (!p_ptr || !p_ptr->publ.connected) 1367 if (!p_ptr || !p_ptr->publ.connected)
1368 return -EINVAL; 1368 return -EINVAL;
1369 1369
1370 p_ptr->publ.congested = 1; 1370 p_ptr->publ.congested = 1;
1371 if (!port_congested(p_ptr)) { 1371 if (!tipc_port_congested(p_ptr)) {
1372 destnode = port_peernode(p_ptr); 1372 destnode = port_peernode(p_ptr);
1373 if (likely(destnode != tipc_own_addr)) 1373 if (likely(destnode != tipc_own_addr))
1374 res = link_send_sections_fast(p_ptr, msg_sect, num_sect, 1374 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
1375 destnode); 1375 destnode);
1376 else 1376 else
1377 res = port_recv_sections(p_ptr, num_sect, msg_sect); 1377 res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
1378 1378
1379 if (likely(res != -ELINKCONG)) { 1379 if (likely(res != -ELINKCONG)) {
1380 port_incr_out_seqno(p_ptr); 1380 port_incr_out_seqno(p_ptr);
@@ -1404,7 +1404,7 @@ int tipc_send_buf(u32 ref, struct sk_buff *buf, unsigned int dsz)
1404 u32 sz; 1404 u32 sz;
1405 u32 res; 1405 u32 res;
1406 1406
1407 p_ptr = port_deref(ref); 1407 p_ptr = tipc_port_deref(ref);
1408 if (!p_ptr || !p_ptr->publ.connected) 1408 if (!p_ptr || !p_ptr->publ.connected)
1409 return -EINVAL; 1409 return -EINVAL;
1410 1410
@@ -1419,11 +1419,11 @@ int tipc_send_buf(u32 ref, struct sk_buff *buf, unsigned int dsz)
1419 memcpy(buf->data, (unchar *)msg, hsz); 1419 memcpy(buf->data, (unchar *)msg, hsz);
1420 destnode = msg_destnode(msg); 1420 destnode = msg_destnode(msg);
1421 p_ptr->publ.congested = 1; 1421 p_ptr->publ.congested = 1;
1422 if (!port_congested(p_ptr)) { 1422 if (!tipc_port_congested(p_ptr)) {
1423 if (likely(destnode != tipc_own_addr)) 1423 if (likely(destnode != tipc_own_addr))
1424 res = tipc_send_buf_fast(buf, destnode); 1424 res = tipc_send_buf_fast(buf, destnode);
1425 else { 1425 else {
1426 port_recv_msg(buf); 1426 tipc_port_recv_msg(buf);
1427 res = sz; 1427 res = sz;
1428 } 1428 }
1429 if (likely(res != -ELINKCONG)) { 1429 if (likely(res != -ELINKCONG)) {
@@ -1458,7 +1458,7 @@ int tipc_forward2name(u32 ref,
1458 u32 destport = 0; 1458 u32 destport = 0;
1459 int res; 1459 int res;
1460 1460
1461 p_ptr = port_deref(ref); 1461 p_ptr = tipc_port_deref(ref);
1462 if (!p_ptr || p_ptr->publ.connected) 1462 if (!p_ptr || p_ptr->publ.connected)
1463 return -EINVAL; 1463 return -EINVAL;
1464 1464
@@ -1472,16 +1472,16 @@ int tipc_forward2name(u32 ref,
1472 msg_set_lookup_scope(msg, addr_scope(domain)); 1472 msg_set_lookup_scope(msg, addr_scope(domain));
1473 if (importance <= TIPC_CRITICAL_IMPORTANCE) 1473 if (importance <= TIPC_CRITICAL_IMPORTANCE)
1474 msg_set_importance(msg,importance); 1474 msg_set_importance(msg,importance);
1475 destport = nametbl_translate(name->type, name->instance, &destnode); 1475 destport = tipc_nametbl_translate(name->type, name->instance, &destnode);
1476 msg_set_destnode(msg, destnode); 1476 msg_set_destnode(msg, destnode);
1477 msg_set_destport(msg, destport); 1477 msg_set_destport(msg, destport);
1478 1478
1479 if (likely(destport || destnode)) { 1479 if (likely(destport || destnode)) {
1480 p_ptr->sent++; 1480 p_ptr->sent++;
1481 if (likely(destnode == tipc_own_addr)) 1481 if (likely(destnode == tipc_own_addr))
1482 return port_recv_sections(p_ptr, num_sect, msg_sect); 1482 return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
1483 res = link_send_sections_fast(p_ptr, msg_sect, num_sect, 1483 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
1484 destnode); 1484 destnode);
1485 if (likely(res != -ELINKCONG)) 1485 if (likely(res != -ELINKCONG))
1486 return res; 1486 return res;
1487 if (port_unreliable(p_ptr)) { 1487 if (port_unreliable(p_ptr)) {
@@ -1490,8 +1490,8 @@ int tipc_forward2name(u32 ref,
1490 } 1490 }
1491 return -ELINKCONG; 1491 return -ELINKCONG;
1492 } 1492 }
1493 return port_reject_sections(p_ptr, msg, msg_sect, num_sect, 1493 return tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
1494 TIPC_ERR_NO_NAME); 1494 TIPC_ERR_NO_NAME);
1495} 1495}
1496 1496
1497/** 1497/**
@@ -1530,7 +1530,7 @@ int tipc_forward_buf2name(u32 ref,
1530 u32 destport = 0; 1530 u32 destport = 0;
1531 int res; 1531 int res;
1532 1532
1533 p_ptr = (struct port *)ref_deref(ref); 1533 p_ptr = (struct port *)tipc_ref_deref(ref);
1534 if (!p_ptr || p_ptr->publ.connected) 1534 if (!p_ptr || p_ptr->publ.connected)
1535 return -EINVAL; 1535 return -EINVAL;
1536 1536
@@ -1545,7 +1545,7 @@ int tipc_forward_buf2name(u32 ref,
1545 msg_set_lookup_scope(msg, addr_scope(domain)); 1545 msg_set_lookup_scope(msg, addr_scope(domain));
1546 msg_set_hdr_sz(msg, LONG_H_SIZE); 1546 msg_set_hdr_sz(msg, LONG_H_SIZE);
1547 msg_set_size(msg, LONG_H_SIZE + dsz); 1547 msg_set_size(msg, LONG_H_SIZE + dsz);
1548 destport = nametbl_translate(name->type, name->instance, &destnode); 1548 destport = tipc_nametbl_translate(name->type, name->instance, &destnode);
1549 msg_set_destnode(msg, destnode); 1549 msg_set_destnode(msg, destnode);
1550 msg_set_destport(msg, destport); 1550 msg_set_destport(msg, destport);
1551 msg_dbg(msg, "forw2name ==> "); 1551 msg_dbg(msg, "forw2name ==> ");
@@ -1557,7 +1557,7 @@ int tipc_forward_buf2name(u32 ref,
1557 if (likely(destport || destnode)) { 1557 if (likely(destport || destnode)) {
1558 p_ptr->sent++; 1558 p_ptr->sent++;
1559 if (destnode == tipc_own_addr) 1559 if (destnode == tipc_own_addr)
1560 return port_recv_msg(buf); 1560 return tipc_port_recv_msg(buf);
1561 res = tipc_send_buf_fast(buf, destnode); 1561 res = tipc_send_buf_fast(buf, destnode);
1562 if (likely(res != -ELINKCONG)) 1562 if (likely(res != -ELINKCONG))
1563 return res; 1563 return res;
@@ -1601,7 +1601,7 @@ int tipc_forward2port(u32 ref,
1601 struct tipc_msg *msg; 1601 struct tipc_msg *msg;
1602 int res; 1602 int res;
1603 1603
1604 p_ptr = port_deref(ref); 1604 p_ptr = tipc_port_deref(ref);
1605 if (!p_ptr || p_ptr->publ.connected) 1605 if (!p_ptr || p_ptr->publ.connected)
1606 return -EINVAL; 1606 return -EINVAL;
1607 1607
@@ -1616,8 +1616,8 @@ int tipc_forward2port(u32 ref,
1616 msg_set_importance(msg, importance); 1616 msg_set_importance(msg, importance);
1617 p_ptr->sent++; 1617 p_ptr->sent++;
1618 if (dest->node == tipc_own_addr) 1618 if (dest->node == tipc_own_addr)
1619 return port_recv_sections(p_ptr, num_sect, msg_sect); 1619 return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
1620 res = link_send_sections_fast(p_ptr, msg_sect, num_sect, dest->node); 1620 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, dest->node);
1621 if (likely(res != -ELINKCONG)) 1621 if (likely(res != -ELINKCONG))
1622 return res; 1622 return res;
1623 if (port_unreliable(p_ptr)) { 1623 if (port_unreliable(p_ptr)) {
@@ -1658,7 +1658,7 @@ int tipc_forward_buf2port(u32 ref,
1658 struct tipc_msg *msg; 1658 struct tipc_msg *msg;
1659 int res; 1659 int res;
1660 1660
1661 p_ptr = (struct port *)ref_deref(ref); 1661 p_ptr = (struct port *)tipc_ref_deref(ref);
1662 if (!p_ptr || p_ptr->publ.connected) 1662 if (!p_ptr || p_ptr->publ.connected)
1663 return -EINVAL; 1663 return -EINVAL;
1664 1664
@@ -1680,7 +1680,7 @@ int tipc_forward_buf2port(u32 ref,
1680 msg_dbg(msg, "buf2port: "); 1680 msg_dbg(msg, "buf2port: ");
1681 p_ptr->sent++; 1681 p_ptr->sent++;
1682 if (dest->node == tipc_own_addr) 1682 if (dest->node == tipc_own_addr)
1683 return port_recv_msg(buf); 1683 return tipc_port_recv_msg(buf);
1684 res = tipc_send_buf_fast(buf, dest->node); 1684 res = tipc_send_buf_fast(buf, dest->node);
1685 if (likely(res != -ELINKCONG)) 1685 if (likely(res != -ELINKCONG))
1686 return res; 1686 return res;
diff --git a/net/tipc/port.h b/net/tipc/port.h
index f4a8c2be3faa..839f100da646 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -110,65 +110,65 @@ struct port {
110 struct node_subscr subscription; 110 struct node_subscr subscription;
111}; 111};
112 112
113extern spinlock_t port_list_lock; 113extern spinlock_t tipc_port_list_lock;
114struct port_list; 114struct port_list;
115 115
116int port_recv_sections(struct port *p_ptr, u32 num_sect, 116int tipc_port_recv_sections(struct port *p_ptr, u32 num_sect,
117 struct iovec const *msg_sect); 117 struct iovec const *msg_sect);
118int port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr, 118int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
119 struct iovec const *msg_sect, u32 num_sect, 119 struct iovec const *msg_sect, u32 num_sect,
120 int err); 120 int err);
121struct sk_buff *port_get_ports(void); 121struct sk_buff *tipc_port_get_ports(void);
122struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space); 122struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space);
123void port_recv_proto_msg(struct sk_buff *buf); 123void tipc_port_recv_proto_msg(struct sk_buff *buf);
124void port_recv_mcast(struct sk_buff *buf, struct port_list *dp); 124void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp);
125void port_reinit(void); 125void tipc_port_reinit(void);
126 126
127/** 127/**
128 * port_lock - lock port instance referred to and return its pointer 128 * tipc_port_lock - lock port instance referred to and return its pointer
129 */ 129 */
130 130
131static inline struct port *port_lock(u32 ref) 131static inline struct port *tipc_port_lock(u32 ref)
132{ 132{
133 return (struct port *)ref_lock(ref); 133 return (struct port *)tipc_ref_lock(ref);
134} 134}
135 135
136/** 136/**
137 * port_unlock - unlock a port instance 137 * tipc_port_unlock - unlock a port instance
138 * 138 *
139 * Can use pointer instead of ref_unlock() since port is already locked. 139 * Can use pointer instead of tipc_ref_unlock() since port is already locked.
140 */ 140 */
141 141
142static inline void port_unlock(struct port *p_ptr) 142static inline void tipc_port_unlock(struct port *p_ptr)
143{ 143{
144 spin_unlock_bh(p_ptr->publ.lock); 144 spin_unlock_bh(p_ptr->publ.lock);
145} 145}
146 146
147static inline struct port* port_deref(u32 ref) 147static inline struct port* tipc_port_deref(u32 ref)
148{ 148{
149 return (struct port *)ref_deref(ref); 149 return (struct port *)tipc_ref_deref(ref);
150} 150}
151 151
152static inline u32 peer_port(struct port *p_ptr) 152static inline u32 tipc_peer_port(struct port *p_ptr)
153{ 153{
154 return msg_destport(&p_ptr->publ.phdr); 154 return msg_destport(&p_ptr->publ.phdr);
155} 155}
156 156
157static inline u32 peer_node(struct port *p_ptr) 157static inline u32 tipc_peer_node(struct port *p_ptr)
158{ 158{
159 return msg_destnode(&p_ptr->publ.phdr); 159 return msg_destnode(&p_ptr->publ.phdr);
160} 160}
161 161
162static inline int port_congested(struct port *p_ptr) 162static inline int tipc_port_congested(struct port *p_ptr)
163{ 163{
164 return((p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2)); 164 return((p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2));
165} 165}
166 166
167/** 167/**
168 * port_recv_msg - receive message from lower layer and deliver to port user 168 * tipc_port_recv_msg - receive message from lower layer and deliver to port user
169 */ 169 */
170 170
171static inline int port_recv_msg(struct sk_buff *buf) 171static inline int tipc_port_recv_msg(struct sk_buff *buf)
172{ 172{
173 struct port *p_ptr; 173 struct port *p_ptr;
174 struct tipc_msg *msg = buf_msg(buf); 174 struct tipc_msg *msg = buf_msg(buf);
@@ -178,24 +178,24 @@ static inline int port_recv_msg(struct sk_buff *buf)
178 178
179 /* forward unresolved named message */ 179 /* forward unresolved named message */
180 if (unlikely(!destport)) { 180 if (unlikely(!destport)) {
181 net_route_msg(buf); 181 tipc_net_route_msg(buf);
182 return dsz; 182 return dsz;
183 } 183 }
184 184
185 /* validate destination & pass to port, otherwise reject message */ 185 /* validate destination & pass to port, otherwise reject message */
186 p_ptr = port_lock(destport); 186 p_ptr = tipc_port_lock(destport);
187 if (likely(p_ptr)) { 187 if (likely(p_ptr)) {
188 if (likely(p_ptr->publ.connected)) { 188 if (likely(p_ptr->publ.connected)) {
189 if ((unlikely(msg_origport(msg) != peer_port(p_ptr))) || 189 if ((unlikely(msg_origport(msg) != tipc_peer_port(p_ptr))) ||
190 (unlikely(msg_orignode(msg) != peer_node(p_ptr))) || 190 (unlikely(msg_orignode(msg) != tipc_peer_node(p_ptr))) ||
191 (unlikely(!msg_connected(msg)))) { 191 (unlikely(!msg_connected(msg)))) {
192 err = TIPC_ERR_NO_PORT; 192 err = TIPC_ERR_NO_PORT;
193 port_unlock(p_ptr); 193 tipc_port_unlock(p_ptr);
194 goto reject; 194 goto reject;
195 } 195 }
196 } 196 }
197 err = p_ptr->dispatcher(&p_ptr->publ, buf); 197 err = p_ptr->dispatcher(&p_ptr->publ, buf);
198 port_unlock(p_ptr); 198 tipc_port_unlock(p_ptr);
199 if (likely(!err)) 199 if (likely(!err))
200 return dsz; 200 return dsz;
201 } else { 201 } else {
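The tipc_port_congested() inline above declares a port congested once the number of unacknowledged messages (sent - acked) reaches twice the flow-control window. A standalone sketch of the same unsigned-difference test; the window value of 50 and the names below are illustrative assumptions, not taken from this patch:

#include <stdio.h>
#include <stdint.h>

#define FLOW_CONTROL_WIN 50	/* illustrative stand-in for TIPC_FLOW_CONTROL_WIN */

/* Outstanding = sent - acked; unsigned arithmetic keeps the test correct
 * even after the 32-bit counters wrap around. */
static int port_is_congested(uint32_t sent, uint32_t acked)
{
	return (sent - acked) >= (FLOW_CONTROL_WIN * 2);
}

int main(void)
{
	printf("%d\n", port_is_congested(140, 30));          /* 1: 110 outstanding */
	printf("%d\n", port_is_congested(40, 30));           /* 0: 10 outstanding */
	printf("%d\n", port_is_congested(5, 0xFFFFFFF0u));   /* 0: 21 outstanding across the wrap */
	return 0;
}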
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 944093fe246f..5a13c2defe4a 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -61,15 +61,15 @@
61 * because entry 0's reference field has the form XXXX|1--1. 61 * because entry 0's reference field has the form XXXX|1--1.
62 */ 62 */
63 63
64struct ref_table ref_table = { 0 }; 64struct ref_table tipc_ref_table = { 0 };
65 65
66rwlock_t reftbl_lock = RW_LOCK_UNLOCKED; 66static rwlock_t ref_table_lock = RW_LOCK_UNLOCKED;
67 67
68/** 68/**
69 * ref_table_init - create reference table for objects 69 * tipc_ref_table_init - create reference table for objects
70 */ 70 */
71 71
72int ref_table_init(u32 requested_size, u32 start) 72int tipc_ref_table_init(u32 requested_size, u32 start)
73{ 73{
74 struct reference *table; 74 struct reference *table;
75 u32 sz = 1 << 4; 75 u32 sz = 1 << 4;
@@ -83,43 +83,43 @@ int ref_table_init(u32 requested_size, u32 start)
83 if (table == NULL) 83 if (table == NULL)
84 return -ENOMEM; 84 return -ENOMEM;
85 85
86 write_lock_bh(&reftbl_lock); 86 write_lock_bh(&ref_table_lock);
87 index_mask = sz - 1; 87 index_mask = sz - 1;
88 for (i = sz - 1; i >= 0; i--) { 88 for (i = sz - 1; i >= 0; i--) {
89 table[i].object = 0; 89 table[i].object = 0;
90 table[i].lock = SPIN_LOCK_UNLOCKED; 90 table[i].lock = SPIN_LOCK_UNLOCKED;
91 table[i].data.next_plus_upper = (start & ~index_mask) + i - 1; 91 table[i].data.next_plus_upper = (start & ~index_mask) + i - 1;
92 } 92 }
93 ref_table.entries = table; 93 tipc_ref_table.entries = table;
94 ref_table.index_mask = index_mask; 94 tipc_ref_table.index_mask = index_mask;
95 ref_table.first_free = sz - 1; 95 tipc_ref_table.first_free = sz - 1;
96 ref_table.last_free = 1; 96 tipc_ref_table.last_free = 1;
97 write_unlock_bh(&reftbl_lock); 97 write_unlock_bh(&ref_table_lock);
98 return TIPC_OK; 98 return TIPC_OK;
99} 99}
100 100
101/** 101/**
102 * ref_table_stop - destroy reference table for objects 102 * tipc_ref_table_stop - destroy reference table for objects
103 */ 103 */
104 104
105void ref_table_stop(void) 105void tipc_ref_table_stop(void)
106{ 106{
107 if (!ref_table.entries) 107 if (!tipc_ref_table.entries)
108 return; 108 return;
109 109
110 vfree(ref_table.entries); 110 vfree(tipc_ref_table.entries);
111 ref_table.entries = 0; 111 tipc_ref_table.entries = 0;
112} 112}
113 113
114/** 114/**
115 * ref_acquire - create reference to an object 115 * tipc_ref_acquire - create reference to an object
116 * 116 *
117 * Return a unique reference value which can be translated back to the pointer 117 * Return a unique reference value which can be translated back to the pointer
118 * 'object' at a later time. Also, pass back a pointer to the lock protecting 118 * 'object' at a later time. Also, pass back a pointer to the lock protecting
119 * the object, but without locking it. 119 * the object, but without locking it.
120 */ 120 */
121 121
122u32 ref_acquire(void *object, spinlock_t **lock) 122u32 tipc_ref_acquire(void *object, spinlock_t **lock)
123{ 123{
124 struct reference *entry; 124 struct reference *entry;
125 u32 index; 125 u32 index;
@@ -127,17 +127,17 @@ u32 ref_acquire(void *object, spinlock_t **lock)
127 u32 next_plus_upper; 127 u32 next_plus_upper;
128 u32 reference = 0; 128 u32 reference = 0;
129 129
130 assert(ref_table.entries && object); 130 assert(tipc_ref_table.entries && object);
131 131
132 write_lock_bh(&reftbl_lock); 132 write_lock_bh(&ref_table_lock);
133 if (ref_table.first_free) { 133 if (tipc_ref_table.first_free) {
134 index = ref_table.first_free; 134 index = tipc_ref_table.first_free;
135 entry = &(ref_table.entries[index]); 135 entry = &(tipc_ref_table.entries[index]);
136 index_mask = ref_table.index_mask; 136 index_mask = tipc_ref_table.index_mask;
137 /* take lock in case a previous user of entry still holds it */ 137 /* take lock in case a previous user of entry still holds it */
138 spin_lock_bh(&entry->lock); 138 spin_lock_bh(&entry->lock);
139 next_plus_upper = entry->data.next_plus_upper; 139 next_plus_upper = entry->data.next_plus_upper;
140 ref_table.first_free = next_plus_upper & index_mask; 140 tipc_ref_table.first_free = next_plus_upper & index_mask;
141 reference = (next_plus_upper & ~index_mask) + index; 141 reference = (next_plus_upper & ~index_mask) + index;
142 entry->data.reference = reference; 142 entry->data.reference = reference;
143 entry->object = object; 143 entry->object = object;
@@ -145,45 +145,45 @@ u32 ref_acquire(void *object, spinlock_t **lock)
145 *lock = &entry->lock; 145 *lock = &entry->lock;
146 spin_unlock_bh(&entry->lock); 146 spin_unlock_bh(&entry->lock);
147 } 147 }
148 write_unlock_bh(&reftbl_lock); 148 write_unlock_bh(&ref_table_lock);
149 return reference; 149 return reference;
150} 150}
151 151
152/** 152/**
153 * ref_discard - invalidate references to an object 153 * tipc_ref_discard - invalidate references to an object
154 * 154 *
155 * Disallow future references to an object and free up the entry for re-use. 155 * Disallow future references to an object and free up the entry for re-use.
156 * Note: The entry's spin_lock may still be busy after discard 156 * Note: The entry's spin_lock may still be busy after discard
157 */ 157 */
158 158
159void ref_discard(u32 ref) 159void tipc_ref_discard(u32 ref)
160{ 160{
161 struct reference *entry; 161 struct reference *entry;
162 u32 index; 162 u32 index;
163 u32 index_mask; 163 u32 index_mask;
164 164
165 assert(ref_table.entries); 165 assert(tipc_ref_table.entries);
166 assert(ref != 0); 166 assert(ref != 0);
167 167
168 write_lock_bh(&reftbl_lock); 168 write_lock_bh(&ref_table_lock);
169 index_mask = ref_table.index_mask; 169 index_mask = tipc_ref_table.index_mask;
170 index = ref & index_mask; 170 index = ref & index_mask;
171 entry = &(ref_table.entries[index]); 171 entry = &(tipc_ref_table.entries[index]);
172 assert(entry->object != 0); 172 assert(entry->object != 0);
173 assert(entry->data.reference == ref); 173 assert(entry->data.reference == ref);
174 174
175 /* mark entry as unused */ 175 /* mark entry as unused */
176 entry->object = 0; 176 entry->object = 0;
177 if (ref_table.first_free == 0) 177 if (tipc_ref_table.first_free == 0)
178 ref_table.first_free = index; 178 tipc_ref_table.first_free = index;
179 else 179 else
180 /* next_plus_upper is always XXXX|0--0 for last free entry */ 180 /* next_plus_upper is always XXXX|0--0 for last free entry */
181 ref_table.entries[ref_table.last_free].data.next_plus_upper 181 tipc_ref_table.entries[tipc_ref_table.last_free].data.next_plus_upper
182 |= index; 182 |= index;
183 ref_table.last_free = index; 183 tipc_ref_table.last_free = index;
184 184
185 /* increment upper bits of entry to invalidate subsequent references */ 185 /* increment upper bits of entry to invalidate subsequent references */
186 entry->data.next_plus_upper = (ref & ~index_mask) + (index_mask + 1); 186 entry->data.next_plus_upper = (ref & ~index_mask) + (index_mask + 1);
187 write_unlock_bh(&reftbl_lock); 187 write_unlock_bh(&ref_table_lock);
188} 188}
189 189
diff --git a/net/tipc/ref.h b/net/tipc/ref.h
index 429cde57228a..4f8f9f40dcac 100644
--- a/net/tipc/ref.h
+++ b/net/tipc/ref.h
@@ -54,7 +54,7 @@ struct reference {
54}; 54};
55 55
56/** 56/**
57 * struct ref_table - table of TIPC object reference entries 57 * struct tipc_ref_table - table of TIPC object reference entries
58 * @entries: pointer to array of reference entries 58 * @entries: pointer to array of reference entries
59 * @index_mask: bitmask for array index portion of reference values 59 * @index_mask: bitmask for array index portion of reference values
60 * @first_free: array index of first unused object reference entry 60 * @first_free: array index of first unused object reference entry
@@ -68,24 +68,24 @@ struct ref_table {
68 u32 last_free; 68 u32 last_free;
69}; 69};
70 70
71extern struct ref_table ref_table; 71extern struct ref_table tipc_ref_table;
72 72
73int ref_table_init(u32 requested_size, u32 start); 73int tipc_ref_table_init(u32 requested_size, u32 start);
74void ref_table_stop(void); 74void tipc_ref_table_stop(void);
75 75
76u32 ref_acquire(void *object, spinlock_t **lock); 76u32 tipc_ref_acquire(void *object, spinlock_t **lock);
77void ref_discard(u32 ref); 77void tipc_ref_discard(u32 ref);
78 78
79 79
80/** 80/**
81 * ref_lock - lock referenced object and return pointer to it 81 * tipc_ref_lock - lock referenced object and return pointer to it
82 */ 82 */
83 83
84static inline void *ref_lock(u32 ref) 84static inline void *tipc_ref_lock(u32 ref)
85{ 85{
86 if (likely(ref_table.entries)) { 86 if (likely(tipc_ref_table.entries)) {
87 struct reference *r = 87 struct reference *r =
88 &ref_table.entries[ref & ref_table.index_mask]; 88 &tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
89 89
90 spin_lock_bh(&r->lock); 90 spin_lock_bh(&r->lock);
91 if (likely(r->data.reference == ref)) 91 if (likely(r->data.reference == ref))
@@ -96,31 +96,31 @@ static inline void *ref_lock(u32 ref)
96} 96}
97 97
98/** 98/**
99 * ref_unlock - unlock referenced object 99 * tipc_ref_unlock - unlock referenced object
100 */ 100 */
101 101
102static inline void ref_unlock(u32 ref) 102static inline void tipc_ref_unlock(u32 ref)
103{ 103{
104 if (likely(ref_table.entries)) { 104 if (likely(tipc_ref_table.entries)) {
105 struct reference *r = 105 struct reference *r =
106 &ref_table.entries[ref & ref_table.index_mask]; 106 &tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
107 107
108 if (likely(r->data.reference == ref)) 108 if (likely(r->data.reference == ref))
109 spin_unlock_bh(&r->lock); 109 spin_unlock_bh(&r->lock);
110 else 110 else
111 err("ref_unlock() invoked using obsolete reference\n"); 111 err("tipc_ref_unlock() invoked using obsolete reference\n");
112 } 112 }
113} 113}
114 114
115/** 115/**
116 * ref_deref - return pointer to referenced object (without locking it) 116 * tipc_ref_deref - return pointer to referenced object (without locking it)
117 */ 117 */
118 118
119static inline void *ref_deref(u32 ref) 119static inline void *tipc_ref_deref(u32 ref)
120{ 120{
121 if (likely(ref_table.entries)) { 121 if (likely(tipc_ref_table.entries)) {
122 struct reference *r = 122 struct reference *r =
123 &ref_table.entries[ref & ref_table.index_mask]; 123 &tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
124 124
125 if (likely(r->data.reference == ref)) 125 if (likely(r->data.reference == ref))
126 return r->object; 126 return r->object;
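Read together, the ref.c and ref.h hunks above implement a reference table in which each 32-bit reference encodes an array index in its low bits and a per-entry generation count in its upper bits; tipc_ref_discard() advances the upper bits, so a stale reference later fails the r->data.reference == ref check in tipc_ref_lock()/tipc_ref_deref(). Below is a compact userspace analogue of that scheme, offered as a sketch only: pthread mutexes stand in for the kernel spinlocks, a free-slot scan replaces the kernel's free list, and all names are hypothetical.

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

#define TBL_SIZE   16                     /* power of two, as in the kernel table */
#define INDEX_MASK (TBL_SIZE - 1)

struct entry {
	pthread_mutex_t lock;
	void *object;                     /* NULL: slot is free */
	uint32_t reference;               /* generation (upper bits) | index (low bits) */
};

static struct entry table[TBL_SIZE];

/* Install an object in a free slot and return its tagged reference (0 = table full). */
static uint32_t ref_acquire(void *object)
{
	for (uint32_t i = 1; i < TBL_SIZE; i++) {        /* slot 0 stays reserved */
		pthread_mutex_lock(&table[i].lock);
		if (!table[i].object) {
			table[i].object = object;
			table[i].reference = (table[i].reference & ~(uint32_t)INDEX_MASK) | i;
			uint32_t ref = table[i].reference;
			pthread_mutex_unlock(&table[i].lock);
			return ref;
		}
		pthread_mutex_unlock(&table[i].lock);
	}
	return 0;
}

/* Translate a reference back to its object, leaving the entry lock held on success. */
static void *ref_lock(uint32_t ref)
{
	struct entry *e = &table[ref & INDEX_MASK];

	pthread_mutex_lock(&e->lock);
	if (e->object && e->reference == ref)
		return e->object;
	pthread_mutex_unlock(&e->lock);
	return NULL;                              /* stale or never issued */
}

static void ref_unlock(uint32_t ref)
{
	pthread_mutex_unlock(&table[ref & INDEX_MASK].lock);
}

/* Invalidate a reference: free the slot and bump the generation bits. */
static void ref_discard(uint32_t ref)
{
	struct entry *e = &table[ref & INDEX_MASK];

	pthread_mutex_lock(&e->lock);
	e->object = NULL;
	e->reference += INDEX_MASK + 1;           /* "increment upper bits of entry" */
	pthread_mutex_unlock(&e->lock);
}

int main(void)
{
	for (int i = 0; i < TBL_SIZE; i++)
		pthread_mutex_init(&table[i].lock, NULL);

	int payload = 42;
	uint32_t ref = ref_acquire(&payload);
	int *p = ref_lock(ref);
	if (p) {
		printf("live lookup: %d\n", *p);
		ref_unlock(ref);
	}
	ref_discard(ref);
	printf("stale lookup: %p\n", ref_lock(ref));   /* NULL: generation moved on */
	return 0;
}

The generation bits are the point of the design: a reference handed out before ref_discard() keeps failing validation even after the slot is reused, which is exactly what the lock/deref validation in ref.h relies on.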
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 107f654db0ba..67253bfcd702 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1183,7 +1183,7 @@ static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
1183 if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) { 1183 if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) {
1184 sock->state = SS_DISCONNECTING; 1184 sock->state = SS_DISCONNECTING;
1185 /* Note: Use signal since port lock is already taken! */ 1185 /* Note: Use signal since port lock is already taken! */
1186 k_signal((Handler)async_disconnect, tport->ref); 1186 tipc_k_signal((Handler)async_disconnect, tport->ref);
1187 } 1187 }
1188 1188
1189 /* Enqueue message (finally!) */ 1189 /* Enqueue message (finally!) */
@@ -1683,11 +1683,11 @@ static struct proto tipc_proto = {
1683}; 1683};
1684 1684
1685/** 1685/**
1686 * socket_init - initialize TIPC socket interface 1686 * tipc_socket_init - initialize TIPC socket interface
1687 * 1687 *
1688 * Returns 0 on success, errno otherwise 1688 * Returns 0 on success, errno otherwise
1689 */ 1689 */
1690int socket_init(void) 1690int tipc_socket_init(void)
1691{ 1691{
1692 int res; 1692 int res;
1693 1693
@@ -1710,9 +1710,9 @@ int socket_init(void)
1710} 1710}
1711 1711
1712/** 1712/**
1713 * sock_stop - stop TIPC socket interface 1713 * tipc_socket_stop - stop TIPC socket interface
1714 */ 1714 */
1715void socket_stop(void) 1715void tipc_socket_stop(void)
1716{ 1716{
1717 if (!sockets_enabled) 1717 if (!sockets_enabled)
1718 return; 1718 return;
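The dispatch() hunk above notes that the disconnect cannot run inline because the port lock is already held, so it is queued through tipc_k_signal() and executed later. A minimal single-threaded C sketch of that defer-and-run-later pattern; the queue, its size, and the function names are hypothetical, and the in-kernel version naturally runs queued handlers from a separate deferred context rather than an explicit drain call:

#include <stdio.h>

typedef void (*Handler)(unsigned long);

struct signal { Handler handler; unsigned long arg; };

#define QUEUE_SIZE 32
static struct signal queue[QUEUE_SIZE];
static int queued;

/* Record the call instead of making it; safe while locks are held. */
static int k_signal(Handler handler, unsigned long arg)
{
	if (queued >= QUEUE_SIZE)
		return -1;
	queue[queued].handler = handler;
	queue[queued].arg = arg;
	queued++;
	return 0;
}

/* Later, with no locks held, drain the queue. */
static void run_signals(void)
{
	for (int i = 0; i < queued; i++)
		queue[i].handler(queue[i].arg);
	queued = 0;
}

static void async_disconnect(unsigned long port_ref)
{
	printf("disconnecting port %lu\n", port_ref);
}

int main(void)
{
	/* ... port lock held here: just queue the disconnect ... */
	k_signal(async_disconnect, 4711);
	/* ... lock released, deferred work runs ... */
	run_signals();
	return 0;
}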
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 80e219ba527d..5ff38b9f3194 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -118,14 +118,14 @@ static void subscr_send_event(struct subscription *sub,
118} 118}
119 119
120/** 120/**
121 * subscr_overlap - test for subscription overlap with the given values 121 * tipc_subscr_overlap - test for subscription overlap with the given values
122 * 122 *
123 * Returns 1 if there is overlap, otherwise 0. 123 * Returns 1 if there is overlap, otherwise 0.
124 */ 124 */
125 125
126int subscr_overlap(struct subscription *sub, 126int tipc_subscr_overlap(struct subscription *sub,
127 u32 found_lower, 127 u32 found_lower,
128 u32 found_upper) 128 u32 found_upper)
129 129
130{ 130{
131 if (found_lower < sub->seq.lower) 131 if (found_lower < sub->seq.lower)
@@ -138,22 +138,22 @@ int subscr_overlap(struct subscription *sub,
138} 138}
139 139
140/** 140/**
141 * subscr_report_overlap - issue event if there is subscription overlap 141 * tipc_subscr_report_overlap - issue event if there is subscription overlap
142 * 142 *
143 * Protected by nameseq.lock in name_table.c 143 * Protected by nameseq.lock in name_table.c
144 */ 144 */
145 145
146void subscr_report_overlap(struct subscription *sub, 146void tipc_subscr_report_overlap(struct subscription *sub,
147 u32 found_lower, 147 u32 found_lower,
148 u32 found_upper, 148 u32 found_upper,
149 u32 event, 149 u32 event,
150 u32 port_ref, 150 u32 port_ref,
151 u32 node, 151 u32 node,
152 int must) 152 int must)
153{ 153{
154 dbg("Rep overlap %u:%u,%u<->%u,%u\n", sub->seq.type, sub->seq.lower, 154 dbg("Rep overlap %u:%u,%u<->%u,%u\n", sub->seq.type, sub->seq.lower,
155 sub->seq.upper, found_lower, found_upper); 155 sub->seq.upper, found_lower, found_upper);
156 if (!subscr_overlap(sub, found_lower, found_upper)) 156 if (!tipc_subscr_overlap(sub, found_lower, found_upper))
157 return; 157 return;
158 if (!must && (sub->filter != TIPC_SUB_PORTS)) 158 if (!must && (sub->filter != TIPC_SUB_PORTS))
159 return; 159 return;
@@ -172,13 +172,13 @@ static void subscr_timeout(struct subscription *sub)
172 /* Validate subscriber reference (in case subscriber is terminating) */ 172 /* Validate subscriber reference (in case subscriber is terminating) */
173 173
174 subscriber_ref = sub->owner->ref; 174 subscriber_ref = sub->owner->ref;
175 subscriber = (struct subscriber *)ref_lock(subscriber_ref); 175 subscriber = (struct subscriber *)tipc_ref_lock(subscriber_ref);
176 if (subscriber == NULL) 176 if (subscriber == NULL)
177 return; 177 return;
178 178
179 /* Unlink subscription from name table */ 179 /* Unlink subscription from name table */
180 180
181 nametbl_unsubscribe(sub); 181 tipc_nametbl_unsubscribe(sub);
182 182
183 /* Notify subscriber of timeout, then unlink subscription */ 183 /* Notify subscriber of timeout, then unlink subscription */
184 184
@@ -192,7 +192,7 @@ static void subscr_timeout(struct subscription *sub)
192 192
193 /* Now destroy subscription */ 193 /* Now destroy subscription */
194 194
195 ref_unlock(subscriber_ref); 195 tipc_ref_unlock(subscriber_ref);
196 k_term_timer(&sub->timer); 196 k_term_timer(&sub->timer);
197 kfree(sub); 197 kfree(sub);
198 atomic_dec(&topsrv.subscription_count); 198 atomic_dec(&topsrv.subscription_count);
@@ -216,7 +216,7 @@ static void subscr_terminate(struct subscriber *subscriber)
216 216
217 /* Invalidate subscriber reference */ 217 /* Invalidate subscriber reference */
218 218
219 ref_discard(subscriber->ref); 219 tipc_ref_discard(subscriber->ref);
220 spin_unlock_bh(subscriber->lock); 220 spin_unlock_bh(subscriber->lock);
221 221
222 /* Destroy any existing subscriptions for subscriber */ 222 /* Destroy any existing subscriptions for subscriber */
@@ -227,7 +227,7 @@ static void subscr_terminate(struct subscriber *subscriber)
227 k_cancel_timer(&sub->timer); 227 k_cancel_timer(&sub->timer);
228 k_term_timer(&sub->timer); 228 k_term_timer(&sub->timer);
229 } 229 }
230 nametbl_unsubscribe(sub); 230 tipc_nametbl_unsubscribe(sub);
231 list_del(&sub->subscription_list); 231 list_del(&sub->subscription_list);
232 dbg("Term: Removed sub %u,%u,%u from subscriber %x list\n", 232 dbg("Term: Removed sub %u,%u,%u from subscriber %x list\n",
233 sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber); 233 sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber);
@@ -315,7 +315,7 @@ static void subscr_subscribe(struct tipc_subscr *s,
315 k_start_timer(&sub->timer, sub->timeout); 315 k_start_timer(&sub->timer, sub->timeout);
316 } 316 }
317 sub->owner = subscriber; 317 sub->owner = subscriber;
318 nametbl_subscribe(sub); 318 tipc_nametbl_subscribe(sub);
319} 319}
320 320
321/** 321/**
@@ -332,7 +332,7 @@ static void subscr_conn_shutdown_event(void *usr_handle,
332 struct subscriber *subscriber; 332 struct subscriber *subscriber;
333 spinlock_t *subscriber_lock; 333 spinlock_t *subscriber_lock;
334 334
335 subscriber = ref_lock((u32)(unsigned long)usr_handle); 335 subscriber = tipc_ref_lock((u32)(unsigned long)usr_handle);
336 if (subscriber == NULL) 336 if (subscriber == NULL)
337 return; 337 return;
338 338
@@ -354,7 +354,7 @@ static void subscr_conn_msg_event(void *usr_handle,
354 struct subscriber *subscriber; 354 struct subscriber *subscriber;
355 spinlock_t *subscriber_lock; 355 spinlock_t *subscriber_lock;
356 356
357 subscriber = ref_lock((u32)(unsigned long)usr_handle); 357 subscriber = tipc_ref_lock((u32)(unsigned long)usr_handle);
358 if (subscriber == NULL) 358 if (subscriber == NULL)
359 return; 359 return;
360 360
@@ -401,7 +401,7 @@ static void subscr_named_msg_event(void *usr_handle,
401 memset(subscriber, 0, sizeof(struct subscriber)); 401 memset(subscriber, 0, sizeof(struct subscriber));
402 INIT_LIST_HEAD(&subscriber->subscription_list); 402 INIT_LIST_HEAD(&subscriber->subscription_list);
403 INIT_LIST_HEAD(&subscriber->subscriber_list); 403 INIT_LIST_HEAD(&subscriber->subscriber_list);
404 subscriber->ref = ref_acquire(subscriber, &subscriber->lock); 404 subscriber->ref = tipc_ref_acquire(subscriber, &subscriber->lock);
405 if (subscriber->ref == 0) { 405 if (subscriber->ref == 0) {
406 warn("Failed to acquire subscriber reference\n"); 406 warn("Failed to acquire subscriber reference\n");
407 kfree(subscriber); 407 kfree(subscriber);
@@ -423,7 +423,7 @@ static void subscr_named_msg_event(void *usr_handle,
423 &subscriber->port_ref); 423 &subscriber->port_ref);
424 if (subscriber->port_ref == 0) { 424 if (subscriber->port_ref == 0) {
425 warn("Memory squeeze; failed to create subscription port\n"); 425 warn("Memory squeeze; failed to create subscription port\n");
426 ref_discard(subscriber->ref); 426 tipc_ref_discard(subscriber->ref);
427 kfree(subscriber); 427 kfree(subscriber);
428 return; 428 return;
429 } 429 }
@@ -432,7 +432,7 @@ static void subscr_named_msg_event(void *usr_handle,
432 432
433 /* Add subscriber to topology server's subscriber list */ 433 /* Add subscriber to topology server's subscriber list */
434 434
435 ref_lock(subscriber->ref); 435 tipc_ref_lock(subscriber->ref);
436 spin_lock_bh(&topsrv.lock); 436 spin_lock_bh(&topsrv.lock);
437 list_add(&subscriber->subscriber_list, &topsrv.subscriber_list); 437 list_add(&subscriber->subscriber_list, &topsrv.subscriber_list);
438 spin_unlock_bh(&topsrv.lock); 438 spin_unlock_bh(&topsrv.lock);
@@ -451,7 +451,7 @@ static void subscr_named_msg_event(void *usr_handle,
451 spin_unlock_bh(subscriber_lock); 451 spin_unlock_bh(subscriber_lock);
452} 452}
453 453
454int subscr_start(void) 454int tipc_subscr_start(void)
455{ 455{
456 struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV}; 456 struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV};
457 int res = -1; 457 int res = -1;
@@ -481,7 +481,7 @@ int subscr_start(void)
481 if (res) 481 if (res)
482 goto failed; 482 goto failed;
483 483
484 res = nametbl_publish_rsv(topsrv.setup_port, TIPC_NODE_SCOPE, &seq); 484 res = tipc_nametbl_publish_rsv(topsrv.setup_port, TIPC_NODE_SCOPE, &seq);
485 if (res) 485 if (res)
486 goto failed; 486 goto failed;
487 487
@@ -496,7 +496,7 @@ failed:
496 return res; 496 return res;
497} 497}
498 498
499void subscr_stop(void) 499void tipc_subscr_stop(void)
500{ 500{
501 struct subscriber *subscriber; 501 struct subscriber *subscriber;
502 struct subscriber *subscriber_temp; 502 struct subscriber *subscriber_temp;
@@ -507,7 +507,7 @@ void subscr_stop(void)
507 list_for_each_entry_safe(subscriber, subscriber_temp, 507 list_for_each_entry_safe(subscriber, subscriber_temp,
508 &topsrv.subscriber_list, 508 &topsrv.subscriber_list,
509 subscriber_list) { 509 subscriber_list) {
510 ref_lock(subscriber->ref); 510 tipc_ref_lock(subscriber->ref);
511 subscriber_lock = subscriber->lock; 511 subscriber_lock = subscriber->lock;
512 subscr_terminate(subscriber); 512 subscr_terminate(subscriber);
513 spin_unlock_bh(subscriber_lock); 513 spin_unlock_bh(subscriber_lock);
@@ -522,6 +522,6 @@ int tipc_ispublished(struct tipc_name const *name)
522{ 522{
523 u32 domain = 0; 523 u32 domain = 0;
524 524
525 return(nametbl_translate(name->type, name->instance,&domain) != 0); 525 return(tipc_nametbl_translate(name->type, name->instance,&domain) != 0);
526} 526}
527 527
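tipc_subscr_overlap() above decides whether a published name range intersects the range a subscription covers; only its first comparison is visible in this hunk, so the sketch below shows the generic clamp-and-compare form of such an interval test rather than a copy of the kernel function:

#include <stdio.h>
#include <stdint.h>

/* Returns 1 if [found_lower, found_upper] intersects [sub_lower, sub_upper]. */
static int ranges_overlap(uint32_t sub_lower, uint32_t sub_upper,
			  uint32_t found_lower, uint32_t found_upper)
{
	if (found_lower < sub_lower)
		found_lower = sub_lower;
	if (found_upper > sub_upper)
		found_upper = sub_upper;
	if (found_lower > found_upper)
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", ranges_overlap(100, 200, 150, 300));	/* 1: overlap is 150..200 */
	printf("%d\n", ranges_overlap(100, 200, 201, 300));	/* 0: disjoint */
	return 0;
}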
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index ccff4efcb755..1e5090465d2e 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -60,21 +60,21 @@ struct subscription {
60 struct subscriber *owner; 60 struct subscriber *owner;
61}; 61};
62 62
63int subscr_overlap(struct subscription * sub, 63int tipc_subscr_overlap(struct subscription * sub,
64 u32 found_lower, 64 u32 found_lower,
65 u32 found_upper); 65 u32 found_upper);
66 66
67void subscr_report_overlap(struct subscription * sub, 67void tipc_subscr_report_overlap(struct subscription * sub,
68 u32 found_lower, 68 u32 found_lower,
69 u32 found_upper, 69 u32 found_upper,
70 u32 event, 70 u32 event,
71 u32 port_ref, 71 u32 port_ref,
72 u32 node, 72 u32 node,
73 int must_report); 73 int must_report);
74 74
75int subscr_start(void); 75int tipc_subscr_start(void);
76 76
77void subscr_stop(void); 77void tipc_subscr_stop(void);
78 78
79 79
80#endif 80#endif
diff --git a/net/tipc/user_reg.c b/net/tipc/user_reg.c
index 35ec7dc8211d..106200d76587 100644
--- a/net/tipc/user_reg.c
+++ b/net/tipc/user_reg.c
@@ -114,10 +114,10 @@ static void reg_callback(struct tipc_user *user_ptr)
114} 114}
115 115
116/** 116/**
117 * reg_start - activate TIPC user registry 117 * tipc_reg_start - activate TIPC user registry
118 */ 118 */
119 119
120int reg_start(void) 120int tipc_reg_start(void)
121{ 121{
122 u32 u; 122 u32 u;
123 int res; 123 int res;
@@ -127,17 +127,17 @@ int reg_start(void)
127 127
128 for (u = 1; u <= MAX_USERID; u++) { 128 for (u = 1; u <= MAX_USERID; u++) {
129 if (users[u].callback) 129 if (users[u].callback)
130 k_signal((Handler)reg_callback, 130 tipc_k_signal((Handler)reg_callback,
131 (unsigned long)&users[u]); 131 (unsigned long)&users[u]);
132 } 132 }
133 return TIPC_OK; 133 return TIPC_OK;
134} 134}
135 135
136/** 136/**
137 * reg_stop - shut down & delete TIPC user registry 137 * tipc_reg_stop - shut down & delete TIPC user registry
138 */ 138 */
139 139
140void reg_stop(void) 140void tipc_reg_stop(void)
141{ 141{
142 int id; 142 int id;
143 143
@@ -184,7 +184,7 @@ int tipc_attach(u32 *userid, tipc_mode_event cb, void *usr_handle)
184 atomic_inc(&tipc_user_count); 184 atomic_inc(&tipc_user_count);
185 185
186 if (cb && (tipc_mode != TIPC_NOT_RUNNING)) 186 if (cb && (tipc_mode != TIPC_NOT_RUNNING))
187 k_signal((Handler)reg_callback, (unsigned long)user_ptr); 187 tipc_k_signal((Handler)reg_callback, (unsigned long)user_ptr);
188 return TIPC_OK; 188 return TIPC_OK;
189} 189}
190 190
@@ -223,10 +223,10 @@ void tipc_detach(u32 userid)
223} 223}
224 224
225/** 225/**
226 * reg_add_port - register a user's driver port 226 * tipc_reg_add_port - register a user's driver port
227 */ 227 */
228 228
229int reg_add_port(struct user_port *up_ptr) 229int tipc_reg_add_port(struct user_port *up_ptr)
230{ 230{
231 struct tipc_user *user_ptr; 231 struct tipc_user *user_ptr;
232 232
@@ -245,10 +245,10 @@ int reg_add_port(struct user_port *up_ptr)
245} 245}
246 246
247/** 247/**
248 * reg_remove_port - deregister a user's driver port 248 * tipc_reg_remove_port - deregister a user's driver port
249 */ 249 */
250 250
251int reg_remove_port(struct user_port *up_ptr) 251int tipc_reg_remove_port(struct user_port *up_ptr)
252{ 252{
253 if (up_ptr->user_ref == 0) 253 if (up_ptr->user_ref == 0)
254 return TIPC_OK; 254 return TIPC_OK;
diff --git a/net/tipc/user_reg.h b/net/tipc/user_reg.h
index 122ca9be3671..d0e88794ed1b 100644
--- a/net/tipc/user_reg.h
+++ b/net/tipc/user_reg.h
@@ -39,10 +39,10 @@
39 39
40#include "port.h" 40#include "port.h"
41 41
42int reg_start(void); 42int tipc_reg_start(void);
43void reg_stop(void); 43void tipc_reg_stop(void);
44 44
45int reg_add_port(struct user_port *up_ptr); 45int tipc_reg_add_port(struct user_port *up_ptr);
46int reg_remove_port(struct user_port *up_ptr); 46int tipc_reg_remove_port(struct user_port *up_ptr);
47 47
48#endif 48#endif
diff --git a/net/tipc/zone.c b/net/tipc/zone.c
index 4eaef662d568..7c11f7f83a21 100644
--- a/net/tipc/zone.c
+++ b/net/tipc/zone.c
@@ -42,12 +42,12 @@
42#include "cluster.h" 42#include "cluster.h"
43#include "node.h" 43#include "node.h"
44 44
45struct _zone *zone_create(u32 addr) 45struct _zone *tipc_zone_create(u32 addr)
46{ 46{
47 struct _zone *z_ptr = 0; 47 struct _zone *z_ptr = 0;
48 u32 z_num; 48 u32 z_num;
49 49
50 if (!addr_domain_valid(addr)) 50 if (!tipc_addr_domain_valid(addr))
51 return 0; 51 return 0;
52 52
53 z_ptr = (struct _zone *)kmalloc(sizeof(*z_ptr), GFP_ATOMIC); 53 z_ptr = (struct _zone *)kmalloc(sizeof(*z_ptr), GFP_ATOMIC);
@@ -55,24 +55,24 @@ struct _zone *zone_create(u32 addr)
55 memset(z_ptr, 0, sizeof(*z_ptr)); 55 memset(z_ptr, 0, sizeof(*z_ptr));
56 z_num = tipc_zone(addr); 56 z_num = tipc_zone(addr);
57 z_ptr->addr = tipc_addr(z_num, 0, 0); 57 z_ptr->addr = tipc_addr(z_num, 0, 0);
58 net.zones[z_num] = z_ptr; 58 tipc_net.zones[z_num] = z_ptr;
59 } 59 }
60 return z_ptr; 60 return z_ptr;
61} 61}
62 62
63void zone_delete(struct _zone *z_ptr) 63void tipc_zone_delete(struct _zone *z_ptr)
64{ 64{
65 u32 c_num; 65 u32 c_num;
66 66
67 if (!z_ptr) 67 if (!z_ptr)
68 return; 68 return;
69 for (c_num = 1; c_num <= tipc_max_clusters; c_num++) { 69 for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
70 cluster_delete(z_ptr->clusters[c_num]); 70 tipc_cltr_delete(z_ptr->clusters[c_num]);
71 } 71 }
72 kfree(z_ptr); 72 kfree(z_ptr);
73} 73}
74 74
75void zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr) 75void tipc_zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr)
76{ 76{
77 u32 c_num = tipc_cluster(c_ptr->addr); 77 u32 c_num = tipc_cluster(c_ptr->addr);
78 78
@@ -82,19 +82,19 @@ void zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr)
82 z_ptr->clusters[c_num] = c_ptr; 82 z_ptr->clusters[c_num] = c_ptr;
83} 83}
84 84
85void zone_remove_as_router(struct _zone *z_ptr, u32 router) 85void tipc_zone_remove_as_router(struct _zone *z_ptr, u32 router)
86{ 86{
87 u32 c_num; 87 u32 c_num;
88 88
89 for (c_num = 1; c_num <= tipc_max_clusters; c_num++) { 89 for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
90 if (z_ptr->clusters[c_num]) { 90 if (z_ptr->clusters[c_num]) {
91 cluster_remove_as_router(z_ptr->clusters[c_num], 91 tipc_cltr_remove_as_router(z_ptr->clusters[c_num],
92 router); 92 router);
93 } 93 }
94 } 94 }
95} 95}
96 96
97void zone_send_external_routes(struct _zone *z_ptr, u32 dest) 97void tipc_zone_send_external_routes(struct _zone *z_ptr, u32 dest)
98{ 98{
99 u32 c_num; 99 u32 c_num;
100 100
@@ -102,12 +102,12 @@ void zone_send_external_routes(struct _zone *z_ptr, u32 dest)
102 if (z_ptr->clusters[c_num]) { 102 if (z_ptr->clusters[c_num]) {
103 if (in_own_cluster(z_ptr->addr)) 103 if (in_own_cluster(z_ptr->addr))
104 continue; 104 continue;
105 cluster_send_ext_routes(z_ptr->clusters[c_num], dest); 105 tipc_cltr_send_ext_routes(z_ptr->clusters[c_num], dest);
106 } 106 }
107 } 107 }
108} 108}
109 109
110struct node *zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref) 110struct node *tipc_zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref)
111{ 111{
112 struct cluster *c_ptr; 112 struct cluster *c_ptr;
113 struct node *n_ptr; 113 struct node *n_ptr;
@@ -118,7 +118,7 @@ struct node *zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref)
118 c_ptr = z_ptr->clusters[tipc_cluster(addr)]; 118 c_ptr = z_ptr->clusters[tipc_cluster(addr)];
119 if (!c_ptr) 119 if (!c_ptr)
120 return 0; 120 return 0;
121 n_ptr = cluster_select_node(c_ptr, ref); 121 n_ptr = tipc_cltr_select_node(c_ptr, ref);
122 if (n_ptr) 122 if (n_ptr)
123 return n_ptr; 123 return n_ptr;
124 124
@@ -127,14 +127,14 @@ struct node *zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref)
127 c_ptr = z_ptr->clusters[c_num]; 127 c_ptr = z_ptr->clusters[c_num];
128 if (!c_ptr) 128 if (!c_ptr)
129 return 0; 129 return 0;
130 n_ptr = cluster_select_node(c_ptr, ref); 130 n_ptr = tipc_cltr_select_node(c_ptr, ref);
131 if (n_ptr) 131 if (n_ptr)
132 return n_ptr; 132 return n_ptr;
133 } 133 }
134 return 0; 134 return 0;
135} 135}
136 136
137u32 zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref) 137u32 tipc_zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref)
138{ 138{
139 struct cluster *c_ptr; 139 struct cluster *c_ptr;
140 u32 c_num; 140 u32 c_num;
@@ -143,14 +143,14 @@ u32 zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref)
143 if (!z_ptr) 143 if (!z_ptr)
144 return 0; 144 return 0;
145 c_ptr = z_ptr->clusters[tipc_cluster(addr)]; 145 c_ptr = z_ptr->clusters[tipc_cluster(addr)];
146 router = c_ptr ? cluster_select_router(c_ptr, ref) : 0; 146 router = c_ptr ? tipc_cltr_select_router(c_ptr, ref) : 0;
147 if (router) 147 if (router)
148 return router; 148 return router;
149 149
150 /* Links to any other clusters within the zone? */ 150 /* Links to any other clusters within the zone? */
151 for (c_num = 1; c_num <= tipc_max_clusters; c_num++) { 151 for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
152 c_ptr = z_ptr->clusters[c_num]; 152 c_ptr = z_ptr->clusters[c_num];
153 router = c_ptr ? cluster_select_router(c_ptr, ref) : 0; 153 router = c_ptr ? tipc_cltr_select_router(c_ptr, ref) : 0;
154 if (router) 154 if (router)
155 return router; 155 return router;
156 } 156 }
@@ -158,12 +158,12 @@ u32 zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref)
158} 158}
159 159
160 160
161u32 zone_next_node(u32 addr) 161u32 tipc_zone_next_node(u32 addr)
162{ 162{
163 struct cluster *c_ptr = cluster_find(addr); 163 struct cluster *c_ptr = tipc_cltr_find(addr);
164 164
165 if (c_ptr) 165 if (c_ptr)
166 return cluster_next_node(c_ptr, addr); 166 return tipc_cltr_next_node(c_ptr, addr);
167 return 0; 167 return 0;
168} 168}
169 169
diff --git a/net/tipc/zone.h b/net/tipc/zone.h
index 4326f78d8292..267999c5a240 100644
--- a/net/tipc/zone.h
+++ b/net/tipc/zone.h
@@ -54,18 +54,18 @@ struct _zone {
54 u32 links; 54 u32 links;
55}; 55};
56 56
57struct node *zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref); 57struct node *tipc_zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref);
58u32 zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref); 58u32 tipc_zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref);
59void zone_remove_as_router(struct _zone *z_ptr, u32 router); 59void tipc_zone_remove_as_router(struct _zone *z_ptr, u32 router);
60void zone_send_external_routes(struct _zone *z_ptr, u32 dest); 60void tipc_zone_send_external_routes(struct _zone *z_ptr, u32 dest);
61struct _zone *zone_create(u32 addr); 61struct _zone *tipc_zone_create(u32 addr);
62void zone_delete(struct _zone *z_ptr); 62void tipc_zone_delete(struct _zone *z_ptr);
63void zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr); 63void tipc_zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr);
64u32 zone_next_node(u32 addr); 64u32 tipc_zone_next_node(u32 addr);
65 65
66static inline struct _zone *zone_find(u32 addr) 66static inline struct _zone *tipc_zone_find(u32 addr)
67{ 67{
68 return net.zones[tipc_zone(addr)]; 68 return tipc_net.zones[tipc_zone(addr)];
69} 69}
70 70
71#endif 71#endif
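tipc_zone_find() above resolves a zone object by indexing tipc_net.zones[] with the zone field of the 32-bit TIPC address. The sketch below shows the <Z.C.N> packing that lookup relies on, assuming the usual TIPC layout of zone in bits 31-24, cluster in bits 23-12 and node in bits 11-0 (addr.h itself is not part of these hunks); the helper names are local stand-ins for tipc_addr()/tipc_zone():

#include <stdio.h>
#include <stdint.h>

/* Assumed <Z.C.N> layout: zone 31-24, cluster 23-12, node 11-0. */
static uint32_t make_addr(uint32_t z, uint32_t c, uint32_t n)
{
	return (z << 24) | (c << 12) | n;
}

static uint32_t addr_zone(uint32_t addr)    { return addr >> 24; }
static uint32_t addr_cluster(uint32_t addr) { return (addr >> 12) & 0xfff; }
static uint32_t addr_node(uint32_t addr)    { return addr & 0xfff; }

int main(void)
{
	uint32_t addr = make_addr(1, 1, 10);	/* <1.1.10> */

	printf("<%u.%u.%u> = 0x%08x\n",
	       addr_zone(addr), addr_cluster(addr), addr_node(addr), addr);
	/* a zone_find-style lookup would then index a per-zone array:
	 * zones[addr_zone(addr)] */
	return 0;
}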