diff options
author | Per Liden <per.liden@ericsson.com> | 2006-01-17 18:38:21 -0500 |
---|---|---|
committer | Per Liden <per.liden@ericsson.com> | 2006-01-17 18:45:16 -0500 |
commit | 4323add67792ced172d0d93b8b2e6187023115f1 (patch) | |
tree | 13224010f6f18029fb710a1e0b48392aea90b486 /net/tipc/bcast.c | |
parent | 1e63e681e06d438fdc542d40924a4f155d461bbd (diff) |
[TIPC] Avoid polluting the global namespace
This patch adds a tipc_ prefix to all externally visible symbols.
Signed-off-by: Per Liden <per.liden@ericsson.com>
Diffstat (limited to 'net/tipc/bcast.c')
-rw-r--r-- | net/tipc/bcast.c | 162 |
1 file changed, 81 insertions, 81 deletions
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index af9743a52d6c..a7b04f397c12 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c | |||
@@ -104,7 +104,7 @@ static struct bclink *bclink = NULL; | |||
104 | static struct link *bcl = NULL; | 104 | static struct link *bcl = NULL; |
105 | static spinlock_t bc_lock = SPIN_LOCK_UNLOCKED; | 105 | static spinlock_t bc_lock = SPIN_LOCK_UNLOCKED; |
106 | 106 | ||
107 | char bc_link_name[] = "multicast-link"; | 107 | char tipc_bclink_name[] = "multicast-link"; |
108 | 108 | ||
109 | 109 | ||
110 | static inline u32 buf_seqno(struct sk_buff *buf) | 110 | static inline u32 buf_seqno(struct sk_buff *buf) |
@@ -178,19 +178,19 @@ static void bclink_retransmit_pkt(u32 after, u32 to) | |||
178 | buf = buf->next; | 178 | buf = buf->next; |
179 | } | 179 | } |
180 | if (buf != NULL) | 180 | if (buf != NULL) |
181 | link_retransmit(bcl, buf, mod(to - after)); | 181 | tipc_link_retransmit(bcl, buf, mod(to - after)); |
182 | spin_unlock_bh(&bc_lock); | 182 | spin_unlock_bh(&bc_lock); |
183 | } | 183 | } |
184 | 184 | ||
185 | /** | 185 | /** |
186 | * bclink_acknowledge - handle acknowledgement of broadcast packets | 186 | * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets |
187 | * @n_ptr: node that sent acknowledgement info | 187 | * @n_ptr: node that sent acknowledgement info |
188 | * @acked: broadcast sequence # that has been acknowledged | 188 | * @acked: broadcast sequence # that has been acknowledged |
189 | * | 189 | * |
190 | * Node is locked, bc_lock unlocked. | 190 | * Node is locked, bc_lock unlocked. |
191 | */ | 191 | */ |
192 | 192 | ||
193 | void bclink_acknowledge(struct node *n_ptr, u32 acked) | 193 | void tipc_bclink_acknowledge(struct node *n_ptr, u32 acked) |
194 | { | 194 | { |
195 | struct sk_buff *crs; | 195 | struct sk_buff *crs; |
196 | struct sk_buff *next; | 196 | struct sk_buff *next; |
@@ -226,16 +226,16 @@ void bclink_acknowledge(struct node *n_ptr, u32 acked) | |||
226 | /* Try resolving broadcast link congestion, if necessary */ | 226 | /* Try resolving broadcast link congestion, if necessary */ |
227 | 227 | ||
228 | if (unlikely(bcl->next_out)) | 228 | if (unlikely(bcl->next_out)) |
229 | link_push_queue(bcl); | 229 | tipc_link_push_queue(bcl); |
230 | if (unlikely(released && !list_empty(&bcl->waiting_ports))) | 230 | if (unlikely(released && !list_empty(&bcl->waiting_ports))) |
231 | link_wakeup_ports(bcl, 0); | 231 | tipc_link_wakeup_ports(bcl, 0); |
232 | spin_unlock_bh(&bc_lock); | 232 | spin_unlock_bh(&bc_lock); |
233 | } | 233 | } |
234 | 234 | ||
235 | /** | 235 | /** |
236 | * bclink_send_ack - unicast an ACK msg | 236 | * bclink_send_ack - unicast an ACK msg |
237 | * | 237 | * |
238 | * net_lock and node lock set | 238 | * tipc_net_lock and node lock set |
239 | */ | 239 | */ |
240 | 240 | ||
241 | static void bclink_send_ack(struct node *n_ptr) | 241 | static void bclink_send_ack(struct node *n_ptr) |
@@ -243,13 +243,13 @@ static void bclink_send_ack(struct node *n_ptr) | |||
243 | struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1]; | 243 | struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1]; |
244 | 244 | ||
245 | if (l_ptr != NULL) | 245 | if (l_ptr != NULL) |
246 | link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); | 246 | tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); |
247 | } | 247 | } |
248 | 248 | ||
249 | /** | 249 | /** |
250 | * bclink_send_nack- broadcast a NACK msg | 250 | * bclink_send_nack- broadcast a NACK msg |
251 | * | 251 | * |
252 | * net_lock and node lock set | 252 | * tipc_net_lock and node lock set |
253 | */ | 253 | */ |
254 | 254 | ||
255 | static void bclink_send_nack(struct node *n_ptr) | 255 | static void bclink_send_nack(struct node *n_ptr) |
@@ -271,11 +271,11 @@ static void bclink_send_nack(struct node *n_ptr) | |||
271 | msg_set_bcgap_to(msg, n_ptr->bclink.gap_to); | 271 | msg_set_bcgap_to(msg, n_ptr->bclink.gap_to); |
272 | msg_set_bcast_tag(msg, tipc_own_tag); | 272 | msg_set_bcast_tag(msg, tipc_own_tag); |
273 | 273 | ||
274 | if (bearer_send(&bcbearer->bearer, buf, 0)) { | 274 | if (tipc_bearer_send(&bcbearer->bearer, buf, 0)) { |
275 | bcl->stats.sent_nacks++; | 275 | bcl->stats.sent_nacks++; |
276 | buf_discard(buf); | 276 | buf_discard(buf); |
277 | } else { | 277 | } else { |
278 | bearer_schedule(bcl->b_ptr, bcl); | 278 | tipc_bearer_schedule(bcl->b_ptr, bcl); |
279 | bcl->proto_msg_queue = buf; | 279 | bcl->proto_msg_queue = buf; |
280 | bcl->stats.bearer_congs++; | 280 | bcl->stats.bearer_congs++; |
281 | } | 281 | } |
@@ -291,12 +291,12 @@ static void bclink_send_nack(struct node *n_ptr) | |||
291 | } | 291 | } |
292 | 292 | ||
293 | /** | 293 | /** |
294 | * bclink_check_gap - send a NACK if a sequence gap exists | 294 | * tipc_bclink_check_gap - send a NACK if a sequence gap exists |
295 | * | 295 | * |
296 | * net_lock and node lock set | 296 | * tipc_net_lock and node lock set |
297 | */ | 297 | */ |
298 | 298 | ||
299 | void bclink_check_gap(struct node *n_ptr, u32 last_sent) | 299 | void tipc_bclink_check_gap(struct node *n_ptr, u32 last_sent) |
300 | { | 300 | { |
301 | if (!n_ptr->bclink.supported || | 301 | if (!n_ptr->bclink.supported || |
302 | less_eq(last_sent, mod(n_ptr->bclink.last_in))) | 302 | less_eq(last_sent, mod(n_ptr->bclink.last_in))) |
@@ -309,19 +309,19 @@ void bclink_check_gap(struct node *n_ptr, u32 last_sent) | |||
309 | } | 309 | } |
310 | 310 | ||
311 | /** | 311 | /** |
312 | * bclink_peek_nack - process a NACK msg meant for another node | 312 | * tipc_bclink_peek_nack - process a NACK msg meant for another node |
313 | * | 313 | * |
314 | * Only net_lock set. | 314 | * Only tipc_net_lock set. |
315 | */ | 315 | */ |
316 | 316 | ||
317 | void bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to) | 317 | void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to) |
318 | { | 318 | { |
319 | struct node *n_ptr = node_find(dest); | 319 | struct node *n_ptr = tipc_node_find(dest); |
320 | u32 my_after, my_to; | 320 | u32 my_after, my_to; |
321 | 321 | ||
322 | if (unlikely(!n_ptr || !node_is_up(n_ptr))) | 322 | if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr))) |
323 | return; | 323 | return; |
324 | node_lock(n_ptr); | 324 | tipc_node_lock(n_ptr); |
325 | /* | 325 | /* |
326 | * Modify gap to suppress unnecessary NACKs from this node | 326 | * Modify gap to suppress unnecessary NACKs from this node |
327 | */ | 327 | */ |
@@ -364,20 +364,20 @@ void bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to) | |||
364 | bclink_set_gap(n_ptr); | 364 | bclink_set_gap(n_ptr); |
365 | } | 365 | } |
366 | } | 366 | } |
367 | node_unlock(n_ptr); | 367 | tipc_node_unlock(n_ptr); |
368 | } | 368 | } |
369 | 369 | ||
370 | /** | 370 | /** |
371 | * bclink_send_msg - broadcast a packet to all nodes in cluster | 371 | * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster |
372 | */ | 372 | */ |
373 | 373 | ||
374 | int bclink_send_msg(struct sk_buff *buf) | 374 | int tipc_bclink_send_msg(struct sk_buff *buf) |
375 | { | 375 | { |
376 | int res; | 376 | int res; |
377 | 377 | ||
378 | spin_lock_bh(&bc_lock); | 378 | spin_lock_bh(&bc_lock); |
379 | 379 | ||
380 | res = link_send_buf(bcl, buf); | 380 | res = tipc_link_send_buf(bcl, buf); |
381 | if (unlikely(res == -ELINKCONG)) | 381 | if (unlikely(res == -ELINKCONG)) |
382 | buf_discard(buf); | 382 | buf_discard(buf); |
383 | else | 383 | else |
@@ -393,22 +393,22 @@ int bclink_send_msg(struct sk_buff *buf) | |||
393 | } | 393 | } |
394 | 394 | ||
395 | /** | 395 | /** |
396 | * bclink_recv_pkt - receive a broadcast packet, and deliver upwards | 396 | * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards |
397 | * | 397 | * |
398 | * net_lock is read_locked, no other locks set | 398 | * tipc_net_lock is read_locked, no other locks set |
399 | */ | 399 | */ |
400 | 400 | ||
401 | void bclink_recv_pkt(struct sk_buff *buf) | 401 | void tipc_bclink_recv_pkt(struct sk_buff *buf) |
402 | { | 402 | { |
403 | struct tipc_msg *msg = buf_msg(buf); | 403 | struct tipc_msg *msg = buf_msg(buf); |
404 | struct node* node = node_find(msg_prevnode(msg)); | 404 | struct node* node = tipc_node_find(msg_prevnode(msg)); |
405 | u32 next_in; | 405 | u32 next_in; |
406 | u32 seqno; | 406 | u32 seqno; |
407 | struct sk_buff *deferred; | 407 | struct sk_buff *deferred; |
408 | 408 | ||
409 | msg_dbg(msg, "<BC<<<"); | 409 | msg_dbg(msg, "<BC<<<"); |
410 | 410 | ||
411 | if (unlikely(!node || !node_is_up(node) || !node->bclink.supported || | 411 | if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported || |
412 | (msg_mc_netid(msg) != tipc_net_id))) { | 412 | (msg_mc_netid(msg) != tipc_net_id))) { |
413 | buf_discard(buf); | 413 | buf_discard(buf); |
414 | return; | 414 | return; |
@@ -417,14 +417,14 @@ void bclink_recv_pkt(struct sk_buff *buf) | |||
417 | if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) { | 417 | if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) { |
418 | msg_dbg(msg, "<BCNACK<<<"); | 418 | msg_dbg(msg, "<BCNACK<<<"); |
419 | if (msg_destnode(msg) == tipc_own_addr) { | 419 | if (msg_destnode(msg) == tipc_own_addr) { |
420 | node_lock(node); | 420 | tipc_node_lock(node); |
421 | bclink_acknowledge(node, msg_bcast_ack(msg)); | 421 | tipc_bclink_acknowledge(node, msg_bcast_ack(msg)); |
422 | node_unlock(node); | 422 | tipc_node_unlock(node); |
423 | bcl->stats.recv_nacks++; | 423 | bcl->stats.recv_nacks++; |
424 | bclink_retransmit_pkt(msg_bcgap_after(msg), | 424 | bclink_retransmit_pkt(msg_bcgap_after(msg), |
425 | msg_bcgap_to(msg)); | 425 | msg_bcgap_to(msg)); |
426 | } else { | 426 | } else { |
427 | bclink_peek_nack(msg_destnode(msg), | 427 | tipc_bclink_peek_nack(msg_destnode(msg), |
428 | msg_bcast_tag(msg), | 428 | msg_bcast_tag(msg), |
429 | msg_bcgap_after(msg), | 429 | msg_bcgap_after(msg), |
430 | msg_bcgap_to(msg)); | 430 | msg_bcgap_to(msg)); |
@@ -433,7 +433,7 @@ void bclink_recv_pkt(struct sk_buff *buf) | |||
433 | return; | 433 | return; |
434 | } | 434 | } |
435 | 435 | ||
436 | node_lock(node); | 436 | tipc_node_lock(node); |
437 | receive: | 437 | receive: |
438 | deferred = node->bclink.deferred_head; | 438 | deferred = node->bclink.deferred_head; |
439 | next_in = mod(node->bclink.last_in + 1); | 439 | next_in = mod(node->bclink.last_in + 1); |
@@ -448,26 +448,26 @@ receive: | |||
448 | bcl->stats.sent_acks++; | 448 | bcl->stats.sent_acks++; |
449 | } | 449 | } |
450 | if (likely(msg_isdata(msg))) { | 450 | if (likely(msg_isdata(msg))) { |
451 | node_unlock(node); | 451 | tipc_node_unlock(node); |
452 | port_recv_mcast(buf, NULL); | 452 | tipc_port_recv_mcast(buf, NULL); |
453 | } else if (msg_user(msg) == MSG_BUNDLER) { | 453 | } else if (msg_user(msg) == MSG_BUNDLER) { |
454 | bcl->stats.recv_bundles++; | 454 | bcl->stats.recv_bundles++; |
455 | bcl->stats.recv_bundled += msg_msgcnt(msg); | 455 | bcl->stats.recv_bundled += msg_msgcnt(msg); |
456 | node_unlock(node); | 456 | tipc_node_unlock(node); |
457 | link_recv_bundle(buf); | 457 | tipc_link_recv_bundle(buf); |
458 | } else if (msg_user(msg) == MSG_FRAGMENTER) { | 458 | } else if (msg_user(msg) == MSG_FRAGMENTER) { |
459 | bcl->stats.recv_fragments++; | 459 | bcl->stats.recv_fragments++; |
460 | if (link_recv_fragment(&node->bclink.defragm, | 460 | if (tipc_link_recv_fragment(&node->bclink.defragm, |
461 | &buf, &msg)) | 461 | &buf, &msg)) |
462 | bcl->stats.recv_fragmented++; | 462 | bcl->stats.recv_fragmented++; |
463 | node_unlock(node); | 463 | tipc_node_unlock(node); |
464 | net_route_msg(buf); | 464 | tipc_net_route_msg(buf); |
465 | } else { | 465 | } else { |
466 | node_unlock(node); | 466 | tipc_node_unlock(node); |
467 | net_route_msg(buf); | 467 | tipc_net_route_msg(buf); |
468 | } | 468 | } |
469 | if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) { | 469 | if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) { |
470 | node_lock(node); | 470 | tipc_node_lock(node); |
471 | buf = deferred; | 471 | buf = deferred; |
472 | msg = buf_msg(buf); | 472 | msg = buf_msg(buf); |
473 | node->bclink.deferred_head = deferred->next; | 473 | node->bclink.deferred_head = deferred->next; |
@@ -478,9 +478,9 @@ receive: | |||
478 | u32 gap_after = node->bclink.gap_after; | 478 | u32 gap_after = node->bclink.gap_after; |
479 | u32 gap_to = node->bclink.gap_to; | 479 | u32 gap_to = node->bclink.gap_to; |
480 | 480 | ||
481 | if (link_defer_pkt(&node->bclink.deferred_head, | 481 | if (tipc_link_defer_pkt(&node->bclink.deferred_head, |
482 | &node->bclink.deferred_tail, | 482 | &node->bclink.deferred_tail, |
483 | buf)) { | 483 | buf)) { |
484 | node->bclink.nack_sync++; | 484 | node->bclink.nack_sync++; |
485 | bcl->stats.deferred_recv++; | 485 | bcl->stats.deferred_recv++; |
486 | if (seqno == mod(gap_after + 1)) | 486 | if (seqno == mod(gap_after + 1)) |
@@ -497,10 +497,10 @@ receive: | |||
497 | bcl->stats.duplicates++; | 497 | bcl->stats.duplicates++; |
498 | buf_discard(buf); | 498 | buf_discard(buf); |
499 | } | 499 | } |
500 | node_unlock(node); | 500 | tipc_node_unlock(node); |
501 | } | 501 | } |
502 | 502 | ||
503 | u32 bclink_get_last_sent(void) | 503 | u32 tipc_bclink_get_last_sent(void) |
504 | { | 504 | { |
505 | u32 last_sent = mod(bcl->next_out_no - 1); | 505 | u32 last_sent = mod(bcl->next_out_no - 1); |
506 | 506 | ||
@@ -509,15 +509,15 @@ u32 bclink_get_last_sent(void) | |||
509 | return last_sent; | 509 | return last_sent; |
510 | } | 510 | } |
511 | 511 | ||
512 | u32 bclink_acks_missing(struct node *n_ptr) | 512 | u32 tipc_bclink_acks_missing(struct node *n_ptr) |
513 | { | 513 | { |
514 | return (n_ptr->bclink.supported && | 514 | return (n_ptr->bclink.supported && |
515 | (bclink_get_last_sent() != n_ptr->bclink.acked)); | 515 | (tipc_bclink_get_last_sent() != n_ptr->bclink.acked)); |
516 | } | 516 | } |
517 | 517 | ||
518 | 518 | ||
519 | /** | 519 | /** |
520 | * bcbearer_send - send a packet through the broadcast pseudo-bearer | 520 | * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer |
521 | * | 521 | * |
522 | * Send through as many bearers as necessary to reach all nodes | 522 | * Send through as many bearers as necessary to reach all nodes |
523 | * that support TIPC multicasting. | 523 | * that support TIPC multicasting. |
@@ -525,9 +525,9 @@ u32 bclink_acks_missing(struct node *n_ptr) | |||
525 | * Returns 0 if packet sent successfully, non-zero if not | 525 | * Returns 0 if packet sent successfully, non-zero if not |
526 | */ | 526 | */ |
527 | 527 | ||
528 | int bcbearer_send(struct sk_buff *buf, | 528 | int tipc_bcbearer_send(struct sk_buff *buf, |
529 | struct tipc_bearer *unused1, | 529 | struct tipc_bearer *unused1, |
530 | struct tipc_media_addr *unused2) | 530 | struct tipc_media_addr *unused2) |
531 | { | 531 | { |
532 | static int send_count = 0; | 532 | static int send_count = 0; |
533 | 533 | ||
@@ -541,8 +541,8 @@ int bcbearer_send(struct sk_buff *buf, | |||
541 | if (likely(!msg_non_seq(buf_msg(buf)))) { | 541 | if (likely(!msg_non_seq(buf_msg(buf)))) { |
542 | struct tipc_msg *msg; | 542 | struct tipc_msg *msg; |
543 | 543 | ||
544 | assert(cluster_bcast_nodes.count != 0); | 544 | assert(tipc_cltr_bcast_nodes.count != 0); |
545 | bcbuf_set_acks(buf, cluster_bcast_nodes.count); | 545 | bcbuf_set_acks(buf, tipc_cltr_bcast_nodes.count); |
546 | msg = buf_msg(buf); | 546 | msg = buf_msg(buf); |
547 | msg_set_non_seq(msg); | 547 | msg_set_non_seq(msg); |
548 | msg_set_mc_netid(msg, tipc_net_id); | 548 | msg_set_mc_netid(msg, tipc_net_id); |
@@ -555,7 +555,7 @@ int bcbearer_send(struct sk_buff *buf, | |||
555 | 555 | ||
556 | /* Send buffer over bearers until all targets reached */ | 556 | /* Send buffer over bearers until all targets reached */ |
557 | 557 | ||
558 | remains = cluster_bcast_nodes; | 558 | remains = tipc_cltr_bcast_nodes; |
559 | 559 | ||
560 | for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) { | 560 | for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) { |
561 | struct bearer *p = bcbearer->bpairs[bp_index].primary; | 561 | struct bearer *p = bcbearer->bpairs[bp_index].primary; |
@@ -564,7 +564,7 @@ int bcbearer_send(struct sk_buff *buf, | |||
564 | if (!p) | 564 | if (!p) |
565 | break; /* no more bearers to try */ | 565 | break; /* no more bearers to try */ |
566 | 566 | ||
567 | nmap_diff(&remains, &p->nodes, &remains_new); | 567 | tipc_nmap_diff(&remains, &p->nodes, &remains_new); |
568 | if (remains_new.count == remains.count) | 568 | if (remains_new.count == remains.count) |
569 | continue; /* bearer pair doesn't add anything */ | 569 | continue; /* bearer pair doesn't add anything */ |
570 | 570 | ||
@@ -597,10 +597,10 @@ update: | |||
597 | } | 597 | } |
598 | 598 | ||
599 | /** | 599 | /** |
600 | * bcbearer_sort - create sets of bearer pairs used by broadcast bearer | 600 | * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer |
601 | */ | 601 | */ |
602 | 602 | ||
603 | void bcbearer_sort(void) | 603 | void tipc_bcbearer_sort(void) |
604 | { | 604 | { |
605 | struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp; | 605 | struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp; |
606 | struct bcbearer_pair *bp_curr; | 606 | struct bcbearer_pair *bp_curr; |
@@ -614,7 +614,7 @@ void bcbearer_sort(void) | |||
614 | memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp)); | 614 | memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp)); |
615 | 615 | ||
616 | for (b_index = 0; b_index < MAX_BEARERS; b_index++) { | 616 | for (b_index = 0; b_index < MAX_BEARERS; b_index++) { |
617 | struct bearer *b = &bearers[b_index]; | 617 | struct bearer *b = &tipc_bearers[b_index]; |
618 | 618 | ||
619 | if (!b->active || !b->nodes.count) | 619 | if (!b->active || !b->nodes.count) |
620 | continue; | 620 | continue; |
@@ -638,8 +638,8 @@ void bcbearer_sort(void) | |||
638 | bp_curr->primary = bp_temp[pri].primary; | 638 | bp_curr->primary = bp_temp[pri].primary; |
639 | 639 | ||
640 | if (bp_temp[pri].secondary) { | 640 | if (bp_temp[pri].secondary) { |
641 | if (nmap_equal(&bp_temp[pri].primary->nodes, | 641 | if (tipc_nmap_equal(&bp_temp[pri].primary->nodes, |
642 | &bp_temp[pri].secondary->nodes)) { | 642 | &bp_temp[pri].secondary->nodes)) { |
643 | bp_curr->secondary = bp_temp[pri].secondary; | 643 | bp_curr->secondary = bp_temp[pri].secondary; |
644 | } else { | 644 | } else { |
645 | bp_curr++; | 645 | bp_curr++; |
@@ -654,14 +654,14 @@ void bcbearer_sort(void) | |||
654 | } | 654 | } |
655 | 655 | ||
656 | /** | 656 | /** |
657 | * bcbearer_push - resolve bearer congestion | 657 | * tipc_bcbearer_push - resolve bearer congestion |
658 | * | 658 | * |
659 | * Forces bclink to push out any unsent packets, until all packets are gone | 659 | * Forces bclink to push out any unsent packets, until all packets are gone |
660 | * or congestion reoccurs. | 660 | * or congestion reoccurs. |
661 | * No locks set when function called | 661 | * No locks set when function called |
662 | */ | 662 | */ |
663 | 663 | ||
664 | void bcbearer_push(void) | 664 | void tipc_bcbearer_push(void) |
665 | { | 665 | { |
666 | struct bearer *b_ptr; | 666 | struct bearer *b_ptr; |
667 | 667 | ||
@@ -669,20 +669,20 @@ void bcbearer_push(void) | |||
669 | b_ptr = &bcbearer->bearer; | 669 | b_ptr = &bcbearer->bearer; |
670 | if (b_ptr->publ.blocked) { | 670 | if (b_ptr->publ.blocked) { |
671 | b_ptr->publ.blocked = 0; | 671 | b_ptr->publ.blocked = 0; |
672 | bearer_lock_push(b_ptr); | 672 | tipc_bearer_lock_push(b_ptr); |
673 | } | 673 | } |
674 | spin_unlock_bh(&bc_lock); | 674 | spin_unlock_bh(&bc_lock); |
675 | } | 675 | } |
676 | 676 | ||
677 | 677 | ||
678 | int bclink_stats(char *buf, const u32 buf_size) | 678 | int tipc_bclink_stats(char *buf, const u32 buf_size) |
679 | { | 679 | { |
680 | struct print_buf pb; | 680 | struct print_buf pb; |
681 | 681 | ||
682 | if (!bcl) | 682 | if (!bcl) |
683 | return 0; | 683 | return 0; |
684 | 684 | ||
685 | printbuf_init(&pb, buf, buf_size); | 685 | tipc_printbuf_init(&pb, buf, buf_size); |
686 | 686 | ||
687 | spin_lock_bh(&bc_lock); | 687 | spin_lock_bh(&bc_lock); |
688 | 688 | ||
@@ -718,10 +718,10 @@ int bclink_stats(char *buf, const u32 buf_size) | |||
718 | : 0); | 718 | : 0); |
719 | 719 | ||
720 | spin_unlock_bh(&bc_lock); | 720 | spin_unlock_bh(&bc_lock); |
721 | return printbuf_validate(&pb); | 721 | return tipc_printbuf_validate(&pb); |
722 | } | 722 | } |
723 | 723 | ||
724 | int bclink_reset_stats(void) | 724 | int tipc_bclink_reset_stats(void) |
725 | { | 725 | { |
726 | if (!bcl) | 726 | if (!bcl) |
727 | return -ENOPROTOOPT; | 727 | return -ENOPROTOOPT; |
@@ -732,7 +732,7 @@ int bclink_reset_stats(void) | |||
732 | return TIPC_OK; | 732 | return TIPC_OK; |
733 | } | 733 | } |
734 | 734 | ||
735 | int bclink_set_queue_limits(u32 limit) | 735 | int tipc_bclink_set_queue_limits(u32 limit) |
736 | { | 736 | { |
737 | if (!bcl) | 737 | if (!bcl) |
738 | return -ENOPROTOOPT; | 738 | return -ENOPROTOOPT; |
@@ -740,12 +740,12 @@ int bclink_set_queue_limits(u32 limit) | |||
740 | return -EINVAL; | 740 | return -EINVAL; |
741 | 741 | ||
742 | spin_lock_bh(&bc_lock); | 742 | spin_lock_bh(&bc_lock); |
743 | link_set_queue_limits(bcl, limit); | 743 | tipc_link_set_queue_limits(bcl, limit); |
744 | spin_unlock_bh(&bc_lock); | 744 | spin_unlock_bh(&bc_lock); |
745 | return TIPC_OK; | 745 | return TIPC_OK; |
746 | } | 746 | } |
747 | 747 | ||
748 | int bclink_init(void) | 748 | int tipc_bclink_init(void) |
749 | { | 749 | { |
750 | bcbearer = kmalloc(sizeof(*bcbearer), GFP_ATOMIC); | 750 | bcbearer = kmalloc(sizeof(*bcbearer), GFP_ATOMIC); |
751 | bclink = kmalloc(sizeof(*bclink), GFP_ATOMIC); | 751 | bclink = kmalloc(sizeof(*bclink), GFP_ATOMIC); |
@@ -762,7 +762,7 @@ int bclink_init(void) | |||
762 | memset(bcbearer, 0, sizeof(struct bcbearer)); | 762 | memset(bcbearer, 0, sizeof(struct bcbearer)); |
763 | INIT_LIST_HEAD(&bcbearer->bearer.cong_links); | 763 | INIT_LIST_HEAD(&bcbearer->bearer.cong_links); |
764 | bcbearer->bearer.media = &bcbearer->media; | 764 | bcbearer->bearer.media = &bcbearer->media; |
765 | bcbearer->media.send_msg = bcbearer_send; | 765 | bcbearer->media.send_msg = tipc_bcbearer_send; |
766 | sprintf(bcbearer->media.name, "tipc-multicast"); | 766 | sprintf(bcbearer->media.name, "tipc-multicast"); |
767 | 767 | ||
768 | bcl = &bclink->link; | 768 | bcl = &bclink->link; |
@@ -772,27 +772,27 @@ int bclink_init(void) | |||
772 | bclink->node.lock = SPIN_LOCK_UNLOCKED; | 772 | bclink->node.lock = SPIN_LOCK_UNLOCKED; |
773 | bcl->owner = &bclink->node; | 773 | bcl->owner = &bclink->node; |
774 | bcl->max_pkt = MAX_PKT_DEFAULT_MCAST; | 774 | bcl->max_pkt = MAX_PKT_DEFAULT_MCAST; |
775 | link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT); | 775 | tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT); |
776 | bcl->b_ptr = &bcbearer->bearer; | 776 | bcl->b_ptr = &bcbearer->bearer; |
777 | bcl->state = WORKING_WORKING; | 777 | bcl->state = WORKING_WORKING; |
778 | sprintf(bcl->name, bc_link_name); | 778 | sprintf(bcl->name, tipc_bclink_name); |
779 | 779 | ||
780 | if (BCLINK_LOG_BUF_SIZE) { | 780 | if (BCLINK_LOG_BUF_SIZE) { |
781 | char *pb = kmalloc(BCLINK_LOG_BUF_SIZE, GFP_ATOMIC); | 781 | char *pb = kmalloc(BCLINK_LOG_BUF_SIZE, GFP_ATOMIC); |
782 | 782 | ||
783 | if (!pb) | 783 | if (!pb) |
784 | goto nomem; | 784 | goto nomem; |
785 | printbuf_init(&bcl->print_buf, pb, BCLINK_LOG_BUF_SIZE); | 785 | tipc_printbuf_init(&bcl->print_buf, pb, BCLINK_LOG_BUF_SIZE); |
786 | } | 786 | } |
787 | 787 | ||
788 | return TIPC_OK; | 788 | return TIPC_OK; |
789 | } | 789 | } |
790 | 790 | ||
791 | void bclink_stop(void) | 791 | void tipc_bclink_stop(void) |
792 | { | 792 | { |
793 | spin_lock_bh(&bc_lock); | 793 | spin_lock_bh(&bc_lock); |
794 | if (bcbearer) { | 794 | if (bcbearer) { |
795 | link_stop(bcl); | 795 | tipc_link_stop(bcl); |
796 | if (BCLINK_LOG_BUF_SIZE) | 796 | if (BCLINK_LOG_BUF_SIZE) |
797 | kfree(bcl->print_buf.buf); | 797 | kfree(bcl->print_buf.buf); |
798 | bcl = NULL; | 798 | bcl = NULL; |