Diffstat (limited to 'net/tipc/bcast.c')
-rw-r--r--   net/tipc/bcast.c   41
1 file changed, 20 insertions(+), 21 deletions(-)
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index bf860d9e75af..95ab5ef92920 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -41,9 +41,9 @@
 #include "bcast.h"
 #include "name_distr.h"
 
 #define MAX_PKT_DEFAULT_MCAST 1500	/* bcast link max packet size (fixed) */
-
 #define BCLINK_WIN_DEFAULT 20		/* bcast link window size (default) */
+#define BCBEARER		MAX_BEARERS
 
 /**
  * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
@@ -356,9 +356,9 @@ static void bclink_peek_nack(struct tipc_msg *msg)
 }
 
 /*
- * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
+ * tipc_bclink_xmit - broadcast a packet to all nodes in cluster
  */
-int tipc_bclink_send_msg(struct sk_buff *buf)
+int tipc_bclink_xmit(struct sk_buff *buf)
 {
         int res;
 
@@ -370,7 +370,7 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
                 goto exit;
         }
 
-        res = tipc_link_send_buf(bcl, buf);
+        res = __tipc_link_xmit(bcl, buf);
         if (likely(res >= 0)) {
                 bclink_set_last_sent();
                 bcl->stats.queue_sz_counts++;
@@ -399,19 +399,18 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
          */
 
         if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) {
-                tipc_link_send_proto_msg(
-                        node->active_links[node->addr & 1],
-                        STATE_MSG, 0, 0, 0, 0, 0);
+                tipc_link_proto_xmit(node->active_links[node->addr & 1],
+                                     STATE_MSG, 0, 0, 0, 0, 0);
                 bcl->stats.sent_acks++;
         }
 }
 
 /**
- * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
+ * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
  *
  * tipc_net_lock is read_locked, no other locks set
  */
-void tipc_bclink_recv_pkt(struct sk_buff *buf)
+void tipc_bclink_rcv(struct sk_buff *buf)
 {
         struct tipc_msg *msg = buf_msg(buf);
         struct tipc_node *node;
@@ -468,7 +467,7 @@ receive:
                         spin_unlock_bh(&bc_lock);
                         tipc_node_unlock(node);
                         if (likely(msg_mcast(msg)))
-                                tipc_port_recv_mcast(buf, NULL);
+                                tipc_port_mcast_rcv(buf, NULL);
                         else
                                 kfree_skb(buf);
                 } else if (msg_user(msg) == MSG_BUNDLER) {
@@ -478,12 +477,12 @@ receive:
                         bcl->stats.recv_bundled += msg_msgcnt(msg);
                         spin_unlock_bh(&bc_lock);
                         tipc_node_unlock(node);
-                        tipc_link_recv_bundle(buf);
+                        tipc_link_bundle_rcv(buf);
                 } else if (msg_user(msg) == MSG_FRAGMENTER) {
                         int ret;
-                        ret = tipc_link_recv_fragment(&node->bclink.reasm_head,
-                                                      &node->bclink.reasm_tail,
-                                                      &buf);
+                        ret = tipc_link_frag_rcv(&node->bclink.reasm_head,
+                                                 &node->bclink.reasm_tail,
+                                                 &buf);
                         if (ret == LINK_REASM_ERROR)
                                 goto unlock;
                         spin_lock_bh(&bc_lock);
@@ -503,7 +502,7 @@ receive:
                         bclink_accept_pkt(node, seqno);
                         spin_unlock_bh(&bc_lock);
                         tipc_node_unlock(node);
-                        tipc_named_recv(buf);
+                        tipc_named_rcv(buf);
                 } else {
                         spin_lock_bh(&bc_lock);
                         bclink_accept_pkt(node, seqno);
@@ -669,9 +668,8 @@ void tipc_bcbearer_sort(void)
         memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
 
         for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
-                struct tipc_bearer *b = &tipc_bearers[b_index];
-
-                if (!b->active || !b->nodes.count)
+                struct tipc_bearer *b = bearer_list[b_index];
+                if (!b || !b->nodes.count)
                         continue;
 
                 if (!bp_temp[b->priority].primary)
@@ -785,8 +783,8 @@ void tipc_bclink_init(void)
         bcl->owner = &bclink->node;
         bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
         tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
-        spin_lock_init(&bcbearer->bearer.lock);
         bcl->b_ptr = &bcbearer->bearer;
+        bearer_list[BCBEARER] = &bcbearer->bearer;
         bcl->state = WORKING_WORKING;
         strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
 }
@@ -797,6 +795,7 @@ void tipc_bclink_stop(void)
         tipc_link_purge_queues(bcl);
         spin_unlock_bh(&bc_lock);
 
+        bearer_list[BCBEARER] = NULL;
         memset(bclink, 0, sizeof(*bclink));
         memset(bcbearer, 0, sizeof(*bcbearer));
 }