Diffstat (limited to 'net/tipc/bcast.c')
-rw-r--r--  net/tipc/bcast.c  |  98
1 files changed, 49 insertions, 49 deletions
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 730c5c47ed8d..e7880172ef19 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -1,6 +1,6 @@
/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, Wind River Systems
@@ -59,15 +59,15 @@
 * Loss rate for incoming broadcast frames; used to test retransmission code.
 * Set to N to cause every N'th frame to be discarded; 0 => don't discard any.
 */

#define TIPC_BCAST_LOSS_RATE 0

/**
 * struct bcbearer_pair - a pair of bearers used by broadcast link
 * @primary: pointer to primary bearer
 * @secondary: pointer to secondary bearer
 *
 * Bearers must have same priority and same set of reachable destinations
 * to be paired.
 */

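TIPC_BCAST_LOSS_RATE above is purely a test hook: when set to N, every N'th incoming broadcast frame is deliberately discarded so the retransmission code gets exercised; 0 disables the behaviour. A minimal stand-alone sketch of such a gate (the function and counter below are illustrative, not the kernel's own code):

    #include <stdbool.h>

    #define TIPC_BCAST_LOSS_RATE 0  /* 0 => never drop; N => drop every N'th frame */

    /* Return true when the current incoming frame should be discarded. */
    static bool bcast_should_drop(void)
    {
    #if TIPC_BCAST_LOSS_RATE
            static unsigned int rx_count;

            if (++rx_count == TIPC_BCAST_LOSS_RATE) {
                    rx_count = 0;           /* restart the cycle */
                    return true;            /* simulate loss of this frame */
            }
    #endif
            return false;
    }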
@@ -84,7 +84,7 @@ struct bcbearer_pair {
 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
 * @remains: temporary node map used by tipc_bcbearer_send()
 * @remains_new: temporary node map used by tipc_bcbearer_send()
 *
 * Note: The fields labelled "temporary" are incorporated into the bearer
 * to avoid consuming potentially limited stack space through the use of
 * large local variables within multicast routines. Concurrent access is
@@ -104,7 +104,7 @@ struct bcbearer {
 * struct bclink - link used for broadcast messages
 * @link: (non-standard) broadcast link structure
 * @node: (non-standard) node structure representing b'cast link's peer node
 *
 * Handles sequence numbering, fragmentation, bundling, etc.
 */

@@ -125,7 +125,7 @@ char tipc_bclink_name[] = "multicast-link";
static u32 buf_seqno(struct sk_buff *buf)
{
        return msg_seqno(buf_msg(buf));
}

static u32 bcbuf_acks(struct sk_buff *buf)
{
@@ -143,9 +143,9 @@ static void bcbuf_decr_acks(struct sk_buff *buf)
}


/**
 * bclink_set_gap - set gap according to contents of current deferred pkt queue
 *
 * Called with 'node' locked, bc_lock unlocked
 */

@@ -159,14 +159,14 @@ static void bclink_set_gap(struct node *n_ptr)
                n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
}

/**
 * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
 *
 * This mechanism endeavours to prevent all nodes in the network from trying
 * to ACK or NACK at the same time.
 *
 * Note: TIPC uses a different trigger to distribute ACKs than it does to
 * distribute NACKs, but tries to use the same spacing (divide by 16).
 */

static int bclink_ack_allowed(u32 n)
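The "divide by 16" spacing works by giving each node its own slot in the sequence-number space, so ACK/NACK traffic from different nodes is spread out rather than bursting at once. A stand-alone sketch of that idea (the spacing constant and tag handling below are assumptions for illustration, not the kernel's exact test):

    #define BCAST_ACK_SPACING 16            /* assumed spacing, per the comment above */

    /*
     * 'n' counts packets seen so far; 'own_tag' is this node's tag (a stand-in
     * for tipc_own_tag). A node only answers when the count falls into its own
     * 1/16th slot, so peers take turns ACK'ing/NACK'ing.
     */
    static int ack_allowed_sketch(unsigned int n, unsigned int own_tag)
    {
            return (n % BCAST_ACK_SPACING) == (own_tag % BCAST_ACK_SPACING);
    }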
@@ -175,11 +175,11 @@ static int bclink_ack_allowed(u32 n)
}


/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bc_lock locked
 */

@@ -189,16 +189,16 @@ static void bclink_retransmit_pkt(u32 after, u32 to)

        buf = bcl->first_out;
        while (buf && less_eq(buf_seqno(buf), after)) {
                buf = buf->next;
        }
        tipc_link_retransmit(bcl, buf, mod(to - after));
}

/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bc_lock unlocked.
 */

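Acknowledgement handling rests on the per-buffer ack count seen in bcbuf_acks()/bcbuf_decr_acks() above: every queued broadcast packet remembers how many peers still owe an ACK, and a packet is released only once that count reaches zero. A simplified user-space sketch of that bookkeeping (types and names are stand-ins; sequence-number wraparound, which the kernel handles with mod()/less_eq(), is ignored here):

    #include <stdlib.h>

    struct bcast_pkt {
            unsigned int seqno;             /* broadcast sequence number */
            unsigned int acks_pending;      /* peers that have not acked yet */
            struct bcast_pkt *next;
    };

    /* One peer has acknowledged everything up to and including 'acked'. */
    static struct bcast_pkt *bclink_ack_sketch(struct bcast_pkt *first_out,
                                               unsigned int acked)
    {
            struct bcast_pkt *pkt;

            /* This peer no longer counts against the covered packets. */
            for (pkt = first_out; pkt && pkt->seqno <= acked; pkt = pkt->next)
                    pkt->acks_pending--;

            /* Release the leading packets that every peer has now acked. */
            while (first_out && first_out->acks_pending == 0) {
                    pkt = first_out;
                    first_out = first_out->next;
                    free(pkt);
            }
            return first_out;
    }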
@@ -244,9 +244,9 @@ void tipc_bclink_acknowledge(struct node *n_ptr, u32 acked)
        spin_unlock_bh(&bc_lock);
}

/**
 * bclink_send_ack - unicast an ACK msg
 *
 * tipc_net_lock and node lock set
 */

@@ -258,9 +258,9 @@ static void bclink_send_ack(struct node *n_ptr)
                tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
}

/**
 * bclink_send_nack - broadcast a NACK msg
 *
 * tipc_net_lock and node lock set
 */

@@ -278,7 +278,7 @@ static void bclink_send_nack(struct node *n_ptr)
                msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
                         TIPC_OK, INT_H_SIZE, n_ptr->addr);
                msg_set_mc_netid(msg, tipc_net_id);
                msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
                msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
                msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
                msg_set_bcast_tag(msg, tipc_own_tag);
@@ -292,17 +292,17 @@ static void bclink_send_nack(struct node *n_ptr)
                        bcl->stats.bearer_congs++;
                }

                /*
                 * Ensure we don't send another NACK msg to the node
                 * until 16 more deferred messages arrive from it
                 * (i.e. helps prevent all nodes from NACK'ing at same time)
                 */

                n_ptr->bclink.nack_sync = tipc_own_tag;
        }
}

/**
 * tipc_bclink_check_gap - send a NACK if a sequence gap exists
 *
 * tipc_net_lock and node lock set
@@ -320,9 +320,9 @@ void tipc_bclink_check_gap(struct node *n_ptr, u32 last_sent)
                bclink_send_nack(n_ptr);
}

/**
 * tipc_bclink_peek_nack - process a NACK msg meant for another node
 *
 * Only tipc_net_lock set.
 */

@@ -349,7 +349,7 @@ static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 g
                        if (less_eq(my_to, gap_to))
                                n_ptr->bclink.gap_to = gap_after;
                } else {
                        /*
                         * Expand gap if missing bufs not in deferred queue:
                         */
                        struct sk_buff *buf = n_ptr->bclink.deferred_head;
@@ -371,7 +371,7 @@ static void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 g
                }
                /*
                 * Some nodes may send a complementary NACK now:
                 */
                if (bclink_ack_allowed(sender_tag + 1)) {
                        if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
                                bclink_send_nack(n_ptr);
@@ -408,7 +408,7 @@ int tipc_bclink_send_msg(struct sk_buff *buf)

/**
 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
 *
 * tipc_net_lock is read_locked, no other locks set
 */

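The receive path follows the usual sequenced-broadcast pattern: a packet that is the next expected one is delivered upwards at once, an already-seen packet is dropped as a duplicate, and anything beyond the expected number is parked on the deferred queue and may trigger a NACK for the gap (subject to bclink_ack_allowed()). A compact sketch of that decision, ignoring wraparound, with illustrative names:

    enum rx_action { RX_DELIVER, RX_DUPLICATE, RX_DEFER_AND_MAYBE_NACK };

    /* 'last_in' is the last in-sequence packet delivered so far. */
    static enum rx_action bclink_rx_sketch(unsigned int last_in, unsigned int seqno)
    {
            if (seqno == last_in + 1)
                    return RX_DELIVER;              /* next expected packet */
            if (seqno <= last_in)
                    return RX_DUPLICATE;            /* already seen: discard */
            return RX_DEFER_AND_MAYBE_NACK;         /* gap: defer and report it */
    }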
@@ -425,7 +425,7 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)

        msg_dbg(msg, "<BC<<<");

        if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported ||
                     (msg_mc_netid(msg) != tipc_net_id))) {
                buf_discard(buf);
                return;
@@ -443,7 +443,7 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
                        bclink_retransmit_pkt(msg_bcgap_after(msg),
                                              msg_bcgap_to(msg));
                        bcl->owner->next = NULL;
                        spin_unlock_bh(&bc_lock);
                } else {
                        tipc_bclink_peek_nack(msg_destnode(msg),
                                              msg_bcast_tag(msg),
@@ -547,10 +547,10 @@ u32 tipc_bclink_acks_missing(struct node *n_ptr)

/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send through as many bearers as necessary to reach all nodes
 * that support TIPC multicasting.
 *
 * Returns 0 if packet sent successfully, non-zero if not
 */

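The pseudo-bearer fans one packet out over the real bearers: starting from the full set of multicast-capable nodes ("remains"), it walks the sorted bearer pairs in priority order, sends on each usable bearer, and shrinks the pending set until it is empty or the pairs run out. A toy stand-alone sketch of that loop, with node maps reduced to bitmasks and only the primary bearer of each pair modelled (all names are illustrative):

    #include <stdint.h>

    #define MAX_BEARERS 8                   /* assumed bound for the sketch */

    struct bearer_sketch {
            uint32_t reaches;               /* nodes reachable through this bearer */
            int congested;                  /* non-zero => sending would block */
    };

    /* Returns 0 once every node in 'all_nodes' has been covered, -1 otherwise. */
    static int bcbearer_send_sketch(struct bearer_sketch *pairs[MAX_BEARERS],
                                    uint32_t all_nodes)
    {
            uint32_t remains = all_nodes;   /* nodes still to be reached */

            for (int i = 0; i < MAX_BEARERS && remains; i++) {
                    struct bearer_sketch *p = pairs[i];

                    if (!p || p->congested)
                            continue;       /* no usable bearer at this priority */

                    /* "send" on this bearer, then drop the nodes it reaches */
                    remains &= ~p->reaches;
            }
            return remains ? -1 : 0;        /* -1: unable to reach all targets */
    }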
@@ -581,7 +581,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
                send_count = 0;

        /* Send buffer over bearers until all targets reached */

        bcbearer->remains = tipc_cltr_bcast_nodes;

        for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
@@ -615,7 +615,7 @@ update:

                bcbearer->remains = bcbearer->remains_new;
        }

        /* Unable to reach all targets */

        bcbearer->bearer.publ.blocked = 1;
@@ -682,7 +682,7 @@ void tipc_bcbearer_sort(void)

/**
 * tipc_bcbearer_push - resolve bearer congestion
 *
 * Forces bclink to push out any unsent packets, until all packets are gone
 * or congestion reoccurs.
 * No locks set when function called
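Once the bearer reports that congestion has cleared, the push routine simply re-drives the broadcast link: keep handing queued packets to the bearer until the queue drains or congestion comes back. A bare sketch of that loop; push_one_packet() is a hypothetical helper standing in for the link-layer push call, not a TIPC function:

    /* Hypothetical helper: 1 = packet sent, 0 = queue empty, -1 = congested again. */
    extern int push_one_packet(void);

    static void bcbearer_push_sketch(void)
    {
            int res;

            do {
                    res = push_one_packet();
            } while (res > 0);              /* stop when empty (0) or congested (-1) */
    }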
@@ -714,27 +714,27 @@ int tipc_bclink_stats(char *buf, const u32 buf_size)
        spin_lock_bh(&bc_lock);

        tipc_printf(&pb, "Link <%s>\n"
                         " Window:%u packets\n",
                    bcl->name, bcl->queue_limit[0]);
        tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
                    bcl->stats.recv_info,
                    bcl->stats.recv_fragments,
                    bcl->stats.recv_fragmented,
                    bcl->stats.recv_bundles,
                    bcl->stats.recv_bundled);
        tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
                    bcl->stats.sent_info,
                    bcl->stats.sent_fragments,
                    bcl->stats.sent_fragmented,
                    bcl->stats.sent_bundles,
                    bcl->stats.sent_bundled);
        tipc_printf(&pb, " RX naks:%u defs:%u dups:%u\n",
                    bcl->stats.recv_nacks,
                    bcl->stats.deferred_recv,
                    bcl->stats.duplicates);
        tipc_printf(&pb, " TX naks:%u acks:%u dups:%u\n",
                    bcl->stats.sent_nacks,
                    bcl->stats.sent_acks,
                    bcl->stats.retransmitted);
        tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n",
                    bcl->stats.bearer_congs,
@@ -778,7 +778,7 @@ int tipc_bclink_init(void)
        bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
        if (!bcbearer || !bclink) {
 nomem:
                warn("Multicast link creation failed, no memory\n");
                kfree(bcbearer);
                bcbearer = NULL;
                kfree(bclink);
@@ -796,7 +796,7 @@ int tipc_bclink_init(void)
        bcl->next_out_no = 1;
        spin_lock_init(&bclink->node.lock);
        bcl->owner = &bclink->node;
        bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
        tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
        bcl->b_ptr = &bcbearer->bearer;
        bcl->state = WORKING_WORKING;