aboutsummaryrefslogtreecommitdiffstats
path: root/net/tipc
diff options
context:
space:
mode:
authorYing Xue <ying.xue@windriver.com>2014-05-04 20:56:15 -0400
committerDavid S. Miller <davem@davemloft.net>2014-05-05 17:26:44 -0400
commitd69afc90b8d47e471d2870f090f662e569b08407 (patch)
tree7c79d103af656d7912358e09f9350ead1bf7e727 /net/tipc
parentca0c42732c512a12fabe677594840f31861dd31a (diff)
tipc: define new functions to operate bc_lock
As we are going to do more work after bc_lock is released, the two operations of taking/releasing the lock should be encapsulated in functions. In addition, we move the bc_lock spinlock into the tipc_bclink structure to avoid defining it as a global variable. Signed-off-by: Ying Xue <ying.xue@windriver.com> Reviewed-by: Erik Hugne <erik.hugne@ericsson.com> Reviewed-by: Jon Maloy <jon.maloy@ericsson.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/tipc')
-rw-r--r--net/tipc/bcast.c96
1 file changed, 53 insertions, 43 deletions
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 119a59b4bec6..9eceaa72f21b 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -71,7 +71,7 @@ struct tipc_bcbearer_pair {
71 * Note: The fields labelled "temporary" are incorporated into the bearer 71 * Note: The fields labelled "temporary" are incorporated into the bearer
72 * to avoid consuming potentially limited stack space through the use of 72 * to avoid consuming potentially limited stack space through the use of
73 * large local variables within multicast routines. Concurrent access is 73 * large local variables within multicast routines. Concurrent access is
74 * prevented through use of the spinlock "bc_lock". 74 * prevented through use of the spinlock "bclink_lock".
75 */ 75 */
76struct tipc_bcbearer { 76struct tipc_bcbearer {
77 struct tipc_bearer bearer; 77 struct tipc_bearer bearer;
@@ -84,6 +84,7 @@ struct tipc_bcbearer {
84 84
85/** 85/**
86 * struct tipc_bclink - link used for broadcast messages 86 * struct tipc_bclink - link used for broadcast messages
87 * @lock: spinlock governing access to structure
87 * @link: (non-standard) broadcast link structure 88 * @link: (non-standard) broadcast link structure
88 * @node: (non-standard) node structure representing b'cast link's peer node 89 * @node: (non-standard) node structure representing b'cast link's peer node
89 * @bcast_nodes: map of broadcast-capable nodes 90 * @bcast_nodes: map of broadcast-capable nodes
@@ -92,6 +93,7 @@ struct tipc_bcbearer {
92 * Handles sequence numbering, fragmentation, bundling, etc. 93 * Handles sequence numbering, fragmentation, bundling, etc.
93 */ 94 */
94struct tipc_bclink { 95struct tipc_bclink {
96 spinlock_t lock;
95 struct tipc_link link; 97 struct tipc_link link;
96 struct tipc_node node; 98 struct tipc_node node;
97 struct tipc_node_map bcast_nodes; 99 struct tipc_node_map bcast_nodes;
@@ -105,8 +107,6 @@ static struct tipc_bcbearer *bcbearer = &bcast_bearer;
105static struct tipc_bclink *bclink = &bcast_link; 107static struct tipc_bclink *bclink = &bcast_link;
106static struct tipc_link *bcl = &bcast_link.link; 108static struct tipc_link *bcl = &bcast_link.link;
107 109
108static DEFINE_SPINLOCK(bc_lock);
109
110const char tipc_bclink_name[] = "broadcast-link"; 110const char tipc_bclink_name[] = "broadcast-link";
111 111
112static void tipc_nmap_diff(struct tipc_node_map *nm_a, 112static void tipc_nmap_diff(struct tipc_node_map *nm_a,
@@ -115,6 +115,16 @@ static void tipc_nmap_diff(struct tipc_node_map *nm_a,
115static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node); 115static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
116static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node); 116static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
117 117
118static void tipc_bclink_lock(void)
119{
120 spin_lock_bh(&bclink->lock);
121}
122
123static void tipc_bclink_unlock(void)
124{
125 spin_unlock_bh(&bclink->lock);
126}
127
118static u32 bcbuf_acks(struct sk_buff *buf) 128static u32 bcbuf_acks(struct sk_buff *buf)
119{ 129{
120 return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle; 130 return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
@@ -132,16 +142,16 @@ static void bcbuf_decr_acks(struct sk_buff *buf)
132 142
133void tipc_bclink_add_node(u32 addr) 143void tipc_bclink_add_node(u32 addr)
134{ 144{
135 spin_lock_bh(&bc_lock); 145 tipc_bclink_lock();
136 tipc_nmap_add(&bclink->bcast_nodes, addr); 146 tipc_nmap_add(&bclink->bcast_nodes, addr);
137 spin_unlock_bh(&bc_lock); 147 tipc_bclink_unlock();
138} 148}
139 149
140void tipc_bclink_remove_node(u32 addr) 150void tipc_bclink_remove_node(u32 addr)
141{ 151{
142 spin_lock_bh(&bc_lock); 152 tipc_bclink_lock();
143 tipc_nmap_remove(&bclink->bcast_nodes, addr); 153 tipc_nmap_remove(&bclink->bcast_nodes, addr);
144 spin_unlock_bh(&bc_lock); 154 tipc_bclink_unlock();
145} 155}
146 156
147static void bclink_set_last_sent(void) 157static void bclink_set_last_sent(void)
@@ -167,7 +177,7 @@ static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
167/** 177/**
168 * tipc_bclink_retransmit_to - get most recent node to request retransmission 178 * tipc_bclink_retransmit_to - get most recent node to request retransmission
169 * 179 *
170 * Called with bc_lock locked 180 * Called with bclink_lock locked
171 */ 181 */
172struct tipc_node *tipc_bclink_retransmit_to(void) 182struct tipc_node *tipc_bclink_retransmit_to(void)
173{ 183{
@@ -179,7 +189,7 @@ struct tipc_node *tipc_bclink_retransmit_to(void)
179 * @after: sequence number of last packet to *not* retransmit 189 * @after: sequence number of last packet to *not* retransmit
180 * @to: sequence number of last packet to retransmit 190 * @to: sequence number of last packet to retransmit
181 * 191 *
182 * Called with bc_lock locked 192 * Called with bclink_lock locked
183 */ 193 */
184static void bclink_retransmit_pkt(u32 after, u32 to) 194static void bclink_retransmit_pkt(u32 after, u32 to)
185{ 195{
@@ -196,7 +206,7 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
196 * @n_ptr: node that sent acknowledgement info 206 * @n_ptr: node that sent acknowledgement info
197 * @acked: broadcast sequence # that has been acknowledged 207 * @acked: broadcast sequence # that has been acknowledged
198 * 208 *
199 * Node is locked, bc_lock unlocked. 209 * Node is locked, bclink_lock unlocked.
200 */ 210 */
201void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked) 211void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
202{ 212{
@@ -204,8 +214,7 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
204 struct sk_buff *next; 214 struct sk_buff *next;
205 unsigned int released = 0; 215 unsigned int released = 0;
206 216
207 spin_lock_bh(&bc_lock); 217 tipc_bclink_lock();
208
209 /* Bail out if tx queue is empty (no clean up is required) */ 218 /* Bail out if tx queue is empty (no clean up is required) */
210 crs = bcl->first_out; 219 crs = bcl->first_out;
211 if (!crs) 220 if (!crs)
@@ -269,7 +278,7 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
269 if (unlikely(released && !list_empty(&bcl->waiting_ports))) 278 if (unlikely(released && !list_empty(&bcl->waiting_ports)))
270 tipc_link_wakeup_ports(bcl, 0); 279 tipc_link_wakeup_ports(bcl, 0);
271exit: 280exit:
272 spin_unlock_bh(&bc_lock); 281 tipc_bclink_unlock();
273} 282}
274 283
275/** 284/**
@@ -322,10 +331,10 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
322 ? buf_seqno(n_ptr->bclink.deferred_head) - 1 331 ? buf_seqno(n_ptr->bclink.deferred_head) - 1
323 : n_ptr->bclink.last_sent); 332 : n_ptr->bclink.last_sent);
324 333
325 spin_lock_bh(&bc_lock); 334 tipc_bclink_lock();
326 tipc_bearer_send(MAX_BEARERS, buf, NULL); 335 tipc_bearer_send(MAX_BEARERS, buf, NULL);
327 bcl->stats.sent_nacks++; 336 bcl->stats.sent_nacks++;
328 spin_unlock_bh(&bc_lock); 337 tipc_bclink_unlock();
329 kfree_skb(buf); 338 kfree_skb(buf);
330 339
331 n_ptr->bclink.oos_state++; 340 n_ptr->bclink.oos_state++;
@@ -362,7 +371,7 @@ int tipc_bclink_xmit(struct sk_buff *buf)
362{ 371{
363 int res; 372 int res;
364 373
365 spin_lock_bh(&bc_lock); 374 tipc_bclink_lock();
366 375
367 if (!bclink->bcast_nodes.count) { 376 if (!bclink->bcast_nodes.count) {
368 res = msg_data_sz(buf_msg(buf)); 377 res = msg_data_sz(buf_msg(buf));
@@ -377,14 +386,14 @@ int tipc_bclink_xmit(struct sk_buff *buf)
377 bcl->stats.accu_queue_sz += bcl->out_queue_size; 386 bcl->stats.accu_queue_sz += bcl->out_queue_size;
378 } 387 }
379exit: 388exit:
380 spin_unlock_bh(&bc_lock); 389 tipc_bclink_unlock();
381 return res; 390 return res;
382} 391}
383 392
384/** 393/**
385 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet 394 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
386 * 395 *
387 * Called with both sending node's lock and bc_lock taken. 396 * Called with both sending node's lock and bclink_lock taken.
388 */ 397 */
389static void bclink_accept_pkt(struct tipc_node *node, u32 seqno) 398static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
390{ 399{
@@ -439,12 +448,12 @@ void tipc_bclink_rcv(struct sk_buff *buf)
439 if (msg_destnode(msg) == tipc_own_addr) { 448 if (msg_destnode(msg) == tipc_own_addr) {
440 tipc_bclink_acknowledge(node, msg_bcast_ack(msg)); 449 tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
441 tipc_node_unlock(node); 450 tipc_node_unlock(node);
442 spin_lock_bh(&bc_lock); 451 tipc_bclink_lock();
443 bcl->stats.recv_nacks++; 452 bcl->stats.recv_nacks++;
444 bclink->retransmit_to = node; 453 bclink->retransmit_to = node;
445 bclink_retransmit_pkt(msg_bcgap_after(msg), 454 bclink_retransmit_pkt(msg_bcgap_after(msg),
446 msg_bcgap_to(msg)); 455 msg_bcgap_to(msg));
447 spin_unlock_bh(&bc_lock); 456 tipc_bclink_unlock();
448 } else { 457 } else {
449 tipc_node_unlock(node); 458 tipc_node_unlock(node);
450 bclink_peek_nack(msg); 459 bclink_peek_nack(msg);
@@ -462,20 +471,20 @@ receive:
462 /* Deliver message to destination */ 471 /* Deliver message to destination */
463 472
464 if (likely(msg_isdata(msg))) { 473 if (likely(msg_isdata(msg))) {
465 spin_lock_bh(&bc_lock); 474 tipc_bclink_lock();
466 bclink_accept_pkt(node, seqno); 475 bclink_accept_pkt(node, seqno);
467 spin_unlock_bh(&bc_lock); 476 tipc_bclink_unlock();
468 tipc_node_unlock(node); 477 tipc_node_unlock(node);
469 if (likely(msg_mcast(msg))) 478 if (likely(msg_mcast(msg)))
470 tipc_port_mcast_rcv(buf, NULL); 479 tipc_port_mcast_rcv(buf, NULL);
471 else 480 else
472 kfree_skb(buf); 481 kfree_skb(buf);
473 } else if (msg_user(msg) == MSG_BUNDLER) { 482 } else if (msg_user(msg) == MSG_BUNDLER) {
474 spin_lock_bh(&bc_lock); 483 tipc_bclink_lock();
475 bclink_accept_pkt(node, seqno); 484 bclink_accept_pkt(node, seqno);
476 bcl->stats.recv_bundles++; 485 bcl->stats.recv_bundles++;
477 bcl->stats.recv_bundled += msg_msgcnt(msg); 486 bcl->stats.recv_bundled += msg_msgcnt(msg);
478 spin_unlock_bh(&bc_lock); 487 tipc_bclink_unlock();
479 tipc_node_unlock(node); 488 tipc_node_unlock(node);
480 tipc_link_bundle_rcv(buf); 489 tipc_link_bundle_rcv(buf);
481 } else if (msg_user(msg) == MSG_FRAGMENTER) { 490 } else if (msg_user(msg) == MSG_FRAGMENTER) {
@@ -485,28 +494,28 @@ receive:
485 &buf); 494 &buf);
486 if (ret == LINK_REASM_ERROR) 495 if (ret == LINK_REASM_ERROR)
487 goto unlock; 496 goto unlock;
488 spin_lock_bh(&bc_lock); 497 tipc_bclink_lock();
489 bclink_accept_pkt(node, seqno); 498 bclink_accept_pkt(node, seqno);
490 bcl->stats.recv_fragments++; 499 bcl->stats.recv_fragments++;
491 if (ret == LINK_REASM_COMPLETE) { 500 if (ret == LINK_REASM_COMPLETE) {
492 bcl->stats.recv_fragmented++; 501 bcl->stats.recv_fragmented++;
493 /* Point msg to inner header */ 502 /* Point msg to inner header */
494 msg = buf_msg(buf); 503 msg = buf_msg(buf);
495 spin_unlock_bh(&bc_lock); 504 tipc_bclink_unlock();
496 goto receive; 505 goto receive;
497 } 506 }
498 spin_unlock_bh(&bc_lock); 507 tipc_bclink_unlock();
499 tipc_node_unlock(node); 508 tipc_node_unlock(node);
500 } else if (msg_user(msg) == NAME_DISTRIBUTOR) { 509 } else if (msg_user(msg) == NAME_DISTRIBUTOR) {
501 spin_lock_bh(&bc_lock); 510 tipc_bclink_lock();
502 bclink_accept_pkt(node, seqno); 511 bclink_accept_pkt(node, seqno);
503 spin_unlock_bh(&bc_lock); 512 tipc_bclink_unlock();
504 tipc_node_unlock(node); 513 tipc_node_unlock(node);
505 tipc_named_rcv(buf); 514 tipc_named_rcv(buf);
506 } else { 515 } else {
507 spin_lock_bh(&bc_lock); 516 tipc_bclink_lock();
508 bclink_accept_pkt(node, seqno); 517 bclink_accept_pkt(node, seqno);
509 spin_unlock_bh(&bc_lock); 518 tipc_bclink_unlock();
510 tipc_node_unlock(node); 519 tipc_node_unlock(node);
511 kfree_skb(buf); 520 kfree_skb(buf);
512 } 521 }
@@ -552,14 +561,14 @@ receive:
552 } else 561 } else
553 deferred = 0; 562 deferred = 0;
554 563
555 spin_lock_bh(&bc_lock); 564 tipc_bclink_lock();
556 565
557 if (deferred) 566 if (deferred)
558 bcl->stats.deferred_recv++; 567 bcl->stats.deferred_recv++;
559 else 568 else
560 bcl->stats.duplicates++; 569 bcl->stats.duplicates++;
561 570
562 spin_unlock_bh(&bc_lock); 571 tipc_bclink_unlock();
563 572
564unlock: 573unlock:
565 tipc_node_unlock(node); 574 tipc_node_unlock(node);
@@ -663,7 +672,7 @@ void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
663 int b_index; 672 int b_index;
664 int pri; 673 int pri;
665 674
666 spin_lock_bh(&bc_lock); 675 tipc_bclink_lock();
667 676
668 if (action) 677 if (action)
669 tipc_nmap_add(nm_ptr, node); 678 tipc_nmap_add(nm_ptr, node);
@@ -710,7 +719,7 @@ void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
710 bp_curr++; 719 bp_curr++;
711 } 720 }
712 721
713 spin_unlock_bh(&bc_lock); 722 tipc_bclink_unlock();
714} 723}
715 724
716 725
@@ -722,7 +731,7 @@ int tipc_bclink_stats(char *buf, const u32 buf_size)
722 if (!bcl) 731 if (!bcl)
723 return 0; 732 return 0;
724 733
725 spin_lock_bh(&bc_lock); 734 tipc_bclink_lock();
726 735
727 s = &bcl->stats; 736 s = &bcl->stats;
728 737
@@ -751,7 +760,7 @@ int tipc_bclink_stats(char *buf, const u32 buf_size)
751 s->queue_sz_counts ? 760 s->queue_sz_counts ?
752 (s->accu_queue_sz / s->queue_sz_counts) : 0); 761 (s->accu_queue_sz / s->queue_sz_counts) : 0);
753 762
754 spin_unlock_bh(&bc_lock); 763 tipc_bclink_unlock();
755 return ret; 764 return ret;
756} 765}
757 766
@@ -760,9 +769,9 @@ int tipc_bclink_reset_stats(void)
760 if (!bcl) 769 if (!bcl)
761 return -ENOPROTOOPT; 770 return -ENOPROTOOPT;
762 771
763 spin_lock_bh(&bc_lock); 772 tipc_bclink_lock();
764 memset(&bcl->stats, 0, sizeof(bcl->stats)); 773 memset(&bcl->stats, 0, sizeof(bcl->stats));
765 spin_unlock_bh(&bc_lock); 774 tipc_bclink_unlock();
766 return 0; 775 return 0;
767} 776}
768 777
@@ -773,9 +782,9 @@ int tipc_bclink_set_queue_limits(u32 limit)
773 if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN)) 782 if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
774 return -EINVAL; 783 return -EINVAL;
775 784
776 spin_lock_bh(&bc_lock); 785 tipc_bclink_lock();
777 tipc_link_set_queue_limits(bcl, limit); 786 tipc_link_set_queue_limits(bcl, limit);
778 spin_unlock_bh(&bc_lock); 787 tipc_bclink_unlock();
779 return 0; 788 return 0;
780} 789}
781 790
@@ -785,6 +794,7 @@ void tipc_bclink_init(void)
785 bcbearer->media.send_msg = tipc_bcbearer_send; 794 bcbearer->media.send_msg = tipc_bcbearer_send;
786 sprintf(bcbearer->media.name, "tipc-broadcast"); 795 sprintf(bcbearer->media.name, "tipc-broadcast");
787 796
797 spin_lock_init(&bclink->lock);
788 INIT_LIST_HEAD(&bcl->waiting_ports); 798 INIT_LIST_HEAD(&bcl->waiting_ports);
789 bcl->next_out_no = 1; 799 bcl->next_out_no = 1;
790 spin_lock_init(&bclink->node.lock); 800 spin_lock_init(&bclink->node.lock);
@@ -799,9 +809,9 @@ void tipc_bclink_init(void)
799 809
800void tipc_bclink_stop(void) 810void tipc_bclink_stop(void)
801{ 811{
802 spin_lock_bh(&bc_lock); 812 tipc_bclink_lock();
803 tipc_link_purge_queues(bcl); 813 tipc_link_purge_queues(bcl);
804 spin_unlock_bh(&bc_lock); 814 tipc_bclink_unlock();
805 815
806 RCU_INIT_POINTER(bearer_list[BCBEARER], NULL); 816 RCU_INIT_POINTER(bearer_list[BCBEARER], NULL);
807 memset(bclink, 0, sizeof(*bclink)); 817 memset(bclink, 0, sizeof(*bclink));