author:    Wolfram Sang <wsa@the-dreams.de>    2014-06-17 08:36:41 -0400
committer: Wolfram Sang <wsa@the-dreams.de>    2014-06-17 08:37:31 -0400
commit:    f0b1f6442b5090fed3529cb39f3acf8c91693d3d (patch)
tree:      bc5f62b017a82161c9a7f892f464813f6efd5bf3 /net/tipc
parent:    4632a93f015caf6d7db4352f37aab74a39e60d7a (diff)
parent:    7171511eaec5bf23fb06078f59784a3a0626b38f (diff)

Merge tag 'v3.16-rc1' into i2c/for-next

Merge a stable base (Linux 3.16-rc1).

Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
Diffstat (limited to 'net/tipc')
 net/tipc/Makefile      |   2
 net/tipc/bcast.c       | 194
 net/tipc/bcast.h       |   9
 net/tipc/bearer.c      | 153
 net/tipc/bearer.h      |  47
 net/tipc/config.c      |  12
 net/tipc/core.c        |  14
 net/tipc/core.h        |  10
 net/tipc/discover.c    | 281
 net/tipc/discover.h    |   1
 net/tipc/eth_media.c   |  51
 net/tipc/handler.c     | 134
 net/tipc/ib_media.c    |  34
 net/tipc/link.c        | 216
 net/tipc/link.h        |  21
 net/tipc/msg.c         |  55
 net/tipc/msg.h         |   5
 net/tipc/name_distr.c  |  78
 net/tipc/name_distr.h  |  35
 net/tipc/name_table.c  |  14
 net/tipc/net.c         |  71
 net/tipc/net.h         |   4
 net/tipc/node.c        | 110
 net/tipc/node.h        |  88
 net/tipc/node_subscr.c |   9
 net/tipc/node_subscr.h |   2
 net/tipc/port.c        |  39
 net/tipc/port.h        |  10
 net/tipc/socket.c      | 121
 net/tipc/socket.h      |   4
 30 files changed, 908 insertions(+), 916 deletions(-)
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index b282f7130d2b..a080c66d819a 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -5,7 +5,7 @@
 obj-$(CONFIG_TIPC) := tipc.o
 
 tipc-y	+= addr.o bcast.o bearer.o config.o \
-	   core.o handler.o link.o discover.o msg.o \
+	   core.o link.o discover.o msg.o \
	   name_distr.o subscr.o name_table.o net.o \
	   netlink.o node.o node_subscr.o port.o ref.o \
	   socket.o log.o eth_media.o server.o
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 95ab5ef92920..26631679a1fa 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -71,7 +71,7 @@ struct tipc_bcbearer_pair {
  * Note: The fields labelled "temporary" are incorporated into the bearer
  * to avoid consuming potentially limited stack space through the use of
  * large local variables within multicast routines. Concurrent access is
- * prevented through use of the spinlock "bc_lock".
+ * prevented through use of the spinlock "bclink_lock".
  */
 struct tipc_bcbearer {
	struct tipc_bearer bearer;
@@ -84,34 +84,64 @@ struct tipc_bcbearer {
 
 /**
  * struct tipc_bclink - link used for broadcast messages
+ * @lock: spinlock governing access to structure
  * @link: (non-standard) broadcast link structure
  * @node: (non-standard) node structure representing b'cast link's peer node
+ * @flags: represent bclink states
  * @bcast_nodes: map of broadcast-capable nodes
  * @retransmit_to: node that most recently requested a retransmit
  *
  * Handles sequence numbering, fragmentation, bundling, etc.
  */
 struct tipc_bclink {
+	spinlock_t lock;
	struct tipc_link link;
	struct tipc_node node;
+	unsigned int flags;
	struct tipc_node_map bcast_nodes;
	struct tipc_node *retransmit_to;
 };
 
-static struct tipc_bcbearer bcast_bearer;
-static struct tipc_bclink bcast_link;
-
-static struct tipc_bcbearer *bcbearer = &bcast_bearer;
-static struct tipc_bclink *bclink = &bcast_link;
-static struct tipc_link *bcl = &bcast_link.link;
-
-static DEFINE_SPINLOCK(bc_lock);
+static struct tipc_bcbearer *bcbearer;
+static struct tipc_bclink *bclink;
+static struct tipc_link *bcl;
 
 const char tipc_bclink_name[] = "broadcast-link";
 
 static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff);
+static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
+static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
+
+static void tipc_bclink_lock(void)
+{
+	spin_lock_bh(&bclink->lock);
+}
+
+static void tipc_bclink_unlock(void)
+{
+	struct tipc_node *node = NULL;
+
+	if (likely(!bclink->flags)) {
+		spin_unlock_bh(&bclink->lock);
+		return;
+	}
+
+	if (bclink->flags & TIPC_BCLINK_RESET) {
+		bclink->flags &= ~TIPC_BCLINK_RESET;
+		node = tipc_bclink_retransmit_to();
+	}
+	spin_unlock_bh(&bclink->lock);
+
+	if (node)
+		tipc_link_reset_all(node);
+}
+
+void tipc_bclink_set_flags(unsigned int flags)
+{
+	bclink->flags |= flags;
+}
 
 static u32 bcbuf_acks(struct sk_buff *buf)
 {
@@ -130,16 +160,16 @@ static void bcbuf_decr_acks(struct sk_buff *buf)
 
 void tipc_bclink_add_node(u32 addr)
 {
-	spin_lock_bh(&bc_lock);
+	tipc_bclink_lock();
	tipc_nmap_add(&bclink->bcast_nodes, addr);
-	spin_unlock_bh(&bc_lock);
+	tipc_bclink_unlock();
 }
 
 void tipc_bclink_remove_node(u32 addr)
 {
-	spin_lock_bh(&bc_lock);
+	tipc_bclink_lock();
	tipc_nmap_remove(&bclink->bcast_nodes, addr);
-	spin_unlock_bh(&bc_lock);
+	tipc_bclink_unlock();
 }
 
 static void bclink_set_last_sent(void)
@@ -165,7 +195,7 @@ static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
 /**
  * tipc_bclink_retransmit_to - get most recent node to request retransmission
  *
- * Called with bc_lock locked
+ * Called with bclink_lock locked
  */
 struct tipc_node *tipc_bclink_retransmit_to(void)
 {
@@ -177,7 +207,7 @@ struct tipc_node *tipc_bclink_retransmit_to(void)
  * @after: sequence number of last packet to *not* retransmit
  * @to: sequence number of last packet to retransmit
  *
- * Called with bc_lock locked
+ * Called with bclink_lock locked
  */
 static void bclink_retransmit_pkt(u32 after, u32 to)
 {
@@ -194,7 +224,7 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
  * @n_ptr: node that sent acknowledgement info
  * @acked: broadcast sequence # that has been acknowledged
  *
- * Node is locked, bc_lock unlocked.
+ * Node is locked, bclink_lock unlocked.
  */
 void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
 {
@@ -202,8 +232,7 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
	struct sk_buff *next;
	unsigned int released = 0;
 
-	spin_lock_bh(&bc_lock);
-
+	tipc_bclink_lock();
	/* Bail out if tx queue is empty (no clean up is required) */
	crs = bcl->first_out;
	if (!crs)
@@ -267,13 +296,13 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
	if (unlikely(released && !list_empty(&bcl->waiting_ports)))
		tipc_link_wakeup_ports(bcl, 0);
 exit:
-	spin_unlock_bh(&bc_lock);
+	tipc_bclink_unlock();
 }
 
 /**
  * tipc_bclink_update_link_state - update broadcast link state
  *
- * tipc_net_lock and node lock set
+ * RCU and node lock set
  */
 void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
 {
@@ -320,10 +349,10 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
			 ? buf_seqno(n_ptr->bclink.deferred_head) - 1
			 : n_ptr->bclink.last_sent);
 
-		spin_lock_bh(&bc_lock);
-		tipc_bearer_send(&bcbearer->bearer, buf, NULL);
+		tipc_bclink_lock();
+		tipc_bearer_send(MAX_BEARERS, buf, NULL);
		bcl->stats.sent_nacks++;
-		spin_unlock_bh(&bc_lock);
+		tipc_bclink_unlock();
		kfree_skb(buf);
 
		n_ptr->bclink.oos_state++;
@@ -335,8 +364,6 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
  *
  * Delay any upcoming NACK by this node if another node has already
  * requested the first message this node is going to ask for.
- *
- * Only tipc_net_lock set.
  */
 static void bclink_peek_nack(struct tipc_msg *msg)
 {
@@ -362,7 +389,7 @@ int tipc_bclink_xmit(struct sk_buff *buf)
 {
	int res;
 
-	spin_lock_bh(&bc_lock);
+	tipc_bclink_lock();
 
	if (!bclink->bcast_nodes.count) {
		res = msg_data_sz(buf_msg(buf));
@@ -377,14 +404,14 @@ int tipc_bclink_xmit(struct sk_buff *buf)
		bcl->stats.accu_queue_sz += bcl->out_queue_size;
	}
 exit:
-	spin_unlock_bh(&bc_lock);
+	tipc_bclink_unlock();
	return res;
 }
 
 /**
  * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
  *
- * Called with both sending node's lock and bc_lock taken.
+ * Called with both sending node's lock and bclink_lock taken.
  */
 static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
 {
@@ -408,7 +435,7 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
 /**
  * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
  *
- * tipc_net_lock is read_locked, no other locks set
+ * RCU is locked, no other locks set
  */
 void tipc_bclink_rcv(struct sk_buff *buf)
 {
@@ -439,12 +466,12 @@ void tipc_bclink_rcv(struct sk_buff *buf)
	if (msg_destnode(msg) == tipc_own_addr) {
		tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
		tipc_node_unlock(node);
-		spin_lock_bh(&bc_lock);
+		tipc_bclink_lock();
		bcl->stats.recv_nacks++;
		bclink->retransmit_to = node;
		bclink_retransmit_pkt(msg_bcgap_after(msg),
				      msg_bcgap_to(msg));
-		spin_unlock_bh(&bc_lock);
+		tipc_bclink_unlock();
	} else {
		tipc_node_unlock(node);
		bclink_peek_nack(msg);
@@ -462,51 +489,47 @@ receive:
	/* Deliver message to destination */
 
	if (likely(msg_isdata(msg))) {
-		spin_lock_bh(&bc_lock);
+		tipc_bclink_lock();
		bclink_accept_pkt(node, seqno);
-		spin_unlock_bh(&bc_lock);
+		tipc_bclink_unlock();
		tipc_node_unlock(node);
		if (likely(msg_mcast(msg)))
			tipc_port_mcast_rcv(buf, NULL);
		else
			kfree_skb(buf);
	} else if (msg_user(msg) == MSG_BUNDLER) {
-		spin_lock_bh(&bc_lock);
+		tipc_bclink_lock();
		bclink_accept_pkt(node, seqno);
		bcl->stats.recv_bundles++;
		bcl->stats.recv_bundled += msg_msgcnt(msg);
-		spin_unlock_bh(&bc_lock);
+		tipc_bclink_unlock();
		tipc_node_unlock(node);
		tipc_link_bundle_rcv(buf);
	} else if (msg_user(msg) == MSG_FRAGMENTER) {
-		int ret;
-		ret = tipc_link_frag_rcv(&node->bclink.reasm_head,
-					 &node->bclink.reasm_tail,
-					 &buf);
-		if (ret == LINK_REASM_ERROR)
+		tipc_buf_append(&node->bclink.reasm_buf, &buf);
+		if (unlikely(!buf && !node->bclink.reasm_buf))
			goto unlock;
-		spin_lock_bh(&bc_lock);
+		tipc_bclink_lock();
		bclink_accept_pkt(node, seqno);
		bcl->stats.recv_fragments++;
-		if (ret == LINK_REASM_COMPLETE) {
+		if (buf) {
			bcl->stats.recv_fragmented++;
-			/* Point msg to inner header */
			msg = buf_msg(buf);
-			spin_unlock_bh(&bc_lock);
+			tipc_bclink_unlock();
			goto receive;
		}
-		spin_unlock_bh(&bc_lock);
+		tipc_bclink_unlock();
		tipc_node_unlock(node);
	} else if (msg_user(msg) == NAME_DISTRIBUTOR) {
-		spin_lock_bh(&bc_lock);
+		tipc_bclink_lock();
		bclink_accept_pkt(node, seqno);
-		spin_unlock_bh(&bc_lock);
+		tipc_bclink_unlock();
		tipc_node_unlock(node);
		tipc_named_rcv(buf);
	} else {
-		spin_lock_bh(&bc_lock);
+		tipc_bclink_lock();
		bclink_accept_pkt(node, seqno);
-		spin_unlock_bh(&bc_lock);
+		tipc_bclink_unlock();
		tipc_node_unlock(node);
		kfree_skb(buf);
	}
@@ -552,14 +575,14 @@ receive:
	} else
		deferred = 0;
 
-	spin_lock_bh(&bc_lock);
+	tipc_bclink_lock();
 
	if (deferred)
		bcl->stats.deferred_recv++;
	else
		bcl->stats.duplicates++;
 
-	spin_unlock_bh(&bc_lock);
+	tipc_bclink_unlock();
 
 unlock:
	tipc_node_unlock(node);
@@ -627,13 +650,13 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
 
		if (bp_index == 0) {
			/* Use original buffer for first bearer */
-			tipc_bearer_send(b, buf, &b->bcast_addr);
+			tipc_bearer_send(b->identity, buf, &b->bcast_addr);
		} else {
			/* Avoid concurrent buffer access */
-			tbuf = pskb_copy(buf, GFP_ATOMIC);
+			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
			if (!tbuf)
				break;
-			tipc_bearer_send(b, tbuf, &b->bcast_addr);
+			tipc_bearer_send(b->identity, tbuf, &b->bcast_addr);
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}
 
@@ -655,20 +678,27 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
 /**
  * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
  */
-void tipc_bcbearer_sort(void)
+void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
 {
	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct tipc_bcbearer_pair *bp_curr;
+	struct tipc_bearer *b;
	int b_index;
	int pri;
 
-	spin_lock_bh(&bc_lock);
+	tipc_bclink_lock();
+
+	if (action)
+		tipc_nmap_add(nm_ptr, node);
+	else
+		tipc_nmap_remove(nm_ptr, node);
 
	/* Group bearers by priority (can assume max of two per priority) */
	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
 
+	rcu_read_lock();
	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
-		struct tipc_bearer *b = bearer_list[b_index];
+		b = rcu_dereference_rtnl(bearer_list[b_index]);
		if (!b || !b->nodes.count)
			continue;
 
@@ -677,6 +707,7 @@ void tipc_bcbearer_sort(void)
		else
			bp_temp[b->priority].secondary = b;
	}
+	rcu_read_unlock();
 
	/* Create array of bearer pairs for broadcasting */
	bp_curr = bcbearer->bpairs;
@@ -702,7 +733,7 @@ void tipc_bcbearer_sort(void)
		bp_curr++;
	}
 
-	spin_unlock_bh(&bc_lock);
+	tipc_bclink_unlock();
 }
 
 
@@ -714,7 +745,7 @@ int tipc_bclink_stats(char *buf, const u32 buf_size)
	if (!bcl)
		return 0;
 
-	spin_lock_bh(&bc_lock);
+	tipc_bclink_lock();
 
	s = &bcl->stats;
 
@@ -743,7 +774,7 @@ int tipc_bclink_stats(char *buf, const u32 buf_size)
			     s->queue_sz_counts ?
			     (s->accu_queue_sz / s->queue_sz_counts) : 0);
 
-	spin_unlock_bh(&bc_lock);
+	tipc_bclink_unlock();
	return ret;
 }
 
@@ -752,9 +783,9 @@ int tipc_bclink_reset_stats(void)
	if (!bcl)
		return -ENOPROTOOPT;
 
-	spin_lock_bh(&bc_lock);
+	tipc_bclink_lock();
	memset(&bcl->stats, 0, sizeof(bcl->stats));
-	spin_unlock_bh(&bc_lock);
+	tipc_bclink_unlock();
	return 0;
 }
 
@@ -765,46 +796,59 @@ int tipc_bclink_set_queue_limits(u32 limit)
	if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
		return -EINVAL;
 
-	spin_lock_bh(&bc_lock);
+	tipc_bclink_lock();
	tipc_link_set_queue_limits(bcl, limit);
-	spin_unlock_bh(&bc_lock);
+	tipc_bclink_unlock();
	return 0;
 }
 
-void tipc_bclink_init(void)
+int tipc_bclink_init(void)
 {
+	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
+	if (!bcbearer)
+		return -ENOMEM;
+
+	bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
+	if (!bclink) {
+		kfree(bcbearer);
+		return -ENOMEM;
+	}
+
+	bcl = &bclink->link;
	bcbearer->bearer.media = &bcbearer->media;
	bcbearer->media.send_msg = tipc_bcbearer_send;
	sprintf(bcbearer->media.name, "tipc-broadcast");
 
+	spin_lock_init(&bclink->lock);
	INIT_LIST_HEAD(&bcl->waiting_ports);
	bcl->next_out_no = 1;
	spin_lock_init(&bclink->node.lock);
	bcl->owner = &bclink->node;
	bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
-	bcl->b_ptr = &bcbearer->bearer;
-	bearer_list[BCBEARER] = &bcbearer->bearer;
+	bcl->bearer_id = MAX_BEARERS;
+	rcu_assign_pointer(bearer_list[MAX_BEARERS], &bcbearer->bearer);
	bcl->state = WORKING_WORKING;
	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
+	return 0;
 }
 
 void tipc_bclink_stop(void)
 {
-	spin_lock_bh(&bc_lock);
+	tipc_bclink_lock();
	tipc_link_purge_queues(bcl);
-	spin_unlock_bh(&bc_lock);
+	tipc_bclink_unlock();
 
-	bearer_list[BCBEARER] = NULL;
-	memset(bclink, 0, sizeof(*bclink));
-	memset(bcbearer, 0, sizeof(*bcbearer));
+	RCU_INIT_POINTER(bearer_list[BCBEARER], NULL);
+	synchronize_net();
+	kfree(bcbearer);
+	kfree(bclink);
 }
 
-
 /**
  * tipc_nmap_add - add a node to a node map
  */
-void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
+static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
 {
	int n = tipc_node(node);
	int w = n / WSIZE;
@@ -819,7 +863,7 @@ void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
 /**
  * tipc_nmap_remove - remove a node from a node map
  */
-void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
+static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
 {
	int n = tipc_node(node);
	int w = n / WSIZE;
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index a80ef54b818e..00330c45df3e 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -39,6 +39,7 @@
 
 #define MAX_NODES 4096
 #define WSIZE 32
+#define TIPC_BCLINK_RESET 1
 
 /**
  * struct tipc_node_map - set of node identifiers
@@ -69,9 +70,6 @@ struct tipc_node;
 
 extern const char tipc_bclink_name[];
 
-void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
-void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
-
 /**
  * tipc_nmap_equal - test for equality of node maps
  */
@@ -84,8 +82,9 @@ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a,
 void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port);
 void tipc_port_list_free(struct tipc_port_list *pl_ptr);
 
-void tipc_bclink_init(void);
+int tipc_bclink_init(void);
 void tipc_bclink_stop(void);
+void tipc_bclink_set_flags(unsigned int flags);
 void tipc_bclink_add_node(u32 addr);
 void tipc_bclink_remove_node(u32 addr);
 struct tipc_node *tipc_bclink_retransmit_to(void);
@@ -98,6 +97,6 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent);
 int tipc_bclink_stats(char *stats_buf, const u32 buf_size);
 int tipc_bclink_reset_stats(void);
 int tipc_bclink_set_queue_limits(u32 limit);
-void tipc_bcbearer_sort(void);
+void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action);
 
 #endif
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 3fef7eb776dc..264474394f9f 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -49,7 +49,7 @@ static struct tipc_media * const media_info_array[] = {
49 NULL 49 NULL
50}; 50};
51 51
52struct tipc_bearer *bearer_list[MAX_BEARERS + 1]; 52struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1];
53 53
54static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down); 54static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down);
55 55
@@ -178,7 +178,7 @@ struct tipc_bearer *tipc_bearer_find(const char *name)
178 u32 i; 178 u32 i;
179 179
180 for (i = 0; i < MAX_BEARERS; i++) { 180 for (i = 0; i < MAX_BEARERS; i++) {
181 b_ptr = bearer_list[i]; 181 b_ptr = rtnl_dereference(bearer_list[i]);
182 if (b_ptr && (!strcmp(b_ptr->name, name))) 182 if (b_ptr && (!strcmp(b_ptr->name, name)))
183 return b_ptr; 183 return b_ptr;
184 } 184 }
@@ -198,10 +198,9 @@ struct sk_buff *tipc_bearer_get_names(void)
198 if (!buf) 198 if (!buf)
199 return NULL; 199 return NULL;
200 200
201 read_lock_bh(&tipc_net_lock);
202 for (i = 0; media_info_array[i] != NULL; i++) { 201 for (i = 0; media_info_array[i] != NULL; i++) {
203 for (j = 0; j < MAX_BEARERS; j++) { 202 for (j = 0; j < MAX_BEARERS; j++) {
204 b = bearer_list[j]; 203 b = rtnl_dereference(bearer_list[j]);
205 if (!b) 204 if (!b)
206 continue; 205 continue;
207 if (b->media == media_info_array[i]) { 206 if (b->media == media_info_array[i]) {
@@ -211,22 +210,33 @@ struct sk_buff *tipc_bearer_get_names(void)
211 } 210 }
212 } 211 }
213 } 212 }
214 read_unlock_bh(&tipc_net_lock);
215 return buf; 213 return buf;
216} 214}
217 215
218void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest) 216void tipc_bearer_add_dest(u32 bearer_id, u32 dest)
219{ 217{
220 tipc_nmap_add(&b_ptr->nodes, dest); 218 struct tipc_bearer *b_ptr;
221 tipc_bcbearer_sort(); 219
222 tipc_disc_add_dest(b_ptr->link_req); 220 rcu_read_lock();
221 b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]);
222 if (b_ptr) {
223 tipc_bcbearer_sort(&b_ptr->nodes, dest, true);
224 tipc_disc_add_dest(b_ptr->link_req);
225 }
226 rcu_read_unlock();
223} 227}
224 228
225void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest) 229void tipc_bearer_remove_dest(u32 bearer_id, u32 dest)
226{ 230{
227 tipc_nmap_remove(&b_ptr->nodes, dest); 231 struct tipc_bearer *b_ptr;
228 tipc_bcbearer_sort(); 232
229 tipc_disc_remove_dest(b_ptr->link_req); 233 rcu_read_lock();
234 b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]);
235 if (b_ptr) {
236 tipc_bcbearer_sort(&b_ptr->nodes, dest, false);
237 tipc_disc_remove_dest(b_ptr->link_req);
238 }
239 rcu_read_unlock();
230} 240}
231 241
232/** 242/**
@@ -271,13 +281,11 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
271 return -EINVAL; 281 return -EINVAL;
272 } 282 }
273 283
274 write_lock_bh(&tipc_net_lock);
275
276 m_ptr = tipc_media_find(b_names.media_name); 284 m_ptr = tipc_media_find(b_names.media_name);
277 if (!m_ptr) { 285 if (!m_ptr) {
278 pr_warn("Bearer <%s> rejected, media <%s> not registered\n", 286 pr_warn("Bearer <%s> rejected, media <%s> not registered\n",
279 name, b_names.media_name); 287 name, b_names.media_name);
280 goto exit; 288 return -EINVAL;
281 } 289 }
282 290
283 if (priority == TIPC_MEDIA_LINK_PRI) 291 if (priority == TIPC_MEDIA_LINK_PRI)
@@ -287,7 +295,7 @@ restart:
287 bearer_id = MAX_BEARERS; 295 bearer_id = MAX_BEARERS;
288 with_this_prio = 1; 296 with_this_prio = 1;
289 for (i = MAX_BEARERS; i-- != 0; ) { 297 for (i = MAX_BEARERS; i-- != 0; ) {
290 b_ptr = bearer_list[i]; 298 b_ptr = rtnl_dereference(bearer_list[i]);
291 if (!b_ptr) { 299 if (!b_ptr) {
292 bearer_id = i; 300 bearer_id = i;
293 continue; 301 continue;
@@ -295,14 +303,14 @@ restart:
295 if (!strcmp(name, b_ptr->name)) { 303 if (!strcmp(name, b_ptr->name)) {
296 pr_warn("Bearer <%s> rejected, already enabled\n", 304 pr_warn("Bearer <%s> rejected, already enabled\n",
297 name); 305 name);
298 goto exit; 306 return -EINVAL;
299 } 307 }
300 if ((b_ptr->priority == priority) && 308 if ((b_ptr->priority == priority) &&
301 (++with_this_prio > 2)) { 309 (++with_this_prio > 2)) {
302 if (priority-- == 0) { 310 if (priority-- == 0) {
303 pr_warn("Bearer <%s> rejected, duplicate priority\n", 311 pr_warn("Bearer <%s> rejected, duplicate priority\n",
304 name); 312 name);
305 goto exit; 313 return -EINVAL;
306 } 314 }
307 pr_warn("Bearer <%s> priority adjustment required %u->%u\n", 315 pr_warn("Bearer <%s> priority adjustment required %u->%u\n",
308 name, priority + 1, priority); 316 name, priority + 1, priority);
@@ -312,21 +320,20 @@ restart:
312 if (bearer_id >= MAX_BEARERS) { 320 if (bearer_id >= MAX_BEARERS) {
313 pr_warn("Bearer <%s> rejected, bearer limit reached (%u)\n", 321 pr_warn("Bearer <%s> rejected, bearer limit reached (%u)\n",
314 name, MAX_BEARERS); 322 name, MAX_BEARERS);
315 goto exit; 323 return -EINVAL;
316 } 324 }
317 325
318 b_ptr = kzalloc(sizeof(*b_ptr), GFP_ATOMIC); 326 b_ptr = kzalloc(sizeof(*b_ptr), GFP_ATOMIC);
319 if (!b_ptr) { 327 if (!b_ptr)
320 res = -ENOMEM; 328 return -ENOMEM;
321 goto exit; 329
322 }
323 strcpy(b_ptr->name, name); 330 strcpy(b_ptr->name, name);
324 b_ptr->media = m_ptr; 331 b_ptr->media = m_ptr;
325 res = m_ptr->enable_media(b_ptr); 332 res = m_ptr->enable_media(b_ptr);
326 if (res) { 333 if (res) {
327 pr_warn("Bearer <%s> rejected, enable failure (%d)\n", 334 pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
328 name, -res); 335 name, -res);
329 goto exit; 336 return -EINVAL;
330 } 337 }
331 338
332 b_ptr->identity = bearer_id; 339 b_ptr->identity = bearer_id;
@@ -341,16 +348,14 @@ restart:
341 bearer_disable(b_ptr, false); 348 bearer_disable(b_ptr, false);
342 pr_warn("Bearer <%s> rejected, discovery object creation failed\n", 349 pr_warn("Bearer <%s> rejected, discovery object creation failed\n",
343 name); 350 name);
344 goto exit; 351 return -EINVAL;
345 } 352 }
346 353
347 bearer_list[bearer_id] = b_ptr; 354 rcu_assign_pointer(bearer_list[bearer_id], b_ptr);
348 355
349 pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n", 356 pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
350 name, 357 name,
351 tipc_addr_string_fill(addr_string, disc_domain), priority); 358 tipc_addr_string_fill(addr_string, disc_domain), priority);
352exit:
353 write_unlock_bh(&tipc_net_lock);
354 return res; 359 return res;
355} 360}
356 361
@@ -359,19 +364,16 @@ exit:
359 */ 364 */
360static int tipc_reset_bearer(struct tipc_bearer *b_ptr) 365static int tipc_reset_bearer(struct tipc_bearer *b_ptr)
361{ 366{
362 read_lock_bh(&tipc_net_lock);
363 pr_info("Resetting bearer <%s>\n", b_ptr->name); 367 pr_info("Resetting bearer <%s>\n", b_ptr->name);
364 tipc_disc_delete(b_ptr->link_req);
365 tipc_link_reset_list(b_ptr->identity); 368 tipc_link_reset_list(b_ptr->identity);
366 tipc_disc_create(b_ptr, &b_ptr->bcast_addr); 369 tipc_disc_reset(b_ptr);
367 read_unlock_bh(&tipc_net_lock);
368 return 0; 370 return 0;
369} 371}
370 372
371/** 373/**
372 * bearer_disable 374 * bearer_disable
373 * 375 *
374 * Note: This routine assumes caller holds tipc_net_lock. 376 * Note: This routine assumes caller holds RTNL lock.
375 */ 377 */
376static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down) 378static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down)
377{ 379{
@@ -385,12 +387,12 @@ static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down)
385 tipc_disc_delete(b_ptr->link_req); 387 tipc_disc_delete(b_ptr->link_req);
386 388
387 for (i = 0; i < MAX_BEARERS; i++) { 389 for (i = 0; i < MAX_BEARERS; i++) {
388 if (b_ptr == bearer_list[i]) { 390 if (b_ptr == rtnl_dereference(bearer_list[i])) {
389 bearer_list[i] = NULL; 391 RCU_INIT_POINTER(bearer_list[i], NULL);
390 break; 392 break;
391 } 393 }
392 } 394 }
393 kfree(b_ptr); 395 kfree_rcu(b_ptr, rcu);
394} 396}
395 397
396int tipc_disable_bearer(const char *name) 398int tipc_disable_bearer(const char *name)
@@ -398,7 +400,6 @@ int tipc_disable_bearer(const char *name)
398 struct tipc_bearer *b_ptr; 400 struct tipc_bearer *b_ptr;
399 int res; 401 int res;
400 402
401 write_lock_bh(&tipc_net_lock);
402 b_ptr = tipc_bearer_find(name); 403 b_ptr = tipc_bearer_find(name);
403 if (b_ptr == NULL) { 404 if (b_ptr == NULL) {
404 pr_warn("Attempt to disable unknown bearer <%s>\n", name); 405 pr_warn("Attempt to disable unknown bearer <%s>\n", name);
@@ -407,32 +408,9 @@ int tipc_disable_bearer(const char *name)
407 bearer_disable(b_ptr, false); 408 bearer_disable(b_ptr, false);
408 res = 0; 409 res = 0;
409 } 410 }
410 write_unlock_bh(&tipc_net_lock);
411 return res; 411 return res;
412} 412}
413 413
414
415/* tipc_l2_media_addr_set - initialize Ethernet media address structure
416 *
417 * Media-dependent "value" field stores MAC address in first 6 bytes
418 * and zeroes out the remaining bytes.
419 */
420void tipc_l2_media_addr_set(const struct tipc_bearer *b,
421 struct tipc_media_addr *a, char *mac)
422{
423 int len = b->media->hwaddr_len;
424
425 if (unlikely(sizeof(a->value) < len)) {
426 WARN_ONCE(1, "Media length invalid\n");
427 return;
428 }
429
430 memcpy(a->value, mac, len);
431 memset(a->value + len, 0, sizeof(a->value) - len);
432 a->media_id = b->media->type_id;
433 a->broadcast = !memcmp(mac, b->bcast_addr.value, len);
434}
435
436int tipc_enable_l2_media(struct tipc_bearer *b) 414int tipc_enable_l2_media(struct tipc_bearer *b)
437{ 415{
438 struct net_device *dev; 416 struct net_device *dev;
@@ -443,33 +421,37 @@ int tipc_enable_l2_media(struct tipc_bearer *b)
443 if (!dev) 421 if (!dev)
444 return -ENODEV; 422 return -ENODEV;
445 423
446 /* Associate TIPC bearer with Ethernet bearer */ 424 /* Associate TIPC bearer with L2 bearer */
447 b->media_ptr = dev; 425 rcu_assign_pointer(b->media_ptr, dev);
448 memset(b->bcast_addr.value, 0, sizeof(b->bcast_addr.value)); 426 memset(&b->bcast_addr, 0, sizeof(b->bcast_addr));
449 memcpy(b->bcast_addr.value, dev->broadcast, b->media->hwaddr_len); 427 memcpy(b->bcast_addr.value, dev->broadcast, b->media->hwaddr_len);
450 b->bcast_addr.media_id = b->media->type_id; 428 b->bcast_addr.media_id = b->media->type_id;
451 b->bcast_addr.broadcast = 1; 429 b->bcast_addr.broadcast = 1;
452 b->mtu = dev->mtu; 430 b->mtu = dev->mtu;
453 tipc_l2_media_addr_set(b, &b->addr, (char *)dev->dev_addr); 431 b->media->raw2addr(b, &b->addr, (char *)dev->dev_addr);
454 rcu_assign_pointer(dev->tipc_ptr, b); 432 rcu_assign_pointer(dev->tipc_ptr, b);
455 return 0; 433 return 0;
456} 434}
457 435
458/* tipc_disable_l2_media - detach TIPC bearer from an Ethernet interface 436/* tipc_disable_l2_media - detach TIPC bearer from an L2 interface
459 * 437 *
460 * Mark Ethernet bearer as inactive so that incoming buffers are thrown away, 438 * Mark L2 bearer as inactive so that incoming buffers are thrown away,
461 * then get worker thread to complete bearer cleanup. (Can't do cleanup 439 * then get worker thread to complete bearer cleanup. (Can't do cleanup
462 * here because cleanup code needs to sleep and caller holds spinlocks.) 440 * here because cleanup code needs to sleep and caller holds spinlocks.)
463 */ 441 */
464void tipc_disable_l2_media(struct tipc_bearer *b) 442void tipc_disable_l2_media(struct tipc_bearer *b)
465{ 443{
466 struct net_device *dev = (struct net_device *)b->media_ptr; 444 struct net_device *dev;
445
446 dev = (struct net_device *)rtnl_dereference(b->media_ptr);
447 RCU_INIT_POINTER(b->media_ptr, NULL);
467 RCU_INIT_POINTER(dev->tipc_ptr, NULL); 448 RCU_INIT_POINTER(dev->tipc_ptr, NULL);
449 synchronize_net();
468 dev_put(dev); 450 dev_put(dev);
469} 451}
470 452
471/** 453/**
472 * tipc_l2_send_msg - send a TIPC packet out over an Ethernet interface 454 * tipc_l2_send_msg - send a TIPC packet out over an L2 interface
473 * @buf: the packet to be sent 455 * @buf: the packet to be sent
474 * @b_ptr: the bearer through which the packet is to be sent 456 * @b_ptr: the bearer through which the packet is to be sent
475 * @dest: peer destination address 457 * @dest: peer destination address
@@ -478,8 +460,12 @@ int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
478 struct tipc_media_addr *dest) 460 struct tipc_media_addr *dest)
479{ 461{
480 struct sk_buff *clone; 462 struct sk_buff *clone;
463 struct net_device *dev;
481 int delta; 464 int delta;
482 struct net_device *dev = (struct net_device *)b->media_ptr; 465
466 dev = (struct net_device *)rcu_dereference_rtnl(b->media_ptr);
467 if (!dev)
468 return 0;
483 469
484 clone = skb_clone(buf, GFP_ATOMIC); 470 clone = skb_clone(buf, GFP_ATOMIC);
485 if (!clone) 471 if (!clone)
@@ -507,10 +493,16 @@ int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
507 * The media send routine must not alter the buffer being passed in 493 * The media send routine must not alter the buffer being passed in
508 * as it may be needed for later retransmission! 494 * as it may be needed for later retransmission!
509 */ 495 */
510void tipc_bearer_send(struct tipc_bearer *b, struct sk_buff *buf, 496void tipc_bearer_send(u32 bearer_id, struct sk_buff *buf,
511 struct tipc_media_addr *dest) 497 struct tipc_media_addr *dest)
512{ 498{
513 b->media->send_msg(buf, b, dest); 499 struct tipc_bearer *b_ptr;
500
501 rcu_read_lock();
502 b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]);
503 if (likely(b_ptr))
504 b_ptr->media->send_msg(buf, b_ptr, dest);
505 rcu_read_unlock();
514} 506}
515 507
516/** 508/**
@@ -535,7 +527,7 @@ static int tipc_l2_rcv_msg(struct sk_buff *buf, struct net_device *dev,
535 } 527 }
536 528
537 rcu_read_lock(); 529 rcu_read_lock();
538 b_ptr = rcu_dereference(dev->tipc_ptr); 530 b_ptr = rcu_dereference_rtnl(dev->tipc_ptr);
539 if (likely(b_ptr)) { 531 if (likely(b_ptr)) {
540 if (likely(buf->pkt_type <= PACKET_BROADCAST)) { 532 if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
541 buf->next = NULL; 533 buf->next = NULL;
@@ -568,12 +560,9 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
568 if (!net_eq(dev_net(dev), &init_net)) 560 if (!net_eq(dev_net(dev), &init_net))
569 return NOTIFY_DONE; 561 return NOTIFY_DONE;
570 562
571 rcu_read_lock(); 563 b_ptr = rtnl_dereference(dev->tipc_ptr);
572 b_ptr = rcu_dereference(dev->tipc_ptr); 564 if (!b_ptr)
573 if (!b_ptr) {
574 rcu_read_unlock();
575 return NOTIFY_DONE; 565 return NOTIFY_DONE;
576 }
577 566
578 b_ptr->mtu = dev->mtu; 567 b_ptr->mtu = dev->mtu;
579 568
@@ -586,17 +575,15 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
586 tipc_reset_bearer(b_ptr); 575 tipc_reset_bearer(b_ptr);
587 break; 576 break;
588 case NETDEV_CHANGEADDR: 577 case NETDEV_CHANGEADDR:
589 tipc_l2_media_addr_set(b_ptr, &b_ptr->addr, 578 b_ptr->media->raw2addr(b_ptr, &b_ptr->addr,
590 (char *)dev->dev_addr); 579 (char *)dev->dev_addr);
591 tipc_reset_bearer(b_ptr); 580 tipc_reset_bearer(b_ptr);
592 break; 581 break;
593 case NETDEV_UNREGISTER: 582 case NETDEV_UNREGISTER:
594 case NETDEV_CHANGENAME: 583 case NETDEV_CHANGENAME:
595 tipc_disable_bearer(b_ptr->name); 584 bearer_disable(b_ptr, false);
596 break; 585 break;
597 } 586 }
598 rcu_read_unlock();
599
600 return NOTIFY_OK; 587 return NOTIFY_OK;
601} 588}
602 589
@@ -633,7 +620,7 @@ void tipc_bearer_stop(void)
633 u32 i; 620 u32 i;
634 621
635 for (i = 0; i < MAX_BEARERS; i++) { 622 for (i = 0; i < MAX_BEARERS; i++) {
636 b_ptr = bearer_list[i]; 623 b_ptr = rtnl_dereference(bearer_list[i]);
637 if (b_ptr) { 624 if (b_ptr) {
638 bearer_disable(b_ptr, true); 625 bearer_disable(b_ptr, true);
639 bearer_list[i] = NULL; 626 bearer_list[i] = NULL;
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index ba48145e871d..78fccc49de23 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -42,14 +42,12 @@
42#define MAX_BEARERS 2 42#define MAX_BEARERS 2
43#define MAX_MEDIA 2 43#define MAX_MEDIA 2
44 44
45/* 45/* Identifiers associated with TIPC message header media address info
46 * Identifiers associated with TIPC message header media address info 46 * - address info field is 32 bytes long
47 * 47 * - the field's actual content and length is defined per media
48 * - address info field is 20 bytes long 48 * - remaining unused bytes in the field are set to zero
49 * - media type identifier located at offset 3
50 * - remaining bytes vary according to media type
51 */ 49 */
52#define TIPC_MEDIA_ADDR_SIZE 20 50#define TIPC_MEDIA_ADDR_SIZE 32
53#define TIPC_MEDIA_TYPE_OFFSET 3 51#define TIPC_MEDIA_TYPE_OFFSET 3
54 52
55/* 53/*
@@ -77,9 +75,10 @@ struct tipc_bearer;
77 * @send_msg: routine which handles buffer transmission 75 * @send_msg: routine which handles buffer transmission
78 * @enable_media: routine which enables a media 76 * @enable_media: routine which enables a media
79 * @disable_media: routine which disables a media 77 * @disable_media: routine which disables a media
80 * @addr2str: routine which converts media address to string 78 * @addr2str: convert media address format to string
81 * @addr2msg: routine which converts media address to protocol message area 79 * @addr2msg: convert from media addr format to discovery msg addr format
82 * @msg2addr: routine which converts media address from protocol message area 80 * @msg2addr: convert from discovery msg addr format to media addr format
81 * @raw2addr: convert from raw addr format to media addr format
83 * @priority: default link (and bearer) priority 82 * @priority: default link (and bearer) priority
84 * @tolerance: default time (in ms) before declaring link failure 83 * @tolerance: default time (in ms) before declaring link failure
85 * @window: default window (in packets) before declaring link congestion 84 * @window: default window (in packets) before declaring link congestion
@@ -93,10 +92,16 @@ struct tipc_media {
93 struct tipc_media_addr *dest); 92 struct tipc_media_addr *dest);
94 int (*enable_media)(struct tipc_bearer *b_ptr); 93 int (*enable_media)(struct tipc_bearer *b_ptr);
95 void (*disable_media)(struct tipc_bearer *b_ptr); 94 void (*disable_media)(struct tipc_bearer *b_ptr);
96 int (*addr2str)(struct tipc_media_addr *a, char *str_buf, int str_size); 95 int (*addr2str)(struct tipc_media_addr *addr,
97 int (*addr2msg)(struct tipc_media_addr *a, char *msg_area); 96 char *strbuf,
98 int (*msg2addr)(const struct tipc_bearer *b_ptr, 97 int bufsz);
99 struct tipc_media_addr *a, char *msg_area); 98 int (*addr2msg)(char *msg, struct tipc_media_addr *addr);
99 int (*msg2addr)(struct tipc_bearer *b,
100 struct tipc_media_addr *addr,
101 char *msg);
102 int (*raw2addr)(struct tipc_bearer *b,
103 struct tipc_media_addr *addr,
104 char *raw);
100 u32 priority; 105 u32 priority;
101 u32 tolerance; 106 u32 tolerance;
102 u32 window; 107 u32 window;
@@ -113,6 +118,7 @@ struct tipc_media {
113 * @name: bearer name (format = media:interface) 118 * @name: bearer name (format = media:interface)
114 * @media: ptr to media structure associated with bearer 119 * @media: ptr to media structure associated with bearer
115 * @bcast_addr: media address used in broadcasting 120 * @bcast_addr: media address used in broadcasting
121 * @rcu: rcu struct for tipc_bearer
116 * @priority: default link priority for bearer 122 * @priority: default link priority for bearer
117 * @window: default window size for bearer 123 * @window: default window size for bearer
118 * @tolerance: default link tolerance for bearer 124 * @tolerance: default link tolerance for bearer
@@ -127,12 +133,13 @@ struct tipc_media {
127 * care of initializing all other fields. 133 * care of initializing all other fields.
128 */ 134 */
129struct tipc_bearer { 135struct tipc_bearer {
130 void *media_ptr; /* initalized by media */ 136 void __rcu *media_ptr; /* initalized by media */
131 u32 mtu; /* initalized by media */ 137 u32 mtu; /* initalized by media */
132 struct tipc_media_addr addr; /* initalized by media */ 138 struct tipc_media_addr addr; /* initalized by media */
133 char name[TIPC_MAX_BEARER_NAME]; 139 char name[TIPC_MAX_BEARER_NAME];
134 struct tipc_media *media; 140 struct tipc_media *media;
135 struct tipc_media_addr bcast_addr; 141 struct tipc_media_addr bcast_addr;
142 struct rcu_head rcu;
136 u32 priority; 143 u32 priority;
137 u32 window; 144 u32 window;
138 u32 tolerance; 145 u32 tolerance;
@@ -150,7 +157,7 @@ struct tipc_bearer_names {
150 157
151struct tipc_link; 158struct tipc_link;
152 159
153extern struct tipc_bearer *bearer_list[]; 160extern struct tipc_bearer __rcu *bearer_list[];
154 161
155/* 162/*
156 * TIPC routines available to supported media types 163 * TIPC routines available to supported media types
@@ -173,22 +180,20 @@ int tipc_media_set_priority(const char *name, u32 new_value);
173int tipc_media_set_window(const char *name, u32 new_value); 180int tipc_media_set_window(const char *name, u32 new_value);
174void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a); 181void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
175struct sk_buff *tipc_media_get_names(void); 182struct sk_buff *tipc_media_get_names(void);
176void tipc_l2_media_addr_set(const struct tipc_bearer *b,
177 struct tipc_media_addr *a, char *mac);
178int tipc_enable_l2_media(struct tipc_bearer *b); 183int tipc_enable_l2_media(struct tipc_bearer *b);
179void tipc_disable_l2_media(struct tipc_bearer *b); 184void tipc_disable_l2_media(struct tipc_bearer *b);
180int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b, 185int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
181 struct tipc_media_addr *dest); 186 struct tipc_media_addr *dest);
182 187
183struct sk_buff *tipc_bearer_get_names(void); 188struct sk_buff *tipc_bearer_get_names(void);
184void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest); 189void tipc_bearer_add_dest(u32 bearer_id, u32 dest);
185void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest); 190void tipc_bearer_remove_dest(u32 bearer_id, u32 dest);
186struct tipc_bearer *tipc_bearer_find(const char *name); 191struct tipc_bearer *tipc_bearer_find(const char *name);
187struct tipc_media *tipc_media_find(const char *name); 192struct tipc_media *tipc_media_find(const char *name);
188int tipc_bearer_setup(void); 193int tipc_bearer_setup(void);
189void tipc_bearer_cleanup(void); 194void tipc_bearer_cleanup(void);
190void tipc_bearer_stop(void); 195void tipc_bearer_stop(void);
191void tipc_bearer_send(struct tipc_bearer *b, struct sk_buff *buf, 196void tipc_bearer_send(u32 bearer_id, struct sk_buff *buf,
192 struct tipc_media_addr *dest); 197 struct tipc_media_addr *dest);
193 198
194#endif /* _TIPC_BEARER_H */ 199#endif /* _TIPC_BEARER_H */
diff --git a/net/tipc/config.c b/net/tipc/config.c
index 4b981c053823..2b42403ad33a 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -42,8 +42,6 @@
42 42
43#define REPLY_TRUNCATED "<truncated>\n" 43#define REPLY_TRUNCATED "<truncated>\n"
44 44
45static DEFINE_MUTEX(config_mutex);
46
47static const void *req_tlv_area; /* request message TLV area */ 45static const void *req_tlv_area; /* request message TLV area */
48static int req_tlv_space; /* request message TLV area size */ 46static int req_tlv_space; /* request message TLV area size */
49static int rep_headroom; /* reply message headroom to use */ 47static int rep_headroom; /* reply message headroom to use */
@@ -179,8 +177,10 @@ static struct sk_buff *cfg_set_own_addr(void)
179 if (tipc_own_addr) 177 if (tipc_own_addr)
180 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 178 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
181 " (cannot change node address once assigned)"); 179 " (cannot change node address once assigned)");
182 tipc_net_start(addr); 180 if (!tipc_net_start(addr))
183 return tipc_cfg_reply_none(); 181 return tipc_cfg_reply_none();
182
183 return tipc_cfg_reply_error_string("cannot change to network mode");
184} 184}
185 185
186static struct sk_buff *cfg_set_max_ports(void) 186static struct sk_buff *cfg_set_max_ports(void)
@@ -223,7 +223,7 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
223{ 223{
224 struct sk_buff *rep_tlv_buf; 224 struct sk_buff *rep_tlv_buf;
225 225
226 mutex_lock(&config_mutex); 226 rtnl_lock();
227 227
228 /* Save request and reply details in a well-known location */ 228 /* Save request and reply details in a well-known location */
229 req_tlv_area = request_area; 229 req_tlv_area = request_area;
@@ -337,6 +337,6 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
337 337
338 /* Return reply buffer */ 338 /* Return reply buffer */
339exit: 339exit:
340 mutex_unlock(&config_mutex); 340 rtnl_unlock();
341 return rep_tlv_buf; 341 return rep_tlv_buf;
342} 342}
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 50d57429ebca..676d18015dd8 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -80,7 +80,6 @@ struct sk_buff *tipc_buf_acquire(u32 size)
80 */ 80 */
81static void tipc_core_stop(void) 81static void tipc_core_stop(void)
82{ 82{
83 tipc_handler_stop();
84 tipc_net_stop(); 83 tipc_net_stop();
85 tipc_bearer_cleanup(); 84 tipc_bearer_cleanup();
86 tipc_netlink_stop(); 85 tipc_netlink_stop();
@@ -100,10 +99,6 @@ static int tipc_core_start(void)
100 99
101 get_random_bytes(&tipc_random, sizeof(tipc_random)); 100 get_random_bytes(&tipc_random, sizeof(tipc_random));
102 101
103 err = tipc_handler_start();
104 if (err)
105 goto out_handler;
106
107 err = tipc_ref_table_init(tipc_max_ports, tipc_random); 102 err = tipc_ref_table_init(tipc_max_ports, tipc_random);
108 if (err) 103 if (err)
109 goto out_reftbl; 104 goto out_reftbl;
@@ -146,8 +141,6 @@ out_netlink:
146out_nametbl: 141out_nametbl:
147 tipc_ref_table_stop(); 142 tipc_ref_table_stop();
148out_reftbl: 143out_reftbl:
149 tipc_handler_stop();
150out_handler:
151 return err; 144 return err;
152} 145}
153 146
@@ -161,10 +154,11 @@ static int __init tipc_init(void)
161 tipc_max_ports = CONFIG_TIPC_PORTS; 154 tipc_max_ports = CONFIG_TIPC_PORTS;
162 tipc_net_id = 4711; 155 tipc_net_id = 4711;
163 156
164 sysctl_tipc_rmem[0] = CONN_OVERLOAD_LIMIT >> 4 << TIPC_LOW_IMPORTANCE; 157 sysctl_tipc_rmem[0] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
165 sysctl_tipc_rmem[1] = CONN_OVERLOAD_LIMIT >> 4 << 158 TIPC_LOW_IMPORTANCE;
159 sysctl_tipc_rmem[1] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
166 TIPC_CRITICAL_IMPORTANCE; 160 TIPC_CRITICAL_IMPORTANCE;
167 sysctl_tipc_rmem[2] = CONN_OVERLOAD_LIMIT; 161 sysctl_tipc_rmem[2] = TIPC_CONN_OVERLOAD_LIMIT;
168 162
169 res = tipc_core_start(); 163 res = tipc_core_start();
170 if (res) 164 if (res)
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 8985bbcb942b..bb26ed1ee966 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -56,7 +56,8 @@
56#include <linux/list.h> 56#include <linux/list.h>
57#include <linux/slab.h> 57#include <linux/slab.h>
58#include <linux/vmalloc.h> 58#include <linux/vmalloc.h>
59 59#include <linux/rtnetlink.h>
60#include <linux/etherdevice.h>
60 61
61#define TIPC_MOD_VER "2.0.0" 62#define TIPC_MOD_VER "2.0.0"
62 63
@@ -89,8 +90,6 @@ extern int tipc_random __read_mostly;
89/* 90/*
90 * Routines available to privileged subsystems 91 * Routines available to privileged subsystems
91 */ 92 */
92int tipc_handler_start(void);
93void tipc_handler_stop(void);
94int tipc_netlink_start(void); 93int tipc_netlink_start(void);
95void tipc_netlink_stop(void); 94void tipc_netlink_stop(void);
96int tipc_socket_init(void); 95int tipc_socket_init(void);
@@ -109,12 +108,10 @@ void tipc_unregister_sysctl(void);
109#endif 108#endif
110 109
111/* 110/*
112 * TIPC timer and signal code 111 * TIPC timer code
113 */ 112 */
114typedef void (*Handler) (unsigned long); 113typedef void (*Handler) (unsigned long);
115 114
116u32 tipc_k_signal(Handler routine, unsigned long argument);
117
118/** 115/**
119 * k_init_timer - initialize a timer 116 * k_init_timer - initialize a timer
120 * @timer: pointer to timer structure 117 * @timer: pointer to timer structure
@@ -191,6 +188,7 @@ static inline void k_term_timer(struct timer_list *timer)
191struct tipc_skb_cb { 188struct tipc_skb_cb {
192 void *handle; 189 void *handle;
193 bool deferred; 190 bool deferred;
191 struct sk_buff *tail;
194}; 192};
195 193
196#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0])) 194#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 542fe3413dc4..aa722a42ef8b 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/discover.c 2 * net/tipc/discover.c
3 * 3 *
4 * Copyright (c) 2003-2006, Ericsson AB 4 * Copyright (c) 2003-2006, 2014, Ericsson AB
5 * Copyright (c) 2005-2006, 2010-2011, Wind River Systems 5 * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -46,8 +46,9 @@
46 46
47/** 47/**
48 * struct tipc_link_req - information about an ongoing link setup request 48 * struct tipc_link_req - information about an ongoing link setup request
49 * @bearer: bearer issuing requests 49 * @bearer_id: identity of bearer issuing requests
50 * @dest: destination address for request messages 50 * @dest: destination address for request messages
51 * @domain: network domain to which links can be established
51 * @num_nodes: number of nodes currently discovered (i.e. with an active link) 52 * @num_nodes: number of nodes currently discovered (i.e. with an active link)
52 * @lock: spinlock for controlling access to requests 53 * @lock: spinlock for controlling access to requests
53 * @buf: request message to be (repeatedly) sent 54 * @buf: request message to be (repeatedly) sent
@@ -55,8 +56,9 @@
55 * @timer_intv: current interval between requests (in ms) 56 * @timer_intv: current interval between requests (in ms)
56 */ 57 */
57struct tipc_link_req { 58struct tipc_link_req {
58 struct tipc_bearer *bearer; 59 u32 bearer_id;
59 struct tipc_media_addr dest; 60 struct tipc_media_addr dest;
61 u32 domain;
60 int num_nodes; 62 int num_nodes;
61 spinlock_t lock; 63 spinlock_t lock;
62 struct sk_buff *buf; 64 struct sk_buff *buf;
@@ -69,22 +71,19 @@ struct tipc_link_req {
69 * @type: message type (request or response) 71 * @type: message type (request or response)
70 * @b_ptr: ptr to bearer issuing message 72 * @b_ptr: ptr to bearer issuing message
71 */ 73 */
72static struct sk_buff *tipc_disc_init_msg(u32 type, struct tipc_bearer *b_ptr) 74static void tipc_disc_init_msg(struct sk_buff *buf, u32 type,
75 struct tipc_bearer *b_ptr)
73{ 76{
74 struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE);
75 struct tipc_msg *msg; 77 struct tipc_msg *msg;
76 u32 dest_domain = b_ptr->domain; 78 u32 dest_domain = b_ptr->domain;
77 79
78 if (buf) { 80 msg = buf_msg(buf);
79 msg = buf_msg(buf); 81 tipc_msg_init(msg, LINK_CONFIG, type, INT_H_SIZE, dest_domain);
80 tipc_msg_init(msg, LINK_CONFIG, type, INT_H_SIZE, dest_domain); 82 msg_set_non_seq(msg, 1);
81 msg_set_non_seq(msg, 1); 83 msg_set_node_sig(msg, tipc_random);
82 msg_set_node_sig(msg, tipc_random); 84 msg_set_dest_domain(msg, dest_domain);
83 msg_set_dest_domain(msg, dest_domain); 85 msg_set_bc_netid(msg, tipc_net_id);
84 msg_set_bc_netid(msg, tipc_net_id); 86 b_ptr->media->addr2msg(msg_media_addr(msg), &b_ptr->addr);
85 b_ptr->media->addr2msg(&b_ptr->addr, msg_media_addr(msg));
86 }
87 return buf;
88} 87}
89 88
90/** 89/**
@@ -107,146 +106,150 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
107} 106}
108 107
109/** 108/**
110 * tipc_disc_rcv - handle incoming link setup message (request or response) 109 * tipc_disc_rcv - handle incoming discovery message (request or response)
111 * @buf: buffer containing message 110 * @buf: buffer containing message
112 * @b_ptr: bearer that message arrived on 111 * @bearer: bearer that message arrived on
113 */ 112 */
114void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr) 113void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer)
115{ 114{
116 struct tipc_node *n_ptr; 115 struct tipc_node *node;
117 struct tipc_link *link; 116 struct tipc_link *link;
118 struct tipc_media_addr media_addr; 117 struct tipc_media_addr maddr;
119 struct sk_buff *rbuf; 118 struct sk_buff *rbuf;
120 struct tipc_msg *msg = buf_msg(buf); 119 struct tipc_msg *msg = buf_msg(buf);
121 u32 dest = msg_dest_domain(msg); 120 u32 ddom = msg_dest_domain(msg);
122 u32 orig = msg_prevnode(msg); 121 u32 onode = msg_prevnode(msg);
123 u32 net_id = msg_bc_netid(msg); 122 u32 net_id = msg_bc_netid(msg);
124 u32 type = msg_type(msg); 123 u32 mtyp = msg_type(msg);
125 u32 signature = msg_node_sig(msg); 124 u32 signature = msg_node_sig(msg);
126 int addr_mismatch; 125 bool addr_match = false;
127 int link_fully_up; 126 bool sign_match = false;
128 127 bool link_up = false;
129 media_addr.broadcast = 1; 128 bool accept_addr = false;
130 b_ptr->media->msg2addr(b_ptr, &media_addr, msg_media_addr(msg)); 129 bool accept_sign = false;
130 bool respond = false;
131
132 bearer->media->msg2addr(bearer, &maddr, msg_media_addr(msg));
131 kfree_skb(buf); 133 kfree_skb(buf);
132 134
133 /* Ensure message from node is valid and communication is permitted */ 135 /* Ensure message from node is valid and communication is permitted */
134 if (net_id != tipc_net_id) 136 if (net_id != tipc_net_id)
135 return; 137 return;
136 if (media_addr.broadcast) 138 if (maddr.broadcast)
137 return; 139 return;
138 if (!tipc_addr_domain_valid(dest)) 140 if (!tipc_addr_domain_valid(ddom))
139 return; 141 return;
140 if (!tipc_addr_node_valid(orig)) 142 if (!tipc_addr_node_valid(onode))
141 return; 143 return;
142 if (orig == tipc_own_addr) { 144
143 if (memcmp(&media_addr, &b_ptr->addr, sizeof(media_addr))) 145 if (in_own_node(onode)) {
144 disc_dupl_alert(b_ptr, tipc_own_addr, &media_addr); 146 if (memcmp(&maddr, &bearer->addr, sizeof(maddr)))
147 disc_dupl_alert(bearer, tipc_own_addr, &maddr);
145 return; 148 return;
146 } 149 }
147 if (!tipc_in_scope(dest, tipc_own_addr)) 150 if (!tipc_in_scope(ddom, tipc_own_addr))
148 return; 151 return;
149 if (!tipc_in_scope(b_ptr->domain, orig)) 152 if (!tipc_in_scope(bearer->domain, onode))
150 return; 153 return;
151 154
152 /* Locate structure corresponding to requesting node */ 155 /* Locate, or if necessary, create, node: */
153 n_ptr = tipc_node_find(orig); 156 node = tipc_node_find(onode);
154 if (!n_ptr) { 157 if (!node)
155 n_ptr = tipc_node_create(orig); 158 node = tipc_node_create(onode);
156 if (!n_ptr) 159 if (!node)
157 return; 160 return;
158 }
159 tipc_node_lock(n_ptr);
160 161
161 /* Prepare to validate requesting node's signature and media address */ 162 tipc_node_lock(node);
162 link = n_ptr->links[b_ptr->identity]; 163 link = node->links[bearer->identity];
163 addr_mismatch = (link != NULL) &&
164 memcmp(&link->media_addr, &media_addr, sizeof(media_addr));
165 164
166 /* 165 /* Prepare to validate requesting node's signature and media address */
167 * Ensure discovery message's signature is correct 166 sign_match = (signature == node->signature);
168 * 167 addr_match = link && !memcmp(&link->media_addr, &maddr, sizeof(maddr));
169 * If signature is incorrect and there is no working link to the node, 168 link_up = link && tipc_link_is_up(link);
170 * accept the new signature but invalidate all existing links to the 169
171 * node so they won't re-activate without a new discovery message. 170
172 * 171 /* These three flags give us eight permutations: */
173 * If signature is incorrect and the requested link to the node is 172
174 * working, accept the new signature. (This is an instance of delayed 173 if (sign_match && addr_match && link_up) {
175 * rediscovery, where a link endpoint was able to re-establish contact 174 /* All is fine. Do nothing. */
176 * with its peer endpoint on a node that rebooted before receiving a 175 } else if (sign_match && addr_match && !link_up) {
177 * discovery message from that node.) 176 /* Respond. The link will come up in due time */
178 * 177 respond = true;
179 * If signature is incorrect and there is a working link to the node 178 } else if (sign_match && !addr_match && link_up) {
180 * that is not the requested link, reject the request (must be from 179 /* Peer has changed i/f address without rebooting.
181 * a duplicate node). 180 * If so, the link will reset soon, and the next
182 */ 181 * discovery will be accepted. So we can ignore it.
183 if (signature != n_ptr->signature) { 182 * It may also be an cloned or malicious peer having
184 if (n_ptr->working_links == 0) { 183 * chosen the same node address and signature as an
185 struct tipc_link *curr_link; 184 * existing one.
186 int i; 185 * Ignore requests until the link goes down, if ever.
187 186 */
188 for (i = 0; i < MAX_BEARERS; i++) { 187 disc_dupl_alert(bearer, onode, &maddr);
189 curr_link = n_ptr->links[i]; 188 } else if (sign_match && !addr_match && !link_up) {
190 if (curr_link) { 189 /* Peer link has changed i/f address without rebooting.
191 memset(&curr_link->media_addr, 0, 190 * It may also be a cloned or malicious peer; we can't
192 sizeof(media_addr)); 191 * distinguish between the two.
193 tipc_link_reset(curr_link); 192 * The signature is correct, so we must accept.
194 } 193 */
195 } 194 accept_addr = true;
196 addr_mismatch = (link != NULL); 195 respond = true;
197 } else if (tipc_link_is_up(link) && !addr_mismatch) { 196 } else if (!sign_match && addr_match && link_up) {
198 /* delayed rediscovery */ 197 /* Peer node rebooted. Two possibilities:
199 } else { 198 * - Delayed re-discovery; this link endpoint has already
200 disc_dupl_alert(b_ptr, orig, &media_addr); 199 * reset and re-established contact with the peer, before
201 tipc_node_unlock(n_ptr); 200 * receiving a discovery message from that node.
202 return; 201 * (The peer happened to receive one from this node first).
203 } 202 * - The peer came back so fast that our side has not
204 n_ptr->signature = signature; 203 * discovered it yet. Probing from this side will soon
204 * reset the link, since there can be no working link
205 * endpoint at the peer end, and the link will re-establish.
206 * Accept the signature, since it comes from a known peer.
207 */
208 accept_sign = true;
209 } else if (!sign_match && addr_match && !link_up) {
210 /* The peer node has rebooted.
211 * Accept signature, since it is a known peer.
212 */
213 accept_sign = true;
214 respond = true;
215 } else if (!sign_match && !addr_match && link_up) {
216 /* Peer rebooted with new address, or a new/duplicate peer.
217 * Ignore until the link goes down, if ever.
218 */
219 disc_dupl_alert(bearer, onode, &maddr);
220 } else if (!sign_match && !addr_match && !link_up) {
221 /* Peer rebooted with new address, or it is a new peer.
222 * Accept signature and address.
223 */
224 accept_sign = true;
225 accept_addr = true;
226 respond = true;
205 } 227 }
206 228
207 /* 229 if (accept_sign)
208 * Ensure requesting node's media address is correct 230 node->signature = signature;
209 *
210 * If media address doesn't match and the link is working, reject the
211 * request (must be from a duplicate node).
212 *
213 * If media address doesn't match and the link is not working, accept
214 * the new media address and reset the link to ensure it starts up
215 * cleanly.
216 */
217 if (addr_mismatch) {
218 if (tipc_link_is_up(link)) {
219 disc_dupl_alert(b_ptr, orig, &media_addr);
220 tipc_node_unlock(n_ptr);
221 return;
222 } else {
223 memcpy(&link->media_addr, &media_addr,
224 sizeof(media_addr));
225 tipc_link_reset(link);
226 }
227 }
228 231
229 /* Create a link endpoint for this bearer, if necessary */ 232 if (accept_addr) {
230 if (!link) { 233 if (!link)
231 link = tipc_link_create(n_ptr, b_ptr, &media_addr); 234 link = tipc_link_create(node, bearer, &maddr);
232 if (!link) { 235 if (link) {
233 tipc_node_unlock(n_ptr); 236 memcpy(&link->media_addr, &maddr, sizeof(maddr));
234 return; 237 tipc_link_reset(link);
238 } else {
239 respond = false;
235 } 240 }
236 } 241 }
237 242
238 /* Accept discovery message & send response, if necessary */ 243 /* Send response, if necessary */
239 link_fully_up = link_working_working(link); 244 if (respond && (mtyp == DSC_REQ_MSG)) {
240 245 rbuf = tipc_buf_acquire(INT_H_SIZE);
241 if ((type == DSC_REQ_MSG) && !link_fully_up) {
242 rbuf = tipc_disc_init_msg(DSC_RESP_MSG, b_ptr);
243 if (rbuf) { 246 if (rbuf) {
244 tipc_bearer_send(b_ptr, rbuf, &media_addr); 247 tipc_disc_init_msg(rbuf, DSC_RESP_MSG, bearer);
248 tipc_bearer_send(bearer->identity, rbuf, &maddr);
245 kfree_skb(rbuf); 249 kfree_skb(rbuf);
246 } 250 }
247 } 251 }
248 252 tipc_node_unlock(node);
249 tipc_node_unlock(n_ptr);
250} 253}
251 254
252/** 255/**
@@ -303,7 +306,7 @@ static void disc_timeout(struct tipc_link_req *req)
303 spin_lock_bh(&req->lock); 306 spin_lock_bh(&req->lock);
304 307
305 /* Stop searching if only desired node has been found */ 308 /* Stop searching if only desired node has been found */
306 if (tipc_node(req->bearer->domain) && req->num_nodes) { 309 if (tipc_node(req->domain) && req->num_nodes) {
307 req->timer_intv = TIPC_LINK_REQ_INACTIVE; 310 req->timer_intv = TIPC_LINK_REQ_INACTIVE;
308 goto exit; 311 goto exit;
309 } 312 }
@@ -315,7 +318,7 @@ static void disc_timeout(struct tipc_link_req *req)
315 * hold at fast polling rate if we don't have any associated nodes, 318 * hold at fast polling rate if we don't have any associated nodes,
316 * otherwise hold at slow polling rate 319 * otherwise hold at slow polling rate
317 */ 320 */
318 tipc_bearer_send(req->bearer, req->buf, &req->dest); 321 tipc_bearer_send(req->bearer_id, req->buf, &req->dest);
319 322
320 323
321 req->timer_intv *= 2; 324 req->timer_intv *= 2;
@@ -347,21 +350,23 @@ int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest)
347 if (!req) 350 if (!req)
348 return -ENOMEM; 351 return -ENOMEM;
349 352
350 req->buf = tipc_disc_init_msg(DSC_REQ_MSG, b_ptr); 353 req->buf = tipc_buf_acquire(INT_H_SIZE);
351 if (!req->buf) { 354 if (!req->buf) {
352 kfree(req); 355 kfree(req);
353 return -ENOMSG; 356 return -ENOMEM;
354 } 357 }
355 358
359 tipc_disc_init_msg(req->buf, DSC_REQ_MSG, b_ptr);
356 memcpy(&req->dest, dest, sizeof(*dest)); 360 memcpy(&req->dest, dest, sizeof(*dest));
357 req->bearer = b_ptr; 361 req->bearer_id = b_ptr->identity;
362 req->domain = b_ptr->domain;
358 req->num_nodes = 0; 363 req->num_nodes = 0;
359 req->timer_intv = TIPC_LINK_REQ_INIT; 364 req->timer_intv = TIPC_LINK_REQ_INIT;
360 spin_lock_init(&req->lock); 365 spin_lock_init(&req->lock);
361 k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req); 366 k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req);
362 k_start_timer(&req->timer, req->timer_intv); 367 k_start_timer(&req->timer, req->timer_intv);
363 b_ptr->link_req = req; 368 b_ptr->link_req = req;
364 tipc_bearer_send(req->bearer, req->buf, &req->dest); 369 tipc_bearer_send(req->bearer_id, req->buf, &req->dest);
365 return 0; 370 return 0;
366} 371}
367 372
@@ -376,3 +381,23 @@ void tipc_disc_delete(struct tipc_link_req *req)
376 kfree_skb(req->buf); 381 kfree_skb(req->buf);
377 kfree(req); 382 kfree(req);
378} 383}
384
385/**
386 * tipc_disc_reset - reset object to send periodic link setup requests
387 * @b_ptr: ptr to bearer issuing requests
388 * @dest_domain: network domain to which links can be established
389 */
390void tipc_disc_reset(struct tipc_bearer *b_ptr)
391{
392 struct tipc_link_req *req = b_ptr->link_req;
393
394 spin_lock_bh(&req->lock);
395 tipc_disc_init_msg(req->buf, DSC_REQ_MSG, b_ptr);
396 req->bearer_id = b_ptr->identity;
397 req->domain = b_ptr->domain;
398 req->num_nodes = 0;
399 req->timer_intv = TIPC_LINK_REQ_INIT;
400 k_start_timer(&req->timer, req->timer_intv);
401 tipc_bearer_send(req->bearer_id, req->buf, &req->dest);
402 spin_unlock_bh(&req->lock);
403}
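The eight sign/address/link-state permutations handled in tipc_disc_rcv() above can be condensed into a small decision helper. A minimal sketch, not part of the commit; the helper is hypothetical and only restates the flag assignments made inline in the function.

static void disc_decide(bool sign_match, bool addr_match, bool link_up,
			bool *accept_sign, bool *accept_addr, bool *respond)
{
	*accept_sign = *accept_addr = *respond = false;

	if (sign_match && addr_match && link_up)
		return;				/* all is well, do nothing */
	if (sign_match && addr_match && !link_up)
		*respond = true;		/* link will come up in due time */
	else if (sign_match && !addr_match && !link_up)
		*accept_addr = *respond = true;	/* peer changed i/f address */
	else if (!sign_match && addr_match && link_up)
		*accept_sign = true;		/* delayed rediscovery after peer reboot */
	else if (!sign_match && addr_match && !link_up)
		*accept_sign = *respond = true;	/* peer rebooted */
	else if (!sign_match && !addr_match && !link_up)
		*accept_sign = *accept_addr = *respond = true; /* new or rebooted peer */
	/* the two remaining cases (address mismatch while the link is up)
	 * only trigger disc_dupl_alert() and are otherwise ignored */
}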
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
index 07f34729459d..515b57392f4d 100644
--- a/net/tipc/discover.h
+++ b/net/tipc/discover.h
@@ -41,6 +41,7 @@ struct tipc_link_req;
41 41
42int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest); 42int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest);
43void tipc_disc_delete(struct tipc_link_req *req); 43void tipc_disc_delete(struct tipc_link_req *req);
44void tipc_disc_reset(struct tipc_bearer *b_ptr);
44void tipc_disc_add_dest(struct tipc_link_req *req); 45void tipc_disc_add_dest(struct tipc_link_req *req);
45void tipc_disc_remove_dest(struct tipc_link_req *req); 46void tipc_disc_remove_dest(struct tipc_link_req *req);
46void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr); 47void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr);
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
index 67cf3f935dba..5e1426f1751f 100644
--- a/net/tipc/eth_media.c
+++ b/net/tipc/eth_media.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/eth_media.c: Ethernet bearer support for TIPC 2 * net/tipc/eth_media.c: Ethernet bearer support for TIPC
3 * 3 *
4 * Copyright (c) 2001-2007, 2013, Ericsson AB 4 * Copyright (c) 2001-2007, 2013-2014, Ericsson AB
5 * Copyright (c) 2005-2008, 2011-2013, Wind River Systems 5 * Copyright (c) 2005-2008, 2011-2013, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -37,39 +37,52 @@
37#include "core.h" 37#include "core.h"
38#include "bearer.h" 38#include "bearer.h"
39 39
40#define ETH_ADDR_OFFSET 4 /* message header offset of MAC address */ 40#define ETH_ADDR_OFFSET 4 /* MAC addr position inside address field */
41 41
42/* convert Ethernet address to string */ 42/* Convert Ethernet address (media address format) to string */
43static int tipc_eth_addr2str(struct tipc_media_addr *a, char *str_buf, 43static int tipc_eth_addr2str(struct tipc_media_addr *addr,
44 int str_size) 44 char *strbuf, int bufsz)
45{ 45{
46 if (str_size < 18) /* 18 = strlen("aa:bb:cc:dd:ee:ff\0") */ 46 if (bufsz < 18) /* 18 = strlen("aa:bb:cc:dd:ee:ff\0") */
47 return 1; 47 return 1;
48 48
49 sprintf(str_buf, "%pM", a->value); 49 sprintf(strbuf, "%pM", addr->value);
50 return 0; 50 return 0;
51} 51}
52 52
53/* convert Ethernet address format to message header format */ 53/* Convert from media address format to discovery message addr format */
54static int tipc_eth_addr2msg(struct tipc_media_addr *a, char *msg_area) 54static int tipc_eth_addr2msg(char *msg, struct tipc_media_addr *addr)
55{ 55{
56 memset(msg_area, 0, TIPC_MEDIA_ADDR_SIZE); 56 memset(msg, 0, TIPC_MEDIA_ADDR_SIZE);
57 msg_area[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH; 57 msg[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_ETH;
58 memcpy(msg_area + ETH_ADDR_OFFSET, a->value, ETH_ALEN); 58 memcpy(msg + ETH_ADDR_OFFSET, addr->value, ETH_ALEN);
59 return 0; 59 return 0;
60} 60}
61 61
62/* convert message header address format to Ethernet format */ 62/* Convert raw mac address format to media addr format */
63static int tipc_eth_msg2addr(const struct tipc_bearer *tb_ptr, 63static int tipc_eth_raw2addr(struct tipc_bearer *b,
64 struct tipc_media_addr *a, char *msg_area) 64 struct tipc_media_addr *addr,
65 char *msg)
65{ 66{
66 if (msg_area[TIPC_MEDIA_TYPE_OFFSET] != TIPC_MEDIA_TYPE_ETH) 67 char bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
67 return 1;
68 68
69 tipc_l2_media_addr_set(tb_ptr, a, msg_area + ETH_ADDR_OFFSET); 69 memset(addr, 0, sizeof(*addr));
70 ether_addr_copy(addr->value, msg);
71 addr->media_id = TIPC_MEDIA_TYPE_ETH;
72 addr->broadcast = !memcmp(addr->value, bcast_mac, ETH_ALEN);
70 return 0; 73 return 0;
71} 74}
72 75
76/* Convert discovery msg addr format to Ethernet media addr format */
77static int tipc_eth_msg2addr(struct tipc_bearer *b,
78 struct tipc_media_addr *addr,
79 char *msg)
80{
81 /* Skip past preamble: */
82 msg += ETH_ADDR_OFFSET;
83 return tipc_eth_raw2addr(b, addr, msg);
84}
85
73/* Ethernet media registration info */ 86/* Ethernet media registration info */
74struct tipc_media eth_media_info = { 87struct tipc_media eth_media_info = {
75 .send_msg = tipc_l2_send_msg, 88 .send_msg = tipc_l2_send_msg,
@@ -78,6 +91,7 @@ struct tipc_media eth_media_info = {
78 .addr2str = tipc_eth_addr2str, 91 .addr2str = tipc_eth_addr2str,
79 .addr2msg = tipc_eth_addr2msg, 92 .addr2msg = tipc_eth_addr2msg,
80 .msg2addr = tipc_eth_msg2addr, 93 .msg2addr = tipc_eth_msg2addr,
94 .raw2addr = tipc_eth_raw2addr,
81 .priority = TIPC_DEF_LINK_PRI, 95 .priority = TIPC_DEF_LINK_PRI,
82 .tolerance = TIPC_DEF_LINK_TOL, 96 .tolerance = TIPC_DEF_LINK_TOL,
83 .window = TIPC_DEF_LINK_WIN, 97 .window = TIPC_DEF_LINK_WIN,
@@ -85,4 +99,3 @@ struct tipc_media eth_media_info = {
85 .hwaddr_len = ETH_ALEN, 99 .hwaddr_len = ETH_ALEN,
86 .name = "eth" 100 .name = "eth"
87}; 101};
88
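A minimal usage sketch, not part of the commit, showing a round trip through the two conversion directions above; 'some_bearer' is a hypothetical enabled Ethernet bearer and the constants come from bearer.h.

static void eth_addr_roundtrip(struct tipc_bearer *some_bearer,
			       struct tipc_media_addr *in)
{
	char field[TIPC_MEDIA_ADDR_SIZE];	/* address field of a discovery msg */
	struct tipc_media_addr out;

	tipc_eth_addr2msg(field, in);		/* media addr -> msg address field */
	tipc_eth_msg2addr(some_bearer, &out, field); /* msg address field -> media addr */
	/* out.value now matches in->value; out.broadcast is set only if
	 * the MAC is ff:ff:ff:ff:ff:ff */
}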
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
deleted file mode 100644
index 1fabf160501f..000000000000
--- a/net/tipc/handler.c
+++ /dev/null
@@ -1,134 +0,0 @@
1/*
2 * net/tipc/handler.c: TIPC signal handling
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38
39struct queue_item {
40 struct list_head next_signal;
41 void (*handler) (unsigned long);
42 unsigned long data;
43};
44
45static struct kmem_cache *tipc_queue_item_cache;
46static struct list_head signal_queue_head;
47static DEFINE_SPINLOCK(qitem_lock);
48static int handler_enabled __read_mostly;
49
50static void process_signal_queue(unsigned long dummy);
51
52static DECLARE_TASKLET_DISABLED(tipc_tasklet, process_signal_queue, 0);
53
54
55unsigned int tipc_k_signal(Handler routine, unsigned long argument)
56{
57 struct queue_item *item;
58
59 spin_lock_bh(&qitem_lock);
60 if (!handler_enabled) {
61 spin_unlock_bh(&qitem_lock);
62 return -ENOPROTOOPT;
63 }
64
65 item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC);
66 if (!item) {
67 pr_err("Signal queue out of memory\n");
68 spin_unlock_bh(&qitem_lock);
69 return -ENOMEM;
70 }
71 item->handler = routine;
72 item->data = argument;
73 list_add_tail(&item->next_signal, &signal_queue_head);
74 spin_unlock_bh(&qitem_lock);
75 tasklet_schedule(&tipc_tasklet);
76 return 0;
77}
78
79static void process_signal_queue(unsigned long dummy)
80{
81 struct queue_item *__volatile__ item;
82 struct list_head *l, *n;
83
84 spin_lock_bh(&qitem_lock);
85 list_for_each_safe(l, n, &signal_queue_head) {
86 item = list_entry(l, struct queue_item, next_signal);
87 list_del(&item->next_signal);
88 spin_unlock_bh(&qitem_lock);
89 item->handler(item->data);
90 spin_lock_bh(&qitem_lock);
91 kmem_cache_free(tipc_queue_item_cache, item);
92 }
93 spin_unlock_bh(&qitem_lock);
94}
95
96int tipc_handler_start(void)
97{
98 tipc_queue_item_cache =
99 kmem_cache_create("tipc_queue_items", sizeof(struct queue_item),
100 0, SLAB_HWCACHE_ALIGN, NULL);
101 if (!tipc_queue_item_cache)
102 return -ENOMEM;
103
104 INIT_LIST_HEAD(&signal_queue_head);
105 tasklet_enable(&tipc_tasklet);
106 handler_enabled = 1;
107 return 0;
108}
109
110void tipc_handler_stop(void)
111{
112 struct list_head *l, *n;
113 struct queue_item *item;
114
115 spin_lock_bh(&qitem_lock);
116 if (!handler_enabled) {
117 spin_unlock_bh(&qitem_lock);
118 return;
119 }
120 handler_enabled = 0;
121 spin_unlock_bh(&qitem_lock);
122
123 tasklet_kill(&tipc_tasklet);
124
125 spin_lock_bh(&qitem_lock);
126 list_for_each_safe(l, n, &signal_queue_head) {
127 item = list_entry(l, struct queue_item, next_signal);
128 list_del(&item->next_signal);
129 kmem_cache_free(tipc_queue_item_cache, item);
130 }
131 spin_unlock_bh(&qitem_lock);
132
133 kmem_cache_destroy(tipc_queue_item_cache);
134}
diff --git a/net/tipc/ib_media.c b/net/tipc/ib_media.c
index 844a77e25828..8522eef9c136 100644
--- a/net/tipc/ib_media.c
+++ b/net/tipc/ib_media.c
@@ -42,7 +42,7 @@
42#include "core.h" 42#include "core.h"
43#include "bearer.h" 43#include "bearer.h"
44 44
45/* convert InfiniBand address to string */ 45/* convert InfiniBand address (media address format) to string */
46static int tipc_ib_addr2str(struct tipc_media_addr *a, char *str_buf, 46static int tipc_ib_addr2str(struct tipc_media_addr *a, char *str_buf,
47 int str_size) 47 int str_size)
48{ 48{
@@ -54,23 +54,35 @@ static int tipc_ib_addr2str(struct tipc_media_addr *a, char *str_buf,
54 return 0; 54 return 0;
55} 55}
56 56
57/* convert InfiniBand address format to message header format */ 57/* Convert from media address format to discovery message addr format */
58static int tipc_ib_addr2msg(struct tipc_media_addr *a, char *msg_area) 58static int tipc_ib_addr2msg(char *msg, struct tipc_media_addr *addr)
59{ 59{
60 memset(msg_area, 0, TIPC_MEDIA_ADDR_SIZE); 60 memset(msg, 0, TIPC_MEDIA_ADDR_SIZE);
61 msg_area[TIPC_MEDIA_TYPE_OFFSET] = TIPC_MEDIA_TYPE_IB; 61 memcpy(msg, addr->value, INFINIBAND_ALEN);
62 memcpy(msg_area, a->value, INFINIBAND_ALEN);
63 return 0; 62 return 0;
64} 63}
65 64
66/* convert message header address format to InfiniBand format */ 65/* Convert raw InfiniBand address format to media addr format */
67static int tipc_ib_msg2addr(const struct tipc_bearer *tb_ptr, 66static int tipc_ib_raw2addr(struct tipc_bearer *b,
68 struct tipc_media_addr *a, char *msg_area) 67 struct tipc_media_addr *addr,
68 char *msg)
69{ 69{
70 tipc_l2_media_addr_set(tb_ptr, a, msg_area); 70 memset(addr, 0, sizeof(*addr));
71 memcpy(addr->value, msg, INFINIBAND_ALEN);
72 addr->media_id = TIPC_MEDIA_TYPE_IB;
73 addr->broadcast = !memcmp(msg, b->bcast_addr.value,
74 INFINIBAND_ALEN);
71 return 0; 75 return 0;
72} 76}
73 77
78/* Convert discovery msg addr format to InfiniBand media addr format */
79static int tipc_ib_msg2addr(struct tipc_bearer *b,
80 struct tipc_media_addr *addr,
81 char *msg)
82{
83 return tipc_ib_raw2addr(b, addr, msg);
84}
85
74/* InfiniBand media registration info */ 86/* InfiniBand media registration info */
75struct tipc_media ib_media_info = { 87struct tipc_media ib_media_info = {
76 .send_msg = tipc_l2_send_msg, 88 .send_msg = tipc_l2_send_msg,
@@ -79,6 +91,7 @@ struct tipc_media ib_media_info = {
79 .addr2str = tipc_ib_addr2str, 91 .addr2str = tipc_ib_addr2str,
80 .addr2msg = tipc_ib_addr2msg, 92 .addr2msg = tipc_ib_addr2msg,
81 .msg2addr = tipc_ib_msg2addr, 93 .msg2addr = tipc_ib_msg2addr,
94 .raw2addr = tipc_ib_raw2addr,
82 .priority = TIPC_DEF_LINK_PRI, 95 .priority = TIPC_DEF_LINK_PRI,
83 .tolerance = TIPC_DEF_LINK_TOL, 96 .tolerance = TIPC_DEF_LINK_TOL,
84 .window = TIPC_DEF_LINK_WIN, 97 .window = TIPC_DEF_LINK_WIN,
@@ -86,4 +99,3 @@ struct tipc_media ib_media_info = {
86 .hwaddr_len = INFINIBAND_ALEN, 99 .hwaddr_len = INFINIBAND_ALEN,
87 .name = "ib" 100 .name = "ib"
88}; 101};
89
diff --git a/net/tipc/link.c b/net/tipc/link.c
index c5190ab75290..ad2c57f5868d 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -37,6 +37,7 @@
37#include "core.h" 37#include "core.h"
38#include "link.h" 38#include "link.h"
39#include "port.h" 39#include "port.h"
40#include "socket.h"
40#include "name_distr.h" 41#include "name_distr.h"
41#include "discover.h" 42#include "discover.h"
42#include "config.h" 43#include "config.h"
@@ -101,9 +102,18 @@ static unsigned int align(unsigned int i)
101 102
102static void link_init_max_pkt(struct tipc_link *l_ptr) 103static void link_init_max_pkt(struct tipc_link *l_ptr)
103{ 104{
105 struct tipc_bearer *b_ptr;
104 u32 max_pkt; 106 u32 max_pkt;
105 107
106 max_pkt = (l_ptr->b_ptr->mtu & ~3); 108 rcu_read_lock();
109 b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
110 if (!b_ptr) {
111 rcu_read_unlock();
112 return;
113 }
114 max_pkt = (b_ptr->mtu & ~3);
115 rcu_read_unlock();
116
107 if (max_pkt > MAX_MSG_SIZE) 117 if (max_pkt > MAX_MSG_SIZE)
108 max_pkt = MAX_MSG_SIZE; 118 max_pkt = MAX_MSG_SIZE;
109 119
@@ -248,7 +258,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
248 l_ptr->owner = n_ptr; 258 l_ptr->owner = n_ptr;
249 l_ptr->checkpoint = 1; 259 l_ptr->checkpoint = 1;
250 l_ptr->peer_session = INVALID_SESSION; 260 l_ptr->peer_session = INVALID_SESSION;
251 l_ptr->b_ptr = b_ptr; 261 l_ptr->bearer_id = b_ptr->identity;
252 link_set_supervision_props(l_ptr, b_ptr->tolerance); 262 link_set_supervision_props(l_ptr, b_ptr->tolerance);
253 l_ptr->state = RESET_UNKNOWN; 263 l_ptr->state = RESET_UNKNOWN;
254 264
@@ -263,6 +273,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
263 l_ptr->priority = b_ptr->priority; 273 l_ptr->priority = b_ptr->priority;
264 tipc_link_set_queue_limits(l_ptr, b_ptr->window); 274 tipc_link_set_queue_limits(l_ptr, b_ptr->window);
265 275
276 l_ptr->net_plane = b_ptr->net_plane;
266 link_init_max_pkt(l_ptr); 277 link_init_max_pkt(l_ptr);
267 278
268 l_ptr->next_out_no = 1; 279 l_ptr->next_out_no = 1;
@@ -287,14 +298,14 @@ void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
287 298
288 rcu_read_lock(); 299 rcu_read_lock();
289 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { 300 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
290 spin_lock_bh(&n_ptr->lock); 301 tipc_node_lock(n_ptr);
291 l_ptr = n_ptr->links[bearer_id]; 302 l_ptr = n_ptr->links[bearer_id];
292 if (l_ptr) { 303 if (l_ptr) {
293 tipc_link_reset(l_ptr); 304 tipc_link_reset(l_ptr);
294 if (shutting_down || !tipc_node_is_up(n_ptr)) { 305 if (shutting_down || !tipc_node_is_up(n_ptr)) {
295 tipc_node_detach_link(l_ptr->owner, l_ptr); 306 tipc_node_detach_link(l_ptr->owner, l_ptr);
296 tipc_link_reset_fragments(l_ptr); 307 tipc_link_reset_fragments(l_ptr);
297 spin_unlock_bh(&n_ptr->lock); 308 tipc_node_unlock(n_ptr);
298 309
299 /* Nobody else can access this link now: */ 310 /* Nobody else can access this link now: */
300 del_timer_sync(&l_ptr->timer); 311 del_timer_sync(&l_ptr->timer);
@@ -302,12 +313,12 @@ void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
302 } else { 313 } else {
303 /* Detach/delete when failover is finished: */ 314 /* Detach/delete when failover is finished: */
304 l_ptr->flags |= LINK_STOPPED; 315 l_ptr->flags |= LINK_STOPPED;
305 spin_unlock_bh(&n_ptr->lock); 316 tipc_node_unlock(n_ptr);
306 del_timer_sync(&l_ptr->timer); 317 del_timer_sync(&l_ptr->timer);
307 } 318 }
308 continue; 319 continue;
309 } 320 }
310 spin_unlock_bh(&n_ptr->lock); 321 tipc_node_unlock(n_ptr);
311 } 322 }
312 rcu_read_unlock(); 323 rcu_read_unlock();
313} 324}
@@ -388,9 +399,8 @@ static void link_release_outqueue(struct tipc_link *l_ptr)
388 */ 399 */
389void tipc_link_reset_fragments(struct tipc_link *l_ptr) 400void tipc_link_reset_fragments(struct tipc_link *l_ptr)
390{ 401{
391 kfree_skb(l_ptr->reasm_head); 402 kfree_skb(l_ptr->reasm_buf);
392 l_ptr->reasm_head = NULL; 403 l_ptr->reasm_buf = NULL;
393 l_ptr->reasm_tail = NULL;
394} 404}
395 405
396/** 406/**
@@ -426,7 +436,7 @@ void tipc_link_reset(struct tipc_link *l_ptr)
426 return; 436 return;
427 437
428 tipc_node_link_down(l_ptr->owner, l_ptr); 438 tipc_node_link_down(l_ptr->owner, l_ptr);
429 tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr); 439 tipc_bearer_remove_dest(l_ptr->bearer_id, l_ptr->addr);
430 440
431 if (was_active_link && tipc_node_active_links(l_ptr->owner)) { 441 if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
432 l_ptr->reset_checkpoint = checkpoint; 442 l_ptr->reset_checkpoint = checkpoint;
@@ -464,11 +474,11 @@ void tipc_link_reset_list(unsigned int bearer_id)
464 474
465 rcu_read_lock(); 475 rcu_read_lock();
466 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { 476 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
467 spin_lock_bh(&n_ptr->lock); 477 tipc_node_lock(n_ptr);
468 l_ptr = n_ptr->links[bearer_id]; 478 l_ptr = n_ptr->links[bearer_id];
469 if (l_ptr) 479 if (l_ptr)
470 tipc_link_reset(l_ptr); 480 tipc_link_reset(l_ptr);
471 spin_unlock_bh(&n_ptr->lock); 481 tipc_node_unlock(n_ptr);
472 } 482 }
473 rcu_read_unlock(); 483 rcu_read_unlock();
474} 484}
@@ -477,7 +487,7 @@ static void link_activate(struct tipc_link *l_ptr)
477{ 487{
478 l_ptr->next_in_no = l_ptr->stats.recv_info = 1; 488 l_ptr->next_in_no = l_ptr->stats.recv_info = 1;
479 tipc_node_link_up(l_ptr->owner, l_ptr); 489 tipc_node_link_up(l_ptr->owner, l_ptr);
480 tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr); 490 tipc_bearer_add_dest(l_ptr->bearer_id, l_ptr->addr);
481} 491}
482 492
483/** 493/**
@@ -777,7 +787,7 @@ int __tipc_link_xmit(struct tipc_link *l_ptr, struct sk_buff *buf)
777 if (likely(!link_congested(l_ptr))) { 787 if (likely(!link_congested(l_ptr))) {
778 link_add_to_outqueue(l_ptr, buf, msg); 788 link_add_to_outqueue(l_ptr, buf, msg);
779 789
780 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 790 tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
781 l_ptr->unacked_window = 0; 791 l_ptr->unacked_window = 0;
782 return dsz; 792 return dsz;
783 } 793 }
@@ -825,7 +835,6 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector)
825 struct tipc_node *n_ptr; 835 struct tipc_node *n_ptr;
826 int res = -ELINKCONG; 836 int res = -ELINKCONG;
827 837
828 read_lock_bh(&tipc_net_lock);
829 n_ptr = tipc_node_find(dest); 838 n_ptr = tipc_node_find(dest);
830 if (n_ptr) { 839 if (n_ptr) {
831 tipc_node_lock(n_ptr); 840 tipc_node_lock(n_ptr);
@@ -838,7 +847,6 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector)
838 } else { 847 } else {
839 kfree_skb(buf); 848 kfree_skb(buf);
840 } 849 }
841 read_unlock_bh(&tipc_net_lock);
842 return res; 850 return res;
843} 851}
844 852
@@ -902,7 +910,6 @@ void tipc_link_names_xmit(struct list_head *message_list, u32 dest)
902 if (list_empty(message_list)) 910 if (list_empty(message_list))
903 return; 911 return;
904 912
905 read_lock_bh(&tipc_net_lock);
906 n_ptr = tipc_node_find(dest); 913 n_ptr = tipc_node_find(dest);
907 if (n_ptr) { 914 if (n_ptr) {
908 tipc_node_lock(n_ptr); 915 tipc_node_lock(n_ptr);
@@ -917,7 +924,6 @@ void tipc_link_names_xmit(struct list_head *message_list, u32 dest)
917 } 924 }
918 tipc_node_unlock(n_ptr); 925 tipc_node_unlock(n_ptr);
919 } 926 }
920 read_unlock_bh(&tipc_net_lock);
921 927
922 /* discard the messages if they couldn't be sent */ 928 /* discard the messages if they couldn't be sent */
923 list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) { 929 list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
@@ -941,7 +947,7 @@ static int tipc_link_xmit_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
941 if (likely(!link_congested(l_ptr))) { 947 if (likely(!link_congested(l_ptr))) {
942 if (likely(msg_size(msg) <= l_ptr->max_pkt)) { 948 if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
943 link_add_to_outqueue(l_ptr, buf, msg); 949 link_add_to_outqueue(l_ptr, buf, msg);
944 tipc_bearer_send(l_ptr->b_ptr, buf, 950 tipc_bearer_send(l_ptr->bearer_id, buf,
945 &l_ptr->media_addr); 951 &l_ptr->media_addr);
946 l_ptr->unacked_window = 0; 952 l_ptr->unacked_window = 0;
947 return res; 953 return res;
@@ -979,7 +985,6 @@ again:
979 if (unlikely(res < 0)) 985 if (unlikely(res < 0))
980 return res; 986 return res;
981 987
982 read_lock_bh(&tipc_net_lock);
983 node = tipc_node_find(destaddr); 988 node = tipc_node_find(destaddr);
984 if (likely(node)) { 989 if (likely(node)) {
985 tipc_node_lock(node); 990 tipc_node_lock(node);
@@ -990,7 +995,6 @@ again:
990 &sender->max_pkt); 995 &sender->max_pkt);
991exit: 996exit:
992 tipc_node_unlock(node); 997 tipc_node_unlock(node);
993 read_unlock_bh(&tipc_net_lock);
994 return res; 998 return res;
995 } 999 }
996 1000
@@ -1007,7 +1011,6 @@ exit:
1007 */ 1011 */
1008 sender->max_pkt = l_ptr->max_pkt; 1012 sender->max_pkt = l_ptr->max_pkt;
1009 tipc_node_unlock(node); 1013 tipc_node_unlock(node);
1010 read_unlock_bh(&tipc_net_lock);
1011 1014
1012 1015
1013 if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt) 1016 if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
@@ -1018,7 +1021,6 @@ exit:
1018 } 1021 }
1019 tipc_node_unlock(node); 1022 tipc_node_unlock(node);
1020 } 1023 }
1021 read_unlock_bh(&tipc_net_lock);
1022 1024
1023 /* Couldn't find a link to the destination node */ 1025 /* Couldn't find a link to the destination node */
1024 kfree_skb(buf); 1026 kfree_skb(buf);
@@ -1204,7 +1206,7 @@ static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1204 if (r_q_size && buf) { 1206 if (r_q_size && buf) {
1205 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); 1207 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1206 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 1208 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1207 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 1209 tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
1208 l_ptr->retransm_queue_head = mod(++r_q_head); 1210 l_ptr->retransm_queue_head = mod(++r_q_head);
1209 l_ptr->retransm_queue_size = --r_q_size; 1211 l_ptr->retransm_queue_size = --r_q_size;
1210 l_ptr->stats.retransmitted++; 1212 l_ptr->stats.retransmitted++;
@@ -1216,7 +1218,7 @@ static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1216 if (buf) { 1218 if (buf) {
1217 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1)); 1219 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1218 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in); 1220 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1219 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 1221 tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
1220 l_ptr->unacked_window = 0; 1222 l_ptr->unacked_window = 0;
1221 kfree_skb(buf); 1223 kfree_skb(buf);
1222 l_ptr->proto_msg_queue = NULL; 1224 l_ptr->proto_msg_queue = NULL;
@@ -1233,7 +1235,8 @@ static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
1233 if (mod(next - first) < l_ptr->queue_limit[0]) { 1235 if (mod(next - first) < l_ptr->queue_limit[0]) {
1234 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 1236 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1235 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1237 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1236 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 1238 tipc_bearer_send(l_ptr->bearer_id, buf,
1239 &l_ptr->media_addr);
1237 if (msg_user(msg) == MSG_BUNDLER) 1240 if (msg_user(msg) == MSG_BUNDLER)
1238 msg_set_type(msg, CLOSED_MSG); 1241 msg_set_type(msg, CLOSED_MSG);
1239 l_ptr->next_out = buf->next; 1242 l_ptr->next_out = buf->next;
@@ -1256,33 +1259,24 @@ void tipc_link_push_queue(struct tipc_link *l_ptr)
1256 } while (!res); 1259 } while (!res);
1257} 1260}
1258 1261
1259static void link_reset_all(unsigned long addr) 1262void tipc_link_reset_all(struct tipc_node *node)
1260{ 1263{
1261 struct tipc_node *n_ptr;
1262 char addr_string[16]; 1264 char addr_string[16];
1263 u32 i; 1265 u32 i;
1264 1266
1265 read_lock_bh(&tipc_net_lock); 1267 tipc_node_lock(node);
1266 n_ptr = tipc_node_find((u32)addr);
1267 if (!n_ptr) {
1268 read_unlock_bh(&tipc_net_lock);
1269 return; /* node no longer exists */
1270 }
1271
1272 tipc_node_lock(n_ptr);
1273 1268
1274 pr_warn("Resetting all links to %s\n", 1269 pr_warn("Resetting all links to %s\n",
1275 tipc_addr_string_fill(addr_string, n_ptr->addr)); 1270 tipc_addr_string_fill(addr_string, node->addr));
1276 1271
1277 for (i = 0; i < MAX_BEARERS; i++) { 1272 for (i = 0; i < MAX_BEARERS; i++) {
1278 if (n_ptr->links[i]) { 1273 if (node->links[i]) {
1279 link_print(n_ptr->links[i], "Resetting link\n"); 1274 link_print(node->links[i], "Resetting link\n");
1280 tipc_link_reset(n_ptr->links[i]); 1275 tipc_link_reset(node->links[i]);
1281 } 1276 }
1282 } 1277 }
1283 1278
1284 tipc_node_unlock(n_ptr); 1279 tipc_node_unlock(node);
1285 read_unlock_bh(&tipc_net_lock);
1286} 1280}
1287 1281
1288static void link_retransmit_failure(struct tipc_link *l_ptr, 1282static void link_retransmit_failure(struct tipc_link *l_ptr,
@@ -1319,10 +1313,9 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
1319 n_ptr->bclink.oos_state, 1313 n_ptr->bclink.oos_state,
1320 n_ptr->bclink.last_sent); 1314 n_ptr->bclink.last_sent);
1321 1315
1322 tipc_k_signal((Handler)link_reset_all, (unsigned long)n_ptr->addr);
1323
1324 tipc_node_unlock(n_ptr); 1316 tipc_node_unlock(n_ptr);
1325 1317
1318 tipc_bclink_set_flags(TIPC_BCLINK_RESET);
1326 l_ptr->stale_count = 0; 1319 l_ptr->stale_count = 0;
1327 } 1320 }
1328} 1321}
@@ -1352,7 +1345,7 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
1352 msg = buf_msg(buf); 1345 msg = buf_msg(buf);
1353 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 1346 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1354 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1347 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1355 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 1348 tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
1356 buf = buf->next; 1349 buf = buf->next;
1357 retransmits--; 1350 retransmits--;
1358 l_ptr->stats.retransmitted++; 1351 l_ptr->stats.retransmitted++;
@@ -1440,14 +1433,13 @@ static int link_recv_buf_validate(struct sk_buff *buf)
1440/** 1433/**
1441 * tipc_rcv - process TIPC packets/messages arriving from off-node 1434 * tipc_rcv - process TIPC packets/messages arriving from off-node
1442 * @head: pointer to message buffer chain 1435 * @head: pointer to message buffer chain
1443 * @tb_ptr: pointer to bearer message arrived on 1436 * @b_ptr: pointer to bearer message arrived on
1444 * 1437 *
1445 * Invoked with no locks held. Bearer pointer must point to a valid bearer 1438 * Invoked with no locks held. Bearer pointer must point to a valid bearer
1446 * structure (i.e. cannot be NULL), but bearer can be inactive. 1439 * structure (i.e. cannot be NULL), but bearer can be inactive.
1447 */ 1440 */
1448void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr) 1441void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
1449{ 1442{
1450 read_lock_bh(&tipc_net_lock);
1451 while (head) { 1443 while (head) {
1452 struct tipc_node *n_ptr; 1444 struct tipc_node *n_ptr;
1453 struct tipc_link *l_ptr; 1445 struct tipc_link *l_ptr;
@@ -1497,14 +1489,14 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
1497 goto unlock_discard; 1489 goto unlock_discard;
1498 1490
1499 /* Verify that communication with node is currently allowed */ 1491 /* Verify that communication with node is currently allowed */
1500 if ((n_ptr->block_setup & WAIT_PEER_DOWN) && 1492 if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
1501 msg_user(msg) == LINK_PROTOCOL && 1493 msg_user(msg) == LINK_PROTOCOL &&
1502 (msg_type(msg) == RESET_MSG || 1494 (msg_type(msg) == RESET_MSG ||
1503 msg_type(msg) == ACTIVATE_MSG) && 1495 msg_type(msg) == ACTIVATE_MSG) &&
1504 !msg_redundant_link(msg)) 1496 !msg_redundant_link(msg))
1505 n_ptr->block_setup &= ~WAIT_PEER_DOWN; 1497 n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;
1506 1498
1507 if (n_ptr->block_setup) 1499 if (tipc_node_blocked(n_ptr))
1508 goto unlock_discard; 1500 goto unlock_discard;
1509 1501
1510 /* Validate message sequence number info */ 1502 /* Validate message sequence number info */
@@ -1581,17 +1573,12 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
1581 } 1573 }
1582 msg = buf_msg(buf); 1574 msg = buf_msg(buf);
1583 } else if (msg_user(msg) == MSG_FRAGMENTER) { 1575 } else if (msg_user(msg) == MSG_FRAGMENTER) {
1584 int rc;
1585
1586 l_ptr->stats.recv_fragments++; 1576 l_ptr->stats.recv_fragments++;
1587 rc = tipc_link_frag_rcv(&l_ptr->reasm_head, 1577 if (tipc_buf_append(&l_ptr->reasm_buf, &buf)) {
1588 &l_ptr->reasm_tail,
1589 &buf);
1590 if (rc == LINK_REASM_COMPLETE) {
1591 l_ptr->stats.recv_fragmented++; 1578 l_ptr->stats.recv_fragmented++;
1592 msg = buf_msg(buf); 1579 msg = buf_msg(buf);
1593 } else { 1580 } else {
1594 if (rc == LINK_REASM_ERROR) 1581 if (!l_ptr->reasm_buf)
1595 tipc_link_reset(l_ptr); 1582 tipc_link_reset(l_ptr);
1596 tipc_node_unlock(n_ptr); 1583 tipc_node_unlock(n_ptr);
1597 continue; 1584 continue;
@@ -1604,7 +1591,7 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
1604 case TIPC_HIGH_IMPORTANCE: 1591 case TIPC_HIGH_IMPORTANCE:
1605 case TIPC_CRITICAL_IMPORTANCE: 1592 case TIPC_CRITICAL_IMPORTANCE:
1606 tipc_node_unlock(n_ptr); 1593 tipc_node_unlock(n_ptr);
1607 tipc_port_rcv(buf); 1594 tipc_sk_rcv(buf);
1608 continue; 1595 continue;
1609 case MSG_BUNDLER: 1596 case MSG_BUNDLER:
1610 l_ptr->stats.recv_bundles++; 1597 l_ptr->stats.recv_bundles++;
@@ -1635,7 +1622,6 @@ unlock_discard:
1635discard: 1622discard:
1636 kfree_skb(buf); 1623 kfree_skb(buf);
1637 } 1624 }
1638 read_unlock_bh(&tipc_net_lock);
1639} 1625}
1640 1626
1641/** 1627/**
@@ -1747,12 +1733,12 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
1747 return; 1733 return;
1748 1734
1749 /* Abort non-RESET send if communication with node is prohibited */ 1735 /* Abort non-RESET send if communication with node is prohibited */
1750 if ((l_ptr->owner->block_setup) && (msg_typ != RESET_MSG)) 1736 if ((tipc_node_blocked(l_ptr->owner)) && (msg_typ != RESET_MSG))
1751 return; 1737 return;
1752 1738
1753 /* Create protocol message with "out-of-sequence" sequence number */ 1739 /* Create protocol message with "out-of-sequence" sequence number */
1754 msg_set_type(msg, msg_typ); 1740 msg_set_type(msg, msg_typ);
1755 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane); 1741 msg_set_net_plane(msg, l_ptr->net_plane);
1756 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1742 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1757 msg_set_last_bcast(msg, tipc_bclink_get_last_sent()); 1743 msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
1758 1744
@@ -1818,7 +1804,7 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
1818 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg)); 1804 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
1819 buf->priority = TC_PRIO_CONTROL; 1805 buf->priority = TC_PRIO_CONTROL;
1820 1806
1821 tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr); 1807 tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
1822 l_ptr->unacked_window = 0; 1808 l_ptr->unacked_window = 0;
1823 kfree_skb(buf); 1809 kfree_skb(buf);
1824} 1810}
@@ -1840,12 +1826,9 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
1840 if (l_ptr->exp_msg_count) 1826 if (l_ptr->exp_msg_count)
1841 goto exit; 1827 goto exit;
1842 1828
1843 /* record unnumbered packet arrival (force mismatch on next timeout) */ 1829 if (l_ptr->net_plane != msg_net_plane(msg))
1844 l_ptr->checkpoint--;
1845
1846 if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
1847 if (tipc_own_addr > msg_prevnode(msg)) 1830 if (tipc_own_addr > msg_prevnode(msg))
1848 l_ptr->b_ptr->net_plane = msg_net_plane(msg); 1831 l_ptr->net_plane = msg_net_plane(msg);
1849 1832
1850 switch (msg_type(msg)) { 1833 switch (msg_type(msg)) {
1851 1834
@@ -1862,7 +1845,7 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
1862 * peer has lost contact -- don't allow peer's links 1845 * peer has lost contact -- don't allow peer's links
1863 * to reactivate before we recognize loss & clean up 1846 * to reactivate before we recognize loss & clean up
1864 */ 1847 */
1865 l_ptr->owner->block_setup = WAIT_NODE_DOWN; 1848 l_ptr->owner->action_flags |= TIPC_WAIT_OWN_LINKS_DOWN;
1866 } 1849 }
1867 1850
1868 link_state_event(l_ptr, RESET_MSG); 1851 link_state_event(l_ptr, RESET_MSG);
@@ -1918,6 +1901,10 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
1918 tipc_link_reset(l_ptr); /* Enforce change to take effect */ 1901 tipc_link_reset(l_ptr); /* Enforce change to take effect */
1919 break; 1902 break;
1920 } 1903 }
1904
1905 /* Record reception; force mismatch at next timeout: */
1906 l_ptr->checkpoint--;
1907
1921 link_state_event(l_ptr, TRAFFIC_MSG_EVT); 1908 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1922 l_ptr->stats.recv_states++; 1909 l_ptr->stats.recv_states++;
1923 if (link_reset_unknown(l_ptr)) 1910 if (link_reset_unknown(l_ptr))
@@ -2177,9 +2164,7 @@ static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
2177 } 2164 }
2178 if (msg_user(msg) == MSG_FRAGMENTER) { 2165 if (msg_user(msg) == MSG_FRAGMENTER) {
2179 l_ptr->stats.recv_fragments++; 2166 l_ptr->stats.recv_fragments++;
2180 tipc_link_frag_rcv(&l_ptr->reasm_head, 2167 tipc_buf_append(&l_ptr->reasm_buf, &buf);
2181 &l_ptr->reasm_tail,
2182 &buf);
2183 } 2168 }
2184 } 2169 }
2185exit: 2170exit:
@@ -2317,53 +2302,6 @@ static int tipc_link_frag_xmit(struct tipc_link *l_ptr, struct sk_buff *buf)
2317 return dsz; 2302 return dsz;
2318} 2303}
2319 2304
2320/* tipc_link_frag_rcv(): Called with node lock on. Returns
2321 * the reassembled buffer if message is complete.
2322 */
2323int tipc_link_frag_rcv(struct sk_buff **head, struct sk_buff **tail,
2324 struct sk_buff **fbuf)
2325{
2326 struct sk_buff *frag = *fbuf;
2327 struct tipc_msg *msg = buf_msg(frag);
2328 u32 fragid = msg_type(msg);
2329 bool headstolen;
2330 int delta;
2331
2332 skb_pull(frag, msg_hdr_sz(msg));
2333 if (fragid == FIRST_FRAGMENT) {
2334 if (*head || skb_unclone(frag, GFP_ATOMIC))
2335 goto out_free;
2336 *head = frag;
2337 skb_frag_list_init(*head);
2338 *fbuf = NULL;
2339 return 0;
2340 } else if (*head &&
2341 skb_try_coalesce(*head, frag, &headstolen, &delta)) {
2342 kfree_skb_partial(frag, headstolen);
2343 } else {
2344 if (!*head)
2345 goto out_free;
2346 if (!skb_has_frag_list(*head))
2347 skb_shinfo(*head)->frag_list = frag;
2348 else
2349 (*tail)->next = frag;
2350 *tail = frag;
2351 (*head)->truesize += frag->truesize;
2352 }
2353 if (fragid == LAST_FRAGMENT) {
2354 *fbuf = *head;
2355 *tail = *head = NULL;
2356 return LINK_REASM_COMPLETE;
2357 }
2358 *fbuf = NULL;
2359 return 0;
2360out_free:
2361 pr_warn_ratelimited("Link unable to reassemble fragmented message\n");
2362 kfree_skb(*fbuf);
2363 *fbuf = NULL;
2364 return LINK_REASM_ERROR;
2365}
2366
2367static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance) 2305static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
2368{ 2306{
2369 if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL)) 2307 if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
@@ -2397,8 +2335,6 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
2397/* tipc_link_find_owner - locate owner node of link by link's name 2335/* tipc_link_find_owner - locate owner node of link by link's name
2398 * @name: pointer to link name string 2336 * @name: pointer to link name string
2399 * @bearer_id: pointer to index in 'node->links' array where the link was found. 2337 * @bearer_id: pointer to index in 'node->links' array where the link was found.
2400 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
2401 * this also prevents link deletion.
2402 * 2338 *
2403 * Returns pointer to node owning the link, or 0 if no matching link is found. 2339 * Returns pointer to node owning the link, or 0 if no matching link is found.
2404 */ 2340 */
@@ -2460,7 +2396,7 @@ static int link_value_is_valid(u16 cmd, u32 new_value)
2460 * @new_value: new value of link, bearer, or media setting 2396 * @new_value: new value of link, bearer, or media setting
2461 * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*) 2397 * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
2462 * 2398 *
2463 * Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted. 2399 * Caller must hold RTNL lock to ensure link/bearer/media is not deleted.
2464 * 2400 *
2465 * Returns 0 if value updated and negative value on error. 2401 * Returns 0 if value updated and negative value on error.
2466 */ 2402 */
@@ -2566,9 +2502,7 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space
2566 " (cannot change setting on broadcast link)"); 2502 " (cannot change setting on broadcast link)");
2567 } 2503 }
2568 2504
2569 read_lock_bh(&tipc_net_lock);
2570 res = link_cmd_set_value(args->name, new_value, cmd); 2505 res = link_cmd_set_value(args->name, new_value, cmd);
2571 read_unlock_bh(&tipc_net_lock);
2572 if (res) 2506 if (res)
2573 return tipc_cfg_reply_error_string("cannot change link setting"); 2507 return tipc_cfg_reply_error_string("cannot change link setting");
2574 2508
@@ -2602,22 +2536,18 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_
2602 return tipc_cfg_reply_error_string("link not found"); 2536 return tipc_cfg_reply_error_string("link not found");
2603 return tipc_cfg_reply_none(); 2537 return tipc_cfg_reply_none();
2604 } 2538 }
2605 read_lock_bh(&tipc_net_lock);
2606 node = tipc_link_find_owner(link_name, &bearer_id); 2539 node = tipc_link_find_owner(link_name, &bearer_id);
2607 if (!node) { 2540 if (!node)
2608 read_unlock_bh(&tipc_net_lock);
2609 return tipc_cfg_reply_error_string("link not found"); 2541 return tipc_cfg_reply_error_string("link not found");
2610 } 2542
2611 tipc_node_lock(node); 2543 tipc_node_lock(node);
2612 l_ptr = node->links[bearer_id]; 2544 l_ptr = node->links[bearer_id];
2613 if (!l_ptr) { 2545 if (!l_ptr) {
2614 tipc_node_unlock(node); 2546 tipc_node_unlock(node);
2615 read_unlock_bh(&tipc_net_lock);
2616 return tipc_cfg_reply_error_string("link not found"); 2547 return tipc_cfg_reply_error_string("link not found");
2617 } 2548 }
2618 link_reset_statistics(l_ptr); 2549 link_reset_statistics(l_ptr);
2619 tipc_node_unlock(node); 2550 tipc_node_unlock(node);
2620 read_unlock_bh(&tipc_net_lock);
2621 return tipc_cfg_reply_none(); 2551 return tipc_cfg_reply_none();
2622} 2552}
2623 2553
@@ -2650,18 +2580,15 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
2650 if (!strcmp(name, tipc_bclink_name)) 2580 if (!strcmp(name, tipc_bclink_name))
2651 return tipc_bclink_stats(buf, buf_size); 2581 return tipc_bclink_stats(buf, buf_size);
2652 2582
2653 read_lock_bh(&tipc_net_lock);
2654 node = tipc_link_find_owner(name, &bearer_id); 2583 node = tipc_link_find_owner(name, &bearer_id);
2655 if (!node) { 2584 if (!node)
2656 read_unlock_bh(&tipc_net_lock);
2657 return 0; 2585 return 0;
2658 } 2586
2659 tipc_node_lock(node); 2587 tipc_node_lock(node);
2660 2588
2661 l = node->links[bearer_id]; 2589 l = node->links[bearer_id];
2662 if (!l) { 2590 if (!l) {
2663 tipc_node_unlock(node); 2591 tipc_node_unlock(node);
2664 read_unlock_bh(&tipc_net_lock);
2665 return 0; 2592 return 0;
2666 } 2593 }
2667 2594
@@ -2727,7 +2654,6 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
2727 (s->accu_queue_sz / s->queue_sz_counts) : 0); 2654 (s->accu_queue_sz / s->queue_sz_counts) : 0);
2728 2655
2729 tipc_node_unlock(node); 2656 tipc_node_unlock(node);
2730 read_unlock_bh(&tipc_net_lock);
2731 return ret; 2657 return ret;
2732} 2658}
2733 2659
@@ -2778,7 +2704,6 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
2778 if (dest == tipc_own_addr) 2704 if (dest == tipc_own_addr)
2779 return MAX_MSG_SIZE; 2705 return MAX_MSG_SIZE;
2780 2706
2781 read_lock_bh(&tipc_net_lock);
2782 n_ptr = tipc_node_find(dest); 2707 n_ptr = tipc_node_find(dest);
2783 if (n_ptr) { 2708 if (n_ptr) {
2784 tipc_node_lock(n_ptr); 2709 tipc_node_lock(n_ptr);
@@ -2787,13 +2712,18 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
2787 res = l_ptr->max_pkt; 2712 res = l_ptr->max_pkt;
2788 tipc_node_unlock(n_ptr); 2713 tipc_node_unlock(n_ptr);
2789 } 2714 }
2790 read_unlock_bh(&tipc_net_lock);
2791 return res; 2715 return res;
2792} 2716}
2793 2717
2794static void link_print(struct tipc_link *l_ptr, const char *str) 2718static void link_print(struct tipc_link *l_ptr, const char *str)
2795{ 2719{
2796 pr_info("%s Link %x<%s>:", str, l_ptr->addr, l_ptr->b_ptr->name); 2720 struct tipc_bearer *b_ptr;
2721
2722 rcu_read_lock();
2723 b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
2724 if (b_ptr)
2725 pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
2726 rcu_read_unlock();
2797 2727
2798 if (link_working_unknown(l_ptr)) 2728 if (link_working_unknown(l_ptr))
2799 pr_cont(":WU\n"); 2729 pr_cont(":WU\n");
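A minimal sketch, not part of the commit, of the RCU lookup pattern that link_init_max_pkt() and link_print() above now use to reach the bearer through the stored bearer_id instead of a bearer pointer; the helper name is hypothetical.

static u32 link_bearer_mtu(struct tipc_link *l_ptr)
{
	struct tipc_bearer *b_ptr;
	u32 mtu = 0;

	rcu_read_lock();
	b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]);
	if (b_ptr)
		mtu = b_ptr->mtu;	/* copy out while the bearer is protected */
	rcu_read_unlock();
	return mtu;
}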
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 8c0b49b5b2ee..200d518b218e 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -40,11 +40,6 @@
40#include "msg.h" 40#include "msg.h"
41#include "node.h" 41#include "node.h"
42 42
43/* Link reassembly status codes
44 */
45#define LINK_REASM_ERROR -1
46#define LINK_REASM_COMPLETE 1
47
48/* Out-of-range value for link sequence numbers 43/* Out-of-range value for link sequence numbers
49 */ 44 */
50#define INVALID_LINK_SEQ 0x10000 45#define INVALID_LINK_SEQ 0x10000
@@ -107,7 +102,7 @@ struct tipc_stats {
107 * @checkpoint: reference point for triggering link continuity checking 102 * @checkpoint: reference point for triggering link continuity checking
108 * @peer_session: link session # being used by peer end of link 103 * @peer_session: link session # being used by peer end of link
109 * @peer_bearer_id: bearer id used by link's peer endpoint 104 * @peer_bearer_id: bearer id used by link's peer endpoint
110 * @b_ptr: pointer to bearer used by link 105 * @bearer_id: local bearer id used by link
111 * @tolerance: minimum link continuity loss needed to reset link [in ms] 106 * @tolerance: minimum link continuity loss needed to reset link [in ms]
112 * @continuity_interval: link continuity testing interval [in ms] 107 * @continuity_interval: link continuity testing interval [in ms]
113 * @abort_limit: # of unacknowledged continuity probes needed to reset link 108 * @abort_limit: # of unacknowledged continuity probes needed to reset link
@@ -116,6 +111,7 @@ struct tipc_stats {
116 * @proto_msg: template for control messages generated by link 111 * @proto_msg: template for control messages generated by link
117 * @pmsg: convenience pointer to "proto_msg" field 112 * @pmsg: convenience pointer to "proto_msg" field
118 * @priority: current link priority 113 * @priority: current link priority
114 * @net_plane: current link network plane ('A' through 'H')
119 * @queue_limit: outbound message queue congestion thresholds (indexed by user) 115 * @queue_limit: outbound message queue congestion thresholds (indexed by user)
120 * @exp_msg_count: # of tunnelled messages expected during link changeover 116 * @exp_msg_count: # of tunnelled messages expected during link changeover
121 * @reset_checkpoint: seq # of last acknowledged message at time of link reset 117 * @reset_checkpoint: seq # of last acknowledged message at time of link reset
@@ -139,8 +135,7 @@ struct tipc_stats {
139 * @next_out: ptr to first unsent outbound message in queue 135 * @next_out: ptr to first unsent outbound message in queue
140 * @waiting_ports: linked list of ports waiting for link congestion to abate 136 * @waiting_ports: linked list of ports waiting for link congestion to abate
141 * @long_msg_seq_no: next identifier to use for outbound fragmented messages 137 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
142 * @reasm_head: list head of partially reassembled inbound message fragments 138 * @reasm_buf: head of partially reassembled inbound message fragments
143 * @reasm_tail: last fragment received
144 * @stats: collects statistics regarding link activity 139 * @stats: collects statistics regarding link activity
145 */ 140 */
146struct tipc_link { 141struct tipc_link {
@@ -155,7 +150,7 @@ struct tipc_link {
155 u32 checkpoint; 150 u32 checkpoint;
156 u32 peer_session; 151 u32 peer_session;
157 u32 peer_bearer_id; 152 u32 peer_bearer_id;
158 struct tipc_bearer *b_ptr; 153 u32 bearer_id;
159 u32 tolerance; 154 u32 tolerance;
160 u32 continuity_interval; 155 u32 continuity_interval;
161 u32 abort_limit; 156 u32 abort_limit;
@@ -167,6 +162,7 @@ struct tipc_link {
167 } proto_msg; 162 } proto_msg;
168 struct tipc_msg *pmsg; 163 struct tipc_msg *pmsg;
169 u32 priority; 164 u32 priority;
165 char net_plane;
170 u32 queue_limit[15]; /* queue_limit[0]==window limit */ 166 u32 queue_limit[15]; /* queue_limit[0]==window limit */
171 167
172 /* Changeover */ 168 /* Changeover */
@@ -202,8 +198,7 @@ struct tipc_link {
202 198
203 /* Fragmentation/reassembly */ 199 /* Fragmentation/reassembly */
204 u32 long_msg_seq_no; 200 u32 long_msg_seq_no;
205 struct sk_buff *reasm_head; 201 struct sk_buff *reasm_buf;
206 struct sk_buff *reasm_tail;
207 202
208 /* Statistics */ 203 /* Statistics */
209 struct tipc_stats stats; 204 struct tipc_stats stats;
@@ -228,6 +223,7 @@ struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area,
228 int req_tlv_space); 223 int req_tlv_space);
229struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, 224struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area,
230 int req_tlv_space); 225 int req_tlv_space);
226void tipc_link_reset_all(struct tipc_node *node);
231void tipc_link_reset(struct tipc_link *l_ptr); 227void tipc_link_reset(struct tipc_link *l_ptr);
232void tipc_link_reset_list(unsigned int bearer_id); 228void tipc_link_reset_list(unsigned int bearer_id);
233int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector); 229int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector);
@@ -239,9 +235,6 @@ int tipc_link_iovec_xmit_fast(struct tipc_port *sender,
239 struct iovec const *msg_sect, 235 struct iovec const *msg_sect,
240 unsigned int len, u32 destnode); 236 unsigned int len, u32 destnode);
241void tipc_link_bundle_rcv(struct sk_buff *buf); 237void tipc_link_bundle_rcv(struct sk_buff *buf);
242int tipc_link_frag_rcv(struct sk_buff **reasm_head,
243 struct sk_buff **reasm_tail,
244 struct sk_buff **fbuf);
245void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob, 238void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
246 u32 gap, u32 tolerance, u32 priority, u32 acked_mtu); 239 u32 gap, u32 tolerance, u32 priority, u32 acked_mtu);
247void tipc_link_push_queue(struct tipc_link *l_ptr); 240void tipc_link_push_queue(struct tipc_link *l_ptr);
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index e525f8ce1dee..8be6e94a1ca9 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/msg.c: TIPC message header routines 2 * net/tipc/msg.c: TIPC message header routines
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, 2014, Ericsson AB
5 * Copyright (c) 2005, 2010-2011, Wind River Systems 5 * Copyright (c) 2005, 2010-2011, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -99,3 +99,56 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
99 } 99 }
100 return dsz; 100 return dsz;
101} 101}
102
103/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
104 * Let first buffer become head buffer
105 * Returns 1 and sets *buf to headbuf if chain is complete, otherwise 0
106 * Leaves headbuf pointer at NULL if failure
107 */
108int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
109{
110 struct sk_buff *head = *headbuf;
111 struct sk_buff *frag = *buf;
112 struct sk_buff *tail;
113 struct tipc_msg *msg = buf_msg(frag);
114 u32 fragid = msg_type(msg);
115 bool headstolen;
116 int delta;
117
118 skb_pull(frag, msg_hdr_sz(msg));
119
120 if (fragid == FIRST_FRAGMENT) {
121 if (head || skb_unclone(frag, GFP_ATOMIC))
122 goto out_free;
123 head = *headbuf = frag;
124 skb_frag_list_init(head);
125 return 0;
126 }
127 if (!head)
128 goto out_free;
129 tail = TIPC_SKB_CB(head)->tail;
130 if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
131 kfree_skb_partial(frag, headstolen);
132 } else {
133 if (!skb_has_frag_list(head))
134 skb_shinfo(head)->frag_list = frag;
135 else
136 tail->next = frag;
137 head->truesize += frag->truesize;
138 head->data_len += frag->len;
139 head->len += frag->len;
140 TIPC_SKB_CB(head)->tail = frag;
141 }
142 if (fragid == LAST_FRAGMENT) {
143 *buf = head;
144 TIPC_SKB_CB(head)->tail = NULL;
145 *headbuf = NULL;
146 return 1;
147 }
148 *buf = NULL;
149 return 0;
150out_free:
151 pr_warn_ratelimited("Unable to build fragment list\n");
152 kfree_skb(*buf);
153 return 0;
154}
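The new tipc_buf_append() above collapses the old head/tail reassembly pointers into a single head buffer with a simple calling contract: return 0 while more fragments are expected, return 1 with *buf pointing at the completed message, and return 0 with the partial head dropped on error. Below is a standalone sketch of that contract, using plain byte buffers instead of sk_buffs; the fragment types and sizes are made up for the example.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum frag_type { FIRST_FRAG, MIDDLE_FRAG, LAST_FRAG };

struct frag { enum frag_type type; const char *data; };

struct head { char buf[256]; size_t len; };

/* Model of the tipc_buf_append() contract: returns 1 when the message is
 * complete, 0 otherwise; on error the partially built head is dropped. */
static int buf_append(struct head **headbuf, const struct frag *frag,
                      struct head **out)
{
    struct head *head = *headbuf;
    size_t n = strlen(frag->data);

    if (frag->type == FIRST_FRAG) {
        if (head)                       /* unexpected restart: drop state */
            goto out_free;
        head = *headbuf = calloc(1, sizeof(*head));
    } else if (!head) {
        goto out_free;                  /* fragment without a first fragment */
    }

    if (head->len + n >= sizeof(head->buf))
        goto out_free;
    memcpy(head->buf + head->len, frag->data, n);
    head->len += n;

    if (frag->type == LAST_FRAG) {
        *out = head;
        *headbuf = NULL;
        return 1;
    }
    return 0;

out_free:
    free(*headbuf);
    *headbuf = NULL;
    return 0;
}

int main(void)
{
    struct frag frags[] = {
        { FIRST_FRAG, "Hello, " }, { MIDDLE_FRAG, "reassembled " },
        { LAST_FRAG, "world" },
    };
    struct head *head = NULL, *done = NULL;

    for (size_t i = 0; i < sizeof(frags) / sizeof(frags[0]); i++)
        if (buf_append(&head, &frags[i], &done))
            printf("complete: %.*s\n", (int)done->len, done->buf);
    free(done);
    return 0;
}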
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 76d1269b9443..503511903d1d 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/msg.h: Include file for TIPC message header routines 2 * net/tipc/msg.h: Include file for TIPC message header routines
3 * 3 *
4 * Copyright (c) 2000-2007, Ericsson AB 4 * Copyright (c) 2000-2007, 2014, Ericsson AB
5 * Copyright (c) 2005-2008, 2010-2011, Wind River Systems 5 * Copyright (c) 2005-2008, 2010-2011, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -711,4 +711,7 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
711 u32 destnode); 711 u32 destnode);
712int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect, 712int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
713 unsigned int len, int max_size, struct sk_buff **buf); 713 unsigned int len, int max_size, struct sk_buff **buf);
714
715int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
716
714#endif 717#endif
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index aff8041dc157..8ce730984aa1 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -38,34 +38,6 @@
38#include "link.h" 38#include "link.h"
39#include "name_distr.h" 39#include "name_distr.h"
40 40
41#define ITEM_SIZE sizeof(struct distr_item)
42
43/**
44 * struct distr_item - publication info distributed to other nodes
45 * @type: name sequence type
46 * @lower: name sequence lower bound
47 * @upper: name sequence upper bound
48 * @ref: publishing port reference
49 * @key: publication key
50 *
51 * ===> All fields are stored in network byte order. <===
52 *
53 * First 3 fields identify (name or) name sequence being published.
54 * Reference field uniquely identifies port that published name sequence.
55 * Key field uniquely identifies publication, in the event a port has
56 * multiple publications of the same name sequence.
57 *
58 * Note: There is no field that identifies the publishing node because it is
59 * the same for all items contained within a publication message.
60 */
61struct distr_item {
62 __be32 type;
63 __be32 lower;
64 __be32 upper;
65 __be32 ref;
66 __be32 key;
67};
68
69/** 41/**
70 * struct publ_list - list of publications made by this node 42 * struct publ_list - list of publications made by this node
71 * @list: circular list of publications 43 * @list: circular list of publications
@@ -127,7 +99,7 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
127 return buf; 99 return buf;
128} 100}
129 101
130static void named_cluster_distribute(struct sk_buff *buf) 102void named_cluster_distribute(struct sk_buff *buf)
131{ 103{
132 struct sk_buff *buf_copy; 104 struct sk_buff *buf_copy;
133 struct tipc_node *n_ptr; 105 struct tipc_node *n_ptr;
@@ -135,18 +107,18 @@ static void named_cluster_distribute(struct sk_buff *buf)
135 107
136 rcu_read_lock(); 108 rcu_read_lock();
137 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { 109 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
138 spin_lock_bh(&n_ptr->lock); 110 tipc_node_lock(n_ptr);
139 l_ptr = n_ptr->active_links[n_ptr->addr & 1]; 111 l_ptr = n_ptr->active_links[n_ptr->addr & 1];
140 if (l_ptr) { 112 if (l_ptr) {
141 buf_copy = skb_copy(buf, GFP_ATOMIC); 113 buf_copy = skb_copy(buf, GFP_ATOMIC);
142 if (!buf_copy) { 114 if (!buf_copy) {
143 spin_unlock_bh(&n_ptr->lock); 115 tipc_node_unlock(n_ptr);
144 break; 116 break;
145 } 117 }
146 msg_set_destnode(buf_msg(buf_copy), n_ptr->addr); 118 msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
147 __tipc_link_xmit(l_ptr, buf_copy); 119 __tipc_link_xmit(l_ptr, buf_copy);
148 } 120 }
149 spin_unlock_bh(&n_ptr->lock); 121 tipc_node_unlock(n_ptr);
150 } 122 }
151 rcu_read_unlock(); 123 rcu_read_unlock();
152 124
@@ -156,7 +128,7 @@ static void named_cluster_distribute(struct sk_buff *buf)
156/** 128/**
157 * tipc_named_publish - tell other nodes about a new publication by this node 129 * tipc_named_publish - tell other nodes about a new publication by this node
158 */ 130 */
159void tipc_named_publish(struct publication *publ) 131struct sk_buff *tipc_named_publish(struct publication *publ)
160{ 132{
161 struct sk_buff *buf; 133 struct sk_buff *buf;
162 struct distr_item *item; 134 struct distr_item *item;
@@ -165,23 +137,23 @@ void tipc_named_publish(struct publication *publ)
165 publ_lists[publ->scope]->size++; 137 publ_lists[publ->scope]->size++;
166 138
167 if (publ->scope == TIPC_NODE_SCOPE) 139 if (publ->scope == TIPC_NODE_SCOPE)
168 return; 140 return NULL;
169 141
170 buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0); 142 buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
171 if (!buf) { 143 if (!buf) {
172 pr_warn("Publication distribution failure\n"); 144 pr_warn("Publication distribution failure\n");
173 return; 145 return NULL;
174 } 146 }
175 147
176 item = (struct distr_item *)msg_data(buf_msg(buf)); 148 item = (struct distr_item *)msg_data(buf_msg(buf));
177 publ_to_item(item, publ); 149 publ_to_item(item, publ);
178 named_cluster_distribute(buf); 150 return buf;
179} 151}
180 152
181/** 153/**
182 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node 154 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
183 */ 155 */
184void tipc_named_withdraw(struct publication *publ) 156struct sk_buff *tipc_named_withdraw(struct publication *publ)
185{ 157{
186 struct sk_buff *buf; 158 struct sk_buff *buf;
187 struct distr_item *item; 159 struct distr_item *item;
@@ -190,17 +162,17 @@ void tipc_named_withdraw(struct publication *publ)
190 publ_lists[publ->scope]->size--; 162 publ_lists[publ->scope]->size--;
191 163
192 if (publ->scope == TIPC_NODE_SCOPE) 164 if (publ->scope == TIPC_NODE_SCOPE)
193 return; 165 return NULL;
194 166
195 buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0); 167 buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
196 if (!buf) { 168 if (!buf) {
197 pr_warn("Withdrawal distribution failure\n"); 169 pr_warn("Withdrawal distribution failure\n");
198 return; 170 return NULL;
199 } 171 }
200 172
201 item = (struct distr_item *)msg_data(buf_msg(buf)); 173 item = (struct distr_item *)msg_data(buf_msg(buf));
202 publ_to_item(item, publ); 174 publ_to_item(item, publ);
203 named_cluster_distribute(buf); 175 return buf;
204} 176}
205 177
206/* 178/*
@@ -239,31 +211,9 @@ static void named_distribute(struct list_head *message_list, u32 node,
239/** 211/**
240 * tipc_named_node_up - tell specified node about all publications by this node 212 * tipc_named_node_up - tell specified node about all publications by this node
241 */ 213 */
242void tipc_named_node_up(unsigned long nodearg) 214void tipc_named_node_up(u32 max_item_buf, u32 node)
243{ 215{
244 struct tipc_node *n_ptr; 216 LIST_HEAD(message_list);
245 struct tipc_link *l_ptr;
246 struct list_head message_list;
247 u32 node = (u32)nodearg;
248 u32 max_item_buf = 0;
249
250 /* compute maximum amount of publication data to send per message */
251 read_lock_bh(&tipc_net_lock);
252 n_ptr = tipc_node_find(node);
253 if (n_ptr) {
254 tipc_node_lock(n_ptr);
255 l_ptr = n_ptr->active_links[0];
256 if (l_ptr)
257 max_item_buf = ((l_ptr->max_pkt - INT_H_SIZE) /
258 ITEM_SIZE) * ITEM_SIZE;
259 tipc_node_unlock(n_ptr);
260 }
261 read_unlock_bh(&tipc_net_lock);
262 if (!max_item_buf)
263 return;
264
265 /* create list of publication messages, then send them as a unit */
266 INIT_LIST_HEAD(&message_list);
267 217
268 read_lock_bh(&tipc_nametbl_lock); 218 read_lock_bh(&tipc_nametbl_lock);
269 named_distribute(&message_list, node, &publ_cluster, max_item_buf); 219 named_distribute(&message_list, node, &publ_cluster, max_item_buf);
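tipc_named_node_up() no longer works out how much publication data fits into one message; its caller now passes max_item_buf, i.e. the link MTU minus the internal header, rounded down to a whole number of distr_item entries (the computation reappears in tipc_node_unlock() in node.c further down in this diff). A quick worked example of that formula, assuming an illustrative max_pkt of 1500 bytes and the customary 40-byte INT_H_SIZE:

#include <stdio.h>

#define INT_H_SIZE 40                 /* TIPC internal header size, for illustration */
#define ITEM_SIZE  20                 /* sizeof(struct distr_item): 5 x __be32 */

int main(void)
{
    unsigned int max_pkt = 1500;      /* hypothetical link MTU */
    unsigned int max_item_buf = ((max_pkt - INT_H_SIZE) / ITEM_SIZE) * ITEM_SIZE;

    printf("payload budget: %u bytes = %u publication items per message\n",
           max_item_buf, max_item_buf / ITEM_SIZE);   /* 1460 bytes, 73 items */
    return 0;
}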
diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h
index 9b312ccfd43e..b2eed4ec1526 100644
--- a/net/tipc/name_distr.h
+++ b/net/tipc/name_distr.h
@@ -39,9 +39,38 @@
39 39
40#include "name_table.h" 40#include "name_table.h"
41 41
42void tipc_named_publish(struct publication *publ); 42#define ITEM_SIZE sizeof(struct distr_item)
43void tipc_named_withdraw(struct publication *publ); 43
44void tipc_named_node_up(unsigned long node); 44/**
45 * struct distr_item - publication info distributed to other nodes
46 * @type: name sequence type
47 * @lower: name sequence lower bound
48 * @upper: name sequence upper bound
49 * @ref: publishing port reference
50 * @key: publication key
51 *
52 * ===> All fields are stored in network byte order. <===
53 *
54 * First 3 fields identify (name or) name sequence being published.
55 * Reference field uniquely identifies port that published name sequence.
56 * Key field uniquely identifies publication, in the event a port has
57 * multiple publications of the same name sequence.
58 *
59 * Note: There is no field that identifies the publishing node because it is
60 * the same for all items contained within a publication message.
61 */
62struct distr_item {
63 __be32 type;
64 __be32 lower;
65 __be32 upper;
66 __be32 ref;
67 __be32 key;
68};
69
70struct sk_buff *tipc_named_publish(struct publication *publ);
71struct sk_buff *tipc_named_withdraw(struct publication *publ);
72void named_cluster_distribute(struct sk_buff *buf);
73void tipc_named_node_up(u32 max_item_buf, u32 node);
45void tipc_named_rcv(struct sk_buff *buf); 74void tipc_named_rcv(struct sk_buff *buf);
46void tipc_named_reinit(void); 75void tipc_named_reinit(void);
47 76
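struct distr_item moves into name_distr.h so that other files can build publication messages; every field stays in network byte order. The sketch below shows the host-to-network conversion this implies when filling one item. It mirrors what the existing publ_to_item() helper does, but the local struct copy and the sample values are only illustrative.

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Same layout as struct distr_item: five 32-bit fields, network byte order. */
struct distr_item {
    uint32_t type;
    uint32_t lower;
    uint32_t upper;
    uint32_t ref;
    uint32_t key;
};

int main(void)
{
    /* Hypothetical publication: name type 1000, instance range 10..20. */
    struct distr_item item = {
        .type  = htonl(1000),
        .lower = htonl(10),
        .upper = htonl(20),
        .ref   = htonl(0x12345678),   /* publishing port reference */
        .key   = htonl(42),           /* disambiguates duplicate publications */
    };

    printf("item size on the wire: %zu bytes\n", sizeof(item));   /* 20 */
    printf("type field as sent: 0x%08x\n", item.type);
    return 0;
}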
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 042e8e3cabc0..9d7d37d95187 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -664,6 +664,7 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
664 u32 scope, u32 port_ref, u32 key) 664 u32 scope, u32 port_ref, u32 key)
665{ 665{
666 struct publication *publ; 666 struct publication *publ;
667 struct sk_buff *buf = NULL;
667 668
668 if (table.local_publ_count >= TIPC_MAX_PUBLICATIONS) { 669 if (table.local_publ_count >= TIPC_MAX_PUBLICATIONS) {
669 pr_warn("Publication failed, local publication limit reached (%u)\n", 670 pr_warn("Publication failed, local publication limit reached (%u)\n",
@@ -676,9 +677,12 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
676 tipc_own_addr, port_ref, key); 677 tipc_own_addr, port_ref, key);
677 if (likely(publ)) { 678 if (likely(publ)) {
678 table.local_publ_count++; 679 table.local_publ_count++;
679 tipc_named_publish(publ); 680 buf = tipc_named_publish(publ);
680 } 681 }
681 write_unlock_bh(&tipc_nametbl_lock); 682 write_unlock_bh(&tipc_nametbl_lock);
683
684 if (buf)
685 named_cluster_distribute(buf);
682 return publ; 686 return publ;
683} 687}
684 688
@@ -688,15 +692,19 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
688int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key) 692int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
689{ 693{
690 struct publication *publ; 694 struct publication *publ;
695 struct sk_buff *buf;
691 696
692 write_lock_bh(&tipc_nametbl_lock); 697 write_lock_bh(&tipc_nametbl_lock);
693 publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key); 698 publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
694 if (likely(publ)) { 699 if (likely(publ)) {
695 table.local_publ_count--; 700 table.local_publ_count--;
696 tipc_named_withdraw(publ); 701 buf = tipc_named_withdraw(publ);
697 write_unlock_bh(&tipc_nametbl_lock); 702 write_unlock_bh(&tipc_nametbl_lock);
698 list_del_init(&publ->pport_list); 703 list_del_init(&publ->pport_list);
699 kfree(publ); 704 kfree(publ);
705
706 if (buf)
707 named_cluster_distribute(buf);
700 return 1; 708 return 1;
701 } 709 }
702 write_unlock_bh(&tipc_nametbl_lock); 710 write_unlock_bh(&tipc_nametbl_lock);
@@ -961,6 +969,7 @@ static void tipc_purge_publications(struct name_seq *seq)
961 list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) { 969 list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
962 tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node, 970 tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node,
963 publ->ref, publ->key); 971 publ->ref, publ->key);
972 kfree(publ);
964 } 973 }
965} 974}
966 975
@@ -982,7 +991,6 @@ void tipc_nametbl_stop(void)
982 hlist_for_each_entry_safe(seq, safe, seq_head, ns_list) { 991 hlist_for_each_entry_safe(seq, safe, seq_head, ns_list) {
983 tipc_purge_publications(seq); 992 tipc_purge_publications(seq);
984 } 993 }
985 continue;
986 } 994 }
987 kfree(table.types); 995 kfree(table.types);
988 table.types = NULL; 996 table.types = NULL;
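The name-table changes above follow a pattern used throughout this patch: the publication or withdrawal buffer is built while tipc_nametbl_lock is held, and named_cluster_distribute() runs only after the lock has been released, so transmission never happens under the name-table lock. A condensed userspace model of that ordering, with a pthread mutex standing in for the kernel rwlock; the helper names are placeholders, not the real API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t nametbl_lock = PTHREAD_MUTEX_INITIALIZER;

struct msg { char body[64]; };

/* Stand-in for tipc_named_publish(): build the message under the lock. */
static struct msg *table_publish(const char *name)
{
    struct msg *m = malloc(sizeof(*m));

    snprintf(m->body, sizeof(m->body), "PUBLICATION %s", name);
    return m;
}

/* Stand-in for named_cluster_distribute(): must run without the lock held. */
static void cluster_distribute(struct msg *m)
{
    printf("sending: %s\n", m->body);
    free(m);
}

int main(void)
{
    struct msg *buf;

    pthread_mutex_lock(&nametbl_lock);
    buf = table_publish("my-service");      /* update table + build buffer */
    pthread_mutex_unlock(&nametbl_lock);

    if (buf)
        cluster_distribute(buf);            /* transmit outside the lock */
    return 0;
}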
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 4c564eb69e1a..f64375e7f99f 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -39,45 +39,41 @@
39#include "name_distr.h" 39#include "name_distr.h"
40#include "subscr.h" 40#include "subscr.h"
41#include "port.h" 41#include "port.h"
42#include "socket.h"
42#include "node.h" 43#include "node.h"
43#include "config.h" 44#include "config.h"
44 45
45/* 46/*
46 * The TIPC locking policy is designed to ensure a very fine locking 47 * The TIPC locking policy is designed to ensure a very fine locking
47 * granularity, permitting complete parallel access to individual 48 * granularity, permitting complete parallel access to individual
48 * port and node/link instances. The code consists of three major 49 * port and node/link instances. The code consists of four major
49 * locking domains, each protected with their own disjunct set of locks. 50 * locking domains, each protected with their own disjunct set of locks.
50 * 51 *
51 * 1: The routing hierarchy. 52 * 1: The bearer level.
52 * Comprises the structures 'zone', 'cluster', 'node', 'link' 53 * RTNL lock is used to serialize the process of configuring bearer
53 * and 'bearer'. The whole hierarchy is protected by a big 54 * on update side, and RCU lock is applied on read side to make
54 * read/write lock, tipc_net_lock, to enssure that nothing is added 55 * bearer instance valid on both paths of message transmission and
55 * or removed while code is accessing any of these structures. 56 * reception.
56 * This layer must not be called from the two others while they
57 * hold any of their own locks.
58 * Neither must it itself do any upcalls to the other two before
59 * it has released tipc_net_lock and other protective locks.
60 * 57 *
61 * Within the tipc_net_lock domain there are two sub-domains;'node' and 58 * 2: The node and link level.
62 * 'bearer', where local write operations are permitted, 59 * All node instances are saved into two tipc_node_list and node_htable
63 * provided that those are protected by individual spin_locks 60 * lists. The two lists are protected by node_list_lock on write side,
 64 * per instance. Code holding tipc_net_lock(read) and a node spin_lock 61 * and they are guarded by RCU on the read side. A node
 65 * is permitted to poke around in both the node itself and its 62 * instance is destroyed only when the TIPC module is removed, and we
 66 * subordinate links. I.e, it can update link counters and queues, 63 * can confirm that no user is still accessing the node at that
 67 * change link state, send protocol messages, and alter the 64 * moment. Therefore, except when iterating the two lists under RCU
 68 * "active_links" array in the node; but it can _not_ remove a link 65 * protection, there is no need to hold RCU when accessing a node
 69 * or a node from the overall structure. 66 * instance elsewhere.
70 * Correspondingly, individual bearers may change status within a
71 * tipc_net_lock(read), protected by an individual spin_lock ber bearer
72 * instance, but it needs tipc_net_lock(write) to remove/add any bearers.
73 * 67 *
68 * In addition, all members in node structure including link instances
69 * are protected by node spin lock.
74 * 70 *
75 * 2: The transport level of the protocol. 71 * 3: The transport level of the protocol.
76 * This consists of the structures port, (and its user level 72 * This consists of the structures port, (and its user level
77 * representations, such as user_port and tipc_sock), reference and 73 * representations, such as user_port and tipc_sock), reference and
78 * tipc_user (port.c, reg.c, socket.c). 74 * tipc_user (port.c, reg.c, socket.c).
79 * 75 *
80 * This layer has four different locks: 76 * This layer has four different locks:
81 * - The tipc_port spin_lock. This is protecting each port instance 77 * - The tipc_port spin_lock. This is protecting each port instance
82 * from parallel data access and removal. Since we can not place 78 * from parallel data access and removal. Since we can not place
83 * this lock in the port itself, it has been placed in the 79 * this lock in the port itself, it has been placed in the
@@ -96,7 +92,7 @@
96 * There are two such lists; 'port_list', which is used for management, 92 * There are two such lists; 'port_list', which is used for management,
97 * and 'wait_list', which is used to queue ports during congestion. 93 * and 'wait_list', which is used to queue ports during congestion.
98 * 94 *
99 * 3: The name table (name_table.c, name_distr.c, subscription.c) 95 * 4: The name table (name_table.c, name_distr.c, subscription.c)
100 * - There is one big read/write-lock (tipc_nametbl_lock) protecting the 96 * - There is one big read/write-lock (tipc_nametbl_lock) protecting the
101 * overall name table structure. Nothing must be added/removed to 97 * overall name table structure. Nothing must be added/removed to
102 * this structure without holding write access to it. 98 * this structure without holding write access to it.
@@ -108,8 +104,6 @@
108 * - A local spin_lock protecting the queue of subscriber events. 104 * - A local spin_lock protecting the queue of subscriber events.
109*/ 105*/
110 106
111DEFINE_RWLOCK(tipc_net_lock);
112
113static void net_route_named_msg(struct sk_buff *buf) 107static void net_route_named_msg(struct sk_buff *buf)
114{ 108{
115 struct tipc_msg *msg = buf_msg(buf); 109 struct tipc_msg *msg = buf_msg(buf);
@@ -148,7 +142,7 @@ void tipc_net_route_msg(struct sk_buff *buf)
148 if (msg_mcast(msg)) 142 if (msg_mcast(msg))
149 tipc_port_mcast_rcv(buf, NULL); 143 tipc_port_mcast_rcv(buf, NULL);
150 else if (msg_destport(msg)) 144 else if (msg_destport(msg))
151 tipc_port_rcv(buf); 145 tipc_sk_rcv(buf);
152 else 146 else
153 net_route_named_msg(buf); 147 net_route_named_msg(buf);
154 return; 148 return;
@@ -171,22 +165,25 @@ void tipc_net_route_msg(struct sk_buff *buf)
171 tipc_link_xmit(buf, dnode, msg_link_selector(msg)); 165 tipc_link_xmit(buf, dnode, msg_link_selector(msg));
172} 166}
173 167
174void tipc_net_start(u32 addr) 168int tipc_net_start(u32 addr)
175{ 169{
176 char addr_string[16]; 170 char addr_string[16];
171 int res;
177 172
178 write_lock_bh(&tipc_net_lock);
179 tipc_own_addr = addr; 173 tipc_own_addr = addr;
180 tipc_named_reinit(); 174 tipc_named_reinit();
181 tipc_port_reinit(); 175 tipc_port_reinit();
182 tipc_bclink_init(); 176 res = tipc_bclink_init();
183 write_unlock_bh(&tipc_net_lock); 177 if (res)
178 return res;
184 179
185 tipc_nametbl_publish(TIPC_CFG_SRV, tipc_own_addr, tipc_own_addr, 180 tipc_nametbl_publish(TIPC_CFG_SRV, tipc_own_addr, tipc_own_addr,
186 TIPC_ZONE_SCOPE, 0, tipc_own_addr); 181 TIPC_ZONE_SCOPE, 0, tipc_own_addr);
182
187 pr_info("Started in network mode\n"); 183 pr_info("Started in network mode\n");
188 pr_info("Own node address %s, network identity %u\n", 184 pr_info("Own node address %s, network identity %u\n",
189 tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id); 185 tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
186 return 0;
190} 187}
191 188
192void tipc_net_stop(void) 189void tipc_net_stop(void)
@@ -195,11 +192,11 @@ void tipc_net_stop(void)
195 return; 192 return;
196 193
197 tipc_nametbl_withdraw(TIPC_CFG_SRV, tipc_own_addr, 0, tipc_own_addr); 194 tipc_nametbl_withdraw(TIPC_CFG_SRV, tipc_own_addr, 0, tipc_own_addr);
198 write_lock_bh(&tipc_net_lock); 195 rtnl_lock();
199 tipc_bearer_stop(); 196 tipc_bearer_stop();
200 tipc_bclink_stop(); 197 tipc_bclink_stop();
201 tipc_node_stop(); 198 tipc_node_stop();
202 write_unlock_bh(&tipc_net_lock); 199 rtnl_unlock();
203 200
204 pr_info("Left network mode\n"); 201 pr_info("Left network mode\n");
205} 202}
diff --git a/net/tipc/net.h b/net/tipc/net.h
index 079daadb3f72..c6c2b46f7c28 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -37,11 +37,9 @@
37#ifndef _TIPC_NET_H 37#ifndef _TIPC_NET_H
38#define _TIPC_NET_H 38#define _TIPC_NET_H
39 39
40extern rwlock_t tipc_net_lock;
41
42void tipc_net_route_msg(struct sk_buff *buf); 40void tipc_net_route_msg(struct sk_buff *buf);
43 41
44void tipc_net_start(u32 addr); 42int tipc_net_start(u32 addr);
45void tipc_net_stop(void); 43void tipc_net_stop(void);
46 44
47#endif 45#endif
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 1d3a4999a70f..5b44c3041be4 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -108,7 +108,7 @@ struct tipc_node *tipc_node_create(u32 addr)
108 break; 108 break;
109 } 109 }
110 list_add_tail_rcu(&n_ptr->list, &temp_node->list); 110 list_add_tail_rcu(&n_ptr->list, &temp_node->list);
111 n_ptr->block_setup = WAIT_PEER_DOWN; 111 n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN;
112 n_ptr->signature = INVALID_NODE_SIG; 112 n_ptr->signature = INVALID_NODE_SIG;
113 113
114 tipc_num_nodes++; 114 tipc_num_nodes++;
@@ -144,11 +144,13 @@ void tipc_node_stop(void)
144void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr) 144void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
145{ 145{
146 struct tipc_link **active = &n_ptr->active_links[0]; 146 struct tipc_link **active = &n_ptr->active_links[0];
147 u32 addr = n_ptr->addr;
147 148
148 n_ptr->working_links++; 149 n_ptr->working_links++;
149 150 tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr, TIPC_NODE_SCOPE,
151 l_ptr->bearer_id, addr);
150 pr_info("Established link <%s> on network plane %c\n", 152 pr_info("Established link <%s> on network plane %c\n",
151 l_ptr->name, l_ptr->b_ptr->net_plane); 153 l_ptr->name, l_ptr->net_plane);
152 154
153 if (!active[0]) { 155 if (!active[0]) {
154 active[0] = active[1] = l_ptr; 156 active[0] = active[1] = l_ptr;
@@ -203,16 +205,18 @@ static void node_select_active_links(struct tipc_node *n_ptr)
203void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr) 205void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
204{ 206{
205 struct tipc_link **active; 207 struct tipc_link **active;
208 u32 addr = n_ptr->addr;
206 209
207 n_ptr->working_links--; 210 n_ptr->working_links--;
211 tipc_nametbl_withdraw(TIPC_LINK_STATE, addr, l_ptr->bearer_id, addr);
208 212
209 if (!tipc_link_is_active(l_ptr)) { 213 if (!tipc_link_is_active(l_ptr)) {
210 pr_info("Lost standby link <%s> on network plane %c\n", 214 pr_info("Lost standby link <%s> on network plane %c\n",
211 l_ptr->name, l_ptr->b_ptr->net_plane); 215 l_ptr->name, l_ptr->net_plane);
212 return; 216 return;
213 } 217 }
214 pr_info("Lost link <%s> on network plane %c\n", 218 pr_info("Lost link <%s> on network plane %c\n",
215 l_ptr->name, l_ptr->b_ptr->net_plane); 219 l_ptr->name, l_ptr->net_plane);
216 220
217 active = &n_ptr->active_links[0]; 221 active = &n_ptr->active_links[0];
218 if (active[0] == l_ptr) 222 if (active[0] == l_ptr)
@@ -239,7 +243,7 @@ int tipc_node_is_up(struct tipc_node *n_ptr)
239 243
240void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr) 244void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
241{ 245{
242 n_ptr->links[l_ptr->b_ptr->identity] = l_ptr; 246 n_ptr->links[l_ptr->bearer_id] = l_ptr;
243 spin_lock_bh(&node_list_lock); 247 spin_lock_bh(&node_list_lock);
244 tipc_num_links++; 248 tipc_num_links++;
245 spin_unlock_bh(&node_list_lock); 249 spin_unlock_bh(&node_list_lock);
@@ -263,26 +267,12 @@ void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
263 267
264static void node_established_contact(struct tipc_node *n_ptr) 268static void node_established_contact(struct tipc_node *n_ptr)
265{ 269{
266 tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr); 270 n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP;
267 n_ptr->bclink.oos_state = 0; 271 n_ptr->bclink.oos_state = 0;
268 n_ptr->bclink.acked = tipc_bclink_get_last_sent(); 272 n_ptr->bclink.acked = tipc_bclink_get_last_sent();
269 tipc_bclink_add_node(n_ptr->addr); 273 tipc_bclink_add_node(n_ptr->addr);
270} 274}
271 275
272static void node_name_purge_complete(unsigned long node_addr)
273{
274 struct tipc_node *n_ptr;
275
276 read_lock_bh(&tipc_net_lock);
277 n_ptr = tipc_node_find(node_addr);
278 if (n_ptr) {
279 tipc_node_lock(n_ptr);
280 n_ptr->block_setup &= ~WAIT_NAMES_GONE;
281 tipc_node_unlock(n_ptr);
282 }
283 read_unlock_bh(&tipc_net_lock);
284}
285
286static void node_lost_contact(struct tipc_node *n_ptr) 276static void node_lost_contact(struct tipc_node *n_ptr)
287{ 277{
288 char addr_string[16]; 278 char addr_string[16];
@@ -296,10 +286,9 @@ static void node_lost_contact(struct tipc_node *n_ptr)
296 kfree_skb_list(n_ptr->bclink.deferred_head); 286 kfree_skb_list(n_ptr->bclink.deferred_head);
297 n_ptr->bclink.deferred_size = 0; 287 n_ptr->bclink.deferred_size = 0;
298 288
299 if (n_ptr->bclink.reasm_head) { 289 if (n_ptr->bclink.reasm_buf) {
300 kfree_skb(n_ptr->bclink.reasm_head); 290 kfree_skb(n_ptr->bclink.reasm_buf);
301 n_ptr->bclink.reasm_head = NULL; 291 n_ptr->bclink.reasm_buf = NULL;
302 n_ptr->bclink.reasm_tail = NULL;
303 } 292 }
304 293
305 tipc_bclink_remove_node(n_ptr->addr); 294 tipc_bclink_remove_node(n_ptr->addr);
@@ -318,12 +307,13 @@ static void node_lost_contact(struct tipc_node *n_ptr)
318 tipc_link_reset_fragments(l_ptr); 307 tipc_link_reset_fragments(l_ptr);
319 } 308 }
320 309
321 /* Notify subscribers */ 310 n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN;
322 tipc_nodesub_notify(n_ptr);
323 311
324 /* Prevent re-contact with node until cleanup is done */ 312 /* Notify subscribers and prevent re-contact with node until
325 n_ptr->block_setup = WAIT_PEER_DOWN | WAIT_NAMES_GONE; 313 * cleanup is done.
326 tipc_k_signal((Handler)node_name_purge_complete, n_ptr->addr); 314 */
315 n_ptr->action_flags |= TIPC_WAIT_PEER_LINKS_DOWN |
316 TIPC_NOTIFY_NODE_DOWN;
327} 317}
328 318
329struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space) 319struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
@@ -436,3 +426,63 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
436 rcu_read_unlock(); 426 rcu_read_unlock();
437 return buf; 427 return buf;
438} 428}
429
430/**
431 * tipc_node_get_linkname - get the name of a link
432 *
433 * @bearer_id: id of the bearer
434 * @node: peer node address
435 * @linkname: link name output buffer
436 *
437 * Returns 0 on success
438 */
439int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len)
440{
441 struct tipc_link *link;
442 struct tipc_node *node = tipc_node_find(addr);
443
444 if ((bearer_id >= MAX_BEARERS) || !node)
445 return -EINVAL;
446 tipc_node_lock(node);
447 link = node->links[bearer_id];
448 if (link) {
449 strncpy(linkname, link->name, len);
450 tipc_node_unlock(node);
451 return 0;
452 }
453 tipc_node_unlock(node);
454 return -EINVAL;
455}
456
457void tipc_node_unlock(struct tipc_node *node)
458{
459 LIST_HEAD(nsub_list);
460 struct tipc_link *link;
461 int pkt_sz = 0;
462 u32 addr = 0;
463
464 if (likely(!node->action_flags)) {
465 spin_unlock_bh(&node->lock);
466 return;
467 }
468
469 if (node->action_flags & TIPC_NOTIFY_NODE_DOWN) {
470 list_replace_init(&node->nsub, &nsub_list);
471 node->action_flags &= ~TIPC_NOTIFY_NODE_DOWN;
472 }
473 if (node->action_flags & TIPC_NOTIFY_NODE_UP) {
474 link = node->active_links[0];
475 node->action_flags &= ~TIPC_NOTIFY_NODE_UP;
476 if (link) {
477 pkt_sz = ((link->max_pkt - INT_H_SIZE) / ITEM_SIZE) *
478 ITEM_SIZE;
479 addr = node->addr;
480 }
481 }
482 spin_unlock_bh(&node->lock);
483
484 if (!list_empty(&nsub_list))
485 tipc_nodesub_notify(&nsub_list);
486 if (pkt_sz)
487 tipc_named_node_up(pkt_sz, addr);
488}
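node_established_contact() and node_lost_contact() now only set bits in action_flags, and the new tipc_node_unlock() snapshots the pending work, drops the node spinlock, and performs the notifications afterwards. Below is a simplified userspace model of that "record under the lock, act after the unlock" pattern; it sketches the shape of the code, not the kernel implementation.

#include <pthread.h>
#include <stdio.h>

enum {
    NOTIFY_NODE_DOWN = (1 << 0),
    NOTIFY_NODE_UP   = (1 << 1),
};

struct node {
    pthread_mutex_t lock;
    unsigned int action_flags;
    unsigned int addr;
};

static void notify_up(unsigned int addr)   { printf("node %u up\n", addr); }
static void notify_down(unsigned int addr) { printf("node %u down\n", addr); }

/* Model of tipc_node_unlock(): copy the pending actions, clear them,
 * release the lock, then run the potentially slow notifications. */
static void node_unlock(struct node *n)
{
    unsigned int flags = n->action_flags;
    unsigned int addr = n->addr;

    n->action_flags = 0;
    pthread_mutex_unlock(&n->lock);

    if (flags & NOTIFY_NODE_DOWN)
        notify_down(addr);
    if (flags & NOTIFY_NODE_UP)
        notify_up(addr);
}

int main(void)
{
    struct node n = { .lock = PTHREAD_MUTEX_INITIALIZER, .addr = 1001 };

    pthread_mutex_lock(&n.lock);
    n.action_flags |= NOTIFY_NODE_UP;     /* e.g. first link came up */
    node_unlock(&n);
    return 0;
}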
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 7cbb8cec1a93..9087063793f2 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -47,62 +47,73 @@
47 */ 47 */
48#define INVALID_NODE_SIG 0x10000 48#define INVALID_NODE_SIG 0x10000
49 49
50/* Flags used to block (re)establishment of contact with a neighboring node */ 50/* Flags used to take different actions according to flag type
51#define WAIT_PEER_DOWN 0x0001 /* wait to see that peer's links are down */ 51 * TIPC_WAIT_PEER_LINKS_DOWN: wait to see that peer's links are down
52#define WAIT_NAMES_GONE 0x0002 /* wait for peer's publications to be purged */ 52 * TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down
53#define WAIT_NODE_DOWN 0x0004 /* wait until peer node is declared down */ 53 * TIPC_NOTIFY_NODE_DOWN: notify node is down
54 * TIPC_NOTIFY_NODE_UP: notify node is up
55 */
56enum {
57 TIPC_WAIT_PEER_LINKS_DOWN = (1 << 1),
58 TIPC_WAIT_OWN_LINKS_DOWN = (1 << 2),
59 TIPC_NOTIFY_NODE_DOWN = (1 << 3),
60 TIPC_NOTIFY_NODE_UP = (1 << 4)
61};
62
63/**
64 * struct tipc_node_bclink - TIPC node bclink structure
65 * @acked: sequence # of last outbound b'cast message acknowledged by node
66 * @last_in: sequence # of last in-sequence b'cast message received from node
67 * @last_sent: sequence # of last b'cast message sent by node
68 * @oos_state: state tracker for handling OOS b'cast messages
69 * @deferred_size: number of OOS b'cast messages in deferred queue
70 * @deferred_head: oldest OOS b'cast message received from node
71 * @deferred_tail: newest OOS b'cast message received from node
72 * @reasm_buf: broadcast reassembly queue head from node
73 * @recv_permitted: true if node is allowed to receive b'cast messages
74 */
75struct tipc_node_bclink {
76 u32 acked;
77 u32 last_in;
78 u32 last_sent;
79 u32 oos_state;
80 u32 deferred_size;
81 struct sk_buff *deferred_head;
82 struct sk_buff *deferred_tail;
83 struct sk_buff *reasm_buf;
84 bool recv_permitted;
85};
54 86
55/** 87/**
56 * struct tipc_node - TIPC node structure 88 * struct tipc_node - TIPC node structure
57 * @addr: network address of node 89 * @addr: network address of node
58 * @lock: spinlock governing access to structure 90 * @lock: spinlock governing access to structure
59 * @hash: links to adjacent nodes in unsorted hash chain 91 * @hash: links to adjacent nodes in unsorted hash chain
60 * @list: links to adjacent nodes in sorted list of cluster's nodes
61 * @nsub: list of "node down" subscriptions monitoring node
62 * @active_links: pointers to active links to node 92 * @active_links: pointers to active links to node
63 * @links: pointers to all links to node 93 * @links: pointers to all links to node
94 * @action_flags: bit mask of different types of node actions
95 * @bclink: broadcast-related info
96 * @list: links to adjacent nodes in sorted list of cluster's nodes
64 * @working_links: number of working links to node (both active and standby) 97 * @working_links: number of working links to node (both active and standby)
65 * @block_setup: bit mask of conditions preventing link establishment to node
66 * @link_cnt: number of links to node 98 * @link_cnt: number of links to node
67 * @signature: node instance identifier 99 * @signature: node instance identifier
68 * @bclink: broadcast-related info 100 * @nsub: list of "node down" subscriptions monitoring node
69 * @rcu: rcu struct for tipc_node 101 * @rcu: rcu struct for tipc_node
70 * @acked: sequence # of last outbound b'cast message acknowledged by node
71 * @last_in: sequence # of last in-sequence b'cast message received from node
72 * @last_sent: sequence # of last b'cast message sent by node
73 * @oos_state: state tracker for handling OOS b'cast messages
74 * @deferred_size: number of OOS b'cast messages in deferred queue
75 * @deferred_head: oldest OOS b'cast message received from node
76 * @deferred_tail: newest OOS b'cast message received from node
77 * @reasm_head: broadcast reassembly queue head from node
78 * @reasm_tail: last broadcast fragment received from node
79 * @recv_permitted: true if node is allowed to receive b'cast messages
80 */ 102 */
81struct tipc_node { 103struct tipc_node {
82 u32 addr; 104 u32 addr;
83 spinlock_t lock; 105 spinlock_t lock;
84 struct hlist_node hash; 106 struct hlist_node hash;
85 struct list_head list;
86 struct list_head nsub;
87 struct tipc_link *active_links[2]; 107 struct tipc_link *active_links[2];
88 struct tipc_link *links[MAX_BEARERS]; 108 struct tipc_link *links[MAX_BEARERS];
109 unsigned int action_flags;
110 struct tipc_node_bclink bclink;
111 struct list_head list;
89 int link_cnt; 112 int link_cnt;
90 int working_links; 113 int working_links;
91 int block_setup;
92 u32 signature; 114 u32 signature;
115 struct list_head nsub;
93 struct rcu_head rcu; 116 struct rcu_head rcu;
94 struct {
95 u32 acked;
96 u32 last_in;
97 u32 last_sent;
98 u32 oos_state;
99 u32 deferred_size;
100 struct sk_buff *deferred_head;
101 struct sk_buff *deferred_tail;
102 struct sk_buff *reasm_head;
103 struct sk_buff *reasm_tail;
104 bool recv_permitted;
105 } bclink;
106}; 117};
107 118
108extern struct list_head tipc_node_list; 119extern struct list_head tipc_node_list;
@@ -118,15 +129,18 @@ int tipc_node_active_links(struct tipc_node *n_ptr);
118int tipc_node_is_up(struct tipc_node *n_ptr); 129int tipc_node_is_up(struct tipc_node *n_ptr);
119struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space); 130struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space);
120struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space); 131struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space);
132int tipc_node_get_linkname(u32 bearer_id, u32 node, char *linkname, size_t len);
133void tipc_node_unlock(struct tipc_node *node);
121 134
122static inline void tipc_node_lock(struct tipc_node *n_ptr) 135static inline void tipc_node_lock(struct tipc_node *node)
123{ 136{
124 spin_lock_bh(&n_ptr->lock); 137 spin_lock_bh(&node->lock);
125} 138}
126 139
127static inline void tipc_node_unlock(struct tipc_node *n_ptr) 140static inline bool tipc_node_blocked(struct tipc_node *node)
128{ 141{
129 spin_unlock_bh(&n_ptr->lock); 142 return (node->action_flags & (TIPC_WAIT_PEER_LINKS_DOWN |
143 TIPC_NOTIFY_NODE_DOWN | TIPC_WAIT_OWN_LINKS_DOWN));
130} 144}
131 145
132#endif 146#endif
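The former block_setup mask and WAIT_* defines become one action_flags word with the enum above, and tipc_node_blocked() reduces to a bit test. A tiny standalone illustration of that predicate; the bit values are copied from the enum, the sample flag combinations are invented.

#include <stdbool.h>
#include <stdio.h>

enum {
    TIPC_WAIT_PEER_LINKS_DOWN = (1 << 1),
    TIPC_WAIT_OWN_LINKS_DOWN  = (1 << 2),
    TIPC_NOTIFY_NODE_DOWN     = (1 << 3),
    TIPC_NOTIFY_NODE_UP       = (1 << 4),
};

/* Same test as tipc_node_blocked(): any of these bits blocks re-contact. */
static bool node_blocked(unsigned int action_flags)
{
    return action_flags & (TIPC_WAIT_PEER_LINKS_DOWN |
                           TIPC_NOTIFY_NODE_DOWN |
                           TIPC_WAIT_OWN_LINKS_DOWN);
}

int main(void)
{
    unsigned int flags = TIPC_WAIT_PEER_LINKS_DOWN | TIPC_NOTIFY_NODE_DOWN;

    printf("blocked: %s\n", node_blocked(flags) ? "yes" : "no");  /* yes */
    flags = TIPC_NOTIFY_NODE_UP;
    printf("blocked: %s\n", node_blocked(flags) ? "yes" : "no");  /* no */
    return 0;
}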
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
index 8a7384c04add..7c59ab1d6ecb 100644
--- a/net/tipc/node_subscr.c
+++ b/net/tipc/node_subscr.c
@@ -81,14 +81,13 @@ void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub)
81 * 81 *
82 * Note: node is locked by caller 82 * Note: node is locked by caller
83 */ 83 */
84void tipc_nodesub_notify(struct tipc_node *node) 84void tipc_nodesub_notify(struct list_head *nsub_list)
85{ 85{
86 struct tipc_node_subscr *ns; 86 struct tipc_node_subscr *ns, *safe;
87 87
88 list_for_each_entry(ns, &node->nsub, nodesub_list) { 88 list_for_each_entry_safe(ns, safe, nsub_list, nodesub_list) {
89 if (ns->handle_node_down) { 89 if (ns->handle_node_down) {
90 tipc_k_signal((Handler)ns->handle_node_down, 90 ns->handle_node_down(ns->usr_handle);
91 (unsigned long)ns->usr_handle);
92 ns->handle_node_down = NULL; 91 ns->handle_node_down = NULL;
93 } 92 }
94 } 93 }
diff --git a/net/tipc/node_subscr.h b/net/tipc/node_subscr.h
index c95d20727ded..d91b8cc81e3d 100644
--- a/net/tipc/node_subscr.h
+++ b/net/tipc/node_subscr.h
@@ -58,6 +58,6 @@ struct tipc_node_subscr {
58void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr, 58void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
59 void *usr_handle, net_ev_handler handle_down); 59 void *usr_handle, net_ev_handler handle_down);
60void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub); 60void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub);
61void tipc_nodesub_notify(struct tipc_node *node); 61void tipc_nodesub_notify(struct list_head *nsub_list);
62 62
63#endif 63#endif
diff --git a/net/tipc/port.c b/net/tipc/port.c
index 5c14c7801ee6..5fd7acce01ea 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -165,7 +165,7 @@ void tipc_port_mcast_rcv(struct sk_buff *buf, struct tipc_port_list *dp)
165 msg_set_destnode(msg, tipc_own_addr); 165 msg_set_destnode(msg, tipc_own_addr);
166 if (dp->count == 1) { 166 if (dp->count == 1) {
167 msg_set_destport(msg, dp->ports[0]); 167 msg_set_destport(msg, dp->ports[0]);
168 tipc_port_rcv(buf); 168 tipc_sk_rcv(buf);
169 tipc_port_list_free(dp); 169 tipc_port_list_free(dp);
170 return; 170 return;
171 } 171 }
@@ -180,7 +180,7 @@ void tipc_port_mcast_rcv(struct sk_buff *buf, struct tipc_port_list *dp)
180 if ((index == 0) && (cnt != 0)) 180 if ((index == 0) && (cnt != 0))
181 item = item->next; 181 item = item->next;
182 msg_set_destport(buf_msg(b), item->ports[index]); 182 msg_set_destport(buf_msg(b), item->ports[index]);
183 tipc_port_rcv(b); 183 tipc_sk_rcv(b);
184 } 184 }
185 } 185 }
186exit: 186exit:
@@ -343,7 +343,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err)
343 /* send returned message & dispose of rejected message */ 343 /* send returned message & dispose of rejected message */
344 src_node = msg_prevnode(msg); 344 src_node = msg_prevnode(msg);
345 if (in_own_node(src_node)) 345 if (in_own_node(src_node))
346 tipc_port_rcv(rbuf); 346 tipc_sk_rcv(rbuf);
347 else 347 else
348 tipc_link_xmit(rbuf, src_node, msg_link_selector(rmsg)); 348 tipc_link_xmit(rbuf, src_node, msg_link_selector(rmsg));
349exit: 349exit:
@@ -754,37 +754,6 @@ int tipc_port_shutdown(u32 ref)
754 return tipc_port_disconnect(ref); 754 return tipc_port_disconnect(ref);
755} 755}
756 756
757/**
758 * tipc_port_rcv - receive message from lower layer and deliver to port user
759 */
760int tipc_port_rcv(struct sk_buff *buf)
761{
762 struct tipc_port *p_ptr;
763 struct tipc_msg *msg = buf_msg(buf);
764 u32 destport = msg_destport(msg);
765 u32 dsz = msg_data_sz(msg);
766 u32 err;
767
768 /* forward unresolved named message */
769 if (unlikely(!destport)) {
770 tipc_net_route_msg(buf);
771 return dsz;
772 }
773
774 /* validate destination & pass to port, otherwise reject message */
775 p_ptr = tipc_port_lock(destport);
776 if (likely(p_ptr)) {
777 err = tipc_sk_rcv(&tipc_port_to_sock(p_ptr)->sk, buf);
778 tipc_port_unlock(p_ptr);
779 if (likely(!err))
780 return dsz;
781 } else {
782 err = TIPC_ERR_NO_PORT;
783 }
784
785 return tipc_reject_msg(buf, err);
786}
787
788/* 757/*
789 * tipc_port_iovec_rcv: Concatenate and deliver sectioned 758 * tipc_port_iovec_rcv: Concatenate and deliver sectioned
790 * message for this node. 759 * message for this node.
@@ -798,7 +767,7 @@ static int tipc_port_iovec_rcv(struct tipc_port *sender,
798 767
799 res = tipc_msg_build(&sender->phdr, msg_sect, len, MAX_MSG_SIZE, &buf); 768 res = tipc_msg_build(&sender->phdr, msg_sect, len, MAX_MSG_SIZE, &buf);
800 if (likely(buf)) 769 if (likely(buf))
801 tipc_port_rcv(buf); 770 tipc_sk_rcv(buf);
802 return res; 771 return res;
803} 772}
804 773
diff --git a/net/tipc/port.h b/net/tipc/port.h
index a00397393bd1..cf4ca5b1d9a4 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -42,9 +42,10 @@
42#include "msg.h" 42#include "msg.h"
43#include "node_subscr.h" 43#include "node_subscr.h"
44 44
45#define TIPC_FLOW_CONTROL_WIN 512 45#define TIPC_CONNACK_INTV 256
46#define CONN_OVERLOAD_LIMIT ((TIPC_FLOW_CONTROL_WIN * 2 + 1) * \ 46#define TIPC_FLOWCTRL_WIN (TIPC_CONNACK_INTV * 2)
47 SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE)) 47#define TIPC_CONN_OVERLOAD_LIMIT ((TIPC_FLOWCTRL_WIN * 2 + 1) * \
48 SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
48 49
49/** 50/**
50 * struct tipc_port - TIPC port structure 51 * struct tipc_port - TIPC port structure
@@ -134,7 +135,6 @@ int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg);
134/* 135/*
135 * TIPC messaging routines 136 * TIPC messaging routines
136 */ 137 */
137int tipc_port_rcv(struct sk_buff *buf);
138 138
139int tipc_send(struct tipc_port *port, 139int tipc_send(struct tipc_port *port,
140 struct iovec const *msg_sect, 140 struct iovec const *msg_sect,
@@ -187,7 +187,7 @@ static inline void tipc_port_unlock(struct tipc_port *p_ptr)
187 187
188static inline int tipc_port_congested(struct tipc_port *p_ptr) 188static inline int tipc_port_congested(struct tipc_port *p_ptr)
189{ 189{
190 return (p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2); 190 return ((p_ptr->sent - p_ptr->acked) >= TIPC_FLOWCTRL_WIN);
191} 191}
192 192
193 193
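The reworked constants halve both thresholds while keeping their 2:1 ratio: the receiver now acknowledges every TIPC_CONNACK_INTV (256) messages instead of every 512, and tipc_port_congested() reports congestion once TIPC_FLOWCTRL_WIN (512) messages are unacknowledged instead of 1024; the byte-based overload limit is unchanged. A quick numeric check of the new window:

#include <stdio.h>

#define TIPC_CONNACK_INTV  256
#define TIPC_FLOWCTRL_WIN  (TIPC_CONNACK_INTV * 2)

/* Same comparison as the new tipc_port_congested(). */
static int port_congested(unsigned int sent, unsigned int acked)
{
    return (sent - acked) >= TIPC_FLOWCTRL_WIN;
}

int main(void)
{
    printf("ack every %u msgs, congested at %u unacked\n",
           TIPC_CONNACK_INTV, TIPC_FLOWCTRL_WIN);           /* 256, 512 */
    printf("sent=700 acked=300 -> %s\n",
           port_congested(700, 300) ? "congested" : "ok");  /* 400 < 512: ok */
    printf("sent=900 acked=300 -> %s\n",
           port_congested(900, 300) ? "congested" : "ok");  /* 600 >= 512 */
    return 0;
}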
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 3c0256962f7d..ef0475568f9e 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -36,6 +36,7 @@
36 36
37#include "core.h" 37#include "core.h"
38#include "port.h" 38#include "port.h"
39#include "node.h"
39 40
40#include <linux/export.h> 41#include <linux/export.h>
41 42
@@ -44,7 +45,7 @@
44 45
45#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ 46#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
46 47
47static int backlog_rcv(struct sock *sk, struct sk_buff *skb); 48static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
48static void tipc_data_ready(struct sock *sk); 49static void tipc_data_ready(struct sock *sk);
49static void tipc_write_space(struct sock *sk); 50static void tipc_write_space(struct sock *sk);
50static int tipc_release(struct socket *sock); 51static int tipc_release(struct socket *sock);
@@ -195,11 +196,12 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
195 sock->state = state; 196 sock->state = state;
196 197
197 sock_init_data(sock, sk); 198 sock_init_data(sock, sk);
198 sk->sk_backlog_rcv = backlog_rcv; 199 sk->sk_backlog_rcv = tipc_backlog_rcv;
199 sk->sk_rcvbuf = sysctl_tipc_rmem[1]; 200 sk->sk_rcvbuf = sysctl_tipc_rmem[1];
200 sk->sk_data_ready = tipc_data_ready; 201 sk->sk_data_ready = tipc_data_ready;
201 sk->sk_write_space = tipc_write_space; 202 sk->sk_write_space = tipc_write_space;
202 tipc_sk(sk)->conn_timeout = CONN_TIMEOUT_DEFAULT; 203 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
204 atomic_set(&tsk->dupl_rcvcnt, 0);
203 tipc_port_unlock(port); 205 tipc_port_unlock(port);
204 206
205 if (sock->state == SS_READY) { 207 if (sock->state == SS_READY) {
@@ -983,10 +985,11 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
983 return 0; 985 return 0;
984} 986}
985 987
986static int tipc_wait_for_rcvmsg(struct socket *sock, long timeo) 988static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
987{ 989{
988 struct sock *sk = sock->sk; 990 struct sock *sk = sock->sk;
989 DEFINE_WAIT(wait); 991 DEFINE_WAIT(wait);
992 long timeo = *timeop;
990 int err; 993 int err;
991 994
992 for (;;) { 995 for (;;) {
@@ -1011,6 +1014,7 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long timeo)
1011 break; 1014 break;
1012 } 1015 }
1013 finish_wait(sk_sleep(sk), &wait); 1016 finish_wait(sk_sleep(sk), &wait);
1017 *timeop = timeo;
1014 return err; 1018 return err;
1015} 1019}
1016 1020
@@ -1054,7 +1058,7 @@ static int tipc_recvmsg(struct kiocb *iocb, struct socket *sock,
1054restart: 1058restart:
1055 1059
1056 /* Look for a message in receive queue; wait if necessary */ 1060 /* Look for a message in receive queue; wait if necessary */
1057 res = tipc_wait_for_rcvmsg(sock, timeo); 1061 res = tipc_wait_for_rcvmsg(sock, &timeo);
1058 if (res) 1062 if (res)
1059 goto exit; 1063 goto exit;
1060 1064
@@ -1100,7 +1104,7 @@ restart:
1100 /* Consume received message (optional) */ 1104 /* Consume received message (optional) */
1101 if (likely(!(flags & MSG_PEEK))) { 1105 if (likely(!(flags & MSG_PEEK))) {
1102 if ((sock->state != SS_READY) && 1106 if ((sock->state != SS_READY) &&
1103 (++port->conn_unacked >= TIPC_FLOW_CONTROL_WIN)) 1107 (++port->conn_unacked >= TIPC_CONNACK_INTV))
1104 tipc_acknowledge(port->ref, port->conn_unacked); 1108 tipc_acknowledge(port->ref, port->conn_unacked);
1105 advance_rx_queue(sk); 1109 advance_rx_queue(sk);
1106 } 1110 }
@@ -1152,7 +1156,7 @@ static int tipc_recv_stream(struct kiocb *iocb, struct socket *sock,
1152 1156
1153restart: 1157restart:
1154 /* Look for a message in receive queue; wait if necessary */ 1158 /* Look for a message in receive queue; wait if necessary */
1155 res = tipc_wait_for_rcvmsg(sock, timeo); 1159 res = tipc_wait_for_rcvmsg(sock, &timeo);
1156 if (res) 1160 if (res)
1157 goto exit; 1161 goto exit;
1158 1162
@@ -1209,7 +1213,7 @@ restart:
1209 1213
1210 /* Consume received message (optional) */ 1214 /* Consume received message (optional) */
1211 if (likely(!(flags & MSG_PEEK))) { 1215 if (likely(!(flags & MSG_PEEK))) {
1212 if (unlikely(++port->conn_unacked >= TIPC_FLOW_CONTROL_WIN)) 1216 if (unlikely(++port->conn_unacked >= TIPC_CONNACK_INTV))
1213 tipc_acknowledge(port->ref, port->conn_unacked); 1217 tipc_acknowledge(port->ref, port->conn_unacked);
1214 advance_rx_queue(sk); 1218 advance_rx_queue(sk);
1215 } 1219 }
@@ -1415,7 +1419,7 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1415} 1419}
1416 1420
1417/** 1421/**
1418 * backlog_rcv - handle incoming message from backlog queue 1422 * tipc_backlog_rcv - handle incoming message from backlog queue
1419 * @sk: socket 1423 * @sk: socket
1420 * @buf: message 1424 * @buf: message
1421 * 1425 *
@@ -1423,47 +1427,74 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
1423 * 1427 *
1424 * Returns 0 1428 * Returns 0
1425 */ 1429 */
1426static int backlog_rcv(struct sock *sk, struct sk_buff *buf) 1430static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf)
1427{ 1431{
1428 u32 res; 1432 u32 res;
1433 struct tipc_sock *tsk = tipc_sk(sk);
1434 uint truesize = buf->truesize;
1429 1435
1430 res = filter_rcv(sk, buf); 1436 res = filter_rcv(sk, buf);
1431 if (res) 1437 if (unlikely(res))
1432 tipc_reject_msg(buf, res); 1438 tipc_reject_msg(buf, res);
1439
1440 if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT)
1441 atomic_add(truesize, &tsk->dupl_rcvcnt);
1442
1433 return 0; 1443 return 0;
1434} 1444}
1435 1445
1436/** 1446/**
1437 * tipc_sk_rcv - handle incoming message 1447 * tipc_sk_rcv - handle incoming message
1438 * @sk: socket receiving message 1448 * @buf: buffer containing arriving message
1439 * @buf: message 1449 * Consumes buffer
1440 * 1450 * Returns 0 if success, or errno: -EHOSTUNREACH
1441 * Called with port lock already taken.
1442 *
1443 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
1444 */ 1451 */
1445u32 tipc_sk_rcv(struct sock *sk, struct sk_buff *buf) 1452int tipc_sk_rcv(struct sk_buff *buf)
1446{ 1453{
1447 u32 res; 1454 struct tipc_sock *tsk;
1455 struct tipc_port *port;
1456 struct sock *sk;
1457 u32 dport = msg_destport(buf_msg(buf));
1458 int err = TIPC_OK;
1459 uint limit;
1448 1460
1449 /* 1461 /* Forward unresolved named message */
1450 * Process message if socket is unlocked; otherwise add to backlog queue 1462 if (unlikely(!dport)) {
1451 * 1463 tipc_net_route_msg(buf);
1452 * This code is based on sk_receive_skb(), but must be distinct from it 1464 return 0;
1453 * since a TIPC-specific filter/reject mechanism is utilized 1465 }
1454 */ 1466
1467 /* Validate destination */
1468 port = tipc_port_lock(dport);
1469 if (unlikely(!port)) {
1470 err = TIPC_ERR_NO_PORT;
1471 goto exit;
1472 }
1473
1474 tsk = tipc_port_to_sock(port);
1475 sk = &tsk->sk;
1476
1477 /* Queue message */
1455 bh_lock_sock(sk); 1478 bh_lock_sock(sk);
1479
1456 if (!sock_owned_by_user(sk)) { 1480 if (!sock_owned_by_user(sk)) {
1457 res = filter_rcv(sk, buf); 1481 err = filter_rcv(sk, buf);
1458 } else { 1482 } else {
1459 if (sk_add_backlog(sk, buf, rcvbuf_limit(sk, buf))) 1483 if (sk->sk_backlog.len == 0)
1460 res = TIPC_ERR_OVERLOAD; 1484 atomic_set(&tsk->dupl_rcvcnt, 0);
1461 else 1485 limit = rcvbuf_limit(sk, buf) + atomic_read(&tsk->dupl_rcvcnt);
1462 res = TIPC_OK; 1486 if (sk_add_backlog(sk, buf, limit))
1487 err = TIPC_ERR_OVERLOAD;
1463 } 1488 }
1489
1464 bh_unlock_sock(sk); 1490 bh_unlock_sock(sk);
1491 tipc_port_unlock(port);
1465 1492
1466 return res; 1493 if (likely(!err))
1494 return 0;
1495exit:
1496 tipc_reject_msg(buf, err);
1497 return -EHOSTUNREACH;
1467} 1498}
1468 1499
1469static int tipc_wait_for_connect(struct socket *sock, long *timeo_p) 1500static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
@@ -1905,6 +1936,28 @@ static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
1905 return put_user(sizeof(value), ol); 1936 return put_user(sizeof(value), ol);
1906} 1937}
1907 1938
1939int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg)
1940{
1941 struct tipc_sioc_ln_req lnr;
1942 void __user *argp = (void __user *)arg;
1943
1944 switch (cmd) {
1945 case SIOCGETLINKNAME:
1946 if (copy_from_user(&lnr, argp, sizeof(lnr)))
1947 return -EFAULT;
1948 if (!tipc_node_get_linkname(lnr.bearer_id, lnr.peer,
1949 lnr.linkname, TIPC_MAX_LINK_NAME)) {
1950 if (copy_to_user(argp, &lnr, sizeof(lnr)))
1951 return -EFAULT;
1952 return 0;
1953 }
1954 return -EADDRNOTAVAIL;
1955 break;
1956 default:
1957 return -ENOIOCTLCMD;
1958 }
1959}
1960
1908/* Protocol switches for the various types of TIPC sockets */ 1961/* Protocol switches for the various types of TIPC sockets */
1909 1962
1910static const struct proto_ops msg_ops = { 1963static const struct proto_ops msg_ops = {
@@ -1917,7 +1970,7 @@ static const struct proto_ops msg_ops = {
1917 .accept = sock_no_accept, 1970 .accept = sock_no_accept,
1918 .getname = tipc_getname, 1971 .getname = tipc_getname,
1919 .poll = tipc_poll, 1972 .poll = tipc_poll,
1920 .ioctl = sock_no_ioctl, 1973 .ioctl = tipc_ioctl,
1921 .listen = sock_no_listen, 1974 .listen = sock_no_listen,
1922 .shutdown = tipc_shutdown, 1975 .shutdown = tipc_shutdown,
1923 .setsockopt = tipc_setsockopt, 1976 .setsockopt = tipc_setsockopt,
@@ -1938,7 +1991,7 @@ static const struct proto_ops packet_ops = {
1938 .accept = tipc_accept, 1991 .accept = tipc_accept,
1939 .getname = tipc_getname, 1992 .getname = tipc_getname,
1940 .poll = tipc_poll, 1993 .poll = tipc_poll,
1941 .ioctl = sock_no_ioctl, 1994 .ioctl = tipc_ioctl,
1942 .listen = tipc_listen, 1995 .listen = tipc_listen,
1943 .shutdown = tipc_shutdown, 1996 .shutdown = tipc_shutdown,
1944 .setsockopt = tipc_setsockopt, 1997 .setsockopt = tipc_setsockopt,
@@ -1959,7 +2012,7 @@ static const struct proto_ops stream_ops = {
1959 .accept = tipc_accept, 2012 .accept = tipc_accept,
1960 .getname = tipc_getname, 2013 .getname = tipc_getname,
1961 .poll = tipc_poll, 2014 .poll = tipc_poll,
1962 .ioctl = sock_no_ioctl, 2015 .ioctl = tipc_ioctl,
1963 .listen = tipc_listen, 2016 .listen = tipc_listen,
1964 .shutdown = tipc_shutdown, 2017 .shutdown = tipc_shutdown,
1965 .setsockopt = tipc_setsockopt, 2018 .setsockopt = tipc_setsockopt,
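With tipc_ioctl() wired into all three proto_ops, user space can translate a (peer, bearer_id) pair into a link name via SIOCGETLINKNAME on any TIPC socket. A minimal usage sketch, assuming the struct tipc_sioc_ln_req and SIOCGETLINKNAME definitions from the uapi header of this same release; the peer address and bearer id below are placeholders.

#include <linux/tipc.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    struct tipc_sioc_ln_req lnr;
    int sd = socket(AF_TIPC, SOCK_RDM, 0);

    if (sd < 0) {
        perror("socket(AF_TIPC)");
        return 1;
    }

    memset(&lnr, 0, sizeof(lnr));
    lnr.peer = 0x01001002;        /* placeholder: peer node <1.1.2> */
    lnr.bearer_id = 0;            /* placeholder: first bearer */

    if (ioctl(sd, SIOCGETLINKNAME, &lnr) == 0)
        printf("link name: %s\n", lnr.linkname);
    else
        perror("SIOCGETLINKNAME");

    close(sd);
    return 0;
}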
diff --git a/net/tipc/socket.h b/net/tipc/socket.h
index 74e5c7f195a6..3afcd2a70b31 100644
--- a/net/tipc/socket.h
+++ b/net/tipc/socket.h
@@ -44,12 +44,14 @@
44 * @port: port - interacts with 'sk' and with the rest of the TIPC stack 44 * @port: port - interacts with 'sk' and with the rest of the TIPC stack
45 * @peer_name: the peer of the connection, if any 45 * @peer_name: the peer of the connection, if any
46 * @conn_timeout: the time we can wait for an unresponded setup request 46 * @conn_timeout: the time we can wait for an unresponded setup request
47 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
47 */ 48 */
48 49
49struct tipc_sock { 50struct tipc_sock {
50 struct sock sk; 51 struct sock sk;
51 struct tipc_port port; 52 struct tipc_port port;
52 unsigned int conn_timeout; 53 unsigned int conn_timeout;
54 atomic_t dupl_rcvcnt;
53}; 55};
54 56
55static inline struct tipc_sock *tipc_sk(const struct sock *sk) 57static inline struct tipc_sock *tipc_sk(const struct sock *sk)
@@ -67,6 +69,6 @@ static inline void tipc_sock_wakeup(struct tipc_sock *tsk)
67 tsk->sk.sk_write_space(&tsk->sk); 69 tsk->sk.sk_write_space(&tsk->sk);
68} 70}
69 71
70u32 tipc_sk_rcv(struct sock *sk, struct sk_buff *buf); 72int tipc_sk_rcv(struct sk_buff *buf);
71 73
72#endif 74#endif
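The new dupl_rcvcnt field compensates for receive memory being counted twice: while the socket is owned by the user, buffers queued in the backlog are charged against sk_rcvbuf a second time when tipc_backlog_rcv() moves them into the receive queue, so tipc_sk_rcv() widens the backlog limit by the bytes known to be double-counted and resets the counter once the backlog has drained. A simplified single-threaded model of that bookkeeping, with plain integers instead of atomics and sk_buffs and a made-up buffer limit:

#include <stdio.h>

#define RCVBUF_LIMIT 4096            /* made-up stand-in for sk_rcvbuf */

static unsigned int backlog_len;     /* bytes currently held in the backlog */
static unsigned int dupl_rcvcnt;     /* bytes counted in both queues */

/* Model of tipc_sk_rcv() when the socket is owned by the user. */
static int enqueue_backlog(unsigned int truesize)
{
    unsigned int limit;

    if (backlog_len == 0)
        dupl_rcvcnt = 0;             /* backlog drained: credit expires */
    limit = RCVBUF_LIMIT + dupl_rcvcnt;
    if (backlog_len + truesize > limit)
        return -1;                   /* TIPC_ERR_OVERLOAD in the kernel */
    backlog_len += truesize;
    return 0;
}

/* Model of tipc_backlog_rcv(): the buffer is charged to the receive queue
 * as well, so remember the duplicated bytes (the kernel caps this at
 * TIPC_CONN_OVERLOAD_LIMIT). */
static void move_to_rcv_queue(unsigned int truesize)
{
    backlog_len -= truesize;
    dupl_rcvcnt += truesize;
}

int main(void)
{
    enqueue_backlog(1500);
    enqueue_backlog(1500);
    move_to_rcv_queue(1500);
    printf("backlog=%u dupl_rcvcnt=%u next limit=%u\n",
           backlog_len, dupl_rcvcnt, RCVBUF_LIMIT + dupl_rcvcnt);
    return 0;
}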