Diffstat (limited to 'net/tipc')
 net/tipc/Makefile                                |    4
 net/tipc/bcast.c                                 |  244
 net/tipc/bcast.h                                 |    6
 net/tipc/bearer.c                                |  447
 net/tipc/bearer.h                                |   16
 net/tipc/config.c                                |    4
 net/tipc/core.c                                  |    9
 net/tipc/core.h                                  |    8
 net/tipc/link.c                                  | 1084
 net/tipc/link.h                                  |   62
 net/tipc/msg.c                                   |  159
 net/tipc/msg.h                                   |   21
 net/tipc/name_distr.c                            |  303
 net/tipc/name_distr.h                            |    2
 net/tipc/name_table.c                            |  382
 net/tipc/name_table.h                            |   30
 net/tipc/net.c                                   |  109
 net/tipc/net.h                                   |    8
 net/tipc/netlink.c                               |  133
 net/tipc/netlink.h (renamed from net/tipc/ref.h) |   22
 net/tipc/node.c                                  |  237
 net/tipc/node.h                                  |   29
 net/tipc/node_subscr.c                           |   96
 net/tipc/node_subscr.h                           |   63
 net/tipc/port.c                                  |  514
 net/tipc/port.h                                  |  190
 net/tipc/ref.c                                   |  266
 net/tipc/socket.c                                | 1271
 net/tipc/socket.h                                |   58
 net/tipc/subscr.c                                |    2
 net/tipc/sysctl.c                                |    7
31 files changed, 3563 insertions(+), 2223 deletions(-)
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index a080c66d819a..333e4592772c 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -7,8 +7,8 @@ obj-$(CONFIG_TIPC) := tipc.o
 tipc-y	+= addr.o bcast.o bearer.o config.o \
 	   core.o link.o discover.o msg.o \
 	   name_distr.o subscr.o name_table.o net.o \
-	   netlink.o node.o node_subscr.o port.o ref.o \
-	   socket.o log.o eth_media.o server.o
+	   netlink.o node.o socket.o log.o eth_media.o \
+	   server.o
 
 tipc-$(CONFIG_TIPC_MEDIA_IB)	+= ib_media.o
 tipc-$(CONFIG_SYSCTL)		+= sysctl.o
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index dd13bfa09333..96ceefeb9daf 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -37,7 +37,6 @@
 
 #include "core.h"
 #include "link.h"
-#include "port.h"
 #include "socket.h"
 #include "msg.h"
 #include "bcast.h"
@@ -218,12 +217,27 @@ struct tipc_node *tipc_bclink_retransmit_to(void)
  */
 static void bclink_retransmit_pkt(u32 after, u32 to)
 {
-	struct sk_buff *buf;
+	struct sk_buff *skb;
+
+	skb_queue_walk(&bcl->outqueue, skb) {
+		if (more(buf_seqno(skb), after))
+			break;
+	}
+	tipc_link_retransmit(bcl, skb, mod(to - after));
+}
+
+/**
+ * tipc_bclink_wakeup_users - wake up pending users
+ *
+ * Called with no locks taken
+ */
+void tipc_bclink_wakeup_users(void)
+{
+	struct sk_buff *skb;
+
+	while ((skb = skb_dequeue(&bclink->link.waiting_sks)))
+		tipc_sk_rcv(skb);
 
-	buf = bcl->first_out;
-	while (buf && less_eq(buf_seqno(buf), after))
-		buf = buf->next;
-	tipc_link_retransmit(bcl, buf, mod(to - after));
 }
 
 /**
@@ -235,14 +249,14 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
  */
 void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
 {
-	struct sk_buff *crs;
+	struct sk_buff *skb, *tmp;
 	struct sk_buff *next;
 	unsigned int released = 0;
 
 	tipc_bclink_lock();
 	/* Bail out if tx queue is empty (no clean up is required) */
-	crs = bcl->first_out;
-	if (!crs)
+	skb = skb_peek(&bcl->outqueue);
+	if (!skb)
 		goto exit;
 
 	/* Determine which messages need to be acknowledged */
@@ -261,47 +275,48 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
 	 * Bail out if specified sequence number does not correspond
 	 * to a message that has been sent and not yet acknowledged
 	 */
-	if (less(acked, buf_seqno(crs)) ||
+	if (less(acked, buf_seqno(skb)) ||
 	    less(bcl->fsm_msg_cnt, acked) ||
 	    less_eq(acked, n_ptr->bclink.acked))
 		goto exit;
 	}
 
 	/* Skip over packets that node has previously acknowledged */
-	while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked))
-		crs = crs->next;
+	skb_queue_walk(&bcl->outqueue, skb) {
+		if (more(buf_seqno(skb), n_ptr->bclink.acked))
+			break;
+	}
 
 	/* Update packets that node is now acknowledging */
+	skb_queue_walk_from_safe(&bcl->outqueue, skb, tmp) {
+		if (more(buf_seqno(skb), acked))
+			break;
 
-	while (crs && less_eq(buf_seqno(crs), acked)) {
-		next = crs->next;
-
-		if (crs != bcl->next_out)
-			bcbuf_decr_acks(crs);
-		else {
-			bcbuf_set_acks(crs, 0);
+		next = tipc_skb_queue_next(&bcl->outqueue, skb);
+		if (skb != bcl->next_out) {
+			bcbuf_decr_acks(skb);
+		} else {
+			bcbuf_set_acks(skb, 0);
 			bcl->next_out = next;
 			bclink_set_last_sent();
 		}
 
-		if (bcbuf_acks(crs) == 0) {
-			bcl->first_out = next;
-			bcl->out_queue_size--;
-			kfree_skb(crs);
+		if (bcbuf_acks(skb) == 0) {
+			__skb_unlink(skb, &bcl->outqueue);
+			kfree_skb(skb);
 			released = 1;
 		}
-		crs = next;
 	}
 	n_ptr->bclink.acked = acked;
 
 	/* Try resolving broadcast link congestion, if necessary */
-
 	if (unlikely(bcl->next_out)) {
-		tipc_link_push_queue(bcl);
+		tipc_link_push_packets(bcl);
 		bclink_set_last_sent();
 	}
-	if (unlikely(released && !list_empty(&bcl->waiting_ports)))
-		tipc_link_wakeup_ports(bcl, 0);
+	if (unlikely(released && !skb_queue_empty(&bcl->waiting_sks)))
+		n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
+
 exit:
 	tipc_bclink_unlock();
 }
@@ -316,19 +331,16 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
 	struct sk_buff *buf;
 
 	/* Ignore "stale" link state info */
-
 	if (less_eq(last_sent, n_ptr->bclink.last_in))
 		return;
 
 	/* Update link synchronization state; quit if in sync */
-
 	bclink_update_last_sent(n_ptr, last_sent);
 
 	if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
 		return;
 
 	/* Update out-of-sync state; quit if loss is still unconfirmed */
-
 	if ((++n_ptr->bclink.oos_state) == 1) {
 		if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
 			return;
@@ -336,15 +348,15 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
 	}
 
 	/* Don't NACK if one has been recently sent (or seen) */
-
 	if (n_ptr->bclink.oos_state & 0x1)
 		return;
 
 	/* Send NACK */
-
 	buf = tipc_buf_acquire(INT_H_SIZE);
 	if (buf) {
 		struct tipc_msg *msg = buf_msg(buf);
+		struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferred_queue);
+		u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;
 
 		tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
 			      INT_H_SIZE, n_ptr->addr);
@@ -352,9 +364,7 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
 		msg_set_mc_netid(msg, tipc_net_id);
 		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
 		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
-		msg_set_bcgap_to(msg, n_ptr->bclink.deferred_head
-				 ? buf_seqno(n_ptr->bclink.deferred_head) - 1
-				 : n_ptr->bclink.last_sent);
+		msg_set_bcgap_to(msg, to);
 
 		tipc_bclink_lock();
 		tipc_bearer_send(MAX_BEARERS, buf, NULL);
@@ -391,20 +401,20 @@ static void bclink_peek_nack(struct tipc_msg *msg)
 
 /* tipc_bclink_xmit - broadcast buffer chain to all nodes in cluster
  * and to identified node local sockets
- * @buf: chain of buffers containing message
+ * @list: chain of buffers containing message
  * Consumes the buffer chain, except when returning -ELINKCONG
  * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
  */
-int tipc_bclink_xmit(struct sk_buff *buf)
+int tipc_bclink_xmit(struct sk_buff_head *list)
 {
 	int rc = 0;
 	int bc = 0;
-	struct sk_buff *clbuf;
+	struct sk_buff *skb;
 
 	/* Prepare clone of message for local node */
-	clbuf = tipc_msg_reassemble(buf);
-	if (unlikely(!clbuf)) {
-		kfree_skb_list(buf);
+	skb = tipc_msg_reassemble(list);
+	if (unlikely(!skb)) {
+		__skb_queue_purge(list);
 		return -EHOSTUNREACH;
 	}
 
@@ -412,11 +422,13 @@ int tipc_bclink_xmit(struct sk_buff *buf)
 	if (likely(bclink)) {
 		tipc_bclink_lock();
 		if (likely(bclink->bcast_nodes.count)) {
-			rc = __tipc_link_xmit(bcl, buf);
+			rc = __tipc_link_xmit(bcl, list);
 			if (likely(!rc)) {
+				u32 len = skb_queue_len(&bcl->outqueue);
+
 				bclink_set_last_sent();
 				bcl->stats.queue_sz_counts++;
-				bcl->stats.accu_queue_sz += bcl->out_queue_size;
+				bcl->stats.accu_queue_sz += len;
 			}
 			bc = 1;
 		}
@@ -424,13 +436,13 @@ int tipc_bclink_xmit(struct sk_buff *buf)
 	}
 
 	if (unlikely(!bc))
-		kfree_skb_list(buf);
+		__skb_queue_purge(list);
 
 	/* Deliver message clone */
 	if (likely(!rc))
-		tipc_sk_mcast_rcv(clbuf);
+		tipc_sk_mcast_rcv(skb);
 	else
-		kfree_skb(clbuf);
+		kfree_skb(skb);
 
 	return rc;
 }
@@ -451,7 +463,6 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
 	 * Unicast an ACK periodically, ensuring that
 	 * all nodes in the cluster don't ACK at the same time
 	 */
-
 	if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) {
 		tipc_link_proto_xmit(node->active_links[node->addr & 1],
 				     STATE_MSG, 0, 0, 0, 0, 0);
@@ -473,7 +484,6 @@ void tipc_bclink_rcv(struct sk_buff *buf)
 	int deferred = 0;
 
 	/* Screen out unwanted broadcast messages */
-
 	if (msg_mc_netid(msg) != tipc_net_id)
 		goto exit;
 
@@ -486,7 +496,6 @@ void tipc_bclink_rcv(struct sk_buff *buf)
 		goto unlock;
 
 	/* Handle broadcast protocol message */
-
 	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
 		if (msg_type(msg) != STATE_MSG)
 			goto unlock;
@@ -507,14 +516,12 @@ void tipc_bclink_rcv(struct sk_buff *buf)
 	}
 
 	/* Handle in-sequence broadcast message */
-
 	seqno = msg_seqno(msg);
 	next_in = mod(node->bclink.last_in + 1);
 
 	if (likely(seqno == next_in)) {
 receive:
 		/* Deliver message to destination */
-
 		if (likely(msg_isdata(msg))) {
 			tipc_bclink_lock();
 			bclink_accept_pkt(node, seqno);
@@ -563,7 +570,6 @@ receive:
 		buf = NULL;
 
 		/* Determine new synchronization state */
-
 		tipc_node_lock(node);
 		if (unlikely(!tipc_node_is_up(node)))
 			goto unlock;
@@ -571,33 +577,26 @@ receive:
 		if (node->bclink.last_in == node->bclink.last_sent)
 			goto unlock;
 
-		if (!node->bclink.deferred_head) {
+		if (skb_queue_empty(&node->bclink.deferred_queue)) {
 			node->bclink.oos_state = 1;
 			goto unlock;
 		}
 
-		msg = buf_msg(node->bclink.deferred_head);
+		msg = buf_msg(skb_peek(&node->bclink.deferred_queue));
 		seqno = msg_seqno(msg);
 		next_in = mod(next_in + 1);
 		if (seqno != next_in)
 			goto unlock;
 
 		/* Take in-sequence message from deferred queue & deliver it */
-
-		buf = node->bclink.deferred_head;
-		node->bclink.deferred_head = buf->next;
-		buf->next = NULL;
-		node->bclink.deferred_size--;
+		buf = __skb_dequeue(&node->bclink.deferred_queue);
 		goto receive;
 	}
 
 	/* Handle out-of-sequence broadcast message */
-
 	if (less(next_in, seqno)) {
-		deferred = tipc_link_defer_pkt(&node->bclink.deferred_head,
-					       &node->bclink.deferred_tail,
+		deferred = tipc_link_defer_pkt(&node->bclink.deferred_queue,
 					       buf);
-		node->bclink.deferred_size += deferred;
 		bclink_update_last_sent(node, seqno);
 		buf = NULL;
 	}
@@ -756,6 +755,118 @@ void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
 	tipc_bclink_unlock();
 }
 
+static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
+				      struct tipc_stats *stats)
+{
+	int i;
+	struct nlattr *nest;
+
+	struct nla_map {
+		__u32 key;
+		__u32 val;
+	};
+
+	struct nla_map map[] = {
+		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
+		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
+		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
+		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
+		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
+		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
+		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
+		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
+		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
+		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
+		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
+		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
+		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
+		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
+		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
+		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
+		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
+		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
+		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
+			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
+	};
+
+	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
+	if (!nest)
+		return -EMSGSIZE;
+
+	for (i = 0; i < ARRAY_SIZE(map); i++)
+		if (nla_put_u32(skb, map[i].key, map[i].val))
+			goto msg_full;
+
+	nla_nest_end(skb, nest);
+
+	return 0;
+msg_full:
+	nla_nest_cancel(skb, nest);
+
+	return -EMSGSIZE;
+}
+
+int tipc_nl_add_bc_link(struct tipc_nl_msg *msg)
+{
+	int err;
+	void *hdr;
+	struct nlattr *attrs;
+	struct nlattr *prop;
+
+	if (!bcl)
+		return 0;
+
+	tipc_bclink_lock();
+
+	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
+			  NLM_F_MULTI, TIPC_NL_LINK_GET);
+	if (!hdr)
+		return -EMSGSIZE;
+
+	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
+	if (!attrs)
+		goto msg_full;
+
+	/* The broadcast link is always up */
+	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
+		goto attr_msg_full;
+
+	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
+		goto attr_msg_full;
+	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
+		goto attr_msg_full;
+	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->next_in_no))
+		goto attr_msg_full;
+	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->next_out_no))
+		goto attr_msg_full;
+
+	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
+	if (!prop)
+		goto attr_msg_full;
+	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->queue_limit[0]))
+		goto prop_msg_full;
+	nla_nest_end(msg->skb, prop);
+
+	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
+	if (err)
+		goto attr_msg_full;
+
+	tipc_bclink_unlock();
+	nla_nest_end(msg->skb, attrs);
+	genlmsg_end(msg->skb, hdr);
+
+	return 0;
+
+prop_msg_full:
+	nla_nest_cancel(msg->skb, prop);
+attr_msg_full:
+	nla_nest_cancel(msg->skb, attrs);
+msg_full:
+	tipc_bclink_unlock();
+	genlmsg_cancel(msg->skb, hdr);
+
+	return -EMSGSIZE;
+}
+
 int tipc_bclink_stats(char *buf, const u32 buf_size)
 {
@@ -840,9 +951,12 @@ int tipc_bclink_init(void)
 	sprintf(bcbearer->media.name, "tipc-broadcast");
 
 	spin_lock_init(&bclink->lock);
-	INIT_LIST_HEAD(&bcl->waiting_ports);
+	__skb_queue_head_init(&bcl->outqueue);
+	__skb_queue_head_init(&bcl->deferred_queue);
+	skb_queue_head_init(&bcl->waiting_sks);
 	bcl->next_out_no = 1;
 	spin_lock_init(&bclink->node.lock);
+	__skb_queue_head_init(&bclink->node.waiting_sks);
 	bcl->owner = &bclink->node;
 	bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
 	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
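The bcast.c changes above replace TIPC's hand-rolled singly linked buffer chains (first_out/next pointers) with the generic sk_buff_head queue API. A minimal, self-contained sketch of the walk-and-unlink pattern now used in tipc_bclink_acknowledge(); ack_release() is a hypothetical name, and more()/buf_seqno() are the TIPC sequence-number helpers assumed from link.h/msg.h:

	/* Sketch only: free every queued buffer up to and including 'acked'.
	 * skb_queue_walk_safe() caches the next pointer, so unlinking the
	 * current skb inside the loop is safe.
	 */
	static void ack_release(struct sk_buff_head *outq, u32 acked)
	{
		struct sk_buff *skb, *tmp;

		skb_queue_walk_safe(outq, skb, tmp) {
			if (more(buf_seqno(skb), acked))
				break;
			__skb_unlink(skb, outq);	/* caller holds the bclink lock */
			kfree_skb(skb);
		}
	}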
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 4875d9536aee..644d79129fba 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -37,6 +37,8 @@
 #ifndef _TIPC_BCAST_H
 #define _TIPC_BCAST_H
 
+#include "netlink.h"
+
 #define MAX_NODES 4096
 #define WSIZE 32
 #define TIPC_BCLINK_RESET 1
@@ -98,6 +100,8 @@ int tipc_bclink_reset_stats(void);
 int tipc_bclink_set_queue_limits(u32 limit);
 void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action);
 uint tipc_bclink_get_mtu(void);
-int tipc_bclink_xmit(struct sk_buff *buf);
+int tipc_bclink_xmit(struct sk_buff_head *list);
+void tipc_bclink_wakeup_users(void);
+int tipc_nl_add_bc_link(struct tipc_nl_msg *msg);
 
 #endif
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 264474394f9f..463db5b15b8b 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -1,7 +1,7 @@
 /*
  * net/tipc/bearer.c: TIPC bearer code
  *
- * Copyright (c) 1996-2006, 2013, Ericsson AB
+ * Copyright (c) 1996-2006, 2013-2014, Ericsson AB
  * Copyright (c) 2004-2006, 2010-2013, Wind River Systems
  * All rights reserved.
  *
@@ -37,6 +37,7 @@
 #include "core.h"
 #include "config.h"
 #include "bearer.h"
+#include "link.h"
 #include "discover.h"
 
 #define MAX_ADDR_STR 60
@@ -49,6 +50,23 @@ static struct tipc_media * const media_info_array[] = {
 	NULL
 };
 
+static const struct nla_policy
+tipc_nl_bearer_policy[TIPC_NLA_BEARER_MAX + 1] = {
+	[TIPC_NLA_BEARER_UNSPEC] = { .type = NLA_UNSPEC },
+	[TIPC_NLA_BEARER_NAME] = {
+		.type = NLA_STRING,
+		.len = TIPC_MAX_BEARER_NAME
+	},
+	[TIPC_NLA_BEARER_PROP] = { .type = NLA_NESTED },
+	[TIPC_NLA_BEARER_DOMAIN] = { .type = NLA_U32 }
+};
+
+static const struct nla_policy tipc_nl_media_policy[TIPC_NLA_MEDIA_MAX + 1] = {
+	[TIPC_NLA_MEDIA_UNSPEC] = { .type = NLA_UNSPEC },
+	[TIPC_NLA_MEDIA_NAME] = { .type = NLA_STRING },
+	[TIPC_NLA_MEDIA_PROP] = { .type = NLA_NESTED }
+};
+
 struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1];
 
 static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down);
@@ -627,3 +645,430 @@ void tipc_bearer_stop(void)
 		}
 	}
 }
+
+/* Caller should hold rtnl_lock to protect the bearer */
+static int __tipc_nl_add_bearer(struct tipc_nl_msg *msg,
+				struct tipc_bearer *bearer)
+{
+	void *hdr;
+	struct nlattr *attrs;
+	struct nlattr *prop;
+
+	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
+			  NLM_F_MULTI, TIPC_NL_BEARER_GET);
+	if (!hdr)
+		return -EMSGSIZE;
+
+	attrs = nla_nest_start(msg->skb, TIPC_NLA_BEARER);
+	if (!attrs)
+		goto msg_full;
+
+	if (nla_put_string(msg->skb, TIPC_NLA_BEARER_NAME, bearer->name))
+		goto attr_msg_full;
+
+	prop = nla_nest_start(msg->skb, TIPC_NLA_BEARER_PROP);
+	if (!prop)
+		goto prop_msg_full;
+	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, bearer->priority))
+		goto prop_msg_full;
+	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, bearer->tolerance))
+		goto prop_msg_full;
+	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bearer->window))
+		goto prop_msg_full;
+
+	nla_nest_end(msg->skb, prop);
+	nla_nest_end(msg->skb, attrs);
+	genlmsg_end(msg->skb, hdr);
+
+	return 0;
+
+prop_msg_full:
+	nla_nest_cancel(msg->skb, prop);
+attr_msg_full:
+	nla_nest_cancel(msg->skb, attrs);
+msg_full:
+	genlmsg_cancel(msg->skb, hdr);
+
+	return -EMSGSIZE;
+}
+
+int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	int err;
+	int i = cb->args[0];
+	struct tipc_bearer *bearer;
+	struct tipc_nl_msg msg;
+
+	if (i == MAX_BEARERS)
+		return 0;
+
+	msg.skb = skb;
+	msg.portid = NETLINK_CB(cb->skb).portid;
+	msg.seq = cb->nlh->nlmsg_seq;
+
+	rtnl_lock();
+	for (i = 0; i < MAX_BEARERS; i++) {
+		bearer = rtnl_dereference(bearer_list[i]);
+		if (!bearer)
+			continue;
+
+		err = __tipc_nl_add_bearer(&msg, bearer);
+		if (err)
+			break;
+	}
+	rtnl_unlock();
+
+	cb->args[0] = i;
+	return skb->len;
+}
+
+int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
+{
+	int err;
+	char *name;
+	struct sk_buff *rep;
+	struct tipc_bearer *bearer;
+	struct tipc_nl_msg msg;
+	struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+
+	if (!info->attrs[TIPC_NLA_BEARER])
+		return -EINVAL;
+
+	err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX,
+			       info->attrs[TIPC_NLA_BEARER],
+			       tipc_nl_bearer_policy);
+	if (err)
+		return err;
+
+	if (!attrs[TIPC_NLA_BEARER_NAME])
+		return -EINVAL;
+	name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
+
+	rep = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!rep)
+		return -ENOMEM;
+
+	msg.skb = rep;
+	msg.portid = info->snd_portid;
+	msg.seq = info->snd_seq;
+
+	rtnl_lock();
+	bearer = tipc_bearer_find(name);
+	if (!bearer) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	err = __tipc_nl_add_bearer(&msg, bearer);
+	if (err)
+		goto err_out;
+	rtnl_unlock();
+
+	return genlmsg_reply(rep, info);
+err_out:
+	rtnl_unlock();
+	nlmsg_free(rep);
+
+	return err;
+}
+
+int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
+{
+	int err;
+	char *name;
+	struct tipc_bearer *bearer;
+	struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+
+	if (!info->attrs[TIPC_NLA_BEARER])
+		return -EINVAL;
+
+	err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX,
+			       info->attrs[TIPC_NLA_BEARER],
+			       tipc_nl_bearer_policy);
+	if (err)
+		return err;
+
+	if (!attrs[TIPC_NLA_BEARER_NAME])
+		return -EINVAL;
+
+	name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
+
+	rtnl_lock();
+	bearer = tipc_bearer_find(name);
+	if (!bearer) {
+		rtnl_unlock();
+		return -EINVAL;
+	}
+
+	bearer_disable(bearer, false);
+	rtnl_unlock();
+
+	return 0;
+}
+
+int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
+{
+	int err;
+	char *bearer;
+	struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+	u32 domain;
+	u32 prio;
+
+	prio = TIPC_MEDIA_LINK_PRI;
+	domain = tipc_own_addr & TIPC_CLUSTER_MASK;
+
+	if (!info->attrs[TIPC_NLA_BEARER])
+		return -EINVAL;
+
+	err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX,
+			       info->attrs[TIPC_NLA_BEARER],
+			       tipc_nl_bearer_policy);
+	if (err)
+		return err;
+
+	if (!attrs[TIPC_NLA_BEARER_NAME])
+		return -EINVAL;
+
+	bearer = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
+
+	if (attrs[TIPC_NLA_BEARER_DOMAIN])
+		domain = nla_get_u32(attrs[TIPC_NLA_BEARER_DOMAIN]);
+
+	if (attrs[TIPC_NLA_BEARER_PROP]) {
+		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
+
+		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_BEARER_PROP],
+					      props);
+		if (err)
+			return err;
+
+		if (props[TIPC_NLA_PROP_PRIO])
+			prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
+	}
+
+	rtnl_lock();
+	err = tipc_enable_bearer(bearer, domain, prio);
+	if (err) {
+		rtnl_unlock();
+		return err;
+	}
+	rtnl_unlock();
+
+	return 0;
+}
+
+int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
+{
+	int err;
+	char *name;
+	struct tipc_bearer *b;
+	struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+
+	if (!info->attrs[TIPC_NLA_BEARER])
+		return -EINVAL;
+
+	err = nla_parse_nested(attrs, TIPC_NLA_BEARER_MAX,
+			       info->attrs[TIPC_NLA_BEARER],
+			       tipc_nl_bearer_policy);
+	if (err)
+		return err;
+
+	if (!attrs[TIPC_NLA_BEARER_NAME])
+		return -EINVAL;
+	name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
+
+	rtnl_lock();
+	b = tipc_bearer_find(name);
+	if (!b) {
+		rtnl_unlock();
+		return -EINVAL;
+	}
+
+	if (attrs[TIPC_NLA_BEARER_PROP]) {
+		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
+
+		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_BEARER_PROP],
+					      props);
+		if (err) {
+			rtnl_unlock();
+			return err;
+		}
+
+		if (props[TIPC_NLA_PROP_TOL])
+			b->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
+		if (props[TIPC_NLA_PROP_PRIO])
+			b->priority = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
+		if (props[TIPC_NLA_PROP_WIN])
+			b->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+	}
+	rtnl_unlock();
+
+	return 0;
+}
+
+static int __tipc_nl_add_media(struct tipc_nl_msg *msg,
+			       struct tipc_media *media)
+{
+	void *hdr;
+	struct nlattr *attrs;
+	struct nlattr *prop;
+
+	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
+			  NLM_F_MULTI, TIPC_NL_MEDIA_GET);
+	if (!hdr)
+		return -EMSGSIZE;
+
+	attrs = nla_nest_start(msg->skb, TIPC_NLA_MEDIA);
+	if (!attrs)
+		goto msg_full;
+
+	if (nla_put_string(msg->skb, TIPC_NLA_MEDIA_NAME, media->name))
+		goto attr_msg_full;
+
+	prop = nla_nest_start(msg->skb, TIPC_NLA_MEDIA_PROP);
+	if (!prop)
+		goto prop_msg_full;
+	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, media->priority))
+		goto prop_msg_full;
+	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, media->tolerance))
+		goto prop_msg_full;
+	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, media->window))
+		goto prop_msg_full;
+
+	nla_nest_end(msg->skb, prop);
+	nla_nest_end(msg->skb, attrs);
+	genlmsg_end(msg->skb, hdr);
+
+	return 0;
+
+prop_msg_full:
+	nla_nest_cancel(msg->skb, prop);
+attr_msg_full:
+	nla_nest_cancel(msg->skb, attrs);
+msg_full:
+	genlmsg_cancel(msg->skb, hdr);
+
+	return -EMSGSIZE;
+}
+
+int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	int err;
+	int i = cb->args[0];
+	struct tipc_nl_msg msg;
+
+	if (i == MAX_MEDIA)
+		return 0;
+
+	msg.skb = skb;
+	msg.portid = NETLINK_CB(cb->skb).portid;
+	msg.seq = cb->nlh->nlmsg_seq;
+
+	rtnl_lock();
+	for (; media_info_array[i] != NULL; i++) {
+		err = __tipc_nl_add_media(&msg, media_info_array[i]);
+		if (err)
+			break;
+	}
+	rtnl_unlock();
+
+	cb->args[0] = i;
+	return skb->len;
+}
+
+int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info)
+{
+	int err;
+	char *name;
+	struct tipc_nl_msg msg;
+	struct tipc_media *media;
+	struct sk_buff *rep;
+	struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+
+	if (!info->attrs[TIPC_NLA_MEDIA])
+		return -EINVAL;
+
+	err = nla_parse_nested(attrs, TIPC_NLA_MEDIA_MAX,
+			       info->attrs[TIPC_NLA_MEDIA],
+			       tipc_nl_media_policy);
+	if (err)
+		return err;
+
+	if (!attrs[TIPC_NLA_MEDIA_NAME])
+		return -EINVAL;
+	name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]);
+
+	rep = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!rep)
+		return -ENOMEM;
+
+	msg.skb = rep;
+	msg.portid = info->snd_portid;
+	msg.seq = info->snd_seq;
+
+	rtnl_lock();
+	media = tipc_media_find(name);
+	if (!media) {
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	err = __tipc_nl_add_media(&msg, media);
+	if (err)
+		goto err_out;
+	rtnl_unlock();
+
+	return genlmsg_reply(rep, info);
+err_out:
+	rtnl_unlock();
+	nlmsg_free(rep);
+
+	return err;
+}
+
+int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info)
+{
+	int err;
+	char *name;
+	struct tipc_media *m;
+	struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
+
+	if (!info->attrs[TIPC_NLA_MEDIA])
+		return -EINVAL;
+
+	err = nla_parse_nested(attrs, TIPC_NLA_MEDIA_MAX,
+			       info->attrs[TIPC_NLA_MEDIA],
+			       tipc_nl_media_policy);
+
+	if (!attrs[TIPC_NLA_MEDIA_NAME])
+		return -EINVAL;
+	name = nla_data(attrs[TIPC_NLA_MEDIA_NAME]);
+
+	rtnl_lock();
+	m = tipc_media_find(name);
+	if (!m) {
+		rtnl_unlock();
+		return -EINVAL;
+	}
+
+	if (attrs[TIPC_NLA_MEDIA_PROP]) {
+		struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
+
+		err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_MEDIA_PROP],
+					      props);
+		if (err) {
+			rtnl_unlock();
+			return err;
+		}
+
+		if (props[TIPC_NLA_PROP_TOL])
+			m->tolerance = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
+		if (props[TIPC_NLA_PROP_PRIO])
+			m->priority = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
+		if (props[TIPC_NLA_PROP_WIN])
+			m->window = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
+	}
+	rtnl_unlock();
+
+	return 0;
+}
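All of the dump/get handlers added above share one nesting discipline: open a nest, emit attributes, close the nest on success, and cancel it on failure so a partially filled message is never sent. A condensed sketch of that shape, assuming only the standard kernel netlink attribute helpers (fill_bearer_props is a hypothetical name):

	/* Sketch only: fill a TIPC_NLA_BEARER_PROP nest, unwinding on error. */
	static int fill_bearer_props(struct sk_buff *skb, u32 prio, u32 tol, u32 win)
	{
		struct nlattr *prop;

		prop = nla_nest_start(skb, TIPC_NLA_BEARER_PROP);
		if (!prop)
			return -EMSGSIZE;
		if (nla_put_u32(skb, TIPC_NLA_PROP_PRIO, prio) ||
		    nla_put_u32(skb, TIPC_NLA_PROP_TOL, tol) ||
		    nla_put_u32(skb, TIPC_NLA_PROP_WIN, win)) {
			nla_nest_cancel(skb, prop);	/* drop the half-built nest */
			return -EMSGSIZE;
		}
		nla_nest_end(skb, prop);
		return 0;
	}

The dump callbacks also persist their loop index in cb->args[0] between invocations; returning skb->len rather than 0 tells the netlink core that more entries remain.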
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 78fccc49de23..2c1230ac5dfe 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -1,7 +1,7 @@
 /*
  * net/tipc/bearer.h: Include file for TIPC bearer code
  *
- * Copyright (c) 1996-2006, 2013, Ericsson AB
+ * Copyright (c) 1996-2006, 2013-2014, Ericsson AB
  * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
@@ -38,6 +38,8 @@
 #define _TIPC_BEARER_H
 
 #include "bcast.h"
+#include "netlink.h"
+#include <net/genetlink.h>
 
 #define MAX_BEARERS	2
 #define MAX_MEDIA	2
@@ -163,7 +165,7 @@ extern struct tipc_bearer __rcu *bearer_list[];
  * TIPC routines available to supported media types
  */
 
-void tipc_rcv(struct sk_buff *buf, struct tipc_bearer *tb_ptr);
+void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *tb_ptr);
 int tipc_enable_bearer(const char *bearer_name, u32 disc_domain, u32 priority);
 int tipc_disable_bearer(const char *name);
 
@@ -176,6 +178,16 @@ extern struct tipc_media eth_media_info;
 extern struct tipc_media ib_media_info;
 #endif
 
+int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb);
+int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info);
+
+int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb);
+int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info);
+int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info);
+
 int tipc_media_set_priority(const char *name, u32 new_value);
 int tipc_media_set_window(const char *name, u32 new_value);
 void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
diff --git a/net/tipc/config.c b/net/tipc/config.c
index 2b42403ad33a..876f4c6a2631 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -35,7 +35,7 @@
  */
 
 #include "core.h"
-#include "port.h"
+#include "socket.h"
 #include "name_table.h"
 #include "config.h"
 #include "server.h"
@@ -266,7 +266,7 @@ struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area
 		rep_tlv_buf = tipc_media_get_names();
 		break;
 	case TIPC_CMD_SHOW_PORTS:
-		rep_tlv_buf = tipc_port_get_ports();
+		rep_tlv_buf = tipc_sk_socks_show();
 		break;
 	case TIPC_CMD_SHOW_STATS:
 		rep_tlv_buf = tipc_show_stats();
diff --git a/net/tipc/core.c b/net/tipc/core.c
index 676d18015dd8..a5737b8407dd 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -35,11 +35,10 @@
  */
 
 #include "core.h"
-#include "ref.h"
 #include "name_table.h"
 #include "subscr.h"
 #include "config.h"
-#include "port.h"
+#include "socket.h"
 
 #include <linux/module.h>
 
@@ -85,7 +84,7 @@ static void tipc_core_stop(void)
 	tipc_netlink_stop();
 	tipc_subscr_stop();
 	tipc_nametbl_stop();
-	tipc_ref_table_stop();
+	tipc_sk_ref_table_stop();
 	tipc_socket_stop();
 	tipc_unregister_sysctl();
 }
@@ -99,7 +98,7 @@ static int tipc_core_start(void)
 
 	get_random_bytes(&tipc_random, sizeof(tipc_random));
 
-	err = tipc_ref_table_init(tipc_max_ports, tipc_random);
+	err = tipc_sk_ref_table_init(tipc_max_ports, tipc_random);
 	if (err)
 		goto out_reftbl;
 
@@ -139,7 +138,7 @@ out_socket:
 out_netlink:
 	tipc_nametbl_stop();
 out_nametbl:
-	tipc_ref_table_stop();
+	tipc_sk_ref_table_stop();
 out_reftbl:
 	return err;
 }
diff --git a/net/tipc/core.h b/net/tipc/core.h
index bb26ed1ee966..84602137ce20 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -41,6 +41,7 @@
 
 #include <linux/tipc.h>
 #include <linux/tipc_config.h>
+#include <linux/tipc_netlink.h>
 #include <linux/types.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
@@ -81,6 +82,7 @@ extern u32 tipc_own_addr __read_mostly;
 extern int tipc_max_ports __read_mostly;
 extern int tipc_net_id __read_mostly;
 extern int sysctl_tipc_rmem[3] __read_mostly;
+extern int sysctl_tipc_named_timeout __read_mostly;
 
 /*
  * Other global variables
@@ -187,8 +189,12 @@ static inline void k_term_timer(struct timer_list *timer)
 
 struct tipc_skb_cb {
 	void *handle;
-	bool deferred;
 	struct sk_buff *tail;
+	bool deferred;
+	bool wakeup_pending;
+	bool bundling;
+	u16 chain_sz;
+	u16 chain_imp;
 };
 
 #define TIPC_SKB_CB(__skb)	((struct tipc_skb_cb *)&((__skb)->cb[0]))
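The enlarged tipc_skb_cb must still fit in the 48-byte skb->cb[] scratch area that every sk_buff carries. A hedged sketch of the usual compile-time guard (not part of this patch; tipc_skb_cb_size_check is a hypothetical name):

	/* Sketch only: fail the build if the private cb struct outgrows skb->cb. */
	static inline void tipc_skb_cb_size_check(void)
	{
		BUILD_BUG_ON(sizeof(struct tipc_skb_cb) >
			     sizeof(((struct sk_buff *)0)->cb));
	}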
diff --git a/net/tipc/link.c b/net/tipc/link.c
index fb1485dc6736..23bcc1132365 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -36,11 +36,12 @@
 
 #include "core.h"
 #include "link.h"
-#include "port.h"
+#include "bcast.h"
 #include "socket.h"
 #include "name_distr.h"
 #include "discover.h"
 #include "config.h"
+#include "netlink.h"
 
 #include <linux/pkt_sched.h>
 
@@ -51,6 +52,30 @@ static const char *link_co_err = "Link changeover error, ";
 static const char *link_rst_msg = "Resetting link ";
 static const char *link_unk_evt = "Unknown link event ";
 
+static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
+	[TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
+	[TIPC_NLA_LINK_NAME] = {
+		.type = NLA_STRING,
+		.len = TIPC_MAX_LINK_NAME
+	},
+	[TIPC_NLA_LINK_MTU] = { .type = NLA_U32 },
+	[TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG },
+	[TIPC_NLA_LINK_UP] = { .type = NLA_FLAG },
+	[TIPC_NLA_LINK_ACTIVE] = { .type = NLA_FLAG },
+	[TIPC_NLA_LINK_PROP] = { .type = NLA_NESTED },
+	[TIPC_NLA_LINK_STATS] = { .type = NLA_NESTED },
+	[TIPC_NLA_LINK_RX] = { .type = NLA_U32 },
+	[TIPC_NLA_LINK_TX] = { .type = NLA_U32 }
+};
+
+/* Properties valid for media, bearer and link */
+static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
+	[TIPC_NLA_PROP_UNSPEC] = { .type = NLA_UNSPEC },
+	[TIPC_NLA_PROP_PRIO] = { .type = NLA_U32 },
+	[TIPC_NLA_PROP_TOL] = { .type = NLA_U32 },
+	[TIPC_NLA_PROP_WIN] = { .type = NLA_U32 }
+};
+
 /*
  * Out-of-range value for link session numbers
  */
@@ -124,18 +149,6 @@ static void link_init_max_pkt(struct tipc_link *l_ptr)
 	l_ptr->max_pkt_probes = 0;
 }
 
-static u32 link_next_sent(struct tipc_link *l_ptr)
-{
-	if (l_ptr->next_out)
-		return buf_seqno(l_ptr->next_out);
-	return mod(l_ptr->next_out_no);
-}
-
-static u32 link_last_sent(struct tipc_link *l_ptr)
-{
-	return mod(link_next_sent(l_ptr) - 1);
-}
-
 /*
  *  Simple non-static link routines (i.e. referenced outside this file)
  */
@@ -158,14 +171,17 @@ int tipc_link_is_active(struct tipc_link *l_ptr)
  */
 static void link_timeout(struct tipc_link *l_ptr)
 {
+	struct sk_buff *skb;
+
 	tipc_node_lock(l_ptr->owner);
 
 	/* update counters used in statistical profiling of send traffic */
-	l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
+	l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->outqueue);
 	l_ptr->stats.queue_sz_counts++;
 
-	if (l_ptr->first_out) {
-		struct tipc_msg *msg = buf_msg(l_ptr->first_out);
+	skb = skb_peek(&l_ptr->outqueue);
+	if (skb) {
+		struct tipc_msg *msg = buf_msg(skb);
 		u32 length = msg_size(msg);
 
 		if ((msg_user(msg) == MSG_FRAGMENTER) &&
@@ -193,11 +209,10 @@ static void link_timeout(struct tipc_link *l_ptr)
 	}
 
 	/* do all other link processing performed on a periodic basis */
-
 	link_state_event(l_ptr, TIMEOUT_EVT);
 
 	if (l_ptr->next_out)
-		tipc_link_push_queue(l_ptr);
+		tipc_link_push_packets(l_ptr);
 
 	tipc_node_unlock(l_ptr->owner);
 }
@@ -225,9 +240,10 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
 	char addr_string[16];
 	u32 peer = n_ptr->addr;
 
-	if (n_ptr->link_cnt >= 2) {
+	if (n_ptr->link_cnt >= MAX_BEARERS) {
 		tipc_addr_string_fill(addr_string, n_ptr->addr);
-		pr_err("Attempt to establish third link to %s\n", addr_string);
+		pr_err("Attempt to establish %uth link to %s. Max %u allowed.\n",
+		       n_ptr->link_cnt, addr_string, MAX_BEARERS);
 		return NULL;
 	}
 
@@ -275,7 +291,9 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
 	link_init_max_pkt(l_ptr);
 
 	l_ptr->next_out_no = 1;
-	INIT_LIST_HEAD(&l_ptr->waiting_ports);
+	__skb_queue_head_init(&l_ptr->outqueue);
+	__skb_queue_head_init(&l_ptr->deferred_queue);
+	skb_queue_head_init(&l_ptr->waiting_sks);
 
 	link_reset_statistics(l_ptr);
 
@@ -322,77 +340,47 @@ void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
 }
 
 /**
- * link_schedule_port - schedule port for deferred sending
- * @l_ptr: pointer to link
- * @origport: reference to sending port
- * @sz: amount of data to be sent
- *
- * Schedules port for renewed sending of messages after link congestion
- * has abated.
+ * link_schedule_user - schedule user for wakeup after congestion
+ * @link: congested link
+ * @oport: sending port
+ * @chain_sz: size of buffer chain that was attempted sent
+ * @imp: importance of message attempted sent
+ * Create pseudo msg to send back to user when congestion abates
  */
-static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
+static bool link_schedule_user(struct tipc_link *link, u32 oport,
+			       uint chain_sz, uint imp)
 {
-	struct tipc_port *p_ptr;
-	struct tipc_sock *tsk;
+	struct sk_buff *buf;
 
-	spin_lock_bh(&tipc_port_list_lock);
-	p_ptr = tipc_port_lock(origport);
-	if (p_ptr) {
-		if (!list_empty(&p_ptr->wait_list))
-			goto exit;
-		tsk = tipc_port_to_sock(p_ptr);
-		tsk->link_cong = 1;
-		p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
-		list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
-		l_ptr->stats.link_congs++;
-exit:
-		tipc_port_unlock(p_ptr);
-	}
-	spin_unlock_bh(&tipc_port_list_lock);
-	return -ELINKCONG;
+	buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0, tipc_own_addr,
+			      tipc_own_addr, oport, 0, 0);
+	if (!buf)
+		return false;
+	TIPC_SKB_CB(buf)->chain_sz = chain_sz;
+	TIPC_SKB_CB(buf)->chain_imp = imp;
+	skb_queue_tail(&link->waiting_sks, buf);
+	link->stats.link_congs++;
+	return true;
 }
 
-void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all)
+/**
+ * link_prepare_wakeup - prepare users for wakeup after congestion
+ * @link: congested link
+ * Move a number of waiting users, as permitted by available space in
+ * the send queue, from link wait queue to node wait queue for wakeup
+ */
+static void link_prepare_wakeup(struct tipc_link *link)
 {
-	struct tipc_port *p_ptr;
-	struct tipc_sock *tsk;
-	struct tipc_port *temp_p_ptr;
-	int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
+	uint pend_qsz = skb_queue_len(&link->outqueue);
+	struct sk_buff *skb, *tmp;
 
-	if (all)
-		win = 100000;
-	if (win <= 0)
-		return;
-	if (!spin_trylock_bh(&tipc_port_list_lock))
-		return;
-	if (link_congested(l_ptr))
-		goto exit;
-	list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
-				 wait_list) {
-		if (win <= 0)
+	skb_queue_walk_safe(&link->waiting_sks, skb, tmp) {
+		if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(skb)->chain_imp])
 			break;
-		tsk = tipc_port_to_sock(p_ptr);
-		list_del_init(&p_ptr->wait_list);
-		spin_lock_bh(p_ptr->lock);
-		tsk->link_cong = 0;
-		tipc_sock_wakeup(tsk);
-		win -= p_ptr->waiting_pkts;
-		spin_unlock_bh(p_ptr->lock);
+		pend_qsz += TIPC_SKB_CB(skb)->chain_sz;
+		skb_unlink(skb, &link->waiting_sks);
+		skb_queue_tail(&link->owner->waiting_sks, skb);
 	}
-
-exit:
-	spin_unlock_bh(&tipc_port_list_lock);
-}
-
-/**
- * link_release_outqueue - purge link's outbound message queue
- * @l_ptr: pointer to link
- */
-static void link_release_outqueue(struct tipc_link *l_ptr)
-{
-	kfree_skb_list(l_ptr->first_out);
-	l_ptr->first_out = NULL;
-	l_ptr->out_queue_size = 0;
 }
 
 /**
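Taken together, the two functions above implement the new congestion-wakeup pipeline: link_schedule_user() tags a SOCK_WAKEUP pseudo-message with the blocked chain's size and importance in the skb control block and parks it on link->waiting_sks; link_prepare_wakeup() later moves as many of those as the per-importance queue limits allow onto the owning node's waiting_sks queue. A sketch of the final consumer stage, mirroring tipc_bclink_wakeup_users() from this patch and assuming it runs with no locks held (drain_wakeups is a hypothetical name):

	/* Sketch only: deliver each parked SOCK_WAKEUP message back to its
	 * socket via tipc_sk_rcv(), letting the blocked sender resume.
	 */
	static void drain_wakeups(struct sk_buff_head *waiting_sks)
	{
		struct sk_buff *skb;

		while ((skb = skb_dequeue(waiting_sks)))
			tipc_sk_rcv(skb);
	}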
@@ -411,11 +399,9 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr)
  */
 void tipc_link_purge_queues(struct tipc_link *l_ptr)
 {
-	kfree_skb_list(l_ptr->oldest_deferred_in);
-	kfree_skb_list(l_ptr->first_out);
+	__skb_queue_purge(&l_ptr->deferred_queue);
+	__skb_queue_purge(&l_ptr->outqueue);
 	tipc_link_reset_fragments(l_ptr);
-	kfree_skb(l_ptr->proto_msg_queue);
-	l_ptr->proto_msg_queue = NULL;
 }
 
 void tipc_link_reset(struct tipc_link *l_ptr)
@@ -423,6 +409,7 @@ void tipc_link_reset(struct tipc_link *l_ptr)
 	u32 prev_state = l_ptr->state;
 	u32 checkpoint = l_ptr->next_in_no;
 	int was_active_link = tipc_link_is_active(l_ptr);
+	struct tipc_node *owner = l_ptr->owner;
 
 	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
 
@@ -446,24 +433,16 @@ void tipc_link_reset(struct tipc_link *l_ptr)
 	}
 
 	/* Clean up all queues: */
-	link_release_outqueue(l_ptr);
-	kfree_skb(l_ptr->proto_msg_queue);
-	l_ptr->proto_msg_queue = NULL;
-	kfree_skb_list(l_ptr->oldest_deferred_in);
-	if (!list_empty(&l_ptr->waiting_ports))
-		tipc_link_wakeup_ports(l_ptr, 1);
-
-	l_ptr->retransm_queue_head = 0;
-	l_ptr->retransm_queue_size = 0;
-	l_ptr->last_out = NULL;
-	l_ptr->first_out = NULL;
+	__skb_queue_purge(&l_ptr->outqueue);
+	__skb_queue_purge(&l_ptr->deferred_queue);
+	if (!skb_queue_empty(&l_ptr->waiting_sks)) {
+		skb_queue_splice_init(&l_ptr->waiting_sks, &owner->waiting_sks);
+		owner->action_flags |= TIPC_WAKEUP_USERS;
+	}
 	l_ptr->next_out = NULL;
 	l_ptr->unacked_window = 0;
 	l_ptr->checkpoint = 1;
 	l_ptr->next_out_no = 1;
-	l_ptr->deferred_inqueue_sz = 0;
-	l_ptr->oldest_deferred_in = NULL;
-	l_ptr->newest_deferred_in = NULL;
 	l_ptr->fsm_msg_cnt = 0;
 	l_ptr->stale_count = 0;
 	link_reset_statistics(l_ptr);
@@ -685,41 +664,46 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
  * - For all other messages we discard the buffer and return -EHOSTUNREACH
  * - For TIPC internal messages we also reset the link
  */
-static int tipc_link_cong(struct tipc_link *link, struct sk_buff *buf)
+static int tipc_link_cong(struct tipc_link *link, struct sk_buff_head *list)
 {
-	struct tipc_msg *msg = buf_msg(buf);
-	uint psz = msg_size(msg);
+	struct sk_buff *skb = skb_peek(list);
+	struct tipc_msg *msg = buf_msg(skb);
 	uint imp = tipc_msg_tot_importance(msg);
 	u32 oport = msg_tot_origport(msg);
 
-	if (likely(imp <= TIPC_CRITICAL_IMPORTANCE)) {
-		if (!msg_errcode(msg) && !msg_reroute_cnt(msg)) {
-			link_schedule_port(link, oport, psz);
-			return -ELINKCONG;
-		}
-	} else {
+	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
 		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
 		tipc_link_reset(link);
+		goto drop;
 	}
-	kfree_skb_list(buf);
+	if (unlikely(msg_errcode(msg)))
+		goto drop;
+	if (unlikely(msg_reroute_cnt(msg)))
+		goto drop;
+	if (TIPC_SKB_CB(skb)->wakeup_pending)
+		return -ELINKCONG;
+	if (link_schedule_user(link, oport, skb_queue_len(list), imp))
+		return -ELINKCONG;
+drop:
+	__skb_queue_purge(list);
 	return -EHOSTUNREACH;
 }
 
 /**
  * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
  * @link: link to use
- * @buf: chain of buffers containing message
+ * @list: chain of buffers containing message
+ *
  * Consumes the buffer chain, except when returning -ELINKCONG
  * Returns 0 if success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket
  * user data messages) or -EHOSTUNREACH (all other messages/senders)
  * Only the socket functions tipc_send_stream() and tipc_send_packet() need
  * to act on the return value, since they may need to do more send attempts.
  */
-int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *buf)
+int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list)
 {
-	struct tipc_msg *msg = buf_msg(buf);
+	struct tipc_msg *msg = buf_msg(skb_peek(list));
 	uint psz = msg_size(msg);
-	uint qsz = link->out_queue_size;
 	uint sndlim = link->queue_limit[0];
 	uint imp = tipc_msg_tot_importance(msg);
 	uint mtu = link->max_pkt;
@@ -727,71 +711,83 @@ int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *buf)
727 uint seqno = link->next_out_no; 711 uint seqno = link->next_out_no;
728 uint bc_last_in = link->owner->bclink.last_in; 712 uint bc_last_in = link->owner->bclink.last_in;
729 struct tipc_media_addr *addr = &link->media_addr; 713 struct tipc_media_addr *addr = &link->media_addr;
730 struct sk_buff *next = buf->next; 714 struct sk_buff_head *outqueue = &link->outqueue;
715 struct sk_buff *skb, *tmp;
731 716
732 /* Match queue limits against msg importance: */ 717 /* Match queue limits against msg importance: */
733 if (unlikely(qsz >= link->queue_limit[imp])) 718 if (unlikely(skb_queue_len(outqueue) >= link->queue_limit[imp]))
734 return tipc_link_cong(link, buf); 719 return tipc_link_cong(link, list);
735 720
736 /* Has valid packet limit been used ? */ 721 /* Has valid packet limit been used ? */
737 if (unlikely(psz > mtu)) { 722 if (unlikely(psz > mtu)) {
738 kfree_skb_list(buf); 723 __skb_queue_purge(list);
739 return -EMSGSIZE; 724 return -EMSGSIZE;
740 } 725 }
741 726
742 /* Prepare each packet for sending, and add to outqueue: */ 727 /* Prepare each packet for sending, and add to outqueue: */
743 while (buf) { 728 skb_queue_walk_safe(list, skb, tmp) {
744 next = buf->next; 729 __skb_unlink(skb, list);
745 msg = buf_msg(buf); 730 msg = buf_msg(skb);
746 msg_set_word(msg, 2, ((ack << 16) | mod(seqno))); 731 msg_set_word(msg, 2, ((ack << 16) | mod(seqno)));
747 msg_set_bcast_ack(msg, bc_last_in); 732 msg_set_bcast_ack(msg, bc_last_in);
748 733
749 if (!link->first_out) { 734 if (skb_queue_len(outqueue) < sndlim) {
750 link->first_out = buf; 735 __skb_queue_tail(outqueue, skb);
751 } else if (qsz < sndlim) { 736 tipc_bearer_send(link->bearer_id, skb, addr);
752 link->last_out->next = buf; 737 link->next_out = NULL;
753 } else if (tipc_msg_bundle(link->last_out, buf, mtu)) { 738 link->unacked_window = 0;
739 } else if (tipc_msg_bundle(outqueue, skb, mtu)) {
754 link->stats.sent_bundled++; 740 link->stats.sent_bundled++;
755 buf = next;
756 next = buf->next;
757 continue; 741 continue;
758 } else if (tipc_msg_make_bundle(&buf, mtu, link->addr)) { 742 } else if (tipc_msg_make_bundle(outqueue, skb, mtu,
743 link->addr)) {
759 link->stats.sent_bundled++; 744 link->stats.sent_bundled++;
760 link->stats.sent_bundles++; 745 link->stats.sent_bundles++;
761 link->last_out->next = buf;
762 if (!link->next_out) 746 if (!link->next_out)
763 link->next_out = buf; 747 link->next_out = skb_peek_tail(outqueue);
764 } else { 748 } else {
765 link->last_out->next = buf; 749 __skb_queue_tail(outqueue, skb);
766 if (!link->next_out) 750 if (!link->next_out)
767 link->next_out = buf; 751 link->next_out = skb;
768 }
769
770 /* Send packet if possible: */
771 if (likely(++qsz <= sndlim)) {
772 tipc_bearer_send(link->bearer_id, buf, addr);
773 link->next_out = next;
774 link->unacked_window = 0;
775 } 752 }
776 seqno++; 753 seqno++;
777 link->last_out = buf;
778 buf = next;
779 } 754 }
780 link->next_out_no = seqno; 755 link->next_out_no = seqno;
781 link->out_queue_size = qsz;
782 return 0; 756 return 0;
783} 757}
784 758
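The rewritten loop above tries, in order: immediate transmission while the backlog is still below the send window (queue_limit[0]), piggy-backing onto an open bundle at the queue tail, converting the packet into a fresh bundle that later packets can join, and finally plain queueing behind next_out. A condensed sketch of that decision ladder, with illustrative helper names that are not the kernel's:

    if (qlen < send_window) {
            transmit_now(skb);                  /* window open: send at once */
    } else if (append_to_open_bundle(tail_skb, skb, mtu)) {
            ;                                   /* consumed by the existing bundle */
    } else if (wrap_in_new_bundle(&skb, mtu)) {
            enqueue_unsent(skb);                /* later packets may join it */
    } else {
            enqueue_unsent(skb);                /* too big to bundle: queued as-is */
    }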
759static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
760{
761 __skb_queue_head_init(list);
762 __skb_queue_tail(list, skb);
763}
764
765static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
766{
767 struct sk_buff_head head;
768
769 skb2list(skb, &head);
770 return __tipc_link_xmit(link, &head);
771}
772
773int tipc_link_xmit_skb(struct sk_buff *skb, u32 dnode, u32 selector)
774{
775 struct sk_buff_head head;
776
777 skb2list(skb, &head);
778 return tipc_link_xmit(&head, dnode, selector);
779}
780
785/** 781/**
786 * tipc_link_xmit() is the general link level function for message sending 782 * tipc_link_xmit() is the general link level function for message sending
787 * @buf: chain of buffers containing message 783 * @list: chain of buffers containing message
788 * @dsz: amount of user data to be sent 784 * @dsz: amount of user data to be sent
789 * @dnode: address of destination node 785 * @dnode: address of destination node
790 * @selector: a number used for deterministic link selection 786 * @selector: a number used for deterministic link selection
791 * Consumes the buffer chain, except when returning -ELINKCONG 787 * Consumes the buffer chain, except when returning -ELINKCONG
792 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE 788 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
793 */ 789 */
794int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector) 790int tipc_link_xmit(struct sk_buff_head *list, u32 dnode, u32 selector)
795{ 791{
796 struct tipc_link *link = NULL; 792 struct tipc_link *link = NULL;
797 struct tipc_node *node; 793 struct tipc_node *node;
@@ -802,17 +798,22 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector)
802 tipc_node_lock(node); 798 tipc_node_lock(node);
803 link = node->active_links[selector & 1]; 799 link = node->active_links[selector & 1];
804 if (link) 800 if (link)
805 rc = __tipc_link_xmit(link, buf); 801 rc = __tipc_link_xmit(link, list);
806 tipc_node_unlock(node); 802 tipc_node_unlock(node);
807 } 803 }
808 804
809 if (link) 805 if (link)
810 return rc; 806 return rc;
811 807
812 if (likely(in_own_node(dnode))) 808 if (likely(in_own_node(dnode))) {
813 return tipc_sk_rcv(buf); 809 /* As a node local message chain never contains more than one
810 * buffer, we just need to dequeue one SKB buffer from the
811 * head list.
812 */
813 return tipc_sk_rcv(__skb_dequeue(list));
814 }
815 __skb_queue_purge(list);
814 816
815 kfree_skb_list(buf);
816 return rc; 817 return rc;
817} 818}
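Since -ELINKCONG is the only return code that leaves the buffer chain with the caller, a sending socket typically blocks until congestion abates and then retries. A minimal sketch of that caller pattern; the wait helper is an assumption for illustration, not quoted from this patch:

    do {
            rc = tipc_link_xmit(&head, dnode, portid);
            if (likely(rc != -ELINKCONG))
                    break;                      /* chain consumed, or hard error */
            rc = wait_for_link_congestion(sk, &timeout);    /* assumed helper */
    } while (!rc);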
818 819
@@ -826,17 +827,17 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector)
826 */ 827 */
827static void tipc_link_sync_xmit(struct tipc_link *link) 828static void tipc_link_sync_xmit(struct tipc_link *link)
828{ 829{
829 struct sk_buff *buf; 830 struct sk_buff *skb;
830 struct tipc_msg *msg; 831 struct tipc_msg *msg;
831 832
832 buf = tipc_buf_acquire(INT_H_SIZE); 833 skb = tipc_buf_acquire(INT_H_SIZE);
833 if (!buf) 834 if (!skb)
834 return; 835 return;
835 836
836 msg = buf_msg(buf); 837 msg = buf_msg(skb);
837 tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, link->addr); 838 tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, link->addr);
838 msg_set_last_bcast(msg, link->owner->bclink.acked); 839 msg_set_last_bcast(msg, link->owner->bclink.acked);
839 __tipc_link_xmit(link, buf); 840 __tipc_link_xmit_skb(link, skb);
840} 841}
841 842
842/* 843/*
@@ -856,85 +857,46 @@ static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
856 kfree_skb(buf); 857 kfree_skb(buf);
857} 858}
858 859
860struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list,
861 const struct sk_buff *skb)
862{
863 if (skb_queue_is_last(list, skb))
864 return NULL;
865 return skb->next;
866}
867
859/* 868/*
860 * tipc_link_push_packet: Push one unsent packet to the media 869 * tipc_link_push_packets - push unsent packets to bearer
870 *
871 * Push out the unsent messages of a link where congestion
872 * has abated. Node is locked.
873 *
874 * Called with node locked
861 */ 875 */
862static u32 tipc_link_push_packet(struct tipc_link *l_ptr) 876void tipc_link_push_packets(struct tipc_link *l_ptr)
863{ 877{
864 struct sk_buff *buf = l_ptr->first_out; 878 struct sk_buff_head *outqueue = &l_ptr->outqueue;
865 u32 r_q_size = l_ptr->retransm_queue_size; 879 struct sk_buff *skb = l_ptr->next_out;
866 u32 r_q_head = l_ptr->retransm_queue_head; 880 struct tipc_msg *msg;
867 881 u32 next, first;
868 /* Step to position where retransmission failed, if any, */
869 /* consider that buffers may have been released in meantime */
870 if (r_q_size && buf) {
871 u32 last = lesser(mod(r_q_head + r_q_size),
872 link_last_sent(l_ptr));
873 u32 first = buf_seqno(buf);
874
875 while (buf && less(first, r_q_head)) {
876 first = mod(first + 1);
877 buf = buf->next;
878 }
879 l_ptr->retransm_queue_head = r_q_head = first;
880 l_ptr->retransm_queue_size = r_q_size = mod(last - first);
881 }
882
883 /* Continue retransmission now, if there is anything: */
884 if (r_q_size && buf) {
885 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
886 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
887 tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
888 l_ptr->retransm_queue_head = mod(++r_q_head);
889 l_ptr->retransm_queue_size = --r_q_size;
890 l_ptr->stats.retransmitted++;
891 return 0;
892 }
893
894 /* Send deferred protocol message, if any: */
895 buf = l_ptr->proto_msg_queue;
896 if (buf) {
897 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
898 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
899 tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
900 l_ptr->unacked_window = 0;
901 kfree_skb(buf);
902 l_ptr->proto_msg_queue = NULL;
903 return 0;
904 }
905 882
906 /* Send one deferred data message, if send window not full: */ 883 skb_queue_walk_from(outqueue, skb) {
907 buf = l_ptr->next_out; 884 msg = buf_msg(skb);
908 if (buf) { 885 next = msg_seqno(msg);
909 struct tipc_msg *msg = buf_msg(buf); 886 first = buf_seqno(skb_peek(outqueue));
910 u32 next = msg_seqno(msg);
911 u32 first = buf_seqno(l_ptr->first_out);
912 887
913 if (mod(next - first) < l_ptr->queue_limit[0]) { 888 if (mod(next - first) < l_ptr->queue_limit[0]) {
914 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 889 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
915 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 890 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
916 tipc_bearer_send(l_ptr->bearer_id, buf,
917 &l_ptr->media_addr);
918 if (msg_user(msg) == MSG_BUNDLER) 891 if (msg_user(msg) == MSG_BUNDLER)
919 msg_set_type(msg, BUNDLE_CLOSED); 892 TIPC_SKB_CB(skb)->bundling = false;
920 l_ptr->next_out = buf->next; 893 tipc_bearer_send(l_ptr->bearer_id, skb,
921 return 0; 894 &l_ptr->media_addr);
895 l_ptr->next_out = tipc_skb_queue_next(outqueue, skb);
896 } else {
897 break;
922 } 898 }
923 } 899 }
924 return 1;
925}
926
927/*
928 * push_queue(): push out the unsent messages of a link where
929 * congestion has abated. Node is locked
930 */
931void tipc_link_push_queue(struct tipc_link *l_ptr)
932{
933 u32 res;
934
935 do {
936 res = tipc_link_push_packet(l_ptr);
937 } while (!res);
938} 900}
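The window test mod(next - first) < queue_limit[0] relies on modulo-2^16 sequence arithmetic, so it stays correct across sequence-number wrap. A self-contained illustration:

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t mod16(uint32_t x) { return x & 0xffffu; }

    int main(void)
    {
            uint32_t first = 65530, next = 4, window = 50;

            /* 10 packets in flight even though next < first numerically */
            printf("in flight: %u, inside window: %d\n",
                   mod16(next - first), mod16(next - first) < window);
            return 0;
    }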
939 901
940void tipc_link_reset_all(struct tipc_node *node) 902void tipc_link_reset_all(struct tipc_node *node)
@@ -998,20 +960,20 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
998 } 960 }
999} 961}
1000 962
1001void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf, 963void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
1002 u32 retransmits) 964 u32 retransmits)
1003{ 965{
1004 struct tipc_msg *msg; 966 struct tipc_msg *msg;
1005 967
1006 if (!buf) 968 if (!skb)
1007 return; 969 return;
1008 970
1009 msg = buf_msg(buf); 971 msg = buf_msg(skb);
1010 972
1011 /* Detect repeated retransmit failures */ 973 /* Detect repeated retransmit failures */
1012 if (l_ptr->last_retransmitted == msg_seqno(msg)) { 974 if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1013 if (++l_ptr->stale_count > 100) { 975 if (++l_ptr->stale_count > 100) {
1014 link_retransmit_failure(l_ptr, buf); 976 link_retransmit_failure(l_ptr, skb);
1015 return; 977 return;
1016 } 978 }
1017 } else { 979 } else {
@@ -1019,38 +981,29 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
1019 l_ptr->stale_count = 1; 981 l_ptr->stale_count = 1;
1020 } 982 }
1021 983
1022 while (retransmits && (buf != l_ptr->next_out) && buf) { 984 skb_queue_walk_from(&l_ptr->outqueue, skb) {
1023 msg = buf_msg(buf); 985 if (!retransmits || skb == l_ptr->next_out)
986 break;
987 msg = buf_msg(skb);
1024 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 988 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1025 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 989 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1026 tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr); 990 tipc_bearer_send(l_ptr->bearer_id, skb, &l_ptr->media_addr);
1027 buf = buf->next;
1028 retransmits--; 991 retransmits--;
1029 l_ptr->stats.retransmitted++; 992 l_ptr->stats.retransmitted++;
1030 } 993 }
1031
1032 l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
1033} 994}
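The stale-count guard above declares the link broken only after more than 100 consecutive retransmit requests for the same sequence number. A standalone model of that counter, not the kernel's struct:

    #include <stdbool.h>
    #include <stdint.h>

    struct stale_guard {
            uint32_t last_seqno;
            uint32_t count;
    };

    /* True once the same seqno has been NACKed more than 100 times in a row */
    static bool stale_retransmit(struct stale_guard *g, uint32_t seqno)
    {
            if (g->last_seqno == seqno)
                    return ++g->count > 100;
            g->last_seqno = seqno;
            g->count = 1;
            return false;
    }

    int main(void)
    {
            struct stale_guard g = { 0, 0 };
            int failed = 0;

            for (int i = 0; i < 101; i++)
                    failed = stale_retransmit(&g, 42);
            return !failed;         /* exits 0 once the guard has tripped */
    }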
1034 995
1035/** 996static void link_retrieve_defq(struct tipc_link *link,
1036 * link_insert_deferred_queue - insert deferred messages back into receive chain 997 struct sk_buff_head *list)
1037 */
1038static struct sk_buff *link_insert_deferred_queue(struct tipc_link *l_ptr,
1039 struct sk_buff *buf)
1040{ 998{
1041 u32 seq_no; 999 u32 seq_no;
1042 1000
1043 if (l_ptr->oldest_deferred_in == NULL) 1001 if (skb_queue_empty(&link->deferred_queue))
1044 return buf; 1002 return;
1045 1003
1046 seq_no = buf_seqno(l_ptr->oldest_deferred_in); 1004 seq_no = buf_seqno(skb_peek(&link->deferred_queue));
1047 if (seq_no == mod(l_ptr->next_in_no)) { 1005 if (seq_no == mod(link->next_in_no))
1048 l_ptr->newest_deferred_in->next = buf; 1006 skb_queue_splice_tail_init(&link->deferred_queue, list);
1049 buf = l_ptr->oldest_deferred_in;
1050 l_ptr->oldest_deferred_in = NULL;
1051 l_ptr->deferred_inqueue_sz = 0;
1052 }
1053 return buf;
1054} 1007}
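For example, with next_in_no = 5 and the deferred queue holding sequence numbers 5, 6 and 7, the whole run is spliced onto the receive list in a single operation; had the queue started at 6, it would stay put until packet 5 arrives and closes the gap.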
1055 1008
1056/** 1009/**
@@ -1110,43 +1063,42 @@ static int link_recv_buf_validate(struct sk_buff *buf)
1110 1063
1111/** 1064/**
1112 * tipc_rcv - process TIPC packets/messages arriving from off-node 1065 * tipc_rcv - process TIPC packets/messages arriving from off-node
1113 * @head: pointer to message buffer chain 1066 * @skb: TIPC packet
1114 * @b_ptr: pointer to the bearer the message arrived on 1067 * @b_ptr: pointer to the bearer the message arrived on
1115 * 1068 *
1116 * Invoked with no locks held. Bearer pointer must point to a valid bearer 1069 * Invoked with no locks held. Bearer pointer must point to a valid bearer
1117 * structure (i.e. cannot be NULL), but bearer can be inactive. 1070 * structure (i.e. cannot be NULL), but bearer can be inactive.
1118 */ 1071 */
1119void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr) 1072void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
1120{ 1073{
1121 while (head) { 1074 struct sk_buff_head head;
1122 struct tipc_node *n_ptr; 1075 struct tipc_node *n_ptr;
1123 struct tipc_link *l_ptr; 1076 struct tipc_link *l_ptr;
1124 struct sk_buff *crs; 1077 struct sk_buff *skb1, *tmp;
1125 struct sk_buff *buf = head; 1078 struct tipc_msg *msg;
1126 struct tipc_msg *msg; 1079 u32 seq_no;
1127 u32 seq_no; 1080 u32 ackd;
1128 u32 ackd; 1081 u32 released;
1129 u32 released = 0;
1130 1082
1131 head = head->next; 1083 skb2list(skb, &head);
1132 buf->next = NULL;
1133 1084
1085 while ((skb = __skb_dequeue(&head))) {
1134 /* Ensure message is well-formed */ 1086 /* Ensure message is well-formed */
1135 if (unlikely(!link_recv_buf_validate(buf))) 1087 if (unlikely(!link_recv_buf_validate(skb)))
1136 goto discard; 1088 goto discard;
1137 1089
1138 /* Ensure message data is a single contiguous unit */ 1090 /* Ensure message data is a single contiguous unit */
1139 if (unlikely(skb_linearize(buf))) 1091 if (unlikely(skb_linearize(skb)))
1140 goto discard; 1092 goto discard;
1141 1093
1142 /* Handle arrival of a non-unicast link message */ 1094 /* Handle arrival of a non-unicast link message */
1143 msg = buf_msg(buf); 1095 msg = buf_msg(skb);
1144 1096
1145 if (unlikely(msg_non_seq(msg))) { 1097 if (unlikely(msg_non_seq(msg))) {
1146 if (msg_user(msg) == LINK_CONFIG) 1098 if (msg_user(msg) == LINK_CONFIG)
1147 tipc_disc_rcv(buf, b_ptr); 1099 tipc_disc_rcv(skb, b_ptr);
1148 else 1100 else
1149 tipc_bclink_rcv(buf); 1101 tipc_bclink_rcv(skb);
1150 continue; 1102 continue;
1151 } 1103 }
1152 1104
@@ -1185,31 +1137,30 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
1185 if (n_ptr->bclink.recv_permitted) 1137 if (n_ptr->bclink.recv_permitted)
1186 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg)); 1138 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
1187 1139
1188 crs = l_ptr->first_out; 1140 released = 0;
1189 while ((crs != l_ptr->next_out) && 1141 skb_queue_walk_safe(&l_ptr->outqueue, skb1, tmp) {
1190 less_eq(buf_seqno(crs), ackd)) { 1142 if (skb1 == l_ptr->next_out ||
1191 struct sk_buff *next = crs->next; 1143 more(buf_seqno(skb1), ackd))
1192 kfree_skb(crs); 1144 break;
1193 crs = next; 1145 __skb_unlink(skb1, &l_ptr->outqueue);
1194 released++; 1146 kfree_skb(skb1);
1195 } 1147 released = 1;
1196 if (released) {
1197 l_ptr->first_out = crs;
1198 l_ptr->out_queue_size -= released;
1199 } 1148 }
1200 1149
1201 /* Try sending any messages link endpoint has pending */ 1150 /* Try sending any messages link endpoint has pending */
1202 if (unlikely(l_ptr->next_out)) 1151 if (unlikely(l_ptr->next_out))
1203 tipc_link_push_queue(l_ptr); 1152 tipc_link_push_packets(l_ptr);
1204 1153
1205 if (unlikely(!list_empty(&l_ptr->waiting_ports))) 1154 if (released && !skb_queue_empty(&l_ptr->waiting_sks)) {
1206 tipc_link_wakeup_ports(l_ptr, 0); 1155 link_prepare_wakeup(l_ptr);
1156 l_ptr->owner->action_flags |= TIPC_WAKEUP_USERS;
1157 }
1207 1158
1208 /* Process the incoming packet */ 1159 /* Process the incoming packet */
1209 if (unlikely(!link_working_working(l_ptr))) { 1160 if (unlikely(!link_working_working(l_ptr))) {
1210 if (msg_user(msg) == LINK_PROTOCOL) { 1161 if (msg_user(msg) == LINK_PROTOCOL) {
1211 tipc_link_proto_rcv(l_ptr, buf); 1162 tipc_link_proto_rcv(l_ptr, skb);
1212 head = link_insert_deferred_queue(l_ptr, head); 1163 link_retrieve_defq(l_ptr, &head);
1213 tipc_node_unlock(n_ptr); 1164 tipc_node_unlock(n_ptr);
1214 continue; 1165 continue;
1215 } 1166 }
@@ -1219,8 +1170,7 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
1219 1170
1220 if (link_working_working(l_ptr)) { 1171 if (link_working_working(l_ptr)) {
1221 /* Re-insert buffer in front of queue */ 1172 /* Re-insert buffer in front of queue */
1222 buf->next = head; 1173 __skb_queue_head(&head, skb);
1223 head = buf;
1224 tipc_node_unlock(n_ptr); 1174 tipc_node_unlock(n_ptr);
1225 continue; 1175 continue;
1226 } 1176 }
@@ -1229,33 +1179,33 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
1229 1179
1230 /* Link is now in state WORKING_WORKING */ 1180 /* Link is now in state WORKING_WORKING */
1231 if (unlikely(seq_no != mod(l_ptr->next_in_no))) { 1181 if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
1232 link_handle_out_of_seq_msg(l_ptr, buf); 1182 link_handle_out_of_seq_msg(l_ptr, skb);
1233 head = link_insert_deferred_queue(l_ptr, head); 1183 link_retrieve_defq(l_ptr, &head);
1234 tipc_node_unlock(n_ptr); 1184 tipc_node_unlock(n_ptr);
1235 continue; 1185 continue;
1236 } 1186 }
1237 l_ptr->next_in_no++; 1187 l_ptr->next_in_no++;
1238 if (unlikely(l_ptr->oldest_deferred_in)) 1188 if (unlikely(!skb_queue_empty(&l_ptr->deferred_queue)))
1239 head = link_insert_deferred_queue(l_ptr, head); 1189 link_retrieve_defq(l_ptr, &head);
1240 1190
1241 if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) { 1191 if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
1242 l_ptr->stats.sent_acks++; 1192 l_ptr->stats.sent_acks++;
1243 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); 1193 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1244 } 1194 }
1245 1195
1246 if (tipc_link_prepare_input(l_ptr, &buf)) { 1196 if (tipc_link_prepare_input(l_ptr, &skb)) {
1247 tipc_node_unlock(n_ptr); 1197 tipc_node_unlock(n_ptr);
1248 continue; 1198 continue;
1249 } 1199 }
1250 tipc_node_unlock(n_ptr); 1200 tipc_node_unlock(n_ptr);
1251 msg = buf_msg(buf); 1201
1252 if (tipc_link_input(l_ptr, buf) != 0) 1202 if (tipc_link_input(l_ptr, skb) != 0)
1253 goto discard; 1203 goto discard;
1254 continue; 1204 continue;
1255unlock_discard: 1205unlock_discard:
1256 tipc_node_unlock(n_ptr); 1206 tipc_node_unlock(n_ptr);
1257discard: 1207discard:
1258 kfree_skb(buf); 1208 kfree_skb(skb);
1259 } 1209 }
1260} 1210}
1261 1211
@@ -1338,48 +1288,37 @@ static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf)
1338 * 1288 *
1339 * Returns increase in queue length (i.e. 0 or 1) 1289 * Returns increase in queue length (i.e. 0 or 1)
1340 */ 1290 */
1341u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail, 1291u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
1342 struct sk_buff *buf)
1343{ 1292{
1344 struct sk_buff *queue_buf; 1293 struct sk_buff *skb1;
1345 struct sk_buff **prev; 1294 u32 seq_no = buf_seqno(skb);
1346 u32 seq_no = buf_seqno(buf);
1347
1348 buf->next = NULL;
1349 1295
1350 /* Empty queue ? */ 1296 /* Empty queue ? */
1351 if (*head == NULL) { 1297 if (skb_queue_empty(list)) {
1352 *head = *tail = buf; 1298 __skb_queue_tail(list, skb);
1353 return 1; 1299 return 1;
1354 } 1300 }
1355 1301
1356 /* Last ? */ 1302 /* Last ? */
1357 if (less(buf_seqno(*tail), seq_no)) { 1303 if (less(buf_seqno(skb_peek_tail(list)), seq_no)) {
1358 (*tail)->next = buf; 1304 __skb_queue_tail(list, skb);
1359 *tail = buf;
1360 return 1; 1305 return 1;
1361 } 1306 }
1362 1307
1363 /* Locate insertion point in queue, then insert; discard if duplicate */ 1308 /* Locate insertion point in queue, then insert; discard if duplicate */
1364 prev = head; 1309 skb_queue_walk(list, skb1) {
1365 queue_buf = *head; 1310 u32 curr_seqno = buf_seqno(skb1);
1366 for (;;) {
1367 u32 curr_seqno = buf_seqno(queue_buf);
1368 1311
1369 if (seq_no == curr_seqno) { 1312 if (seq_no == curr_seqno) {
1370 kfree_skb(buf); 1313 kfree_skb(skb);
1371 return 0; 1314 return 0;
1372 } 1315 }
1373 1316
1374 if (less(seq_no, curr_seqno)) 1317 if (less(seq_no, curr_seqno))
1375 break; 1318 break;
1376
1377 prev = &queue_buf->next;
1378 queue_buf = queue_buf->next;
1379 } 1319 }
1380 1320
1381 buf->next = queue_buf; 1321 __skb_queue_before(list, skb1, skb);
1382 *prev = buf;
1383 return 1; 1322 return 1;
1384} 1323}
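The walk keeps the deferred queue sorted by circular sequence order and drops duplicates. A standalone model of the same insertion logic over plain 16-bit sequence numbers:

    #include <stdio.h>
    #include <stdint.h>

    /* Insert seq into the sorted queue q; return 0 if it was a duplicate */
    static int defer(uint16_t q[], int *len, uint16_t seq)
    {
            int i, j;

            for (i = 0; i < *len; i++) {
                    if (q[i] == seq)
                            return 0;               /* duplicate: discarded */
                    if ((uint16_t)(q[i] - seq) < 32768u)
                            break;                  /* q[i] follows seq: insert here */
            }
            for (j = *len; j > i; j--)
                    q[j] = q[j - 1];
            q[i] = seq;
            (*len)++;
            return 1;
    }

    int main(void)
    {
            uint16_t q[8] = {3, 5, 8};
            int len = 3;

            defer(q, &len, 6);      /* lands between 5 and 8 */
            defer(q, &len, 5);      /* duplicate, dropped */
            for (int i = 0; i < len; i++)
                    printf("%u ", q[i]);            /* 3 5 6 8 */
            printf("\n");
            return 0;
    }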
1385 1324
@@ -1409,15 +1348,14 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
1409 return; 1348 return;
1410 } 1349 }
1411 1350
1412 if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in, 1351 if (tipc_link_defer_pkt(&l_ptr->deferred_queue, buf)) {
1413 &l_ptr->newest_deferred_in, buf)) {
1414 l_ptr->deferred_inqueue_sz++;
1415 l_ptr->stats.deferred_recv++; 1352 l_ptr->stats.deferred_recv++;
1416 TIPC_SKB_CB(buf)->deferred = true; 1353 TIPC_SKB_CB(buf)->deferred = true;
1417 if ((l_ptr->deferred_inqueue_sz % 16) == 1) 1354 if ((skb_queue_len(&l_ptr->deferred_queue) % 16) == 1)
1418 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); 1355 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1419 } else 1356 } else {
1420 l_ptr->stats.duplicates++; 1357 l_ptr->stats.duplicates++;
1358 }
1421} 1359}
1422 1360
1423/* 1361/*
@@ -1431,12 +1369,6 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
1431 u32 msg_size = sizeof(l_ptr->proto_msg); 1369 u32 msg_size = sizeof(l_ptr->proto_msg);
1432 int r_flag; 1370 int r_flag;
1433 1371
1434 /* Discard any previous message that was deferred due to congestion */
1435 if (l_ptr->proto_msg_queue) {
1436 kfree_skb(l_ptr->proto_msg_queue);
1437 l_ptr->proto_msg_queue = NULL;
1438 }
1439
1440 /* Don't send protocol message during link changeover */ 1372 /* Don't send protocol message during link changeover */
1441 if (l_ptr->exp_msg_count) 1373 if (l_ptr->exp_msg_count)
1442 return; 1374 return;
@@ -1459,8 +1391,8 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
1459 if (l_ptr->next_out) 1391 if (l_ptr->next_out)
1460 next_sent = buf_seqno(l_ptr->next_out); 1392 next_sent = buf_seqno(l_ptr->next_out);
1461 msg_set_next_sent(msg, next_sent); 1393 msg_set_next_sent(msg, next_sent);
1462 if (l_ptr->oldest_deferred_in) { 1394 if (!skb_queue_empty(&l_ptr->deferred_queue)) {
1463 u32 rec = buf_seqno(l_ptr->oldest_deferred_in); 1395 u32 rec = buf_seqno(skb_peek(&l_ptr->deferred_queue));
1464 gap = mod(rec - mod(l_ptr->next_in_no)); 1396 gap = mod(rec - mod(l_ptr->next_in_no));
1465 } 1397 }
1466 msg_set_seq_gap(msg, gap); 1398 msg_set_seq_gap(msg, gap);
@@ -1648,7 +1580,7 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
1648 } 1580 }
1649 if (msg_seq_gap(msg)) { 1581 if (msg_seq_gap(msg)) {
1650 l_ptr->stats.recv_nacks++; 1582 l_ptr->stats.recv_nacks++;
1651 tipc_link_retransmit(l_ptr, l_ptr->first_out, 1583 tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->outqueue),
1652 msg_seq_gap(msg)); 1584 msg_seq_gap(msg));
1653 } 1585 }
1654 break; 1586 break;
@@ -1667,7 +1599,7 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
1667 u32 selector) 1599 u32 selector)
1668{ 1600{
1669 struct tipc_link *tunnel; 1601 struct tipc_link *tunnel;
1670 struct sk_buff *buf; 1602 struct sk_buff *skb;
1671 u32 length = msg_size(msg); 1603 u32 length = msg_size(msg);
1672 1604
1673 tunnel = l_ptr->owner->active_links[selector & 1]; 1605 tunnel = l_ptr->owner->active_links[selector & 1];
@@ -1676,14 +1608,14 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
1676 return; 1608 return;
1677 } 1609 }
1678 msg_set_size(tunnel_hdr, length + INT_H_SIZE); 1610 msg_set_size(tunnel_hdr, length + INT_H_SIZE);
1679 buf = tipc_buf_acquire(length + INT_H_SIZE); 1611 skb = tipc_buf_acquire(length + INT_H_SIZE);
1680 if (!buf) { 1612 if (!skb) {
1681 pr_warn("%sunable to send tunnel msg\n", link_co_err); 1613 pr_warn("%sunable to send tunnel msg\n", link_co_err);
1682 return; 1614 return;
1683 } 1615 }
1684 skb_copy_to_linear_data(buf, tunnel_hdr, INT_H_SIZE); 1616 skb_copy_to_linear_data(skb, tunnel_hdr, INT_H_SIZE);
1685 skb_copy_to_linear_data_offset(buf, INT_H_SIZE, msg, length); 1617 skb_copy_to_linear_data_offset(skb, INT_H_SIZE, msg, length);
1686 __tipc_link_xmit(tunnel, buf); 1618 __tipc_link_xmit_skb(tunnel, skb);
1687} 1619}
1688 1620
1689 1621
@@ -1695,10 +1627,10 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr,
1695 */ 1627 */
1696void tipc_link_failover_send_queue(struct tipc_link *l_ptr) 1628void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
1697{ 1629{
1698 u32 msgcount = l_ptr->out_queue_size; 1630 u32 msgcount = skb_queue_len(&l_ptr->outqueue);
1699 struct sk_buff *crs = l_ptr->first_out;
1700 struct tipc_link *tunnel = l_ptr->owner->active_links[0]; 1631 struct tipc_link *tunnel = l_ptr->owner->active_links[0];
1701 struct tipc_msg tunnel_hdr; 1632 struct tipc_msg tunnel_hdr;
1633 struct sk_buff *skb;
1702 int split_bundles; 1634 int split_bundles;
1703 1635
1704 if (!tunnel) 1636 if (!tunnel)
@@ -1709,14 +1641,12 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
1709 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); 1641 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
1710 msg_set_msgcnt(&tunnel_hdr, msgcount); 1642 msg_set_msgcnt(&tunnel_hdr, msgcount);
1711 1643
1712 if (!l_ptr->first_out) { 1644 if (skb_queue_empty(&l_ptr->outqueue)) {
1713 struct sk_buff *buf; 1645 skb = tipc_buf_acquire(INT_H_SIZE);
1714 1646 if (skb) {
1715 buf = tipc_buf_acquire(INT_H_SIZE); 1647 skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE);
1716 if (buf) {
1717 skb_copy_to_linear_data(buf, &tunnel_hdr, INT_H_SIZE);
1718 msg_set_size(&tunnel_hdr, INT_H_SIZE); 1648 msg_set_size(&tunnel_hdr, INT_H_SIZE);
1719 __tipc_link_xmit(tunnel, buf); 1649 __tipc_link_xmit_skb(tunnel, skb);
1720 } else { 1650 } else {
1721 pr_warn("%sunable to send changeover msg\n", 1651 pr_warn("%sunable to send changeover msg\n",
1722 link_co_err); 1652 link_co_err);
@@ -1727,8 +1657,8 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
1727 split_bundles = (l_ptr->owner->active_links[0] != 1657 split_bundles = (l_ptr->owner->active_links[0] !=
1728 l_ptr->owner->active_links[1]); 1658 l_ptr->owner->active_links[1]);
1729 1659
1730 while (crs) { 1660 skb_queue_walk(&l_ptr->outqueue, skb) {
1731 struct tipc_msg *msg = buf_msg(crs); 1661 struct tipc_msg *msg = buf_msg(skb);
1732 1662
1733 if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) { 1663 if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
1734 struct tipc_msg *m = msg_get_wrapped(msg); 1664 struct tipc_msg *m = msg_get_wrapped(msg);
@@ -1746,7 +1676,6 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
1746 tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg, 1676 tipc_link_tunnel_xmit(l_ptr, &tunnel_hdr, msg,
1747 msg_link_selector(msg)); 1677 msg_link_selector(msg));
1748 } 1678 }
1749 crs = crs->next;
1750 } 1679 }
1751} 1680}
1752 1681
@@ -1762,17 +1691,16 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
1762void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, 1691void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
1763 struct tipc_link *tunnel) 1692 struct tipc_link *tunnel)
1764{ 1693{
1765 struct sk_buff *iter; 1694 struct sk_buff *skb;
1766 struct tipc_msg tunnel_hdr; 1695 struct tipc_msg tunnel_hdr;
1767 1696
1768 tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, 1697 tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
1769 DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr); 1698 DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
1770 msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size); 1699 msg_set_msgcnt(&tunnel_hdr, skb_queue_len(&l_ptr->outqueue));
1771 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); 1700 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
1772 iter = l_ptr->first_out; 1701 skb_queue_walk(&l_ptr->outqueue, skb) {
1773 while (iter) { 1702 struct sk_buff *outskb;
1774 struct sk_buff *outbuf; 1703 struct tipc_msg *msg = buf_msg(skb);
1775 struct tipc_msg *msg = buf_msg(iter);
1776 u32 length = msg_size(msg); 1704 u32 length = msg_size(msg);
1777 1705
1778 if (msg_user(msg) == MSG_BUNDLER) 1706 if (msg_user(msg) == MSG_BUNDLER)
@@ -1780,19 +1708,18 @@ void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
1780 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */ 1708 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */
1781 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1709 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1782 msg_set_size(&tunnel_hdr, length + INT_H_SIZE); 1710 msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
1783 outbuf = tipc_buf_acquire(length + INT_H_SIZE); 1711 outskb = tipc_buf_acquire(length + INT_H_SIZE);
1784 if (outbuf == NULL) { 1712 if (outskb == NULL) {
1785 pr_warn("%sunable to send duplicate msg\n", 1713 pr_warn("%sunable to send duplicate msg\n",
1786 link_co_err); 1714 link_co_err);
1787 return; 1715 return;
1788 } 1716 }
1789 skb_copy_to_linear_data(outbuf, &tunnel_hdr, INT_H_SIZE); 1717 skb_copy_to_linear_data(outskb, &tunnel_hdr, INT_H_SIZE);
1790 skb_copy_to_linear_data_offset(outbuf, INT_H_SIZE, iter->data, 1718 skb_copy_to_linear_data_offset(outskb, INT_H_SIZE, skb->data,
1791 length); 1719 length);
1792 __tipc_link_xmit(tunnel, outbuf); 1720 __tipc_link_xmit_skb(tunnel, outskb);
1793 if (!tipc_link_is_up(l_ptr)) 1721 if (!tipc_link_is_up(l_ptr))
1794 return; 1722 return;
1795 iter = iter->next;
1796 } 1723 }
1797} 1724}
1798 1725
@@ -1936,7 +1863,12 @@ void tipc_link_bundle_rcv(struct sk_buff *buf)
1936 } 1863 }
1937 omsg = buf_msg(obuf); 1864 omsg = buf_msg(obuf);
1938 pos += align(msg_size(omsg)); 1865 pos += align(msg_size(omsg));
1939 if (msg_isdata(omsg) || (msg_user(omsg) == CONN_MANAGER)) { 1866 if (msg_isdata(omsg)) {
1867 if (unlikely(msg_type(omsg) == TIPC_MCAST_MSG))
1868 tipc_sk_mcast_rcv(obuf);
1869 else
1870 tipc_sk_rcv(obuf);
1871 } else if (msg_user(omsg) == CONN_MANAGER) {
1940 tipc_sk_rcv(obuf); 1872 tipc_sk_rcv(obuf);
1941 } else if (msg_user(omsg) == NAME_DISTRIBUTOR) { 1873 } else if (msg_user(omsg) == NAME_DISTRIBUTOR) {
1942 tipc_named_rcv(obuf); 1874 tipc_named_rcv(obuf);
@@ -2382,3 +2314,435 @@ static void link_print(struct tipc_link *l_ptr, const char *str)
2382 else 2314 else
2383 pr_cont("\n"); 2315 pr_cont("\n");
2384} 2316}
2317
2318/* Parse and validate nested (link) properties valid for media, bearer and link
2319 */
2320int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
2321{
2322 int err;
2323
2324 err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
2325 tipc_nl_prop_policy);
2326 if (err)
2327 return err;
2328
2329 if (props[TIPC_NLA_PROP_PRIO]) {
2330 u32 prio;
2331
2332 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
2333 if (prio > TIPC_MAX_LINK_PRI)
2334 return -EINVAL;
2335 }
2336
2337 if (props[TIPC_NLA_PROP_TOL]) {
2338 u32 tol;
2339
2340 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
2341 if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
2342 return -EINVAL;
2343 }
2344
2345 if (props[TIPC_NLA_PROP_WIN]) {
2346 u32 win;
2347
2348 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2349 if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
2350 return -EINVAL;
2351 }
2352
2353 return 0;
2354}
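Seen from userspace, the properties validated here arrive as a nest inside TIPC_NLA_LINK. A hedged libnl-3 sketch of how such a request could be assembled; the generic-netlink boilerplate is omitted, only the nesting layout matters:

    #include <netlink/netlink.h>
    #include <netlink/attr.h>
    #include <linux/tipc_netlink.h>

    static void put_link_props(struct nl_msg *msg)
    {
            struct nlattr *link, *prop;

            link = nla_nest_start(msg, TIPC_NLA_LINK);
            nla_put_string(msg, TIPC_NLA_LINK_NAME, "broadcast-link");
            prop = nla_nest_start(msg, TIPC_NLA_LINK_PROP);
            nla_put_u32(msg, TIPC_NLA_PROP_TOL, 1500);  /* within MIN..MAX */
            nla_nest_end(msg, prop);
            nla_nest_end(msg, link);
    }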
2355
2356int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
2357{
2358 int err;
2359 int res = 0;
2360 int bearer_id;
2361 char *name;
2362 struct tipc_link *link;
2363 struct tipc_node *node;
2364 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2365
2366 if (!info->attrs[TIPC_NLA_LINK])
2367 return -EINVAL;
2368
2369 err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
2370 info->attrs[TIPC_NLA_LINK],
2371 tipc_nl_link_policy);
2372 if (err)
2373 return err;
2374
2375 if (!attrs[TIPC_NLA_LINK_NAME])
2376 return -EINVAL;
2377
2378 name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2379
2380 node = tipc_link_find_owner(name, &bearer_id);
2381 if (!node)
2382 return -EINVAL;
2383
2384 tipc_node_lock(node);
2385
2386 link = node->links[bearer_id];
2387 if (!link) {
2388 res = -EINVAL;
2389 goto out;
2390 }
2391
2392 if (attrs[TIPC_NLA_LINK_PROP]) {
2393 struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
2394
2395 err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
2396 props);
2397 if (err) {
2398 res = err;
2399 goto out;
2400 }
2401
2402 if (props[TIPC_NLA_PROP_TOL]) {
2403 u32 tol;
2404
2405 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
2406 link_set_supervision_props(link, tol);
2407 tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0, 0);
2408 }
2409 if (props[TIPC_NLA_PROP_PRIO]) {
2410 u32 prio;
2411
2412 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
2413 link->priority = prio;
2414 tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio, 0);
2415 }
2416 if (props[TIPC_NLA_PROP_WIN]) {
2417 u32 win;
2418
2419 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
2420 tipc_link_set_queue_limits(link, win);
2421 }
2422 }
2423
2424out:
2425 tipc_node_unlock(node);
2426
2427 return res;
2428}
2429
2430static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
2431{
2432 int i;
2433 struct nlattr *stats;
2434
2435 struct nla_map {
2436 u32 key;
2437 u32 val;
2438 };
2439
2440 struct nla_map map[] = {
2441 {TIPC_NLA_STATS_RX_INFO, s->recv_info},
2442 {TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
2443 {TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
2444 {TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
2445 {TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
2446 {TIPC_NLA_STATS_TX_INFO, s->sent_info},
2447 {TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
2448 {TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
2449 {TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
2450 {TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
2451 {TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
2452 s->msg_length_counts : 1},
2453 {TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
2454 {TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
2455 {TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
2456 {TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
2457 {TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
2458 {TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
2459 {TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
2460 {TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
2461 {TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
2462 {TIPC_NLA_STATS_RX_STATES, s->recv_states},
2463 {TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
2464 {TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
2465 {TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
2466 {TIPC_NLA_STATS_TX_STATES, s->sent_states},
2467 {TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
2468 {TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
2469 {TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
2470 {TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
2471 {TIPC_NLA_STATS_DUPLICATES, s->duplicates},
2472 {TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
2473 {TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
2474 {TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
2475 (s->accu_queue_sz / s->queue_sz_counts) : 0}
2476 };
2477
2478 stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
2479 if (!stats)
2480 return -EMSGSIZE;
2481
2482 for (i = 0; i < ARRAY_SIZE(map); i++)
2483 if (nla_put_u32(skb, map[i].key, map[i].val))
2484 goto msg_full;
2485
2486 nla_nest_end(skb, stats);
2487
2488 return 0;
2489msg_full:
2490 nla_nest_cancel(skb, stats);
2491
2492 return -EMSGSIZE;
2493}
2494
2495/* Caller should hold appropriate locks to protect the link */
2496static int __tipc_nl_add_link(struct tipc_nl_msg *msg, struct tipc_link *link)
2497{
2498 int err;
2499 void *hdr;
2500 struct nlattr *attrs;
2501 struct nlattr *prop;
2502
2503 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
2504 NLM_F_MULTI, TIPC_NL_LINK_GET);
2505 if (!hdr)
2506 return -EMSGSIZE;
2507
2508 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
2509 if (!attrs)
2510 goto msg_full;
2511
2512 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
2513 goto attr_msg_full;
2514 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
2515 tipc_cluster_mask(tipc_own_addr)))
2516 goto attr_msg_full;
2517 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt))
2518 goto attr_msg_full;
2519 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no))
2520 goto attr_msg_full;
2521 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->next_out_no))
2522 goto attr_msg_full;
2523
2524 if (tipc_link_is_up(link))
2525 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2526 goto attr_msg_full;
2527 if (tipc_link_is_active(link))
2528 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
2529 goto attr_msg_full;
2530
2531 prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
2532 if (!prop)
2533 goto attr_msg_full;
2534 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2535 goto prop_msg_full;
2536 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
2537 goto prop_msg_full;
2538 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
2539 link->queue_limit[TIPC_LOW_IMPORTANCE]))
2540 goto prop_msg_full;
2541 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2542 goto prop_msg_full;
2543 nla_nest_end(msg->skb, prop);
2544
2545 err = __tipc_nl_add_stats(msg->skb, &link->stats);
2546 if (err)
2547 goto attr_msg_full;
2548
2549 nla_nest_end(msg->skb, attrs);
2550 genlmsg_end(msg->skb, hdr);
2551
2552 return 0;
2553
2554prop_msg_full:
2555 nla_nest_cancel(msg->skb, prop);
2556attr_msg_full:
2557 nla_nest_cancel(msg->skb, attrs);
2558msg_full:
2559 genlmsg_cancel(msg->skb, hdr);
2560
2561 return -EMSGSIZE;
2562}
2563
2564/* Caller should hold node lock */
2565static int __tipc_nl_add_node_links(struct tipc_nl_msg *msg,
2566 struct tipc_node *node,
2567 u32 *prev_link)
2568{
2569 u32 i;
2570 int err;
2571
2572 for (i = *prev_link; i < MAX_BEARERS; i++) {
2573 *prev_link = i;
2574
2575 if (!node->links[i])
2576 continue;
2577
2578 err = __tipc_nl_add_link(msg, node->links[i]);
2579 if (err)
2580 return err;
2581 }
2582 *prev_link = 0;
2583
2584 return 0;
2585}
2586
2587int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
2588{
2589 struct tipc_node *node;
2590 struct tipc_nl_msg msg;
2591 u32 prev_node = cb->args[0];
2592 u32 prev_link = cb->args[1];
2593 int done = cb->args[2];
2594 int err;
2595
2596 if (done)
2597 return 0;
2598
2599 msg.skb = skb;
2600 msg.portid = NETLINK_CB(cb->skb).portid;
2601 msg.seq = cb->nlh->nlmsg_seq;
2602
2603 rcu_read_lock();
2604
2605 if (prev_node) {
2606 node = tipc_node_find(prev_node);
2607 if (!node) {
2608 /* We never set seq or call nl_dump_check_consistent(),
2609 * which means that setting prev_seq here will cause the
2610 * consistency check to fail in the netlink callback
2611 * handler, resulting in the last NLMSG_DONE message
2612 * having the NLM_F_DUMP_INTR flag set.
2613 */
2614 cb->prev_seq = 1;
2615 goto out;
2616 }
2617
2618 list_for_each_entry_continue_rcu(node, &tipc_node_list, list) {
2619 tipc_node_lock(node);
2620 err = __tipc_nl_add_node_links(&msg, node, &prev_link);
2621 tipc_node_unlock(node);
2622 if (err)
2623 goto out;
2624
2625 prev_node = node->addr;
2626 }
2627 } else {
2628 err = tipc_nl_add_bc_link(&msg);
2629 if (err)
2630 goto out;
2631
2632 list_for_each_entry_rcu(node, &tipc_node_list, list) {
2633 tipc_node_lock(node);
2634 err = __tipc_nl_add_node_links(&msg, node, &prev_link);
2635 tipc_node_unlock(node);
2636 if (err)
2637 goto out;
2638
2639 prev_node = node->addr;
2640 }
2641 }
2642 done = 1;
2643out:
2644 rcu_read_unlock();
2645
2646 cb->args[0] = prev_node;
2647 cb->args[1] = prev_link;
2648 cb->args[2] = done;
2649
2650 return skb->len;
2651}
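The cb->args slots are the only state that survives between successive invocations of a dump callback: each call fills one skb and records where to resume. The general shape of the pattern, reduced to a sketch with hypothetical fill_one() and nitems:

    static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
    {
            long pos = cb->args[0];

            for (; pos < nitems; pos++)
                    if (fill_one(skb, pos) < 0)
                            break;  /* skb full: stop, resume here next call */
            cb->args[0] = pos;
            return skb->len;        /* non-zero: netlink calls us again */
    }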
2652
2653int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
2654{
2655 struct sk_buff *ans_skb;
2656 struct tipc_nl_msg msg;
2657 struct tipc_link *link;
2658 struct tipc_node *node;
2659 char *name;
2660 int bearer_id;
2661 int err;
2662
2663 if (!info->attrs[TIPC_NLA_LINK_NAME])
2664 return -EINVAL;
2665
2666 name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
2667 node = tipc_link_find_owner(name, &bearer_id);
2668 if (!node)
2669 return -EINVAL;
2670
2671 ans_skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
2672 if (!ans_skb)
2673 return -ENOMEM;
2674
2675 msg.skb = ans_skb;
2676 msg.portid = info->snd_portid;
2677 msg.seq = info->snd_seq;
2678
2679 tipc_node_lock(node);
2680 link = node->links[bearer_id];
2681 if (!link) {
2682 err = -EINVAL;
2683 goto err_out;
2684 }
2685
2686 err = __tipc_nl_add_link(&msg, link);
2687 if (err)
2688 goto err_out;
2689
2690 tipc_node_unlock(node);
2691
2692 return genlmsg_reply(ans_skb, info);
2693
2694err_out:
2695 tipc_node_unlock(node);
2696 nlmsg_free(ans_skb);
2697
2698 return err;
2699}
2700
2701int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
2702{
2703 int err;
2704 char *link_name;
2705 unsigned int bearer_id;
2706 struct tipc_link *link;
2707 struct tipc_node *node;
2708 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2709
2710 if (!info->attrs[TIPC_NLA_LINK])
2711 return -EINVAL;
2712
2713 err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
2714 info->attrs[TIPC_NLA_LINK],
2715 tipc_nl_link_policy);
2716 if (err)
2717 return err;
2718
2719 if (!attrs[TIPC_NLA_LINK_NAME])
2720 return -EINVAL;
2721
2722 link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2723
2724 if (strcmp(link_name, tipc_bclink_name) == 0) {
2725 err = tipc_bclink_reset_stats();
2726 if (err)
2727 return err;
2728 return 0;
2729 }
2730
2731 node = tipc_link_find_owner(link_name, &bearer_id);
2732 if (!node)
2733 return -EINVAL;
2734
2735 tipc_node_lock(node);
2736
2737 link = node->links[bearer_id];
2738 if (!link) {
2739 tipc_node_unlock(node);
2740 return -EINVAL;
2741 }
2742
2743 link_reset_statistics(link);
2744
2745 tipc_node_unlock(node);
2746
2747 return 0;
2748}
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 782983ccd323..55812e87ca1e 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/link.h: Include file for TIPC link code 2 * net/tipc/link.h: Include file for TIPC link code
3 * 3 *
4 * Copyright (c) 1995-2006, 2013, Ericsson AB 4 * Copyright (c) 1995-2006, 2013-2014, Ericsson AB
5 * Copyright (c) 2004-2005, 2010-2011, Wind River Systems 5 * Copyright (c) 2004-2005, 2010-2011, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -37,6 +37,7 @@
37#ifndef _TIPC_LINK_H 37#ifndef _TIPC_LINK_H
38#define _TIPC_LINK_H 38#define _TIPC_LINK_H
39 39
40#include <net/genetlink.h>
40#include "msg.h" 41#include "msg.h"
41#include "node.h" 42#include "node.h"
42 43
@@ -118,22 +119,15 @@ struct tipc_stats {
118 * @max_pkt: current maximum packet size for this link 119 * @max_pkt: current maximum packet size for this link
119 * @max_pkt_target: desired maximum packet size for this link 120 * @max_pkt_target: desired maximum packet size for this link
120 * @max_pkt_probes: # of probes based on current (max_pkt, max_pkt_target) 121 * @max_pkt_probes: # of probes based on current (max_pkt, max_pkt_target)
121 * @out_queue_size: # of messages in outbound message queue 122 * @outqueue: outbound message queue
122 * @first_out: ptr to first outbound message in queue
123 * @last_out: ptr to last outbound message in queue
124 * @next_out_no: next sequence number to use for outbound messages 123 * @next_out_no: next sequence number to use for outbound messages
125 * @last_retransmitted: sequence number of most recently retransmitted message 124 * @last_retransmitted: sequence number of most recently retransmitted message
126 * @stale_count: # of identical retransmit requests made by peer 125 * @stale_count: # of identical retransmit requests made by peer
127 * @next_in_no: next sequence number to expect for inbound messages 126 * @next_in_no: next sequence number to expect for inbound messages
128 * @deferred_inqueue_sz: # of messages in inbound message queue 127 * @deferred_queue: queue of out-of-sequence messages received from peer node
129 * @oldest_deferred_in: ptr to first inbound message in queue
130 * @newest_deferred_in: ptr to last inbound message in queue
131 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer 128 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
132 * @proto_msg_queue: ptr to (single) outbound control message
133 * @retransm_queue_size: number of messages to retransmit
134 * @retransm_queue_head: sequence number of first message to retransmit
135 * @next_out: ptr to first unsent outbound message in queue 129 * @next_out: ptr to first unsent outbound message in queue
136 * @waiting_ports: linked list of ports waiting for link congestion to abate 130 * @waiting_sks: linked list of sockets waiting for link congestion to abate
137 * @long_msg_seq_no: next identifier to use for outbound fragmented messages 131 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
138 * @reasm_buf: head of partially reassembled inbound message fragments 132 * @reasm_buf: head of partially reassembled inbound message fragments
139 * @stats: collects statistics regarding link activity 133 * @stats: collects statistics regarding link activity
@@ -175,26 +169,19 @@ struct tipc_link {
175 u32 max_pkt_probes; 169 u32 max_pkt_probes;
176 170
177 /* Sending */ 171 /* Sending */
178 u32 out_queue_size; 172 struct sk_buff_head outqueue;
179 struct sk_buff *first_out;
180 struct sk_buff *last_out;
181 u32 next_out_no; 173 u32 next_out_no;
182 u32 last_retransmitted; 174 u32 last_retransmitted;
183 u32 stale_count; 175 u32 stale_count;
184 176
185 /* Reception */ 177 /* Reception */
186 u32 next_in_no; 178 u32 next_in_no;
187 u32 deferred_inqueue_sz; 179 struct sk_buff_head deferred_queue;
188 struct sk_buff *oldest_deferred_in;
189 struct sk_buff *newest_deferred_in;
190 u32 unacked_window; 180 u32 unacked_window;
191 181
192 /* Congestion handling */ 182 /* Congestion handling */
193 struct sk_buff *proto_msg_queue;
194 u32 retransm_queue_size;
195 u32 retransm_queue_head;
196 struct sk_buff *next_out; 183 struct sk_buff *next_out;
197 struct list_head waiting_ports; 184 struct sk_buff_head waiting_sks;
198 185
199 /* Fragmentation/reassembly */ 186 /* Fragmentation/reassembly */
200 u32 long_msg_seq_no; 187 u32 long_msg_seq_no;
@@ -226,19 +213,26 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area,
226void tipc_link_reset_all(struct tipc_node *node); 213void tipc_link_reset_all(struct tipc_node *node);
227void tipc_link_reset(struct tipc_link *l_ptr); 214void tipc_link_reset(struct tipc_link *l_ptr);
228void tipc_link_reset_list(unsigned int bearer_id); 215void tipc_link_reset_list(unsigned int bearer_id);
229int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector); 216int tipc_link_xmit_skb(struct sk_buff *skb, u32 dest, u32 selector);
230int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *buf); 217int tipc_link_xmit(struct sk_buff_head *list, u32 dest, u32 selector);
218int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list);
231u32 tipc_link_get_max_pkt(u32 dest, u32 selector); 219u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
232void tipc_link_bundle_rcv(struct sk_buff *buf); 220void tipc_link_bundle_rcv(struct sk_buff *buf);
233void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob, 221void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
234 u32 gap, u32 tolerance, u32 priority, u32 acked_mtu); 222 u32 gap, u32 tolerance, u32 priority, u32 acked_mtu);
235void tipc_link_push_queue(struct tipc_link *l_ptr); 223void tipc_link_push_packets(struct tipc_link *l_ptr);
236u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail, 224u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *buf);
237 struct sk_buff *buf);
238void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all);
239void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window); 225void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window);
240void tipc_link_retransmit(struct tipc_link *l_ptr, 226void tipc_link_retransmit(struct tipc_link *l_ptr,
241 struct sk_buff *start, u32 retransmits); 227 struct sk_buff *start, u32 retransmits);
228struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list,
229 const struct sk_buff *skb);
230
231int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb);
232int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info);
233int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info);
234int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info);
235int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]);
242 236
243/* 237/*
244 * Link sequence number manipulation routines (uses modulo 2**16 arithmetic) 238 * Link sequence number manipulation routines (uses modulo 2**16 arithmetic)
@@ -253,18 +247,14 @@ static inline u32 mod(u32 x)
253 return x & 0xffffu; 247 return x & 0xffffu;
254} 248}
255 249
256static inline int between(u32 lower, u32 upper, u32 n) 250static inline int less_eq(u32 left, u32 right)
257{ 251{
258 if ((lower < n) && (n < upper)) 252 return mod(right - left) < 32768u;
259 return 1;
260 if ((upper < lower) && ((n > lower) || (n < upper)))
261 return 1;
262 return 0;
263} 253}
264 254
265static inline int less_eq(u32 left, u32 right) 255static inline int more(u32 left, u32 right)
266{ 256{
267 return mod(right - left) < 32768u; 257 return !less_eq(left, right);
268} 258}
269 259
270static inline int less(u32 left, u32 right) 260static inline int less(u32 left, u32 right)
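The rewritten helpers define a circular ordering on 16-bit sequence numbers: any two values less than 2^15 apart compare the intuitive way, even across the wrap at 65535. A self-contained check:

    #include <assert.h>

    static unsigned mod(unsigned x)            { return x & 0xffffu; }
    static int less_eq(unsigned l, unsigned r) { return mod(r - l) < 32768u; }
    static int more(unsigned l, unsigned r)    { return !less_eq(l, r); }

    int main(void)
    {
            assert(less_eq(65000, 200));    /* 200 follows 65000 across the wrap */
            assert(more(200, 65000));
            assert(!less_eq(200, 65000));
            return 0;
    }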
@@ -303,7 +293,7 @@ static inline int link_reset_reset(struct tipc_link *l_ptr)
303 293
304static inline int link_congested(struct tipc_link *l_ptr) 294static inline int link_congested(struct tipc_link *l_ptr)
305{ 295{
306 return l_ptr->out_queue_size >= l_ptr->queue_limit[0]; 296 return skb_queue_len(&l_ptr->outqueue) >= l_ptr->queue_limit[0];
307} 297}
308 298
309#endif 299#endif
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 9680be6d388a..a687b30a699c 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -56,15 +56,42 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
56 msg_set_size(m, hsize); 56 msg_set_size(m, hsize);
57 msg_set_prevnode(m, tipc_own_addr); 57 msg_set_prevnode(m, tipc_own_addr);
58 msg_set_type(m, type); 58 msg_set_type(m, type);
59 msg_set_orignode(m, tipc_own_addr); 59 if (hsize > SHORT_H_SIZE) {
60 msg_set_destnode(m, destnode); 60 msg_set_orignode(m, tipc_own_addr);
61 msg_set_destnode(m, destnode);
62 }
63}
64
65struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
66 uint data_sz, u32 dnode, u32 onode,
67 u32 dport, u32 oport, int errcode)
68{
69 struct tipc_msg *msg;
70 struct sk_buff *buf;
71
72 buf = tipc_buf_acquire(hdr_sz + data_sz);
73 if (unlikely(!buf))
74 return NULL;
75
76 msg = buf_msg(buf);
77 tipc_msg_init(msg, user, type, hdr_sz, dnode);
78 msg_set_size(msg, hdr_sz + data_sz);
79 msg_set_prevnode(msg, onode);
80 msg_set_origport(msg, oport);
81 msg_set_destport(msg, dport);
82 msg_set_errcode(msg, errcode);
83 if (hdr_sz > SHORT_H_SIZE) {
84 msg_set_orignode(msg, onode);
85 msg_set_destnode(msg, dnode);
86 }
87 return buf;
61} 88}
62 89
63/* tipc_buf_append(): Append a buffer to the fragment list of another buffer 90/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
64 * @*headbuf: in: NULL for first frag, otherwise value returned from prev call 91 * @*headbuf: in: NULL for first frag, otherwise value returned from prev call
65 * out: set when successful non-complete reassembly, otherwise NULL 92 * out: set when successful non-complete reassembly, otherwise NULL
66 * @*buf: in: the buffer to append. Always defined 93 * @*buf: in: the buffer to append. Always defined
67 * out: head buf after sucessful complete reassembly, otherwise NULL 94 * out: head buf after successful complete reassembly, otherwise NULL
68 * Returns 1 when reassembly complete, otherwise 0 95 * Returns 1 when reassembly complete, otherwise 0
69 */ 96 */
70int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) 97int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
@@ -135,15 +162,16 @@ err:
135/** 162/**
136 * tipc_msg_build - create buffer chain containing specified header and data 163 * tipc_msg_build - create buffer chain containing specified header and data
137 * @mhdr: Message header, to be prepended to data 164 * @mhdr: Message header, to be prepended to data
138 * @iov: User data 165 * @m: User message
139 * @offset: Position in iov to start copying from 166 * @offset: Position in iov to start copying from
140 * @dsz: Total length of user data 167 * @dsz: Total length of user data
141 * @pktmax: Max packet size that can be used 168 * @pktmax: Max packet size that can be used
142 * @chain: Buffer or chain of buffers to be returned to caller 169 * @list: Buffer or chain of buffers to be returned to caller
170 *
143 * Returns message data size or errno: -ENOMEM, -EFAULT 171 * Returns message data size or errno: -ENOMEM, -EFAULT
144 */ 172 */
145int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov, 173int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
146 int offset, int dsz, int pktmax , struct sk_buff **chain) 174 int dsz, int pktmax, struct sk_buff_head *list)
147{ 175{
148 int mhsz = msg_hdr_sz(mhdr); 176 int mhsz = msg_hdr_sz(mhdr);
149 int msz = mhsz + dsz; 177 int msz = mhsz + dsz;
@@ -152,7 +180,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
152 int pktrem = pktmax; 180 int pktrem = pktmax;
153 int drem = dsz; 181 int drem = dsz;
154 struct tipc_msg pkthdr; 182 struct tipc_msg pkthdr;
155 struct sk_buff *buf, *prev; 183 struct sk_buff *skb;
156 char *pktpos; 184 char *pktpos;
157 int rc; 185 int rc;
158 186
@@ -160,13 +188,14 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
160 188
161 /* No fragmentation needed? */ 189 /* No fragmentation needed? */
162 if (likely(msz <= pktmax)) { 190 if (likely(msz <= pktmax)) {
163 buf = tipc_buf_acquire(msz); 191 skb = tipc_buf_acquire(msz);
164 *chain = buf; 192 if (unlikely(!skb))
165 if (unlikely(!buf))
166 return -ENOMEM; 193 return -ENOMEM;
167 skb_copy_to_linear_data(buf, mhdr, mhsz); 194 __skb_queue_tail(list, skb);
168 pktpos = buf->data + mhsz; 195 skb_copy_to_linear_data(skb, mhdr, mhsz);
169 if (!dsz || !memcpy_fromiovecend(pktpos, iov, offset, dsz)) 196 pktpos = skb->data + mhsz;
197 if (!dsz || !memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset,
198 dsz))
170 return dsz; 199 return dsz;
171 rc = -EFAULT; 200 rc = -EFAULT;
172 goto error; 201 goto error;
@@ -179,14 +208,15 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
179 msg_set_fragm_no(&pkthdr, pktno); 208 msg_set_fragm_no(&pkthdr, pktno);
180 209
181 /* Prepare first fragment */ 210 /* Prepare first fragment */
182 *chain = buf = tipc_buf_acquire(pktmax); 211 skb = tipc_buf_acquire(pktmax);
183 if (!buf) 212 if (!skb)
184 return -ENOMEM; 213 return -ENOMEM;
185 pktpos = buf->data; 214 __skb_queue_tail(list, skb);
186 skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE); 215 pktpos = skb->data;
216 skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
187 pktpos += INT_H_SIZE; 217 pktpos += INT_H_SIZE;
188 pktrem -= INT_H_SIZE; 218 pktrem -= INT_H_SIZE;
189 skb_copy_to_linear_data_offset(buf, INT_H_SIZE, mhdr, mhsz); 219 skb_copy_to_linear_data_offset(skb, INT_H_SIZE, mhdr, mhsz);
190 pktpos += mhsz; 220 pktpos += mhsz;
191 pktrem -= mhsz; 221 pktrem -= mhsz;
192 222
@@ -194,7 +224,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
194 if (drem < pktrem) 224 if (drem < pktrem)
195 pktrem = drem; 225 pktrem = drem;
196 226
197 if (memcpy_fromiovecend(pktpos, iov, offset, pktrem)) { 227 if (memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset, pktrem)) {
198 rc = -EFAULT; 228 rc = -EFAULT;
199 goto error; 229 goto error;
200 } 230 }
@@ -209,42 +239,41 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
209 pktsz = drem + INT_H_SIZE; 239 pktsz = drem + INT_H_SIZE;
210 else 240 else
211 pktsz = pktmax; 241 pktsz = pktmax;
212 prev = buf; 242 skb = tipc_buf_acquire(pktsz);
213 buf = tipc_buf_acquire(pktsz); 243 if (!skb) {
214 if (!buf) {
215 rc = -ENOMEM; 244 rc = -ENOMEM;
216 goto error; 245 goto error;
217 } 246 }
218 prev->next = buf; 247 __skb_queue_tail(list, skb);
219 msg_set_type(&pkthdr, FRAGMENT); 248 msg_set_type(&pkthdr, FRAGMENT);
220 msg_set_size(&pkthdr, pktsz); 249 msg_set_size(&pkthdr, pktsz);
221 msg_set_fragm_no(&pkthdr, ++pktno); 250 msg_set_fragm_no(&pkthdr, ++pktno);
222 skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE); 251 skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
223 pktpos = buf->data + INT_H_SIZE; 252 pktpos = skb->data + INT_H_SIZE;
224 pktrem = pktsz - INT_H_SIZE; 253 pktrem = pktsz - INT_H_SIZE;
225 254
226 } while (1); 255 } while (1);
227 256 msg_set_type(buf_msg(skb), LAST_FRAGMENT);
228 msg_set_type(buf_msg(buf), LAST_FRAGMENT);
229 return dsz; 257 return dsz;
230error: 258error:
231 kfree_skb_list(*chain); 259 __skb_queue_purge(list);
232 *chain = NULL; 260 __skb_queue_head_init(list);
233 return rc; 261 return rc;
234} 262}
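Each fragment carries its own INT_H_SIZE fragment header, and the first one additionally carries the original message header, which is what the pktrem bookkeeping above encodes. A standalone model of the resulting packet count (INT_H_SIZE is 40 in the kernel's msg.h):

    #include <stdio.h>

    #define INT_H_SIZE 40

    /* Packets needed for dsz data bytes under an mhsz-byte header at pktmax */
    static int nfrags(int mhsz, int dsz, int pktmax)
    {
            int drem = dsz, n = 1;

            if (mhsz + dsz <= pktmax)
                    return 1;                       /* no fragmentation */
            drem -= pktmax - INT_H_SIZE - mhsz;     /* first fragment */
            while (drem > 0) {
                    drem -= pktmax - INT_H_SIZE;    /* follow-up fragments */
                    n++;
            }
            return n;
    }

    int main(void)
    {
            /* 66000 data bytes, 24-byte header, 1500-byte MTU: 46 packets */
            printf("%d fragments\n", nfrags(24, 66000, 1500));
            return 0;
    }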
235 263
236/** 264/**
237 * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one 265 * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
238 * @bbuf: the existing buffer ("bundle") 266 * @list: the buffer chain of the existing buffer ("bundle")
239 * @buf: buffer to be appended 267 * @skb: buffer to be appended
240 * @mtu: max allowable size for the bundle buffer 268 * @mtu: max allowable size for the bundle buffer
241 * Consumes buffer if successful 269 * Consumes buffer if successful
242 * Returns true if bundling could be performed, otherwise false 270 * Returns true if bundling could be performed, otherwise false
243 */ 271 */
244bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu) 272bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu)
245{ 273{
246 struct tipc_msg *bmsg = buf_msg(bbuf); 274 struct sk_buff *bskb = skb_peek_tail(list);
247 struct tipc_msg *msg = buf_msg(buf); 275 struct tipc_msg *bmsg = buf_msg(bskb);
276 struct tipc_msg *msg = buf_msg(skb);
248 unsigned int bsz = msg_size(bmsg); 277 unsigned int bsz = msg_size(bmsg);
249 unsigned int msz = msg_size(msg); 278 unsigned int msz = msg_size(msg);
250 u32 start = align(bsz); 279 u32 start = align(bsz);
@@ -259,35 +288,36 @@ bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu)
259 return false; 288 return false;
260 if (likely(msg_user(bmsg) != MSG_BUNDLER)) 289 if (likely(msg_user(bmsg) != MSG_BUNDLER))
261 return false; 290 return false;
262 if (likely(msg_type(bmsg) != BUNDLE_OPEN)) 291 if (likely(!TIPC_SKB_CB(bskb)->bundling))
263 return false; 292 return false;
264 if (unlikely(skb_tailroom(bbuf) < (pad + msz))) 293 if (unlikely(skb_tailroom(bskb) < (pad + msz)))
265 return false; 294 return false;
266 if (unlikely(max < (start + msz))) 295 if (unlikely(max < (start + msz)))
267 return false; 296 return false;
268 297
269 skb_put(bbuf, pad + msz); 298 skb_put(bskb, pad + msz);
270 skb_copy_to_linear_data_offset(bbuf, start, buf->data, msz); 299 skb_copy_to_linear_data_offset(bskb, start, skb->data, msz);
271 msg_set_size(bmsg, start + msz); 300 msg_set_size(bmsg, start + msz);
272 msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1); 301 msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
273 bbuf->next = buf->next; 302 kfree_skb(skb);
274 kfree_skb(buf);
275 return true; 303 return true;
276} 304}
277 305
278/** 306/**
279 * tipc_msg_make_bundle(): Create bundle buf and append message to its tail 307 * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
280 * @buf: buffer to be appended and replaced 308 * @list: the buffer chain
281 * @mtu: max allowable size for the bundle buffer, inclusive header 309 * @skb: buffer to be appended and replaced
310 * @mtu: max allowable size for the bundle buffer, inclusive header
282 * @dnode: destination node for message. (Not always present in header) 311 * @dnode: destination node for message. (Not always present in header)
283 * Replaces buffer if successful 312 * Replaces buffer if successful
284 * Returns true if sucess, otherwise false 313 * Returns true if success, otherwise false
285 */ 314 */
286bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode) 315bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb,
316 u32 mtu, u32 dnode)
287{ 317{
288 struct sk_buff *bbuf; 318 struct sk_buff *bskb;
289 struct tipc_msg *bmsg; 319 struct tipc_msg *bmsg;
290 struct tipc_msg *msg = buf_msg(*buf); 320 struct tipc_msg *msg = buf_msg(skb);
291 u32 msz = msg_size(msg); 321 u32 msz = msg_size(msg);
292 u32 max = mtu - INT_H_SIZE; 322 u32 max = mtu - INT_H_SIZE;
293 323
@@ -300,20 +330,19 @@ bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode)
300 if (msz > (max / 2)) 330 if (msz > (max / 2))
301 return false; 331 return false;
302 332
303 bbuf = tipc_buf_acquire(max); 333 bskb = tipc_buf_acquire(max);
304 if (!bbuf) 334 if (!bskb)
305 return false; 335 return false;
306 336
307 skb_trim(bbuf, INT_H_SIZE); 337 skb_trim(bskb, INT_H_SIZE);
308 bmsg = buf_msg(bbuf); 338 bmsg = buf_msg(bskb);
309 tipc_msg_init(bmsg, MSG_BUNDLER, BUNDLE_OPEN, INT_H_SIZE, dnode); 339 tipc_msg_init(bmsg, MSG_BUNDLER, 0, INT_H_SIZE, dnode);
310 msg_set_seqno(bmsg, msg_seqno(msg)); 340 msg_set_seqno(bmsg, msg_seqno(msg));
311 msg_set_ack(bmsg, msg_ack(msg)); 341 msg_set_ack(bmsg, msg_ack(msg));
312 msg_set_bcast_ack(bmsg, msg_bcast_ack(msg)); 342 msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
313 bbuf->next = (*buf)->next; 343 TIPC_SKB_CB(bskb)->bundling = true;
314 tipc_msg_bundle(bbuf, *buf, mtu); 344 __skb_queue_tail(list, bskb);
315 *buf = bbuf; 345 return tipc_msg_bundle(list, skb, mtu);
316 return true;
317} 346}
318 347
319/** 348/**
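The bundling logic above reduces to length arithmetic: a message may be appended at the 4-byte-aligned end of the current bundle as long as both the buffer tailroom and the link MTU allow it. A standalone sketch of that arithmetic; struct bundle, bundle_append and the 1500-byte buffer are assumptions for illustration, not TIPC types.

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Standalone model of the bundling arithmetic: append a message at
 * the 4-byte-aligned end of the bundle if tailroom and MTU permit.
 */
#define ALIGN4(x) (((x) + 3u) & ~3u)

struct bundle {
	uint8_t data[1500];	/* assumed link MTU */
	uint32_t size;		/* current bundle length, incl. header */
	uint32_t msgcnt;	/* mirrors msg_msgcnt() */
};

static bool bundle_append(struct bundle *b, const uint8_t *msg,
			  uint32_t msz, uint32_t mtu)
{
	uint32_t start = ALIGN4(b->size);

	if (start + msz > mtu || start + msz > sizeof(b->data))
		return false;			/* caller sends it unbundled */
	memset(b->data + b->size, 0, start - b->size);	/* alignment pad */
	memcpy(b->data + start, msg, msz);
	b->size = start + msz;
	b->msgcnt++;
	return true;
}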
@@ -399,22 +428,23 @@ int tipc_msg_eval(struct sk_buff *buf, u32 *dnode)
399/* tipc_msg_reassemble() - clone a buffer chain of fragments and 428/* tipc_msg_reassemble() - clone a buffer chain of fragments and
400 * reassemble the clones into one message 429 * reassemble the clones into one message
401 */ 430 */
402struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain) 431struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list)
403{ 432{
404 struct sk_buff *buf = chain; 433 struct sk_buff *skb;
405 struct sk_buff *frag = buf; 434 struct sk_buff *frag = NULL;
406 struct sk_buff *head = NULL; 435 struct sk_buff *head = NULL;
407 int hdr_sz; 436 int hdr_sz;
408 437
409 /* Copy header if single buffer */ 438 /* Copy header if single buffer */
410 if (!buf->next) { 439 if (skb_queue_len(list) == 1) {
411 hdr_sz = skb_headroom(buf) + msg_hdr_sz(buf_msg(buf)); 440 skb = skb_peek(list);
412 return __pskb_copy(buf, hdr_sz, GFP_ATOMIC); 441 hdr_sz = skb_headroom(skb) + msg_hdr_sz(buf_msg(skb));
442 return __pskb_copy(skb, hdr_sz, GFP_ATOMIC);
413 } 443 }
414 444
415 /* Clone all fragments and reassemble */ 445 /* Clone all fragments and reassemble */
416 while (buf) { 446 skb_queue_walk(list, skb) {
417 frag = skb_clone(buf, GFP_ATOMIC); 447 frag = skb_clone(skb, GFP_ATOMIC);
418 if (!frag) 448 if (!frag)
419 goto error; 449 goto error;
420 frag->next = NULL; 450 frag->next = NULL;
@@ -422,7 +452,6 @@ struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain)
422 break; 452 break;
423 if (!head) 453 if (!head)
424 goto error; 454 goto error;
425 buf = buf->next;
426 } 455 }
427 return frag; 456 return frag;
428error: 457error:
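Taken together, the msg.c changes replace hand-linked skb->next chains with the standard sk_buff_head queue API, so a failed build reduces to __skb_queue_purge() and callers no longer track chain heads. A hedged sketch of the calling pattern the new tipc_msg_build() signature implies; example_send, the fixed MTU and the selector value are hypothetical, not taken from this patch.

/* Hypothetical caller of the new list-based API: build a message
 * into a local queue, then hand the whole queue to the link layer.
 * On failure tipc_msg_build() has already purged the list.
 */
static int example_send(struct tipc_msg *mhdr, struct msghdr *m,
			int dsz, u32 dnode)
{
	struct sk_buff_head head;
	int rc;

	__skb_queue_head_init(&head);
	rc = tipc_msg_build(mhdr, m, 0, dsz, 1500 /* assumed MTU */, &head);
	if (rc < 0)
		return rc;
	return tipc_link_xmit(&head, dnode, 0 /* assumed selector */);
}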
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 462fa194a6af..d5c83d7ecb47 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -442,6 +442,7 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
442#define NAME_DISTRIBUTOR 11 442#define NAME_DISTRIBUTOR 11
443#define MSG_FRAGMENTER 12 443#define MSG_FRAGMENTER 12
444#define LINK_CONFIG 13 444#define LINK_CONFIG 13
445#define SOCK_WAKEUP 14 /* pseudo user */
445 446
446/* 447/*
447 * Connection management protocol message types 448 * Connection management protocol message types
@@ -463,11 +464,6 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
463#define FRAGMENT 1 464#define FRAGMENT 1
464#define LAST_FRAGMENT 2 465#define LAST_FRAGMENT 2
465 466
466/* Bundling protocol message types
467 */
468#define BUNDLE_OPEN 0
469#define BUNDLE_CLOSED 1
470
471/* 467/*
472 * Link management protocol message types 468 * Link management protocol message types
473 */ 469 */
@@ -732,15 +728,20 @@ int tipc_msg_eval(struct sk_buff *buf, u32 *dnode);
732void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize, 728void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
733 u32 destnode); 729 u32 destnode);
734 730
731struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
732 uint data_sz, u32 dnode, u32 onode,
733 u32 dport, u32 oport, int errcode);
734
735int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf); 735int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
736 736
737bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu); 737bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu);
738 738
739bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode); 739bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb,
740 u32 mtu, u32 dnode);
740 741
741int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov, 742int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
742 int offset, int dsz, int mtu , struct sk_buff **chain); 743 int dsz, int mtu, struct sk_buff_head *list);
743 744
744struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain); 745struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
745 746
746#endif 747#endif
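Dropping BUNDLE_OPEN/BUNDLE_CLOSED works because "this bundle may still grow" is sender-local state, so it can live in the skb control block instead of in the wire-visible message type. A sketch of the control-block layout this assumes; the real struct tipc_skb_cb (assumed to live in core.h) carries additional fields.

/* Illustrative sketch of per-buffer sender state kept in skb->cb[]. */
struct tipc_skb_cb {
	bool bundling;	/* bundle is still open for more messages */
};

#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))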
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index dcc15bcd5692..ba6083dca95b 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/name_distr.c: TIPC name distribution code 2 * net/tipc/name_distr.c: TIPC name distribution code
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, 2014, Ericsson AB
5 * Copyright (c) 2005, 2010-2011, Wind River Systems 5 * Copyright (c) 2005, 2010-2011, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -38,39 +38,21 @@
38#include "link.h" 38#include "link.h"
39#include "name_distr.h" 39#include "name_distr.h"
40 40
41int sysctl_tipc_named_timeout __read_mostly = 2000;
42
41/** 43/**
 42 * struct publ_list - list of publications made by this node 44 * tipc_dist_queue - queue holding deferred name table updates
43 * @list: circular list of publications
44 * @list_size: number of entries in list
45 */ 45 */
46struct publ_list { 46static struct list_head tipc_dist_queue = LIST_HEAD_INIT(tipc_dist_queue);
47 struct list_head list; 47
48 u32 size; 48struct distr_queue_item {
49}; 49 struct distr_item i;
50 50 u32 dtype;
51static struct publ_list publ_zone = { 51 u32 node;
52 .list = LIST_HEAD_INIT(publ_zone.list), 52 unsigned long expires;
53 .size = 0, 53 struct list_head next;
54};
55
56static struct publ_list publ_cluster = {
57 .list = LIST_HEAD_INIT(publ_cluster.list),
58 .size = 0,
59};
60
61static struct publ_list publ_node = {
62 .list = LIST_HEAD_INIT(publ_node.list),
63 .size = 0,
64};
65
66static struct publ_list *publ_lists[] = {
67 NULL,
68 &publ_zone, /* publ_lists[TIPC_ZONE_SCOPE] */
69 &publ_cluster, /* publ_lists[TIPC_CLUSTER_SCOPE] */
70 &publ_node /* publ_lists[TIPC_NODE_SCOPE] */
71}; 54};
72 55
73
74/** 56/**
75 * publ_to_item - add publication info to a publication message 57 * publ_to_item - add publication info to a publication message
76 */ 58 */
@@ -99,9 +81,9 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
99 return buf; 81 return buf;
100} 82}
101 83
102void named_cluster_distribute(struct sk_buff *buf) 84void named_cluster_distribute(struct sk_buff *skb)
103{ 85{
104 struct sk_buff *obuf; 86 struct sk_buff *oskb;
105 struct tipc_node *node; 87 struct tipc_node *node;
106 u32 dnode; 88 u32 dnode;
107 89
@@ -112,15 +94,15 @@ void named_cluster_distribute(struct sk_buff *buf)
112 continue; 94 continue;
113 if (!tipc_node_active_links(node)) 95 if (!tipc_node_active_links(node))
114 continue; 96 continue;
115 obuf = skb_copy(buf, GFP_ATOMIC); 97 oskb = skb_copy(skb, GFP_ATOMIC);
116 if (!obuf) 98 if (!oskb)
117 break; 99 break;
118 msg_set_destnode(buf_msg(obuf), dnode); 100 msg_set_destnode(buf_msg(oskb), dnode);
119 tipc_link_xmit(obuf, dnode, dnode); 101 tipc_link_xmit_skb(oskb, dnode, dnode);
120 } 102 }
121 rcu_read_unlock(); 103 rcu_read_unlock();
122 104
123 kfree_skb(buf); 105 kfree_skb(skb);
124} 106}
125 107
126/** 108/**
@@ -131,8 +113,8 @@ struct sk_buff *tipc_named_publish(struct publication *publ)
131 struct sk_buff *buf; 113 struct sk_buff *buf;
132 struct distr_item *item; 114 struct distr_item *item;
133 115
134 list_add_tail(&publ->local_list, &publ_lists[publ->scope]->list); 116 list_add_tail_rcu(&publ->local_list,
135 publ_lists[publ->scope]->size++; 117 &tipc_nametbl->publ_list[publ->scope]);
136 118
137 if (publ->scope == TIPC_NODE_SCOPE) 119 if (publ->scope == TIPC_NODE_SCOPE)
138 return NULL; 120 return NULL;
@@ -157,7 +139,6 @@ struct sk_buff *tipc_named_withdraw(struct publication *publ)
157 struct distr_item *item; 139 struct distr_item *item;
158 140
159 list_del(&publ->local_list); 141 list_del(&publ->local_list);
160 publ_lists[publ->scope]->size--;
161 142
162 if (publ->scope == TIPC_NODE_SCOPE) 143 if (publ->scope == TIPC_NODE_SCOPE)
163 return NULL; 144 return NULL;
@@ -175,32 +156,28 @@ struct sk_buff *tipc_named_withdraw(struct publication *publ)
175 156
176/** 157/**
177 * named_distribute - prepare name info for bulk distribution to another node 158 * named_distribute - prepare name info for bulk distribution to another node
178 * @msg_list: list of messages (buffers) to be returned from this function 159 * @list: list of messages (buffers) to be returned from this function
179 * @dnode: node to be updated 160 * @dnode: node to be updated
180 * @pls: linked list of publication items to be packed into buffer chain 161 * @pls: linked list of publication items to be packed into buffer chain
181 */ 162 */
182static void named_distribute(struct list_head *msg_list, u32 dnode, 163static void named_distribute(struct sk_buff_head *list, u32 dnode,
183 struct publ_list *pls) 164 struct list_head *pls)
184{ 165{
185 struct publication *publ; 166 struct publication *publ;
186 struct sk_buff *buf = NULL; 167 struct sk_buff *skb = NULL;
187 struct distr_item *item = NULL; 168 struct distr_item *item = NULL;
188 uint dsz = pls->size * ITEM_SIZE;
189 uint msg_dsz = (tipc_node_get_mtu(dnode, 0) / ITEM_SIZE) * ITEM_SIZE; 169 uint msg_dsz = (tipc_node_get_mtu(dnode, 0) / ITEM_SIZE) * ITEM_SIZE;
190 uint rem = dsz; 170 uint msg_rem = msg_dsz;
191 uint msg_rem = 0;
192 171
193 list_for_each_entry(publ, &pls->list, local_list) { 172 list_for_each_entry(publ, pls, local_list) {
194 /* Prepare next buffer: */ 173 /* Prepare next buffer: */
195 if (!buf) { 174 if (!skb) {
196 msg_rem = min_t(uint, rem, msg_dsz); 175 skb = named_prepare_buf(PUBLICATION, msg_rem, dnode);
197 rem -= msg_rem; 176 if (!skb) {
198 buf = named_prepare_buf(PUBLICATION, msg_rem, dnode);
199 if (!buf) {
200 pr_warn("Bulk publication failure\n"); 177 pr_warn("Bulk publication failure\n");
201 return; 178 return;
202 } 179 }
203 item = (struct distr_item *)msg_data(buf_msg(buf)); 180 item = (struct distr_item *)msg_data(buf_msg(skb));
204 } 181 }
205 182
206 /* Pack publication into message: */ 183 /* Pack publication into message: */
@@ -210,10 +187,16 @@ static void named_distribute(struct list_head *msg_list, u32 dnode,
210 187
211 /* Append full buffer to list: */ 188 /* Append full buffer to list: */
212 if (!msg_rem) { 189 if (!msg_rem) {
213 list_add_tail((struct list_head *)buf, msg_list); 190 __skb_queue_tail(list, skb);
214 buf = NULL; 191 skb = NULL;
192 msg_rem = msg_dsz;
215 } 193 }
216 } 194 }
195 if (skb) {
196 msg_set_size(buf_msg(skb), INT_H_SIZE + (msg_dsz - msg_rem));
197 skb_trim(skb, INT_H_SIZE + (msg_dsz - msg_rem));
198 __skb_queue_tail(list, skb);
199 }
217} 200}
218 201
219/** 202/**
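named_distribute() now sizes each bulk buffer to a whole number of ITEM_SIZE records and trims the final, partially filled one, instead of precomputing the total payload from a publication count. A quick standalone check of the arithmetic; the 1500-byte MTU and the 20-byte item size are assumptions here, ITEM_SIZE being really sizeof(struct distr_item).

#include <stdio.h>

#define ITEM_SIZE 20	/* assumed: 5 x 32-bit fields per distr_item */

int main(void)
{
	unsigned int mtu = 1500;	/* assumed bearer MTU */
	unsigned int msg_dsz = (mtu / ITEM_SIZE) * ITEM_SIZE;

	/* Round capacity down to whole items: here 75 items, 1500 bytes */
	printf("%u bytes (%u items) per PUBLICATION message\n",
	       msg_dsz, msg_dsz / ITEM_SIZE);
	return 0;
}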
@@ -221,36 +204,68 @@ static void named_distribute(struct list_head *msg_list, u32 dnode,
221 */ 204 */
222void tipc_named_node_up(u32 dnode) 205void tipc_named_node_up(u32 dnode)
223{ 206{
224 LIST_HEAD(msg_list); 207 struct sk_buff_head head;
225 struct sk_buff *buf_chain; 208
226 209 __skb_queue_head_init(&head);
227 read_lock_bh(&tipc_nametbl_lock); 210
228 named_distribute(&msg_list, dnode, &publ_cluster); 211 rcu_read_lock();
229 named_distribute(&msg_list, dnode, &publ_zone); 212 named_distribute(&head, dnode,
230 read_unlock_bh(&tipc_nametbl_lock); 213 &tipc_nametbl->publ_list[TIPC_CLUSTER_SCOPE]);
231 214 named_distribute(&head, dnode,
232 /* Convert circular list to linear list and send: */ 215 &tipc_nametbl->publ_list[TIPC_ZONE_SCOPE]);
233 buf_chain = (struct sk_buff *)msg_list.next; 216 rcu_read_unlock();
234 ((struct sk_buff *)msg_list.prev)->next = NULL; 217
235 tipc_link_xmit(buf_chain, dnode, dnode); 218 tipc_link_xmit(&head, dnode, dnode);
219}
220
221static void tipc_publ_subscribe(struct publication *publ, u32 addr)
222{
223 struct tipc_node *node;
224
225 if (in_own_node(addr))
226 return;
227
228 node = tipc_node_find(addr);
229 if (!node) {
230 pr_warn("Node subscription rejected, unknown node 0x%x\n",
231 addr);
232 return;
233 }
234
235 tipc_node_lock(node);
236 list_add_tail(&publ->nodesub_list, &node->publ_list);
237 tipc_node_unlock(node);
238}
239
240static void tipc_publ_unsubscribe(struct publication *publ, u32 addr)
241{
242 struct tipc_node *node;
243
244 node = tipc_node_find(addr);
245 if (!node)
246 return;
247
248 tipc_node_lock(node);
249 list_del_init(&publ->nodesub_list);
250 tipc_node_unlock(node);
236} 251}
237 252
238/** 253/**
239 * named_purge_publ - remove publication associated with a failed node 254 * tipc_publ_purge - remove publication associated with a failed node
240 * 255 *
241 * Invoked for each publication issued by a newly failed node. 256 * Invoked for each publication issued by a newly failed node.
242 * Removes publication structure from name table & deletes it. 257 * Removes publication structure from name table & deletes it.
243 */ 258 */
244static void named_purge_publ(struct publication *publ) 259static void tipc_publ_purge(struct publication *publ, u32 addr)
245{ 260{
246 struct publication *p; 261 struct publication *p;
247 262
248 write_lock_bh(&tipc_nametbl_lock); 263 spin_lock_bh(&tipc_nametbl_lock);
249 p = tipc_nametbl_remove_publ(publ->type, publ->lower, 264 p = tipc_nametbl_remove_publ(publ->type, publ->lower,
250 publ->node, publ->ref, publ->key); 265 publ->node, publ->ref, publ->key);
251 if (p) 266 if (p)
252 tipc_nodesub_unsubscribe(&p->subscr); 267 tipc_publ_unsubscribe(p, addr);
253 write_unlock_bh(&tipc_nametbl_lock); 268 spin_unlock_bh(&tipc_nametbl_lock);
254 269
255 if (p != publ) { 270 if (p != publ) {
256 pr_err("Unable to remove publication from failed node\n" 271 pr_err("Unable to remove publication from failed node\n"
@@ -259,7 +274,96 @@ static void named_purge_publ(struct publication *publ)
259 publ->key); 274 publ->key);
260 } 275 }
261 276
262 kfree(p); 277 kfree_rcu(p, rcu);
278}
279
280void tipc_publ_notify(struct list_head *nsub_list, u32 addr)
281{
282 struct publication *publ, *tmp;
283
284 list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list)
285 tipc_publ_purge(publ, addr);
286}
287
288/**
289 * tipc_update_nametbl - try to process a nametable update and notify
290 * subscribers
291 *
292 * tipc_nametbl_lock must be held.
 293 * Returns true if successful, otherwise false.
294 */
295static bool tipc_update_nametbl(struct distr_item *i, u32 node, u32 dtype)
296{
297 struct publication *publ = NULL;
298
299 if (dtype == PUBLICATION) {
300 publ = tipc_nametbl_insert_publ(ntohl(i->type), ntohl(i->lower),
301 ntohl(i->upper),
302 TIPC_CLUSTER_SCOPE, node,
303 ntohl(i->ref), ntohl(i->key));
304 if (publ) {
305 tipc_publ_subscribe(publ, node);
306 return true;
307 }
308 } else if (dtype == WITHDRAWAL) {
309 publ = tipc_nametbl_remove_publ(ntohl(i->type), ntohl(i->lower),
310 node, ntohl(i->ref),
311 ntohl(i->key));
312 if (publ) {
313 tipc_publ_unsubscribe(publ, node);
314 kfree_rcu(publ, rcu);
315 return true;
316 }
317 } else {
318 pr_warn("Unrecognized name table message received\n");
319 }
320 return false;
321}
322
323/**
324 * tipc_named_add_backlog - add a failed name table update to the backlog
 325 */
327static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
328{
329 struct distr_queue_item *e;
330 unsigned long now = get_jiffies_64();
331
332 e = kzalloc(sizeof(*e), GFP_ATOMIC);
333 if (!e)
334 return;
335 e->dtype = type;
336 e->node = node;
337 e->expires = now + msecs_to_jiffies(sysctl_tipc_named_timeout);
 338 memcpy(&e->i, i, sizeof(*i));
339 list_add_tail(&e->next, &tipc_dist_queue);
340}
341
342/**
343 * tipc_named_process_backlog - try to process any pending name table updates
344 * from the network.
345 */
346void tipc_named_process_backlog(void)
347{
348 struct distr_queue_item *e, *tmp;
349 char addr[16];
350 unsigned long now = get_jiffies_64();
351
352 list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) {
353 if (time_after(e->expires, now)) {
354 if (!tipc_update_nametbl(&e->i, e->node, e->dtype))
355 continue;
356 } else {
357 tipc_addr_string_fill(addr, e->node);
358 pr_warn_ratelimited("Dropping name table update (%d) of {%u, %u, %u} from %s key=%u\n",
359 e->dtype, ntohl(e->i.type),
360 ntohl(e->i.lower),
361 ntohl(e->i.upper),
362 addr, ntohl(e->i.key));
363 }
364 list_del(&e->next);
365 kfree(e);
366 }
263} 367}
264 368
265/** 369/**
@@ -267,51 +371,19 @@ static void named_purge_publ(struct publication *publ)
267 */ 371 */
268void tipc_named_rcv(struct sk_buff *buf) 372void tipc_named_rcv(struct sk_buff *buf)
269{ 373{
270 struct publication *publ;
271 struct tipc_msg *msg = buf_msg(buf); 374 struct tipc_msg *msg = buf_msg(buf);
272 struct distr_item *item = (struct distr_item *)msg_data(msg); 375 struct distr_item *item = (struct distr_item *)msg_data(msg);
273 u32 count = msg_data_sz(msg) / ITEM_SIZE; 376 u32 count = msg_data_sz(msg) / ITEM_SIZE;
377 u32 node = msg_orignode(msg);
274 378
275 write_lock_bh(&tipc_nametbl_lock); 379 spin_lock_bh(&tipc_nametbl_lock);
276 while (count--) { 380 while (count--) {
277 if (msg_type(msg) == PUBLICATION) { 381 if (!tipc_update_nametbl(item, node, msg_type(msg)))
278 publ = tipc_nametbl_insert_publ(ntohl(item->type), 382 tipc_named_add_backlog(item, msg_type(msg), node);
279 ntohl(item->lower),
280 ntohl(item->upper),
281 TIPC_CLUSTER_SCOPE,
282 msg_orignode(msg),
283 ntohl(item->ref),
284 ntohl(item->key));
285 if (publ) {
286 tipc_nodesub_subscribe(&publ->subscr,
287 msg_orignode(msg),
288 publ,
289 (net_ev_handler)
290 named_purge_publ);
291 }
292 } else if (msg_type(msg) == WITHDRAWAL) {
293 publ = tipc_nametbl_remove_publ(ntohl(item->type),
294 ntohl(item->lower),
295 msg_orignode(msg),
296 ntohl(item->ref),
297 ntohl(item->key));
298
299 if (publ) {
300 tipc_nodesub_unsubscribe(&publ->subscr);
301 kfree(publ);
302 } else {
303 pr_err("Unable to remove publication by node 0x%x\n"
304 " (type=%u, lower=%u, ref=%u, key=%u)\n",
305 msg_orignode(msg), ntohl(item->type),
306 ntohl(item->lower), ntohl(item->ref),
307 ntohl(item->key));
308 }
309 } else {
310 pr_warn("Unrecognized name table message received\n");
311 }
312 item++; 383 item++;
313 } 384 }
314 write_unlock_bh(&tipc_nametbl_lock); 385 tipc_named_process_backlog();
386 spin_unlock_bh(&tipc_nametbl_lock);
315 kfree_skb(buf); 387 kfree_skb(buf);
316} 388}
317 389
@@ -327,11 +399,12 @@ void tipc_named_reinit(void)
327 struct publication *publ; 399 struct publication *publ;
328 int scope; 400 int scope;
329 401
330 write_lock_bh(&tipc_nametbl_lock); 402 spin_lock_bh(&tipc_nametbl_lock);
331 403
332 for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_NODE_SCOPE; scope++) 404 for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_NODE_SCOPE; scope++)
333 list_for_each_entry(publ, &publ_lists[scope]->list, local_list) 405 list_for_each_entry_rcu(publ, &tipc_nametbl->publ_list[scope],
406 local_list)
334 publ->node = tipc_own_addr; 407 publ->node = tipc_own_addr;
335 408
336 write_unlock_bh(&tipc_nametbl_lock); 409 spin_unlock_bh(&tipc_nametbl_lock);
337} 410}
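The new backlog turns a hard failure (an update that arrives before the node it refers to is known) into a retry with a deadline: each pass re-attempts pending entries and drops the ones whose timer ran out. A standalone model of that pass; struct deferred, the applies flag and the tick values are illustrative stand-ins for the queue item and tipc_update_nametbl().

#include <stdbool.h>
#include <stdio.h>

struct deferred {
	unsigned long expires;	/* abstract deadline, in "ticks" */
	bool applies;		/* stands in for tipc_update_nametbl() */
	bool queued;
};

static void process_backlog(struct deferred *q, int n, unsigned long now)
{
	for (int i = 0; i < n; i++) {
		if (!q[i].queued)
			continue;
		if (now < q[i].expires) {
			if (!q[i].applies)
				continue;	/* still pending: retry later */
		} else {
			printf("dropping expired update %d\n", i);
		}
		q[i].queued = false;		/* applied or dropped */
	}
}

int main(void)
{
	struct deferred q[] = { { 10, true, true }, { 10, false, true } };

	process_backlog(q, 2, 5);	/* entry 0 applies, entry 1 waits */
	process_backlog(q, 2, 20);	/* entry 1 expires and is dropped */
	return 0;
}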
diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h
index 8afe32b7fc9a..cef55cedcfb2 100644
--- a/net/tipc/name_distr.h
+++ b/net/tipc/name_distr.h
@@ -73,5 +73,7 @@ void named_cluster_distribute(struct sk_buff *buf);
73void tipc_named_node_up(u32 dnode); 73void tipc_named_node_up(u32 dnode);
74void tipc_named_rcv(struct sk_buff *buf); 74void tipc_named_rcv(struct sk_buff *buf);
75void tipc_named_reinit(void); 75void tipc_named_reinit(void);
76void tipc_named_process_backlog(void);
77void tipc_publ_notify(struct list_head *nsub_list, u32 addr);
76 78
77#endif 79#endif
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 9d7d37d95187..c8df0223371a 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * net/tipc/name_table.c: TIPC name table code 2 * net/tipc/name_table.c: TIPC name table code
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, 2014, Ericsson AB
5 * Copyright (c) 2004-2008, 2010-2011, Wind River Systems 5 * Copyright (c) 2004-2008, 2010-2014, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -39,10 +39,15 @@
39#include "name_table.h" 39#include "name_table.h"
40#include "name_distr.h" 40#include "name_distr.h"
41#include "subscr.h" 41#include "subscr.h"
42#include "port.h"
43 42
44#define TIPC_NAMETBL_SIZE 1024 /* must be a power of 2 */ 43#define TIPC_NAMETBL_SIZE 1024 /* must be a power of 2 */
45 44
45static const struct nla_policy
46tipc_nl_name_table_policy[TIPC_NLA_NAME_TABLE_MAX + 1] = {
47 [TIPC_NLA_NAME_TABLE_UNSPEC] = { .type = NLA_UNSPEC },
48 [TIPC_NLA_NAME_TABLE_PUBL] = { .type = NLA_NESTED }
49};
50
46/** 51/**
47 * struct name_info - name sequence publication info 52 * struct name_info - name sequence publication info
48 * @node_list: circular list of publications made by own node 53 * @node_list: circular list of publications made by own node
@@ -87,6 +92,7 @@ struct sub_seq {
87 * @ns_list: links to adjacent name sequences in hash chain 92 * @ns_list: links to adjacent name sequences in hash chain
88 * @subscriptions: list of subscriptions for this 'type' 93 * @subscriptions: list of subscriptions for this 'type'
89 * @lock: spinlock controlling access to publication lists of all sub-sequences 94 * @lock: spinlock controlling access to publication lists of all sub-sequences
95 * @rcu: RCU callback head used for deferred freeing
90 */ 96 */
91struct name_seq { 97struct name_seq {
92 u32 type; 98 u32 type;
@@ -96,21 +102,11 @@ struct name_seq {
96 struct hlist_node ns_list; 102 struct hlist_node ns_list;
97 struct list_head subscriptions; 103 struct list_head subscriptions;
98 spinlock_t lock; 104 spinlock_t lock;
105 struct rcu_head rcu;
99}; 106};
100 107
101/** 108struct name_table *tipc_nametbl;
102 * struct name_table - table containing all existing port name publications 109DEFINE_SPINLOCK(tipc_nametbl_lock);
103 * @types: pointer to fixed-sized array of name sequence lists,
104 * accessed via hashing on 'type'; name sequence lists are *not* sorted
105 * @local_publ_count: number of publications issued by this node
106 */
107struct name_table {
108 struct hlist_head *types;
109 u32 local_publ_count;
110};
111
112static struct name_table table;
113DEFINE_RWLOCK(tipc_nametbl_lock);
114 110
115static int hash(int x) 111static int hash(int x)
116{ 112{
@@ -137,9 +133,7 @@ static struct publication *publ_create(u32 type, u32 lower, u32 upper,
137 publ->node = node; 133 publ->node = node;
138 publ->ref = port_ref; 134 publ->ref = port_ref;
139 publ->key = key; 135 publ->key = key;
140 INIT_LIST_HEAD(&publ->local_list);
141 INIT_LIST_HEAD(&publ->pport_list); 136 INIT_LIST_HEAD(&publ->pport_list);
142 INIT_LIST_HEAD(&publ->subscr.nodesub_list);
143 return publ; 137 return publ;
144} 138}
145 139
@@ -174,22 +168,10 @@ static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_hea
174 nseq->alloc = 1; 168 nseq->alloc = 1;
175 INIT_HLIST_NODE(&nseq->ns_list); 169 INIT_HLIST_NODE(&nseq->ns_list);
176 INIT_LIST_HEAD(&nseq->subscriptions); 170 INIT_LIST_HEAD(&nseq->subscriptions);
177 hlist_add_head(&nseq->ns_list, seq_head); 171 hlist_add_head_rcu(&nseq->ns_list, seq_head);
178 return nseq; 172 return nseq;
179} 173}
180 174
181/*
182 * nameseq_delete_empty - deletes a name sequence structure if now unused
183 */
184static void nameseq_delete_empty(struct name_seq *seq)
185{
186 if (!seq->first_free && list_empty(&seq->subscriptions)) {
187 hlist_del_init(&seq->ns_list);
188 kfree(seq->sseqs);
189 kfree(seq);
190 }
191}
192
193/** 175/**
194 * nameseq_find_subseq - find sub-sequence (if any) matching a name instance 176 * nameseq_find_subseq - find sub-sequence (if any) matching a name instance
195 * 177 *
@@ -262,8 +244,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
262 244
263 /* Lower end overlaps existing entry => need an exact match */ 245 /* Lower end overlaps existing entry => need an exact match */
264 if ((sseq->lower != lower) || (sseq->upper != upper)) { 246 if ((sseq->lower != lower) || (sseq->upper != upper)) {
265 pr_warn("Cannot publish {%u,%u,%u}, overlap error\n",
266 type, lower, upper);
267 return NULL; 247 return NULL;
268 } 248 }
269 249
@@ -285,8 +265,6 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
285 /* Fail if upper end overlaps into an existing entry */ 265 /* Fail if upper end overlaps into an existing entry */
286 if ((inspos < nseq->first_free) && 266 if ((inspos < nseq->first_free) &&
287 (upper >= nseq->sseqs[inspos].lower)) { 267 (upper >= nseq->sseqs[inspos].lower)) {
288 pr_warn("Cannot publish {%u,%u,%u}, overlap error\n",
289 type, lower, upper);
290 return NULL; 268 return NULL;
291 } 269 }
292 270
@@ -474,8 +452,8 @@ static struct name_seq *nametbl_find_seq(u32 type)
474 struct hlist_head *seq_head; 452 struct hlist_head *seq_head;
475 struct name_seq *ns; 453 struct name_seq *ns;
476 454
477 seq_head = &table.types[hash(type)]; 455 seq_head = &tipc_nametbl->seq_hlist[hash(type)];
478 hlist_for_each_entry(ns, seq_head, ns_list) { 456 hlist_for_each_entry_rcu(ns, seq_head, ns_list) {
479 if (ns->type == type) 457 if (ns->type == type)
480 return ns; 458 return ns;
481 } 459 }
@@ -486,7 +464,9 @@ static struct name_seq *nametbl_find_seq(u32 type)
486struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper, 464struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
487 u32 scope, u32 node, u32 port, u32 key) 465 u32 scope, u32 node, u32 port, u32 key)
488{ 466{
467 struct publication *publ;
489 struct name_seq *seq = nametbl_find_seq(type); 468 struct name_seq *seq = nametbl_find_seq(type);
469 int index = hash(type);
490 470
491 if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) || 471 if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) ||
492 (lower > upper)) { 472 (lower > upper)) {
@@ -496,12 +476,16 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
496 } 476 }
497 477
498 if (!seq) 478 if (!seq)
499 seq = tipc_nameseq_create(type, &table.types[hash(type)]); 479 seq = tipc_nameseq_create(type,
480 &tipc_nametbl->seq_hlist[index]);
500 if (!seq) 481 if (!seq)
501 return NULL; 482 return NULL;
502 483
503 return tipc_nameseq_insert_publ(seq, type, lower, upper, 484 spin_lock_bh(&seq->lock);
485 publ = tipc_nameseq_insert_publ(seq, type, lower, upper,
504 scope, node, port, key); 486 scope, node, port, key);
487 spin_unlock_bh(&seq->lock);
488 return publ;
505} 489}
506 490
507struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower, 491struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
@@ -513,8 +497,16 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
513 if (!seq) 497 if (!seq)
514 return NULL; 498 return NULL;
515 499
500 spin_lock_bh(&seq->lock);
516 publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key); 501 publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key);
517 nameseq_delete_empty(seq); 502 if (!seq->first_free && list_empty(&seq->subscriptions)) {
503 hlist_del_init_rcu(&seq->ns_list);
504 kfree(seq->sseqs);
505 spin_unlock_bh(&seq->lock);
506 kfree_rcu(seq, rcu);
507 return publ;
508 }
509 spin_unlock_bh(&seq->lock);
518 return publ; 510 return publ;
519} 511}
520 512
@@ -543,14 +535,14 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
543 if (!tipc_in_scope(*destnode, tipc_own_addr)) 535 if (!tipc_in_scope(*destnode, tipc_own_addr))
544 return 0; 536 return 0;
545 537
546 read_lock_bh(&tipc_nametbl_lock); 538 rcu_read_lock();
547 seq = nametbl_find_seq(type); 539 seq = nametbl_find_seq(type);
548 if (unlikely(!seq)) 540 if (unlikely(!seq))
549 goto not_found; 541 goto not_found;
542 spin_lock_bh(&seq->lock);
550 sseq = nameseq_find_subseq(seq, instance); 543 sseq = nameseq_find_subseq(seq, instance);
551 if (unlikely(!sseq)) 544 if (unlikely(!sseq))
552 goto not_found; 545 goto no_match;
553 spin_lock_bh(&seq->lock);
554 info = sseq->info; 546 info = sseq->info;
555 547
556 /* Closest-First Algorithm */ 548 /* Closest-First Algorithm */
@@ -600,7 +592,7 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
600no_match: 592no_match:
601 spin_unlock_bh(&seq->lock); 593 spin_unlock_bh(&seq->lock);
602not_found: 594not_found:
603 read_unlock_bh(&tipc_nametbl_lock); 595 rcu_read_unlock();
604 *destnode = node; 596 *destnode = node;
605 return ref; 597 return ref;
606} 598}
@@ -626,13 +618,12 @@ int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
626 struct name_info *info; 618 struct name_info *info;
627 int res = 0; 619 int res = 0;
628 620
629 read_lock_bh(&tipc_nametbl_lock); 621 rcu_read_lock();
630 seq = nametbl_find_seq(type); 622 seq = nametbl_find_seq(type);
631 if (!seq) 623 if (!seq)
632 goto exit; 624 goto exit;
633 625
634 spin_lock_bh(&seq->lock); 626 spin_lock_bh(&seq->lock);
635
636 sseq = seq->sseqs + nameseq_locate_subseq(seq, lower); 627 sseq = seq->sseqs + nameseq_locate_subseq(seq, lower);
637 sseq_stop = seq->sseqs + seq->first_free; 628 sseq_stop = seq->sseqs + seq->first_free;
638 for (; sseq != sseq_stop; sseq++) { 629 for (; sseq != sseq_stop; sseq++) {
@@ -650,10 +641,9 @@ int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
650 if (info->cluster_list_size != info->node_list_size) 641 if (info->cluster_list_size != info->node_list_size)
651 res = 1; 642 res = 1;
652 } 643 }
653
654 spin_unlock_bh(&seq->lock); 644 spin_unlock_bh(&seq->lock);
655exit: 645exit:
656 read_unlock_bh(&tipc_nametbl_lock); 646 rcu_read_unlock();
657 return res; 647 return res;
658} 648}
659 649
@@ -666,20 +656,23 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
666 struct publication *publ; 656 struct publication *publ;
667 struct sk_buff *buf = NULL; 657 struct sk_buff *buf = NULL;
668 658
669 if (table.local_publ_count >= TIPC_MAX_PUBLICATIONS) { 659 spin_lock_bh(&tipc_nametbl_lock);
660 if (tipc_nametbl->local_publ_count >= TIPC_MAX_PUBLICATIONS) {
670 pr_warn("Publication failed, local publication limit reached (%u)\n", 661 pr_warn("Publication failed, local publication limit reached (%u)\n",
671 TIPC_MAX_PUBLICATIONS); 662 TIPC_MAX_PUBLICATIONS);
663 spin_unlock_bh(&tipc_nametbl_lock);
672 return NULL; 664 return NULL;
673 } 665 }
674 666
675 write_lock_bh(&tipc_nametbl_lock);
676 publ = tipc_nametbl_insert_publ(type, lower, upper, scope, 667 publ = tipc_nametbl_insert_publ(type, lower, upper, scope,
677 tipc_own_addr, port_ref, key); 668 tipc_own_addr, port_ref, key);
678 if (likely(publ)) { 669 if (likely(publ)) {
679 table.local_publ_count++; 670 tipc_nametbl->local_publ_count++;
680 buf = tipc_named_publish(publ); 671 buf = tipc_named_publish(publ);
672 /* Any pending external events? */
673 tipc_named_process_backlog();
681 } 674 }
682 write_unlock_bh(&tipc_nametbl_lock); 675 spin_unlock_bh(&tipc_nametbl_lock);
683 676
684 if (buf) 677 if (buf)
685 named_cluster_distribute(buf); 678 named_cluster_distribute(buf);
@@ -692,25 +685,28 @@ struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
692int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key) 685int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
693{ 686{
694 struct publication *publ; 687 struct publication *publ;
695 struct sk_buff *buf; 688 struct sk_buff *skb = NULL;
696 689
697 write_lock_bh(&tipc_nametbl_lock); 690 spin_lock_bh(&tipc_nametbl_lock);
698 publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key); 691 publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
699 if (likely(publ)) { 692 if (likely(publ)) {
700 table.local_publ_count--; 693 tipc_nametbl->local_publ_count--;
701 buf = tipc_named_withdraw(publ); 694 skb = tipc_named_withdraw(publ);
702 write_unlock_bh(&tipc_nametbl_lock); 695 /* Any pending external events? */
696 tipc_named_process_backlog();
703 list_del_init(&publ->pport_list); 697 list_del_init(&publ->pport_list);
704 kfree(publ); 698 kfree_rcu(publ, rcu);
699 } else {
700 pr_err("Unable to remove local publication\n"
701 "(type=%u, lower=%u, ref=%u, key=%u)\n",
702 type, lower, ref, key);
703 }
704 spin_unlock_bh(&tipc_nametbl_lock);
705 705
706 if (buf) 706 if (skb) {
707 named_cluster_distribute(buf); 707 named_cluster_distribute(skb);
708 return 1; 708 return 1;
709 } 709 }
710 write_unlock_bh(&tipc_nametbl_lock);
711 pr_err("Unable to remove local publication\n"
712 "(type=%u, lower=%u, ref=%u, key=%u)\n",
713 type, lower, ref, key);
714 return 0; 710 return 0;
715} 711}
716 712
@@ -720,12 +716,14 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
720void tipc_nametbl_subscribe(struct tipc_subscription *s) 716void tipc_nametbl_subscribe(struct tipc_subscription *s)
721{ 717{
722 u32 type = s->seq.type; 718 u32 type = s->seq.type;
719 int index = hash(type);
723 struct name_seq *seq; 720 struct name_seq *seq;
724 721
725 write_lock_bh(&tipc_nametbl_lock); 722 spin_lock_bh(&tipc_nametbl_lock);
726 seq = nametbl_find_seq(type); 723 seq = nametbl_find_seq(type);
727 if (!seq) 724 if (!seq)
728 seq = tipc_nameseq_create(type, &table.types[hash(type)]); 725 seq = tipc_nameseq_create(type,
726 &tipc_nametbl->seq_hlist[index]);
729 if (seq) { 727 if (seq) {
730 spin_lock_bh(&seq->lock); 728 spin_lock_bh(&seq->lock);
731 tipc_nameseq_subscribe(seq, s); 729 tipc_nameseq_subscribe(seq, s);
@@ -734,7 +732,7 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s)
734 pr_warn("Failed to create subscription for {%u,%u,%u}\n", 732 pr_warn("Failed to create subscription for {%u,%u,%u}\n",
735 s->seq.type, s->seq.lower, s->seq.upper); 733 s->seq.type, s->seq.lower, s->seq.upper);
736 } 734 }
737 write_unlock_bh(&tipc_nametbl_lock); 735 spin_unlock_bh(&tipc_nametbl_lock);
738} 736}
739 737
740/** 738/**
@@ -744,18 +742,23 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
744{ 742{
745 struct name_seq *seq; 743 struct name_seq *seq;
746 744
747 write_lock_bh(&tipc_nametbl_lock); 745 spin_lock_bh(&tipc_nametbl_lock);
748 seq = nametbl_find_seq(s->seq.type); 746 seq = nametbl_find_seq(s->seq.type);
749 if (seq != NULL) { 747 if (seq != NULL) {
750 spin_lock_bh(&seq->lock); 748 spin_lock_bh(&seq->lock);
751 list_del_init(&s->nameseq_list); 749 list_del_init(&s->nameseq_list);
752 spin_unlock_bh(&seq->lock); 750 if (!seq->first_free && list_empty(&seq->subscriptions)) {
753 nameseq_delete_empty(seq); 751 hlist_del_init_rcu(&seq->ns_list);
752 kfree(seq->sseqs);
753 spin_unlock_bh(&seq->lock);
754 kfree_rcu(seq, rcu);
755 } else {
756 spin_unlock_bh(&seq->lock);
757 }
754 } 758 }
755 write_unlock_bh(&tipc_nametbl_lock); 759 spin_unlock_bh(&tipc_nametbl_lock);
756} 760}
757 761
758
759/** 762/**
760 * subseq_list - print specified sub-sequence contents into the given buffer 763 * subseq_list - print specified sub-sequence contents into the given buffer
761 */ 764 */
@@ -877,8 +880,8 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
877 lowbound = 0; 880 lowbound = 0;
878 upbound = ~0; 881 upbound = ~0;
879 for (i = 0; i < TIPC_NAMETBL_SIZE; i++) { 882 for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
880 seq_head = &table.types[i]; 883 seq_head = &tipc_nametbl->seq_hlist[i];
881 hlist_for_each_entry(seq, seq_head, ns_list) { 884 hlist_for_each_entry_rcu(seq, seq_head, ns_list) {
882 ret += nameseq_list(seq, buf + ret, len - ret, 885 ret += nameseq_list(seq, buf + ret, len - ret,
883 depth, seq->type, 886 depth, seq->type,
884 lowbound, upbound, i); 887 lowbound, upbound, i);
@@ -893,8 +896,8 @@ static int nametbl_list(char *buf, int len, u32 depth_info,
893 } 896 }
894 ret += nametbl_header(buf + ret, len - ret, depth); 897 ret += nametbl_header(buf + ret, len - ret, depth);
895 i = hash(type); 898 i = hash(type);
896 seq_head = &table.types[i]; 899 seq_head = &tipc_nametbl->seq_hlist[i];
897 hlist_for_each_entry(seq, seq_head, ns_list) { 900 hlist_for_each_entry_rcu(seq, seq_head, ns_list) {
898 if (seq->type == type) { 901 if (seq->type == type) {
899 ret += nameseq_list(seq, buf + ret, len - ret, 902 ret += nameseq_list(seq, buf + ret, len - ret,
900 depth, type, 903 depth, type,
@@ -926,11 +929,11 @@ struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space)
926 pb = TLV_DATA(rep_tlv); 929 pb = TLV_DATA(rep_tlv);
927 pb_len = ULTRA_STRING_MAX_LEN; 930 pb_len = ULTRA_STRING_MAX_LEN;
928 argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area); 931 argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area);
929 read_lock_bh(&tipc_nametbl_lock); 932 rcu_read_lock();
930 str_len = nametbl_list(pb, pb_len, ntohl(argv->depth), 933 str_len = nametbl_list(pb, pb_len, ntohl(argv->depth),
931 ntohl(argv->type), 934 ntohl(argv->type),
932 ntohl(argv->lowbound), ntohl(argv->upbound)); 935 ntohl(argv->lowbound), ntohl(argv->upbound));
933 read_unlock_bh(&tipc_nametbl_lock); 936 rcu_read_unlock();
934 str_len += 1; /* for "\0" */ 937 str_len += 1; /* for "\0" */
935 skb_put(buf, TLV_SPACE(str_len)); 938 skb_put(buf, TLV_SPACE(str_len));
936 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len); 939 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
@@ -940,12 +943,18 @@ struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space)
940 943
941int tipc_nametbl_init(void) 944int tipc_nametbl_init(void)
942{ 945{
943 table.types = kcalloc(TIPC_NAMETBL_SIZE, sizeof(struct hlist_head), 946 int i;
944 GFP_ATOMIC); 947
945 if (!table.types) 948 tipc_nametbl = kzalloc(sizeof(*tipc_nametbl), GFP_ATOMIC);
949 if (!tipc_nametbl)
946 return -ENOMEM; 950 return -ENOMEM;
947 951
948 table.local_publ_count = 0; 952 for (i = 0; i < TIPC_NAMETBL_SIZE; i++)
953 INIT_HLIST_HEAD(&tipc_nametbl->seq_hlist[i]);
954
955 INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_ZONE_SCOPE]);
956 INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_CLUSTER_SCOPE]);
957 INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_NODE_SCOPE]);
949 return 0; 958 return 0;
950} 959}
951 960
@@ -960,17 +969,19 @@ static void tipc_purge_publications(struct name_seq *seq)
960 struct sub_seq *sseq; 969 struct sub_seq *sseq;
961 struct name_info *info; 970 struct name_info *info;
962 971
963 if (!seq->sseqs) { 972 spin_lock_bh(&seq->lock);
964 nameseq_delete_empty(seq);
965 return;
966 }
967 sseq = seq->sseqs; 973 sseq = seq->sseqs;
968 info = sseq->info; 974 info = sseq->info;
969 list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) { 975 list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
970 tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node, 976 tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node,
971 publ->ref, publ->key); 977 publ->ref, publ->key);
972 kfree(publ); 978 kfree_rcu(publ, rcu);
973 } 979 }
980 hlist_del_init_rcu(&seq->ns_list);
981 kfree(seq->sseqs);
982 spin_unlock_bh(&seq->lock);
983
984 kfree_rcu(seq, rcu);
974} 985}
975 986
976void tipc_nametbl_stop(void) 987void tipc_nametbl_stop(void)
@@ -978,21 +989,202 @@ void tipc_nametbl_stop(void)
978 u32 i; 989 u32 i;
979 struct name_seq *seq; 990 struct name_seq *seq;
980 struct hlist_head *seq_head; 991 struct hlist_head *seq_head;
981 struct hlist_node *safe;
982 992
983 /* Verify name table is empty and purge any lingering 993 /* Verify name table is empty and purge any lingering
984 * publications, then release the name table 994 * publications, then release the name table
985 */ 995 */
986 write_lock_bh(&tipc_nametbl_lock); 996 spin_lock_bh(&tipc_nametbl_lock);
987 for (i = 0; i < TIPC_NAMETBL_SIZE; i++) { 997 for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
988 if (hlist_empty(&table.types[i])) 998 if (hlist_empty(&tipc_nametbl->seq_hlist[i]))
989 continue; 999 continue;
990 seq_head = &table.types[i]; 1000 seq_head = &tipc_nametbl->seq_hlist[i];
991 hlist_for_each_entry_safe(seq, safe, seq_head, ns_list) { 1001 hlist_for_each_entry_rcu(seq, seq_head, ns_list) {
992 tipc_purge_publications(seq); 1002 tipc_purge_publications(seq);
993 } 1003 }
994 } 1004 }
995 kfree(table.types); 1005 spin_unlock_bh(&tipc_nametbl_lock);
996 table.types = NULL; 1006
997 write_unlock_bh(&tipc_nametbl_lock); 1007 synchronize_net();
1008 kfree(tipc_nametbl);
1009
1010}
1011
1012static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg,
1013 struct name_seq *seq,
1014 struct sub_seq *sseq, u32 *last_publ)
1015{
1016 void *hdr;
1017 struct nlattr *attrs;
1018 struct nlattr *publ;
1019 struct publication *p;
1020
1021 if (*last_publ) {
1022 list_for_each_entry(p, &sseq->info->zone_list, zone_list)
1023 if (p->key == *last_publ)
1024 break;
1025 if (p->key != *last_publ)
1026 return -EPIPE;
1027 } else {
1028 p = list_first_entry(&sseq->info->zone_list, struct publication,
1029 zone_list);
1030 }
1031
1032 list_for_each_entry_from(p, &sseq->info->zone_list, zone_list) {
1033 *last_publ = p->key;
1034
1035 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq,
1036 &tipc_genl_v2_family, NLM_F_MULTI,
1037 TIPC_NL_NAME_TABLE_GET);
1038 if (!hdr)
1039 return -EMSGSIZE;
1040
1041 attrs = nla_nest_start(msg->skb, TIPC_NLA_NAME_TABLE);
1042 if (!attrs)
1043 goto msg_full;
1044
1045 publ = nla_nest_start(msg->skb, TIPC_NLA_NAME_TABLE_PUBL);
1046 if (!publ)
1047 goto attr_msg_full;
1048
1049 if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_TYPE, seq->type))
1050 goto publ_msg_full;
1051 if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_LOWER, sseq->lower))
1052 goto publ_msg_full;
1053 if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_UPPER, sseq->upper))
1054 goto publ_msg_full;
1055 if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_SCOPE, p->scope))
1056 goto publ_msg_full;
1057 if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_NODE, p->node))
1058 goto publ_msg_full;
1059 if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_REF, p->ref))
1060 goto publ_msg_full;
1061 if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_KEY, p->key))
1062 goto publ_msg_full;
1063
1064 nla_nest_end(msg->skb, publ);
1065 nla_nest_end(msg->skb, attrs);
1066 genlmsg_end(msg->skb, hdr);
1067 }
1068 *last_publ = 0;
1069
1070 return 0;
1071
1072publ_msg_full:
1073 nla_nest_cancel(msg->skb, publ);
1074attr_msg_full:
1075 nla_nest_cancel(msg->skb, attrs);
1076msg_full:
1077 genlmsg_cancel(msg->skb, hdr);
1078
1079 return -EMSGSIZE;
1080}
1081
1082static int __tipc_nl_subseq_list(struct tipc_nl_msg *msg, struct name_seq *seq,
1083 u32 *last_lower, u32 *last_publ)
1084{
1085 struct sub_seq *sseq;
1086 struct sub_seq *sseq_start;
1087 int err;
1088
1089 if (*last_lower) {
1090 sseq_start = nameseq_find_subseq(seq, *last_lower);
1091 if (!sseq_start)
1092 return -EPIPE;
1093 } else {
1094 sseq_start = seq->sseqs;
1095 }
1096
1097 for (sseq = sseq_start; sseq != &seq->sseqs[seq->first_free]; sseq++) {
1098 err = __tipc_nl_add_nametable_publ(msg, seq, sseq, last_publ);
1099 if (err) {
1100 *last_lower = sseq->lower;
1101 return err;
1102 }
1103 }
1104 *last_lower = 0;
1105
1106 return 0;
1107}
1108
1109static int __tipc_nl_seq_list(struct tipc_nl_msg *msg, u32 *last_type,
1110 u32 *last_lower, u32 *last_publ)
1111{
1112 struct hlist_head *seq_head;
1113 struct name_seq *seq = NULL;
1114 int err;
1115 int i;
1116
1117 if (*last_type)
1118 i = hash(*last_type);
1119 else
1120 i = 0;
1121
1122 for (; i < TIPC_NAMETBL_SIZE; i++) {
1123 seq_head = &tipc_nametbl->seq_hlist[i];
1124
1125 if (*last_type) {
1126 seq = nametbl_find_seq(*last_type);
1127 if (!seq)
1128 return -EPIPE;
1129 } else {
1130 hlist_for_each_entry_rcu(seq, seq_head, ns_list)
1131 break;
1132 if (!seq)
1133 continue;
1134 }
1135
1136 hlist_for_each_entry_from_rcu(seq, ns_list) {
1137 spin_lock_bh(&seq->lock);
1138 err = __tipc_nl_subseq_list(msg, seq, last_lower,
1139 last_publ);
1140
1141 if (err) {
1142 *last_type = seq->type;
1143 spin_unlock_bh(&seq->lock);
1144 return err;
1145 }
1146 spin_unlock_bh(&seq->lock);
1147 }
1148 *last_type = 0;
1149 }
1150 return 0;
1151}
1152
1153int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
1154{
1155 int err;
1156 int done = cb->args[3];
1157 u32 last_type = cb->args[0];
1158 u32 last_lower = cb->args[1];
1159 u32 last_publ = cb->args[2];
1160 struct tipc_nl_msg msg;
1161
1162 if (done)
1163 return 0;
1164
1165 msg.skb = skb;
1166 msg.portid = NETLINK_CB(cb->skb).portid;
1167 msg.seq = cb->nlh->nlmsg_seq;
1168
1169 rcu_read_lock();
1170 err = __tipc_nl_seq_list(&msg, &last_type, &last_lower, &last_publ);
1171 if (!err) {
1172 done = 1;
1173 } else if (err != -EMSGSIZE) {
1174 /* We never set seq or call nl_dump_check_consistent(), so
1175 * setting prev_seq here will cause the consistency check to
1176 * fail in the netlink callback handler, resulting in the
1177 * NLMSG_DONE message having the NLM_F_DUMP_INTR flag set if
1178 * we got an error.
1179 */
1180 cb->prev_seq = 1;
1181 }
1182 rcu_read_unlock();
1183
1184 cb->args[0] = last_type;
1185 cb->args[1] = last_lower;
1186 cb->args[2] = last_publ;
1187 cb->args[3] = done;
1188
1189 return skb->len;
998} 1190}
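The dump functions above all follow the same resumable-iteration idiom: netlink may stop a dump mid-way when the reply skb fills up, so the current position (last_type, last_lower, last_publ) is parked in cb->args[] and the next invocation re-finds it, returning -EPIPE if the entry vanished in between. A standalone model of the idiom; cursor, dump_chunk and the budget are illustrative.

#include <stdio.h>

/* Each call emits at most 'budget' entries; the cursor records where
 * to resume, exactly as cb->args[] does for a netlink dump.
 */
struct cursor { unsigned int next; int done; };

static unsigned int dump_chunk(struct cursor *c, unsigned int total,
			       unsigned int budget)
{
	unsigned int emitted = 0;

	if (c->done)
		return 0;
	while (c->next < total && emitted < budget) {
		printf("entry %u\n", c->next);
		c->next++;
		emitted++;
	}
	if (c->next == total)
		c->done = 1;	/* nothing left: next call returns 0 */
	return emitted;
}

int main(void)
{
	struct cursor c = { 0, 0 };

	while (dump_chunk(&c, 5, 2))	/* 5 entries, 2 per "message" */
		;
	return 0;
}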
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index f02f48b9a216..5f0dee92010d 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/name_table.h: Include file for TIPC name table code 2 * net/tipc/name_table.h: Include file for TIPC name table code
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, 2014, Ericsson AB
5 * Copyright (c) 2004-2005, 2010-2011, Wind River Systems 5 * Copyright (c) 2004-2005, 2010-2011, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -37,15 +37,15 @@
37#ifndef _TIPC_NAME_TABLE_H 37#ifndef _TIPC_NAME_TABLE_H
38#define _TIPC_NAME_TABLE_H 38#define _TIPC_NAME_TABLE_H
39 39
40#include "node_subscr.h"
41
42struct tipc_subscription; 40struct tipc_subscription;
43struct tipc_port_list; 41struct tipc_port_list;
44 42
45/* 43/*
46 * TIPC name types reserved for internal TIPC use (both current and planned) 44 * TIPC name types reserved for internal TIPC use (both current and planned)
47 */ 45 */
48#define TIPC_ZM_SRV 3 /* zone master service name type */ 46#define TIPC_ZM_SRV 3 /* zone master service name type */
47#define TIPC_PUBL_SCOPE_NUM (TIPC_NODE_SCOPE + 1)
48#define TIPC_NAMETBL_SIZE 1024 /* must be a power of 2 */
49 49
50/** 50/**
51 * struct publication - info about a published (name or) name sequence 51 * struct publication - info about a published (name or) name sequence
@@ -56,12 +56,13 @@ struct tipc_port_list;
56 * @node: network address of publishing port's node 56 * @node: network address of publishing port's node
57 * @ref: publishing port 57 * @ref: publishing port
58 * @key: publication key 58 * @key: publication key
59 * @subscr: subscription to "node down" event (for off-node publications only) 59 * @nodesub_list: subscription to "node down" event (off-node publication only)
60 * @local_list: adjacent entries in list of publications made by this node 60 * @local_list: adjacent entries in list of publications made by this node
61 * @pport_list: adjacent entries in list of publications made by this port 61 * @pport_list: adjacent entries in list of publications made by this port
62 * @node_list: adjacent matching name seq publications with >= node scope 62 * @node_list: adjacent matching name seq publications with >= node scope
63 * @cluster_list: adjacent matching name seq publications with >= cluster scope 63 * @cluster_list: adjacent matching name seq publications with >= cluster scope
64 * @zone_list: adjacent matching name seq publications with >= zone scope 64 * @zone_list: adjacent matching name seq publications with >= zone scope
65 * @rcu: RCU callback head used for deferred freeing
65 * 66 *
66 * Note that the node list, cluster list, and zone list are circular lists. 67 * Note that the node list, cluster list, and zone list are circular lists.
67 */ 68 */
@@ -73,16 +74,31 @@ struct publication {
73 u32 node; 74 u32 node;
74 u32 ref; 75 u32 ref;
75 u32 key; 76 u32 key;
76 struct tipc_node_subscr subscr; 77 struct list_head nodesub_list;
77 struct list_head local_list; 78 struct list_head local_list;
78 struct list_head pport_list; 79 struct list_head pport_list;
79 struct list_head node_list; 80 struct list_head node_list;
80 struct list_head cluster_list; 81 struct list_head cluster_list;
81 struct list_head zone_list; 82 struct list_head zone_list;
83 struct rcu_head rcu;
84};
85
86/**
87 * struct name_table - table containing all existing port name publications
88 * @seq_hlist: name sequence hash lists
 89 * @publ_list: publication lists
90 * @local_publ_count: number of publications issued by this node
91 */
92struct name_table {
93 struct hlist_head seq_hlist[TIPC_NAMETBL_SIZE];
94 struct list_head publ_list[TIPC_PUBL_SCOPE_NUM];
95 u32 local_publ_count;
82}; 96};
83 97
98extern spinlock_t tipc_nametbl_lock;
99extern struct name_table *tipc_nametbl;
84 100
85extern rwlock_t tipc_nametbl_lock; 101int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb);
86 102
87struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space); 103struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space);
88u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node); 104u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node);
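These declarations imply the locking scheme used throughout the rewritten table: writers serialize on the tipc_nametbl_lock spinlock, while readers traverse the hash chains under RCU and take only the per-sequence lock to inspect publication lists. A sketch of the intended read side, not verbatim TIPC code; seq and type are assumed locals and hash() is the static helper in name_table.c.

/* Read-side sketch (assumes struct name_seq *seq and u32 type). */
rcu_read_lock();
hlist_for_each_entry_rcu(seq, &tipc_nametbl->seq_hlist[hash(type)],
			 ns_list) {
	if (seq->type != type)
		continue;
	spin_lock_bh(&seq->lock);
	/* ... inspect seq->sseqs and the publication lists ... */
	spin_unlock_bh(&seq->lock);
	break;
}
rcu_read_unlock();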
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 7fcc94998fea..cf13df3cde8f 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -38,11 +38,15 @@
38#include "net.h" 38#include "net.h"
39#include "name_distr.h" 39#include "name_distr.h"
40#include "subscr.h" 40#include "subscr.h"
41#include "port.h"
42#include "socket.h" 41#include "socket.h"
43#include "node.h" 42#include "node.h"
44#include "config.h" 43#include "config.h"
45 44
45static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
46 [TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC },
 47 [TIPC_NLA_NET_ID] = { .type = NLA_U32 },
 48 [TIPC_NLA_NET_ADDR] = { .type = NLA_U32 }
48};
49
46/* 50/*
47 * The TIPC locking policy is designed to ensure a very fine locking 51 * The TIPC locking policy is designed to ensure a very fine locking
48 * granularity, permitting complete parallel access to individual 52 * granularity, permitting complete parallel access to individual
@@ -111,7 +115,7 @@ int tipc_net_start(u32 addr)
111 115
112 tipc_own_addr = addr; 116 tipc_own_addr = addr;
113 tipc_named_reinit(); 117 tipc_named_reinit();
114 tipc_port_reinit(); 118 tipc_sk_reinit();
115 res = tipc_bclink_init(); 119 res = tipc_bclink_init();
116 if (res) 120 if (res)
117 return res; 121 return res;
@@ -139,3 +143,104 @@ void tipc_net_stop(void)
139 143
140 pr_info("Left network mode\n"); 144 pr_info("Left network mode\n");
141} 145}
146
147static int __tipc_nl_add_net(struct tipc_nl_msg *msg)
148{
149 void *hdr;
150 struct nlattr *attrs;
151
152 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
153 NLM_F_MULTI, TIPC_NL_NET_GET);
154 if (!hdr)
155 return -EMSGSIZE;
156
157 attrs = nla_nest_start(msg->skb, TIPC_NLA_NET);
158 if (!attrs)
159 goto msg_full;
160
161 if (nla_put_u32(msg->skb, TIPC_NLA_NET_ID, tipc_net_id))
162 goto attr_msg_full;
163
164 nla_nest_end(msg->skb, attrs);
165 genlmsg_end(msg->skb, hdr);
166
167 return 0;
168
169attr_msg_full:
170 nla_nest_cancel(msg->skb, attrs);
171msg_full:
172 genlmsg_cancel(msg->skb, hdr);
173
174 return -EMSGSIZE;
175}
176
177int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb)
178{
179 int err;
180 int done = cb->args[0];
181 struct tipc_nl_msg msg;
182
183 if (done)
184 return 0;
185
186 msg.skb = skb;
187 msg.portid = NETLINK_CB(cb->skb).portid;
188 msg.seq = cb->nlh->nlmsg_seq;
189
190 err = __tipc_nl_add_net(&msg);
191 if (err)
192 goto out;
193
194 done = 1;
195out:
196 cb->args[0] = done;
197
198 return skb->len;
199}
200
201int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
202{
203 int err;
204 struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
205
206 if (!info->attrs[TIPC_NLA_NET])
207 return -EINVAL;
208
209 err = nla_parse_nested(attrs, TIPC_NLA_NET_MAX,
210 info->attrs[TIPC_NLA_NET],
211 tipc_nl_net_policy);
212 if (err)
213 return err;
214
215 if (attrs[TIPC_NLA_NET_ID]) {
216 u32 val;
217
218 /* Can't change net id once TIPC has joined a network */
219 if (tipc_own_addr)
220 return -EPERM;
221
222 val = nla_get_u32(attrs[TIPC_NLA_NET_ID]);
223 if (val < 1 || val > 9999)
224 return -EINVAL;
225
226 tipc_net_id = val;
227 }
228
229 if (attrs[TIPC_NLA_NET_ADDR]) {
230 u32 addr;
231
232 /* Can't change net addr once TIPC has joined a network */
233 if (tipc_own_addr)
234 return -EPERM;
235
236 addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
237 if (!tipc_addr_node_valid(addr))
238 return -EINVAL;
239
240 rtnl_lock();
241 tipc_net_start(addr);
242 rtnl_unlock();
243 }
244
245 return 0;
246}
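tipc_nl_net_set() takes the node address as a raw u32, so it is worth recalling how TIPC packs one: zone, cluster and node occupy bits 31-24, 23-12 and 11-0 respectively, the layout tipc_addr_node_valid() validates. A standalone illustration; tipc_addr() here mirrors the helper in addr.h.

#include <stdio.h>

/* TIPC packs a network address <zone.cluster.node> into one u32. */
static unsigned int tipc_addr(unsigned int zone, unsigned int cluster,
			      unsigned int node)
{
	return (zone << 24) | (cluster << 12) | node;
}

int main(void)
{
	unsigned int a = tipc_addr(1, 1, 10);	/* <1.1.10> */

	printf("<%u.%u.%u> = 0x%08x\n",
	       a >> 24, (a >> 12) & 0xfff, a & 0xfff, a);
	return 0;
}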
diff --git a/net/tipc/net.h b/net/tipc/net.h
index 59ef3388be2c..a81c1b9eb150 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/net.h: Include file for TIPC network routing code 2 * net/tipc/net.h: Include file for TIPC network routing code
3 * 3 *
4 * Copyright (c) 1995-2006, Ericsson AB 4 * Copyright (c) 1995-2006, 2014, Ericsson AB
5 * Copyright (c) 2005, 2010-2011, Wind River Systems 5 * Copyright (c) 2005, 2010-2011, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -37,7 +37,13 @@
37#ifndef _TIPC_NET_H 37#ifndef _TIPC_NET_H
38#define _TIPC_NET_H 38#define _TIPC_NET_H
39 39
40#include <net/genetlink.h>
41
40int tipc_net_start(u32 addr); 42int tipc_net_start(u32 addr);
43
41void tipc_net_stop(void); 44void tipc_net_stop(void);
42 45
46int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb);
47int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
48
43#endif 49#endif
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index ad844d365340..b891e3905bc4 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/netlink.c: TIPC configuration handling 2 * net/tipc/netlink.c: TIPC configuration handling
3 * 3 *
4 * Copyright (c) 2005-2006, Ericsson AB 4 * Copyright (c) 2005-2006, 2014, Ericsson AB
5 * Copyright (c) 2005-2007, Wind River Systems 5 * Copyright (c) 2005-2007, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -36,6 +36,12 @@
36 36
37#include "core.h" 37#include "core.h"
38#include "config.h" 38#include "config.h"
39#include "socket.h"
40#include "name_table.h"
41#include "bearer.h"
42#include "link.h"
43#include "node.h"
44#include "net.h"
39#include <net/genetlink.h> 45#include <net/genetlink.h>
40 46
41static int handle_cmd(struct sk_buff *skb, struct genl_info *info) 47static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
@@ -68,6 +74,19 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
68 return 0; 74 return 0;
69} 75}
70 76
77static const struct nla_policy tipc_nl_policy[TIPC_NLA_MAX + 1] = {
78 [TIPC_NLA_UNSPEC] = { .type = NLA_UNSPEC, },
79 [TIPC_NLA_BEARER] = { .type = NLA_NESTED, },
80 [TIPC_NLA_SOCK] = { .type = NLA_NESTED, },
81 [TIPC_NLA_PUBL] = { .type = NLA_NESTED, },
82 [TIPC_NLA_LINK] = { .type = NLA_NESTED, },
83 [TIPC_NLA_MEDIA] = { .type = NLA_NESTED, },
84 [TIPC_NLA_NODE] = { .type = NLA_NESTED, },
85 [TIPC_NLA_NET] = { .type = NLA_NESTED, },
86 [TIPC_NLA_NAME_TABLE] = { .type = NLA_NESTED, }
87};
88
89/* Legacy ASCII API */
71static struct genl_family tipc_genl_family = { 90static struct genl_family tipc_genl_family = {
72 .id = GENL_ID_GENERATE, 91 .id = GENL_ID_GENERATE,
73 .name = TIPC_GENL_NAME, 92 .name = TIPC_GENL_NAME,
@@ -76,6 +95,7 @@ static struct genl_family tipc_genl_family = {
76 .maxattr = 0, 95 .maxattr = 0,
77}; 96};
78 97
98/* Legacy ASCII API */
79static struct genl_ops tipc_genl_ops[] = { 99static struct genl_ops tipc_genl_ops[] = {
80 { 100 {
81 .cmd = TIPC_GENL_CMD, 101 .cmd = TIPC_GENL_CMD,
@@ -83,12 +103,122 @@ static struct genl_ops tipc_genl_ops[] = {
83 }, 103 },
84}; 104};
85 105
106/* Users of the legacy API (tipc-config) can't handle us adding operations,
107 * so we register a separate genl family for the new API.

108 */
109struct genl_family tipc_genl_v2_family = {
110 .id = GENL_ID_GENERATE,
111 .name = TIPC_GENL_V2_NAME,
112 .version = TIPC_GENL_V2_VERSION,
113 .hdrsize = 0,
114 .maxattr = TIPC_NLA_MAX,
115};
116
117static const struct genl_ops tipc_genl_v2_ops[] = {
118 {
119 .cmd = TIPC_NL_BEARER_DISABLE,
120 .doit = tipc_nl_bearer_disable,
121 .policy = tipc_nl_policy,
122 },
123 {
124 .cmd = TIPC_NL_BEARER_ENABLE,
125 .doit = tipc_nl_bearer_enable,
126 .policy = tipc_nl_policy,
127 },
128 {
129 .cmd = TIPC_NL_BEARER_GET,
130 .doit = tipc_nl_bearer_get,
131 .dumpit = tipc_nl_bearer_dump,
132 .policy = tipc_nl_policy,
133 },
134 {
135 .cmd = TIPC_NL_BEARER_SET,
136 .doit = tipc_nl_bearer_set,
137 .policy = tipc_nl_policy,
138 },
139 {
140 .cmd = TIPC_NL_SOCK_GET,
141 .dumpit = tipc_nl_sk_dump,
142 .policy = tipc_nl_policy,
143 },
144 {
145 .cmd = TIPC_NL_PUBL_GET,
146 .dumpit = tipc_nl_publ_dump,
147 .policy = tipc_nl_policy,
148 },
149 {
150 .cmd = TIPC_NL_LINK_GET,
151 .doit = tipc_nl_link_get,
152 .dumpit = tipc_nl_link_dump,
153 .policy = tipc_nl_policy,
154 },
155 {
156 .cmd = TIPC_NL_LINK_SET,
157 .doit = tipc_nl_link_set,
158 .policy = tipc_nl_policy,
159 },
160 {
161 .cmd = TIPC_NL_LINK_RESET_STATS,
162 .doit = tipc_nl_link_reset_stats,
163 .policy = tipc_nl_policy,
164 },
165 {
166 .cmd = TIPC_NL_MEDIA_GET,
167 .doit = tipc_nl_media_get,
168 .dumpit = tipc_nl_media_dump,
169 .policy = tipc_nl_policy,
170 },
171 {
172 .cmd = TIPC_NL_MEDIA_SET,
173 .doit = tipc_nl_media_set,
174 .policy = tipc_nl_policy,
175 },
176 {
177 .cmd = TIPC_NL_NODE_GET,
178 .dumpit = tipc_nl_node_dump,
179 .policy = tipc_nl_policy,
180 },
181 {
182 .cmd = TIPC_NL_NET_GET,
183 .dumpit = tipc_nl_net_dump,
184 .policy = tipc_nl_policy,
185 },
186 {
187 .cmd = TIPC_NL_NET_SET,
188 .doit = tipc_nl_net_set,
189 .policy = tipc_nl_policy,
190 },
191 {
192 .cmd = TIPC_NL_NAME_TABLE_GET,
193 .dumpit = tipc_nl_name_table_dump,
194 .policy = tipc_nl_policy,
195 }
196};
197
198int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***attr)
199{
200 u32 maxattr = tipc_genl_v2_family.maxattr;
201
202 *attr = tipc_genl_v2_family.attrbuf;
203 if (!*attr)
204 return -EOPNOTSUPP;
205
206 return nlmsg_parse(nlh, GENL_HDRLEN, *attr, maxattr, tipc_nl_policy);
207}
208
86int tipc_netlink_start(void) 209int tipc_netlink_start(void)
87{ 210{
88 int res; 211 int res;
89 212
90 res = genl_register_family_with_ops(&tipc_genl_family, tipc_genl_ops); 213 res = genl_register_family_with_ops(&tipc_genl_family, tipc_genl_ops);
91 if (res) { 214 if (res) {
215 pr_err("Failed to register legacy interface\n");
216 return res;
217 }
218
219 res = genl_register_family_with_ops(&tipc_genl_v2_family,
220 tipc_genl_v2_ops);
221 if (res) {
92 pr_err("Failed to register netlink interface\n"); 222 pr_err("Failed to register netlink interface\n");
93 return res; 223 return res;
94 } 224 }
@@ -98,4 +228,5 @@ int tipc_netlink_start(void)
98void tipc_netlink_stop(void) 228void tipc_netlink_stop(void)
99{ 229{
100 genl_unregister_family(&tipc_genl_family); 230 genl_unregister_family(&tipc_genl_family);
231 genl_unregister_family(&tipc_genl_v2_family);
101} 232}
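
Old tipc-config binaries keep resolving the "TIPC" name while new tooling binds to "TIPCv2"; both ids coexist on the generic-netlink bus. A quick stand-alone probe, assuming libnl-3 and those uapi family names:

#include <stdio.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

int main(void)
{
	struct nl_sock *sk = nl_socket_alloc();

	genl_connect(sk);
	printf("legacy \"TIPC\" id:   %d\n", genl_ctrl_resolve(sk, "TIPC"));
	printf("new    \"TIPCv2\" id: %d\n", genl_ctrl_resolve(sk, "TIPCv2"));
	nl_socket_free(sk);
	return 0;
}
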
diff --git a/net/tipc/ref.h b/net/tipc/netlink.h
index d01aa1df63b8..1425c6869de0 100644
--- a/net/tipc/ref.h
+++ b/net/tipc/netlink.h
@@ -1,8 +1,7 @@
1/* 1/*
2 * net/tipc/ref.h: Include file for TIPC object registry code 2 * net/tipc/netlink.h: Include file for TIPC netlink code
3 * 3 *
4 * Copyright (c) 1991-2006, Ericsson AB 4 * Copyright (c) 2014, Ericsson AB
5 * Copyright (c) 2005-2006, Wind River Systems
6 * All rights reserved. 5 * All rights reserved.
7 * 6 *
8 * Redistribution and use in source and binary forms, with or without 7 * Redistribution and use in source and binary forms, with or without
@@ -34,15 +33,16 @@
34 * POSSIBILITY OF SUCH DAMAGE. 33 * POSSIBILITY OF SUCH DAMAGE.
35 */ 34 */
36 35
37#ifndef _TIPC_REF_H 36#ifndef _TIPC_NETLINK_H
38#define _TIPC_REF_H 37#define _TIPC_NETLINK_H
39 38
40int tipc_ref_table_init(u32 requested_size, u32 start); 39extern struct genl_family tipc_genl_v2_family;
41void tipc_ref_table_stop(void); 40int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***buf);
42 41
43u32 tipc_ref_acquire(void *object, spinlock_t **lock); 42struct tipc_nl_msg {
44void tipc_ref_discard(u32 ref); 43 struct sk_buff *skb;
45 44 u32 portid;
46void *tipc_ref_lock(u32 ref); 45 u32 seq;
46};
47 47
48#endif 48#endif
diff --git a/net/tipc/node.c b/net/tipc/node.c
index f7069299943f..8d353ec77a66 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -38,6 +38,7 @@
38#include "config.h" 38#include "config.h"
39#include "node.h" 39#include "node.h"
40#include "name_distr.h" 40#include "name_distr.h"
41#include "socket.h"
41 42
42#define NODE_HTABLE_SIZE 512 43#define NODE_HTABLE_SIZE 512
43 44
@@ -50,6 +51,19 @@ static u32 tipc_num_nodes;
50static u32 tipc_num_links; 51static u32 tipc_num_links;
51static DEFINE_SPINLOCK(node_list_lock); 52static DEFINE_SPINLOCK(node_list_lock);
52 53
54struct tipc_sock_conn {
55 u32 port;
56 u32 peer_port;
57 u32 peer_node;
58 struct list_head list;
59};
60
61static const struct nla_policy tipc_nl_node_policy[TIPC_NLA_NODE_MAX + 1] = {
62 [TIPC_NLA_NODE_UNSPEC] = { .type = NLA_UNSPEC },
63 [TIPC_NLA_NODE_ADDR] = { .type = NLA_U32 },
64 [TIPC_NLA_NODE_UP] = { .type = NLA_FLAG }
65};
66
53/* 67/*
54 * A trivial power-of-two bitmask technique is used for speed, since this 68 * A trivial power-of-two bitmask technique is used for speed, since this
55 * operation is done for every incoming TIPC packet. The number of hash table 69 * operation is done for every incoming TIPC packet. The number of hash table
@@ -99,7 +113,10 @@ struct tipc_node *tipc_node_create(u32 addr)
99 spin_lock_init(&n_ptr->lock); 113 spin_lock_init(&n_ptr->lock);
100 INIT_HLIST_NODE(&n_ptr->hash); 114 INIT_HLIST_NODE(&n_ptr->hash);
101 INIT_LIST_HEAD(&n_ptr->list); 115 INIT_LIST_HEAD(&n_ptr->list);
102 INIT_LIST_HEAD(&n_ptr->nsub); 116 INIT_LIST_HEAD(&n_ptr->publ_list);
117 INIT_LIST_HEAD(&n_ptr->conn_sks);
118 skb_queue_head_init(&n_ptr->waiting_sks);
119 __skb_queue_head_init(&n_ptr->bclink.deferred_queue);
103 120
104 hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]); 121 hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);
105 122
@@ -136,6 +153,71 @@ void tipc_node_stop(void)
136 spin_unlock_bh(&node_list_lock); 153 spin_unlock_bh(&node_list_lock);
137} 154}
138 155
156int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port)
157{
158 struct tipc_node *node;
159 struct tipc_sock_conn *conn;
160
161 if (in_own_node(dnode))
162 return 0;
163
164 node = tipc_node_find(dnode);
165 if (!node) {
166 pr_warn("Connecting sock to node 0x%x failed\n", dnode);
167 return -EHOSTUNREACH;
168 }
169 conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
170 if (!conn)
171 return -EHOSTUNREACH;
172 conn->peer_node = dnode;
173 conn->port = port;
174 conn->peer_port = peer_port;
175
176 tipc_node_lock(node);
177 list_add_tail(&conn->list, &node->conn_sks);
178 tipc_node_unlock(node);
179 return 0;
180}
181
182void tipc_node_remove_conn(u32 dnode, u32 port)
183{
184 struct tipc_node *node;
185 struct tipc_sock_conn *conn, *safe;
186
187 if (in_own_node(dnode))
188 return;
189
190 node = tipc_node_find(dnode);
191 if (!node)
192 return;
193
194 tipc_node_lock(node);
195 list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
196 if (port != conn->port)
197 continue;
198 list_del(&conn->list);
199 kfree(conn);
200 }
201 tipc_node_unlock(node);
202}
203
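
tipc_node_remove_conn() above intentionally keeps scanning after a match, since a port may conceivably appear in more than one entry, and it needs the _safe iterator because the loop body frees the current entry; tipc_node_abort_sock_conns() just below relies on the same property. The invariant in a stand-alone sketch, names illustrative:

#include <stdlib.h>

struct conn { int port; struct conn *next; };

static void remove_conn(struct conn **head, int port)
{
	struct conn **pp = head, *c;

	while ((c = *pp)) {
		if (c->port == port) {
			*pp = c->next;	/* unlink before freeing */
			free(c);
			continue;	/* keep scanning for duplicates */
		}
		pp = &c->next;
	}
}
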
204void tipc_node_abort_sock_conns(struct list_head *conns)
205{
206 struct tipc_sock_conn *conn, *safe;
207 struct sk_buff *buf;
208
209 list_for_each_entry_safe(conn, safe, conns, list) {
210 buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
211 SHORT_H_SIZE, 0, tipc_own_addr,
212 conn->peer_node, conn->port,
213 conn->peer_port, TIPC_ERR_NO_NODE);
214 if (likely(buf))
215 tipc_sk_rcv(buf);
216 list_del(&conn->list);
217 kfree(conn);
218 }
219}
220
139/** 221/**
140 * tipc_node_link_up - handle addition of link 222 * tipc_node_link_up - handle addition of link
141 * 223 *
@@ -144,11 +226,11 @@ void tipc_node_stop(void)
144void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr) 226void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
145{ 227{
146 struct tipc_link **active = &n_ptr->active_links[0]; 228 struct tipc_link **active = &n_ptr->active_links[0];
147 u32 addr = n_ptr->addr;
148 229
149 n_ptr->working_links++; 230 n_ptr->working_links++;
150 tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr, TIPC_NODE_SCOPE, 231 n_ptr->action_flags |= TIPC_NOTIFY_LINK_UP;
151 l_ptr->bearer_id, addr); 232 n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;
233
152 pr_info("Established link <%s> on network plane %c\n", 234 pr_info("Established link <%s> on network plane %c\n",
153 l_ptr->name, l_ptr->net_plane); 235 l_ptr->name, l_ptr->net_plane);
154 236
@@ -209,10 +291,10 @@ static void node_select_active_links(struct tipc_node *n_ptr)
209void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr) 291void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
210{ 292{
211 struct tipc_link **active; 293 struct tipc_link **active;
212 u32 addr = n_ptr->addr;
213 294
214 n_ptr->working_links--; 295 n_ptr->working_links--;
215 tipc_nametbl_withdraw(TIPC_LINK_STATE, addr, l_ptr->bearer_id, addr); 296 n_ptr->action_flags |= TIPC_NOTIFY_LINK_DOWN;
297 n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;
216 298
217 if (!tipc_link_is_active(l_ptr)) { 299 if (!tipc_link_is_active(l_ptr)) {
218 pr_info("Lost standby link <%s> on network plane %c\n", 300 pr_info("Lost standby link <%s> on network plane %c\n",
@@ -300,8 +382,7 @@ static void node_lost_contact(struct tipc_node *n_ptr)
300 382
301 /* Flush broadcast link info associated with lost node */ 383 /* Flush broadcast link info associated with lost node */
302 if (n_ptr->bclink.recv_permitted) { 384 if (n_ptr->bclink.recv_permitted) {
303 kfree_skb_list(n_ptr->bclink.deferred_head); 385 __skb_queue_purge(&n_ptr->bclink.deferred_queue);
304 n_ptr->bclink.deferred_size = 0;
305 386
306 if (n_ptr->bclink.reasm_buf) { 387 if (n_ptr->bclink.reasm_buf) {
307 kfree_skb(n_ptr->bclink.reasm_buf); 388 kfree_skb(n_ptr->bclink.reasm_buf);
@@ -474,25 +555,145 @@ int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len)
474void tipc_node_unlock(struct tipc_node *node) 555void tipc_node_unlock(struct tipc_node *node)
475{ 556{
476 LIST_HEAD(nsub_list); 557 LIST_HEAD(nsub_list);
558 LIST_HEAD(conn_sks);
559 struct sk_buff_head waiting_sks;
477 u32 addr = 0; 560 u32 addr = 0;
561 int flags = node->action_flags;
562 u32 link_id = 0;
478 563
479 if (likely(!node->action_flags)) { 564 if (likely(!flags)) {
480 spin_unlock_bh(&node->lock); 565 spin_unlock_bh(&node->lock);
481 return; 566 return;
482 } 567 }
483 568
484 if (node->action_flags & TIPC_NOTIFY_NODE_DOWN) { 569 addr = node->addr;
485 list_replace_init(&node->nsub, &nsub_list); 570 link_id = node->link_id;
486 node->action_flags &= ~TIPC_NOTIFY_NODE_DOWN; 571 __skb_queue_head_init(&waiting_sks);
487 } 572
488 if (node->action_flags & TIPC_NOTIFY_NODE_UP) { 573 if (flags & TIPC_WAKEUP_USERS)
489 node->action_flags &= ~TIPC_NOTIFY_NODE_UP; 574 skb_queue_splice_init(&node->waiting_sks, &waiting_sks);
490 addr = node->addr; 575
576 if (flags & TIPC_NOTIFY_NODE_DOWN) {
577 list_replace_init(&node->publ_list, &nsub_list);
578 list_replace_init(&node->conn_sks, &conn_sks);
491 } 579 }
580 node->action_flags &= ~(TIPC_WAKEUP_USERS | TIPC_NOTIFY_NODE_DOWN |
581 TIPC_NOTIFY_NODE_UP | TIPC_NOTIFY_LINK_UP |
582 TIPC_NOTIFY_LINK_DOWN |
583 TIPC_WAKEUP_BCAST_USERS);
584
492 spin_unlock_bh(&node->lock); 585 spin_unlock_bh(&node->lock);
493 586
587 while (!skb_queue_empty(&waiting_sks))
588 tipc_sk_rcv(__skb_dequeue(&waiting_sks));
589
590 if (!list_empty(&conn_sks))
591 tipc_node_abort_sock_conns(&conn_sks);
592
494 if (!list_empty(&nsub_list)) 593 if (!list_empty(&nsub_list))
495 tipc_nodesub_notify(&nsub_list); 594 tipc_publ_notify(&nsub_list, addr);
496 if (addr) 595
596 if (flags & TIPC_WAKEUP_BCAST_USERS)
597 tipc_bclink_wakeup_users();
598
599 if (flags & TIPC_NOTIFY_NODE_UP)
497 tipc_named_node_up(addr); 600 tipc_named_node_up(addr);
601
602 if (flags & TIPC_NOTIFY_LINK_UP)
603 tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr,
604 TIPC_NODE_SCOPE, link_id, addr);
605
606 if (flags & TIPC_NOTIFY_LINK_DOWN)
607 tipc_nametbl_withdraw(TIPC_LINK_STATE, addr,
608 link_id, addr);
609}
610
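
The reworked tipc_node_unlock() follows one pattern throughout: snapshot action_flags and detach the pending lists while the node lock is held, then run every side effect after the unlock, because the callees (tipc_sk_rcv(), the name table operations) may take locks of their own. The same shape in a stand-alone pthread sketch, all names illustrative:

#include <pthread.h>

#define NOTIFY_DOWN (1 << 0)	/* stands in for TIPC_NOTIFY_NODE_DOWN */

struct item { struct item *next; };

struct node {
	pthread_mutex_t lock;	/* stands in for the node spinlock */
	unsigned int action_flags;
	struct item *pending;	/* work queued up under the lock */
};

static void notify_down(struct item *todo)
{
	(void)todo;		/* consumer; may itself take node locks */
}

/* called with n->lock held, mirroring tipc_node_unlock() */
static void node_unlock(struct node *n)
{
	unsigned int flags = n->action_flags;
	struct item *todo = n->pending;

	n->pending = NULL;	/* list_replace_init() analogue */
	n->action_flags = 0;
	pthread_mutex_unlock(&n->lock);

	if (flags & NOTIFY_DOWN)
		notify_down(todo);	/* safe: runs unlocked */
}
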
611/* Caller should hold node lock for the passed node */
612static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
613{
614 void *hdr;
615 struct nlattr *attrs;
616
617 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family,
618 NLM_F_MULTI, TIPC_NL_NODE_GET);
619 if (!hdr)
620 return -EMSGSIZE;
621
622 attrs = nla_nest_start(msg->skb, TIPC_NLA_NODE);
623 if (!attrs)
624 goto msg_full;
625
626 if (nla_put_u32(msg->skb, TIPC_NLA_NODE_ADDR, node->addr))
627 goto attr_msg_full;
628 if (tipc_node_is_up(node))
629 if (nla_put_flag(msg->skb, TIPC_NLA_NODE_UP))
630 goto attr_msg_full;
631
632 nla_nest_end(msg->skb, attrs);
633 genlmsg_end(msg->skb, hdr);
634
635 return 0;
636
637attr_msg_full:
638 nla_nest_cancel(msg->skb, attrs);
639msg_full:
640 genlmsg_cancel(msg->skb, hdr);
641
642 return -EMSGSIZE;
643}
644
645int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
646{
647 int err;
648 int done = cb->args[0];
649 int last_addr = cb->args[1];
650 struct tipc_node *node;
651 struct tipc_nl_msg msg;
652
653 if (done)
654 return 0;
655
656 msg.skb = skb;
657 msg.portid = NETLINK_CB(cb->skb).portid;
658 msg.seq = cb->nlh->nlmsg_seq;
659
660 rcu_read_lock();
661
662 if (last_addr && !tipc_node_find(last_addr)) {
663 rcu_read_unlock();
 664 /* We never set seq or call nl_dump_check_consistent(), which
 665 * means that setting prev_seq here will cause the consistency
 666 * check to fail in the netlink callback handler, resulting in
 667 * the NLMSG_DONE message having the NLM_F_DUMP_INTR flag set if
 668 * the node state changed while we released the lock.
669 */
670 cb->prev_seq = 1;
671 return -EPIPE;
672 }
673
674 list_for_each_entry_rcu(node, &tipc_node_list, list) {
675 if (last_addr) {
676 if (node->addr == last_addr)
677 last_addr = 0;
678 else
679 continue;
680 }
681
682 tipc_node_lock(node);
683 err = __tipc_nl_add_node(&msg, node);
684 if (err) {
685 last_addr = node->addr;
686 tipc_node_unlock(node);
687 goto out;
688 }
689
690 tipc_node_unlock(node);
691 }
692 done = 1;
693out:
694 cb->args[0] = done;
695 cb->args[1] = last_addr;
696 rcu_read_unlock();
697
698 return skb->len;
498} 699}
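
User space sees the cb->prev_seq = 1 trick as NLM_F_DUMP_INTR on the NLMSG_DONE message, which libnl-3 reports as NLE_DUMP_INTR; the only safe reaction is to restart the dump from the top. A hedged fragment reusing the earlier libnl-3 setup, where restart_node_dump() is a placeholder for re-sending TIPC_NL_NODE_GET:

static int restart_node_dump(struct nl_sock *sk);	/* placeholder */

static int recv_node_dump(struct nl_sock *sk)
{
	int err = nl_recvmsgs_default(sk);

	if (err == -NLE_DUMP_INTR)	/* node set changed mid-dump */
		return restart_node_dump(sk);
	return err;
}
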
diff --git a/net/tipc/node.h b/net/tipc/node.h
index b61716a8218e..cbe0e950f1cc 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/node.h: Include file for TIPC node management routines 2 * net/tipc/node.h: Include file for TIPC node management routines
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, 2014, Ericsson AB
5 * Copyright (c) 2005, 2010-2014, Wind River Systems 5 * Copyright (c) 2005, 2010-2014, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -37,7 +37,6 @@
37#ifndef _TIPC_NODE_H 37#ifndef _TIPC_NODE_H
38#define _TIPC_NODE_H 38#define _TIPC_NODE_H
39 39
40#include "node_subscr.h"
41#include "addr.h" 40#include "addr.h"
42#include "net.h" 41#include "net.h"
43#include "bearer.h" 42#include "bearer.h"
@@ -53,12 +52,17 @@
53 * TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down 52 * TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down
54 * TIPC_NOTIFY_NODE_DOWN: notify node is down 53 * TIPC_NOTIFY_NODE_DOWN: notify node is down
55 * TIPC_NOTIFY_NODE_UP: notify node is up 54 * TIPC_NOTIFY_NODE_UP: notify node is up
 55 * TIPC_NOTIFY_LINK_UP/DOWN: publish or withdraw link state name
56 */ 56 */
57enum { 57enum {
58 TIPC_WAIT_PEER_LINKS_DOWN = (1 << 1), 58 TIPC_WAIT_PEER_LINKS_DOWN = (1 << 1),
59 TIPC_WAIT_OWN_LINKS_DOWN = (1 << 2), 59 TIPC_WAIT_OWN_LINKS_DOWN = (1 << 2),
60 TIPC_NOTIFY_NODE_DOWN = (1 << 3), 60 TIPC_NOTIFY_NODE_DOWN = (1 << 3),
61 TIPC_NOTIFY_NODE_UP = (1 << 4) 61 TIPC_NOTIFY_NODE_UP = (1 << 4),
62 TIPC_WAKEUP_USERS = (1 << 5),
63 TIPC_WAKEUP_BCAST_USERS = (1 << 6),
64 TIPC_NOTIFY_LINK_UP = (1 << 7),
65 TIPC_NOTIFY_LINK_DOWN = (1 << 8)
62}; 66};
63 67
64/** 68/**
@@ -67,9 +71,7 @@ enum {
67 * @last_in: sequence # of last in-sequence b'cast message received from node 71 * @last_in: sequence # of last in-sequence b'cast message received from node
68 * @last_sent: sequence # of last b'cast message sent by node 72 * @last_sent: sequence # of last b'cast message sent by node
69 * @oos_state: state tracker for handling OOS b'cast messages 73 * @oos_state: state tracker for handling OOS b'cast messages
 70 * @deferred_size: number of OOS b'cast messages in deferred queue 74 * @deferred_queue: deferred queue of OOS b'cast messages received from node
71 * @deferred_head: oldest OOS b'cast message received from node
72 * @deferred_tail: newest OOS b'cast message received from node
73 * @reasm_buf: broadcast reassembly queue head from node 75 * @reasm_buf: broadcast reassembly queue head from node
74 * @recv_permitted: true if node is allowed to receive b'cast messages 76 * @recv_permitted: true if node is allowed to receive b'cast messages
75 */ 77 */
@@ -79,8 +81,7 @@ struct tipc_node_bclink {
79 u32 last_sent; 81 u32 last_sent;
80 u32 oos_state; 82 u32 oos_state;
81 u32 deferred_size; 83 u32 deferred_size;
82 struct sk_buff *deferred_head; 84 struct sk_buff_head deferred_queue;
83 struct sk_buff *deferred_tail;
84 struct sk_buff *reasm_buf; 85 struct sk_buff *reasm_buf;
85 bool recv_permitted; 86 bool recv_permitted;
86}; 87};
@@ -98,7 +99,8 @@ struct tipc_node_bclink {
98 * @working_links: number of working links to node (both active and standby) 99 * @working_links: number of working links to node (both active and standby)
99 * @link_cnt: number of links to node 100 * @link_cnt: number of links to node
100 * @signature: node instance identifier 101 * @signature: node instance identifier
101 * @nsub: list of "node down" subscriptions monitoring node 102 * @link_id: local and remote bearer ids of changing link, if any
103 * @publ_list: list of publications
102 * @rcu: rcu struct for tipc_node 104 * @rcu: rcu struct for tipc_node
103 */ 105 */
104struct tipc_node { 106struct tipc_node {
@@ -114,7 +116,10 @@ struct tipc_node {
114 int link_cnt; 116 int link_cnt;
115 int working_links; 117 int working_links;
116 u32 signature; 118 u32 signature;
117 struct list_head nsub; 119 u32 link_id;
120 struct list_head publ_list;
121 struct sk_buff_head waiting_sks;
122 struct list_head conn_sks;
118 struct rcu_head rcu; 123 struct rcu_head rcu;
119}; 124};
120 125
@@ -133,6 +138,10 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
133struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space); 138struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space);
134int tipc_node_get_linkname(u32 bearer_id, u32 node, char *linkname, size_t len); 139int tipc_node_get_linkname(u32 bearer_id, u32 node, char *linkname, size_t len);
135void tipc_node_unlock(struct tipc_node *node); 140void tipc_node_unlock(struct tipc_node *node);
141int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port);
142void tipc_node_remove_conn(u32 dnode, u32 port);
143
144int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
136 145
137static inline void tipc_node_lock(struct tipc_node *node) 146static inline void tipc_node_lock(struct tipc_node *node)
138{ 147{
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
deleted file mode 100644
index 2d13eea8574a..000000000000
--- a/net/tipc/node_subscr.c
+++ /dev/null
@@ -1,96 +0,0 @@
1/*
2 * net/tipc/node_subscr.c: TIPC "node down" subscription handling
3 *
4 * Copyright (c) 1995-2006, Ericsson AB
5 * Copyright (c) 2005, 2010-2011, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "node_subscr.h"
39#include "node.h"
40
41/**
42 * tipc_nodesub_subscribe - create "node down" subscription for specified node
43 */
44void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
45 void *usr_handle, net_ev_handler handle_down)
46{
47 if (in_own_node(addr)) {
48 node_sub->node = NULL;
49 return;
50 }
51
52 node_sub->node = tipc_node_find(addr);
53 if (!node_sub->node) {
54 pr_warn("Node subscription rejected, unknown node 0x%x\n",
55 addr);
56 return;
57 }
58 node_sub->handle_node_down = handle_down;
59 node_sub->usr_handle = usr_handle;
60
61 tipc_node_lock(node_sub->node);
62 list_add_tail(&node_sub->nodesub_list, &node_sub->node->nsub);
63 tipc_node_unlock(node_sub->node);
64}
65
66/**
67 * tipc_nodesub_unsubscribe - cancel "node down" subscription (if any)
68 */
69void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub)
70{
71 if (!node_sub->node)
72 return;
73
74 tipc_node_lock(node_sub->node);
75 list_del_init(&node_sub->nodesub_list);
76 tipc_node_unlock(node_sub->node);
77}
78
79/**
80 * tipc_nodesub_notify - notify subscribers that a node is unreachable
81 *
82 * Note: node is locked by caller
83 */
84void tipc_nodesub_notify(struct list_head *nsub_list)
85{
86 struct tipc_node_subscr *ns, *safe;
87 net_ev_handler handle_node_down;
88
89 list_for_each_entry_safe(ns, safe, nsub_list, nodesub_list) {
90 handle_node_down = ns->handle_node_down;
91 if (handle_node_down) {
92 ns->handle_node_down = NULL;
93 handle_node_down(ns->usr_handle);
94 }
95 }
96}
diff --git a/net/tipc/node_subscr.h b/net/tipc/node_subscr.h
deleted file mode 100644
index d91b8cc81e3d..000000000000
--- a/net/tipc/node_subscr.h
+++ /dev/null
@@ -1,63 +0,0 @@
1/*
2 * net/tipc/node_subscr.h: Include file for TIPC "node down" subscription handling
3 *
4 * Copyright (c) 1995-2006, Ericsson AB
5 * Copyright (c) 2005, 2010-2011, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_NODE_SUBSCR_H
38#define _TIPC_NODE_SUBSCR_H
39
40#include "addr.h"
41
42typedef void (*net_ev_handler) (void *usr_handle);
43
44/**
45 * struct tipc_node_subscr - "node down" subscription entry
46 * @node: ptr to node structure of interest (or NULL, if none)
47 * @handle_node_down: routine to invoke when node fails
48 * @usr_handle: argument to pass to routine when node fails
49 * @nodesub_list: adjacent entries in list of subscriptions for the node
50 */
51struct tipc_node_subscr {
52 struct tipc_node *node;
53 net_ev_handler handle_node_down;
54 void *usr_handle;
55 struct list_head nodesub_list;
56};
57
58void tipc_nodesub_subscribe(struct tipc_node_subscr *node_sub, u32 addr,
59 void *usr_handle, net_ev_handler handle_down);
60void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub);
61void tipc_nodesub_notify(struct list_head *nsub_list);
62
63#endif
diff --git a/net/tipc/port.c b/net/tipc/port.c
deleted file mode 100644
index 7e096a5e7701..000000000000
--- a/net/tipc/port.c
+++ /dev/null
@@ -1,514 +0,0 @@
1/*
2 * net/tipc/port.c: TIPC port code
3 *
4 * Copyright (c) 1992-2007, 2014, Ericsson AB
5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "config.h"
39#include "port.h"
40#include "name_table.h"
41#include "socket.h"
42
43/* Connection management: */
44#define PROBING_INTERVAL 3600000 /* [ms] => 1 h */
45
46#define MAX_REJECT_SIZE 1024
47
48DEFINE_SPINLOCK(tipc_port_list_lock);
49
50static LIST_HEAD(ports);
51static void port_handle_node_down(unsigned long ref);
52static struct sk_buff *port_build_self_abort_msg(struct tipc_port *, u32 err);
53static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *, u32 err);
54static void port_timeout(unsigned long ref);
55
56/**
57 * tipc_port_peer_msg - verify message was sent by connected port's peer
58 *
59 * Handles cases where the node's network address has changed from
60 * the default of <0.0.0> to its configured setting.
61 */
62int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg)
63{
64 u32 peernode;
65 u32 orignode;
66
67 if (msg_origport(msg) != tipc_port_peerport(p_ptr))
68 return 0;
69
70 orignode = msg_orignode(msg);
71 peernode = tipc_port_peernode(p_ptr);
72 return (orignode == peernode) ||
73 (!orignode && (peernode == tipc_own_addr)) ||
74 (!peernode && (orignode == tipc_own_addr));
75}
76
 77/* tipc_port_init - initiate TIPC port and lock it
78 *
79 * Returns obtained reference if initialization is successful, zero otherwise
80 */
81u32 tipc_port_init(struct tipc_port *p_ptr,
82 const unsigned int importance)
83{
84 struct tipc_msg *msg;
85 u32 ref;
86
87 ref = tipc_ref_acquire(p_ptr, &p_ptr->lock);
88 if (!ref) {
89 pr_warn("Port registration failed, ref. table exhausted\n");
90 return 0;
91 }
92
93 p_ptr->max_pkt = MAX_PKT_DEFAULT;
94 p_ptr->ref = ref;
95 INIT_LIST_HEAD(&p_ptr->wait_list);
96 INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
97 k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref);
98 INIT_LIST_HEAD(&p_ptr->publications);
99 INIT_LIST_HEAD(&p_ptr->port_list);
100
101 /*
102 * Must hold port list lock while initializing message header template
 103 * to ensure a change to the node's own network address doesn't result
 104 * in the template containing outdated network address information
105 */
106 spin_lock_bh(&tipc_port_list_lock);
107 msg = &p_ptr->phdr;
108 tipc_msg_init(msg, importance, TIPC_NAMED_MSG, NAMED_H_SIZE, 0);
109 msg_set_origport(msg, ref);
110 list_add_tail(&p_ptr->port_list, &ports);
111 spin_unlock_bh(&tipc_port_list_lock);
112 return ref;
113}
114
115void tipc_port_destroy(struct tipc_port *p_ptr)
116{
117 struct sk_buff *buf = NULL;
118 struct tipc_msg *msg = NULL;
119 u32 peer;
120
121 tipc_withdraw(p_ptr, 0, NULL);
122
123 spin_lock_bh(p_ptr->lock);
124 tipc_ref_discard(p_ptr->ref);
125 spin_unlock_bh(p_ptr->lock);
126
127 k_cancel_timer(&p_ptr->timer);
128 if (p_ptr->connected) {
129 buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
130 tipc_nodesub_unsubscribe(&p_ptr->subscription);
131 msg = buf_msg(buf);
132 peer = msg_destnode(msg);
133 tipc_link_xmit(buf, peer, msg_link_selector(msg));
134 }
135 spin_lock_bh(&tipc_port_list_lock);
136 list_del(&p_ptr->port_list);
137 list_del(&p_ptr->wait_list);
138 spin_unlock_bh(&tipc_port_list_lock);
139 k_term_timer(&p_ptr->timer);
140}
141
142/*
143 * port_build_proto_msg(): create connection protocol message for port
144 *
145 * On entry the port must be locked and connected.
146 */
147static struct sk_buff *port_build_proto_msg(struct tipc_port *p_ptr,
148 u32 type, u32 ack)
149{
150 struct sk_buff *buf;
151 struct tipc_msg *msg;
152
153 buf = tipc_buf_acquire(INT_H_SIZE);
154 if (buf) {
155 msg = buf_msg(buf);
156 tipc_msg_init(msg, CONN_MANAGER, type, INT_H_SIZE,
157 tipc_port_peernode(p_ptr));
158 msg_set_destport(msg, tipc_port_peerport(p_ptr));
159 msg_set_origport(msg, p_ptr->ref);
160 msg_set_msgcnt(msg, ack);
161 buf->next = NULL;
162 }
163 return buf;
164}
165
166static void port_timeout(unsigned long ref)
167{
168 struct tipc_port *p_ptr = tipc_port_lock(ref);
169 struct sk_buff *buf = NULL;
170 struct tipc_msg *msg = NULL;
171
172 if (!p_ptr)
173 return;
174
175 if (!p_ptr->connected) {
176 tipc_port_unlock(p_ptr);
177 return;
178 }
179
180 /* Last probe answered ? */
181 if (p_ptr->probing_state == TIPC_CONN_PROBING) {
182 buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
183 } else {
184 buf = port_build_proto_msg(p_ptr, CONN_PROBE, 0);
185 p_ptr->probing_state = TIPC_CONN_PROBING;
186 k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
187 }
188 tipc_port_unlock(p_ptr);
189 msg = buf_msg(buf);
190 tipc_link_xmit(buf, msg_destnode(msg), msg_link_selector(msg));
191}
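
port_timeout() is a classic two-strike probe: the first expiry sends CONN_PROBE and enters TIPC_CONN_PROBING; any traffic from the peer resets the state, so a second expiry still in the probing state means the peer is unresponsive and the connection is aborted. The state machine in isolation, with illustrative names:

enum conn_state { CONN_ALIVE, CONN_PROBING_SENT };

/* one timer expiry; returns 0 while the peer is considered alive */
static int probe_tick(enum conn_state *state)
{
	if (*state == CONN_PROBING_SENT)
		return -1;		/* previous probe never answered: abort */
	*state = CONN_PROBING_SENT;	/* peer traffic resets to CONN_ALIVE */
	return 0;			/* probe sent, timer re-armed */
}
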
192
193
194static void port_handle_node_down(unsigned long ref)
195{
196 struct tipc_port *p_ptr = tipc_port_lock(ref);
197 struct sk_buff *buf = NULL;
198 struct tipc_msg *msg = NULL;
199
200 if (!p_ptr)
201 return;
202 buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_NODE);
203 tipc_port_unlock(p_ptr);
204 msg = buf_msg(buf);
205 tipc_link_xmit(buf, msg_destnode(msg), msg_link_selector(msg));
206}
207
208
209static struct sk_buff *port_build_self_abort_msg(struct tipc_port *p_ptr, u32 err)
210{
211 struct sk_buff *buf = port_build_peer_abort_msg(p_ptr, err);
212
213 if (buf) {
214 struct tipc_msg *msg = buf_msg(buf);
215 msg_swap_words(msg, 4, 5);
216 msg_swap_words(msg, 6, 7);
217 buf->next = NULL;
218 }
219 return buf;
220}
221
222
223static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *p_ptr, u32 err)
224{
225 struct sk_buff *buf;
226 struct tipc_msg *msg;
227 u32 imp;
228
229 if (!p_ptr->connected)
230 return NULL;
231
232 buf = tipc_buf_acquire(BASIC_H_SIZE);
233 if (buf) {
234 msg = buf_msg(buf);
235 memcpy(msg, &p_ptr->phdr, BASIC_H_SIZE);
236 msg_set_hdr_sz(msg, BASIC_H_SIZE);
237 msg_set_size(msg, BASIC_H_SIZE);
238 imp = msg_importance(msg);
239 if (imp < TIPC_CRITICAL_IMPORTANCE)
240 msg_set_importance(msg, ++imp);
241 msg_set_errcode(msg, err);
242 buf->next = NULL;
243 }
244 return buf;
245}
246
247static int port_print(struct tipc_port *p_ptr, char *buf, int len, int full_id)
248{
249 struct publication *publ;
250 int ret;
251
252 if (full_id)
253 ret = tipc_snprintf(buf, len, "<%u.%u.%u:%u>:",
254 tipc_zone(tipc_own_addr),
255 tipc_cluster(tipc_own_addr),
256 tipc_node(tipc_own_addr), p_ptr->ref);
257 else
258 ret = tipc_snprintf(buf, len, "%-10u:", p_ptr->ref);
259
260 if (p_ptr->connected) {
261 u32 dport = tipc_port_peerport(p_ptr);
262 u32 destnode = tipc_port_peernode(p_ptr);
263
264 ret += tipc_snprintf(buf + ret, len - ret,
265 " connected to <%u.%u.%u:%u>",
266 tipc_zone(destnode),
267 tipc_cluster(destnode),
268 tipc_node(destnode), dport);
269 if (p_ptr->conn_type != 0)
270 ret += tipc_snprintf(buf + ret, len - ret,
271 " via {%u,%u}", p_ptr->conn_type,
272 p_ptr->conn_instance);
273 } else if (p_ptr->published) {
274 ret += tipc_snprintf(buf + ret, len - ret, " bound to");
275 list_for_each_entry(publ, &p_ptr->publications, pport_list) {
276 if (publ->lower == publ->upper)
277 ret += tipc_snprintf(buf + ret, len - ret,
278 " {%u,%u}", publ->type,
279 publ->lower);
280 else
281 ret += tipc_snprintf(buf + ret, len - ret,
282 " {%u,%u,%u}", publ->type,
283 publ->lower, publ->upper);
284 }
285 }
286 ret += tipc_snprintf(buf + ret, len - ret, "\n");
287 return ret;
288}
289
290struct sk_buff *tipc_port_get_ports(void)
291{
292 struct sk_buff *buf;
293 struct tlv_desc *rep_tlv;
294 char *pb;
295 int pb_len;
296 struct tipc_port *p_ptr;
297 int str_len = 0;
298
299 buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
300 if (!buf)
301 return NULL;
302 rep_tlv = (struct tlv_desc *)buf->data;
303 pb = TLV_DATA(rep_tlv);
304 pb_len = ULTRA_STRING_MAX_LEN;
305
306 spin_lock_bh(&tipc_port_list_lock);
307 list_for_each_entry(p_ptr, &ports, port_list) {
308 spin_lock_bh(p_ptr->lock);
309 str_len += port_print(p_ptr, pb, pb_len, 0);
310 spin_unlock_bh(p_ptr->lock);
311 }
312 spin_unlock_bh(&tipc_port_list_lock);
313 str_len += 1; /* for "\0" */
314 skb_put(buf, TLV_SPACE(str_len));
315 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
316
317 return buf;
318}
319
320void tipc_port_reinit(void)
321{
322 struct tipc_port *p_ptr;
323 struct tipc_msg *msg;
324
325 spin_lock_bh(&tipc_port_list_lock);
326 list_for_each_entry(p_ptr, &ports, port_list) {
327 msg = &p_ptr->phdr;
328 msg_set_prevnode(msg, tipc_own_addr);
329 msg_set_orignode(msg, tipc_own_addr);
330 }
331 spin_unlock_bh(&tipc_port_list_lock);
332}
333
334void tipc_acknowledge(u32 ref, u32 ack)
335{
336 struct tipc_port *p_ptr;
337 struct sk_buff *buf = NULL;
338 struct tipc_msg *msg;
339
340 p_ptr = tipc_port_lock(ref);
341 if (!p_ptr)
342 return;
343 if (p_ptr->connected)
344 buf = port_build_proto_msg(p_ptr, CONN_ACK, ack);
345
346 tipc_port_unlock(p_ptr);
347 if (!buf)
348 return;
349 msg = buf_msg(buf);
350 tipc_link_xmit(buf, msg_destnode(msg), msg_link_selector(msg));
351}
352
353int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
354 struct tipc_name_seq const *seq)
355{
356 struct publication *publ;
357 u32 key;
358
359 if (p_ptr->connected)
360 return -EINVAL;
361 key = p_ptr->ref + p_ptr->pub_count + 1;
362 if (key == p_ptr->ref)
363 return -EADDRINUSE;
364
365 publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
366 scope, p_ptr->ref, key);
367 if (publ) {
368 list_add(&publ->pport_list, &p_ptr->publications);
369 p_ptr->pub_count++;
370 p_ptr->published = 1;
371 return 0;
372 }
373 return -EINVAL;
374}
375
376int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
377 struct tipc_name_seq const *seq)
378{
379 struct publication *publ;
380 struct publication *tpubl;
381 int res = -EINVAL;
382
383 if (!seq) {
384 list_for_each_entry_safe(publ, tpubl,
385 &p_ptr->publications, pport_list) {
386 tipc_nametbl_withdraw(publ->type, publ->lower,
387 publ->ref, publ->key);
388 }
389 res = 0;
390 } else {
391 list_for_each_entry_safe(publ, tpubl,
392 &p_ptr->publications, pport_list) {
393 if (publ->scope != scope)
394 continue;
395 if (publ->type != seq->type)
396 continue;
397 if (publ->lower != seq->lower)
398 continue;
399 if (publ->upper != seq->upper)
400 break;
401 tipc_nametbl_withdraw(publ->type, publ->lower,
402 publ->ref, publ->key);
403 res = 0;
404 break;
405 }
406 }
407 if (list_empty(&p_ptr->publications))
408 p_ptr->published = 0;
409 return res;
410}
411
412int tipc_port_connect(u32 ref, struct tipc_portid const *peer)
413{
414 struct tipc_port *p_ptr;
415 int res;
416
417 p_ptr = tipc_port_lock(ref);
418 if (!p_ptr)
419 return -EINVAL;
420 res = __tipc_port_connect(ref, p_ptr, peer);
421 tipc_port_unlock(p_ptr);
422 return res;
423}
424
425/*
426 * __tipc_port_connect - connect to a remote peer
427 *
428 * Port must be locked.
429 */
430int __tipc_port_connect(u32 ref, struct tipc_port *p_ptr,
431 struct tipc_portid const *peer)
432{
433 struct tipc_msg *msg;
434 int res = -EINVAL;
435
436 if (p_ptr->published || p_ptr->connected)
437 goto exit;
438 if (!peer->ref)
439 goto exit;
440
441 msg = &p_ptr->phdr;
442 msg_set_destnode(msg, peer->node);
443 msg_set_destport(msg, peer->ref);
444 msg_set_type(msg, TIPC_CONN_MSG);
445 msg_set_lookup_scope(msg, 0);
446 msg_set_hdr_sz(msg, SHORT_H_SIZE);
447
448 p_ptr->probing_interval = PROBING_INTERVAL;
449 p_ptr->probing_state = TIPC_CONN_OK;
450 p_ptr->connected = 1;
451 k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
452
453 tipc_nodesub_subscribe(&p_ptr->subscription, peer->node,
454 (void *)(unsigned long)ref,
455 (net_ev_handler)port_handle_node_down);
456 res = 0;
457exit:
458 p_ptr->max_pkt = tipc_node_get_mtu(peer->node, ref);
459 return res;
460}
461
462/*
463 * __tipc_disconnect - disconnect port from peer
464 *
465 * Port must be locked.
466 */
467int __tipc_port_disconnect(struct tipc_port *tp_ptr)
468{
469 if (tp_ptr->connected) {
470 tp_ptr->connected = 0;
 471 /* let timer expire on its own to avoid deadlock! */
472 tipc_nodesub_unsubscribe(&tp_ptr->subscription);
473 return 0;
474 }
475
476 return -ENOTCONN;
477}
478
479/*
 480 * tipc_port_disconnect(): Disconnect port from peer.
481 * This is a node local operation.
482 */
483int tipc_port_disconnect(u32 ref)
484{
485 struct tipc_port *p_ptr;
486 int res;
487
488 p_ptr = tipc_port_lock(ref);
489 if (!p_ptr)
490 return -EINVAL;
491 res = __tipc_port_disconnect(p_ptr);
492 tipc_port_unlock(p_ptr);
493 return res;
494}
495
496/*
497 * tipc_port_shutdown(): Send a SHUTDOWN msg to peer and disconnect
498 */
499int tipc_port_shutdown(u32 ref)
500{
501 struct tipc_msg *msg;
502 struct tipc_port *p_ptr;
503 struct sk_buff *buf = NULL;
504
505 p_ptr = tipc_port_lock(ref);
506 if (!p_ptr)
507 return -EINVAL;
508
509 buf = port_build_peer_abort_msg(p_ptr, TIPC_CONN_SHUTDOWN);
510 tipc_port_unlock(p_ptr);
511 msg = buf_msg(buf);
512 tipc_link_xmit(buf, msg_destnode(msg), msg_link_selector(msg));
513 return tipc_port_disconnect(ref);
514}
diff --git a/net/tipc/port.h b/net/tipc/port.h
deleted file mode 100644
index 3087da39ee47..000000000000
--- a/net/tipc/port.h
+++ /dev/null
@@ -1,190 +0,0 @@
1/*
2 * net/tipc/port.h: Include file for TIPC port code
3 *
4 * Copyright (c) 1994-2007, 2014, Ericsson AB
5 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_PORT_H
38#define _TIPC_PORT_H
39
40#include "ref.h"
41#include "net.h"
42#include "msg.h"
43#include "node_subscr.h"
44
45#define TIPC_CONNACK_INTV 256
46#define TIPC_FLOWCTRL_WIN (TIPC_CONNACK_INTV * 2)
47#define TIPC_CONN_OVERLOAD_LIMIT ((TIPC_FLOWCTRL_WIN * 2 + 1) * \
48 SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
49
50/**
51 * struct tipc_port - TIPC port structure
52 * @lock: pointer to spinlock for controlling access to port
53 * @connected: non-zero if port is currently connected to a peer port
54 * @conn_type: TIPC type used when connection was established
55 * @conn_instance: TIPC instance used when connection was established
56 * @published: non-zero if port has one or more associated names
57 * @max_pkt: maximum packet size "hint" used when building messages sent by port
58 * @ref: unique reference to port in TIPC object registry
59 * @phdr: preformatted message header used when sending messages
60 * @port_list: adjacent ports in TIPC's global list of ports
61 * @wait_list: adjacent ports in list of ports waiting on link congestion
62 * @waiting_pkts:
63 * @publications: list of publications for port
64 * @pub_count: total # of publications port has made during its lifetime
65 * @probing_state:
66 * @probing_interval:
67 * @timer_ref:
68 * @subscription: "node down" subscription used to terminate failed connections
69 */
70struct tipc_port {
71 spinlock_t *lock;
72 int connected;
73 u32 conn_type;
74 u32 conn_instance;
75 int published;
76 u32 max_pkt;
77 u32 ref;
78 struct tipc_msg phdr;
79 struct list_head port_list;
80 struct list_head wait_list;
81 u32 waiting_pkts;
82 struct list_head publications;
83 u32 pub_count;
84 u32 probing_state;
85 u32 probing_interval;
86 struct timer_list timer;
87 struct tipc_node_subscr subscription;
88};
89
90extern spinlock_t tipc_port_list_lock;
91struct tipc_port_list;
92
93/*
94 * TIPC port manipulation routines
95 */
96u32 tipc_port_init(struct tipc_port *p_ptr,
97 const unsigned int importance);
98
99void tipc_acknowledge(u32 port_ref, u32 ack);
100
101void tipc_port_destroy(struct tipc_port *p_ptr);
102
103int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
104 struct tipc_name_seq const *name_seq);
105
106int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
107 struct tipc_name_seq const *name_seq);
108
109int tipc_port_connect(u32 portref, struct tipc_portid const *port);
110
111int tipc_port_disconnect(u32 portref);
112
113int tipc_port_shutdown(u32 ref);
114
115/*
116 * The following routines require that the port be locked on entry
117 */
118int __tipc_port_disconnect(struct tipc_port *tp_ptr);
119int __tipc_port_connect(u32 ref, struct tipc_port *p_ptr,
120 struct tipc_portid const *peer);
121int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg);
122
123struct sk_buff *tipc_port_get_ports(void);
124void tipc_port_reinit(void);
125
126/**
127 * tipc_port_lock - lock port instance referred to and return its pointer
128 */
129static inline struct tipc_port *tipc_port_lock(u32 ref)
130{
131 return (struct tipc_port *)tipc_ref_lock(ref);
132}
133
134/**
135 * tipc_port_unlock - unlock a port instance
136 *
137 * Can use pointer instead of tipc_ref_unlock() since port is already locked.
138 */
139static inline void tipc_port_unlock(struct tipc_port *p_ptr)
140{
141 spin_unlock_bh(p_ptr->lock);
142}
143
144static inline u32 tipc_port_peernode(struct tipc_port *p_ptr)
145{
146 return msg_destnode(&p_ptr->phdr);
147}
148
149static inline u32 tipc_port_peerport(struct tipc_port *p_ptr)
150{
151 return msg_destport(&p_ptr->phdr);
152}
153
154static inline bool tipc_port_unreliable(struct tipc_port *port)
155{
156 return msg_src_droppable(&port->phdr) != 0;
157}
158
159static inline void tipc_port_set_unreliable(struct tipc_port *port,
160 bool unreliable)
161{
162 msg_set_src_droppable(&port->phdr, unreliable ? 1 : 0);
163}
164
165static inline bool tipc_port_unreturnable(struct tipc_port *port)
166{
167 return msg_dest_droppable(&port->phdr) != 0;
168}
169
170static inline void tipc_port_set_unreturnable(struct tipc_port *port,
171 bool unreturnable)
172{
173 msg_set_dest_droppable(&port->phdr, unreturnable ? 1 : 0);
174}
175
176
177static inline int tipc_port_importance(struct tipc_port *port)
178{
179 return msg_importance(&port->phdr);
180}
181
182static inline int tipc_port_set_importance(struct tipc_port *port, int imp)
183{
184 if (imp > TIPC_CRITICAL_IMPORTANCE)
185 return -EINVAL;
186 msg_set_importance(&port->phdr, (u32)imp);
187 return 0;
188}
189
190#endif
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
deleted file mode 100644
index 3d4ecd754eee..000000000000
--- a/net/tipc/ref.c
+++ /dev/null
@@ -1,266 +0,0 @@
1/*
2 * net/tipc/ref.c: TIPC object registry code
3 *
4 * Copyright (c) 1991-2006, Ericsson AB
5 * Copyright (c) 2004-2007, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "ref.h"
39
40/**
41 * struct reference - TIPC object reference entry
42 * @object: pointer to object associated with reference entry
43 * @lock: spinlock controlling access to object
44 * @ref: reference value for object (combines instance & array index info)
45 */
46struct reference {
47 void *object;
48 spinlock_t lock;
49 u32 ref;
50};
51
52/**
 53 * struct ref_table - table of TIPC object reference entries
54 * @entries: pointer to array of reference entries
55 * @capacity: array index of first unusable entry
56 * @init_point: array index of first uninitialized entry
57 * @first_free: array index of first unused object reference entry
58 * @last_free: array index of last unused object reference entry
59 * @index_mask: bitmask for array index portion of reference values
60 * @start_mask: initial value for instance value portion of reference values
61 */
62struct ref_table {
63 struct reference *entries;
64 u32 capacity;
65 u32 init_point;
66 u32 first_free;
67 u32 last_free;
68 u32 index_mask;
69 u32 start_mask;
70};
71
72/*
73 * Object reference table consists of 2**N entries.
74 *
75 * State Object ptr Reference
76 * ----- ---------- ---------
77 * In use non-NULL XXXX|own index
78 * (XXXX changes each time entry is acquired)
79 * Free NULL YYYY|next free index
80 * (YYYY is one more than last used XXXX)
81 * Uninitialized NULL 0
82 *
83 * Entry 0 is not used; this allows index 0 to denote the end of the free list.
84 *
85 * Note that a reference value of 0 does not necessarily indicate that an
86 * entry is uninitialized, since the last entry in the free list could also
87 * have a reference value of 0 (although this is unlikely).
88 */
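
The split is easier to see with concrete numbers: the low log2(table size) bits of a reference select the slot, the upper bits carry the instance counter, and tipc_ref_discard() below advances the instance by (index_mask + 1) so stale references to a reused slot stop matching. A small stand-alone demonstration:

#include <stdio.h>

int main(void)
{
	unsigned int index_mask = 16 - 1;	/* table of 2**4 entries */
	unsigned int ref = 0x35;		/* instance 0x30, index 5 */

	printf("index:    %u\n", ref & index_mask);	/* 5 */
	printf("instance: 0x%x\n", ref & ~index_mask);	/* 0x30 */

	/* tipc_ref_discard() analogue: same slot, bumped instance */
	ref = (ref & ~index_mask) + (index_mask + 1);
	printf("next ref for slot 5: 0x%x\n", ref | 5);	/* 0x45 */
	return 0;
}

On re-acquisition the slot hands out 0x45 where it previously handed out 0x35, so a lookup with the old reference fails the entry->ref == ref check in tipc_ref_lock().
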
89
90static struct ref_table tipc_ref_table;
91
92static DEFINE_SPINLOCK(ref_table_lock);
93
94/**
95 * tipc_ref_table_init - create reference table for objects
96 */
97int tipc_ref_table_init(u32 requested_size, u32 start)
98{
99 struct reference *table;
100 u32 actual_size;
101
102 /* account for unused entry, then round up size to a power of 2 */
103
104 requested_size++;
105 for (actual_size = 16; actual_size < requested_size; actual_size <<= 1)
106 /* do nothing */ ;
107
108 /* allocate table & mark all entries as uninitialized */
109 table = vzalloc(actual_size * sizeof(struct reference));
110 if (table == NULL)
111 return -ENOMEM;
112
113 tipc_ref_table.entries = table;
114 tipc_ref_table.capacity = requested_size;
115 tipc_ref_table.init_point = 1;
116 tipc_ref_table.first_free = 0;
117 tipc_ref_table.last_free = 0;
118 tipc_ref_table.index_mask = actual_size - 1;
119 tipc_ref_table.start_mask = start & ~tipc_ref_table.index_mask;
120
121 return 0;
122}
123
124/**
125 * tipc_ref_table_stop - destroy reference table for objects
126 */
127void tipc_ref_table_stop(void)
128{
129 vfree(tipc_ref_table.entries);
130 tipc_ref_table.entries = NULL;
131}
132
133/**
134 * tipc_ref_acquire - create reference to an object
135 *
136 * Register an object pointer in reference table and lock the object.
137 * Returns a unique reference value that is used from then on to retrieve the
138 * object pointer, or to determine that the object has been deregistered.
139 *
140 * Note: The object is returned in the locked state so that the caller can
141 * register a partially initialized object, without running the risk that
142 * the object will be accessed before initialization is complete.
143 */
144u32 tipc_ref_acquire(void *object, spinlock_t **lock)
145{
146 u32 index;
147 u32 index_mask;
148 u32 next_plus_upper;
149 u32 ref;
150 struct reference *entry = NULL;
151
152 if (!object) {
153 pr_err("Attempt to acquire ref. to non-existent obj\n");
154 return 0;
155 }
156 if (!tipc_ref_table.entries) {
157 pr_err("Ref. table not found in acquisition attempt\n");
158 return 0;
159 }
160
161 /* take a free entry, if available; otherwise initialize a new entry */
162 spin_lock_bh(&ref_table_lock);
163 if (tipc_ref_table.first_free) {
164 index = tipc_ref_table.first_free;
165 entry = &(tipc_ref_table.entries[index]);
166 index_mask = tipc_ref_table.index_mask;
167 next_plus_upper = entry->ref;
168 tipc_ref_table.first_free = next_plus_upper & index_mask;
169 ref = (next_plus_upper & ~index_mask) + index;
170 } else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
171 index = tipc_ref_table.init_point++;
172 entry = &(tipc_ref_table.entries[index]);
173 spin_lock_init(&entry->lock);
174 ref = tipc_ref_table.start_mask + index;
175 } else {
176 ref = 0;
177 }
178 spin_unlock_bh(&ref_table_lock);
179
180 /*
181 * Grab the lock so no one else can modify this entry
 182 * while we assign its ref value & object pointer
183 */
184 if (entry) {
185 spin_lock_bh(&entry->lock);
186 entry->ref = ref;
187 entry->object = object;
188 *lock = &entry->lock;
189 /*
 190 * keep it locked; the caller is responsible
 191 * for unlocking it when done with it
192 */
193 }
194
195 return ref;
196}
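A minimal sketch of the intended calling pattern, assuming a hypothetical caller-defined struct my_obj that records the lock pointer handed back by tipc_ref_acquire (TIPC ports stored it the same way):

	struct my_obj {				/* hypothetical table user */
		u32 ref;
		spinlock_t *lock;
		/* ... payload ... */
	};

	static int my_obj_create(void)
	{
		struct my_obj *obj = kzalloc(sizeof(*obj), GFP_ATOMIC);
		spinlock_t *lock;
		u32 ref;

		if (!obj)
			return -ENOMEM;
		ref = tipc_ref_acquire(obj, &lock);
		if (!ref) {
			kfree(obj);
			return -ENOMEM;		/* reference table exhausted */
		}
		obj->ref = ref;			/* finish init while entry is locked */
		obj->lock = lock;
		spin_unlock_bh(lock);		/* now reachable via tipc_ref_lock() */
		return 0;
	}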
197
198/**
199 * tipc_ref_discard - invalidate references to an object
200 *
201 * Disallow future references to an object and free up the entry for re-use.
202 * Note: The entry's spin_lock may still be busy after discard
203 */
204void tipc_ref_discard(u32 ref)
205{
206 struct reference *entry;
207 u32 index;
208 u32 index_mask;
209
210 if (!tipc_ref_table.entries) {
211 pr_err("Ref. table not found during discard attempt\n");
212 return;
213 }
214
215 index_mask = tipc_ref_table.index_mask;
216 index = ref & index_mask;
217 entry = &(tipc_ref_table.entries[index]);
218
219 spin_lock_bh(&ref_table_lock);
220
221 if (!entry->object) {
222 pr_err("Attempt to discard ref. to non-existent obj\n");
223 goto exit;
224 }
225 if (entry->ref != ref) {
226 pr_err("Attempt to discard non-existent reference\n");
227 goto exit;
228 }
229
230 /*
231 * mark entry as unused; increment instance part of entry's reference
232 * to invalidate any subsequent references
233 */
234 entry->object = NULL;
235 entry->ref = (ref & ~index_mask) + (index_mask + 1);
236
237 /* append entry to free entry list */
238 if (tipc_ref_table.first_free == 0)
239 tipc_ref_table.first_free = index;
240 else
241 tipc_ref_table.entries[tipc_ref_table.last_free].ref |= index;
242 tipc_ref_table.last_free = index;
243
244exit:
245 spin_unlock_bh(&ref_table_lock);
246}
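Worked example (illustrative): with the minimum 16-entry table, index_mask is 0xF. Discarding an in-use entry whose reference is 0x35 (instance 0x30, index 5) stores (0x35 & ~0xF) + 0x10 = 0x40 in entry->ref, so a later tipc_ref_lock(0x35) fails the entry->ref == ref check, and the next acquisition of slot 5 hands out reference 0x40 + 5 = 0x45.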
247
248/**
249 * tipc_ref_lock - lock referenced object and return pointer to it
250 */
251void *tipc_ref_lock(u32 ref)
252{
253 if (likely(tipc_ref_table.entries)) {
254 struct reference *entry;
255
256 entry = &tipc_ref_table.entries[ref &
257 tipc_ref_table.index_mask];
258 if (likely(entry->ref != 0)) {
259 spin_lock_bh(&entry->lock);
260 if (likely((entry->ref == ref) && (entry->object)))
261 return entry->object;
262 spin_unlock_bh(&entry->lock);
263 }
264 }
265 return NULL;
266}
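The matching lookup/release sequence, continuing the hypothetical my_obj sketch above:

	static void my_obj_use(u32 ref)
	{
		struct my_obj *obj = tipc_ref_lock(ref);

		if (!obj)
			return;			/* stale or discarded reference */
		/* ... use obj; its table entry lock is held here ... */
		spin_unlock_bh(obj->lock);	/* lock pointer saved at acquire time */
	}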
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index ff8c8118d56e..4731cad99d1c 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -35,17 +35,67 @@
35 */ 35 */
36 36
37#include "core.h" 37#include "core.h"
38#include "port.h"
39#include "name_table.h" 38#include "name_table.h"
40#include "node.h" 39#include "node.h"
41#include "link.h" 40#include "link.h"
42#include <linux/export.h> 41#include <linux/export.h>
42#include "config.h"
43#include "socket.h"
43 44
44#define SS_LISTENING -1 /* socket is listening */ 45#define SS_LISTENING -1 /* socket is listening */
45#define SS_READY -2 /* socket is connectionless */ 46#define SS_READY -2 /* socket is connectionless */
46 47
47#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ 48#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
48#define TIPC_FWD_MSG 1 49#define CONN_PROBING_INTERVAL 3600000 /* [ms] => 1 h */
50#define TIPC_FWD_MSG 1
51#define TIPC_CONN_OK 0
52#define TIPC_CONN_PROBING 1
53
54/**
55 * struct tipc_sock - TIPC socket structure
 56 * @sk: socket - interacts with the rest of the TIPC stack and with user via the socket API
 57 * @connected: non-zero if socket is currently connected to a peer socket
 58 * @conn_type: TIPC type used when connection was established
 59 * @conn_instance: TIPC instance used when connection was established
 60 * @published: non-zero if socket has one or more associated names
 61 * @max_pkt: maximum packet size "hint" used when building messages sent by socket
 62 * @ref: unique reference to socket in TIPC object registry
 63 * @phdr: preformatted message header used when sending messages
 64 * @sock_list: adjacent sockets in TIPC's global list of sockets
 65 * @publications: list of publications for socket
 66 * @pub_count: total # of publications socket has made during its lifetime
 67 * @probing_state: state of connection probing (TIPC_CONN_OK or TIPC_CONN_PROBING)
 68 * @probing_interval: interval between connection probes, in ms
 69 * @timer: connection probing timer
72 * @conn_timeout: the time we can wait for an unresponded setup request
73 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
74 * @link_cong: non-zero if owner must sleep because of link congestion
75 * @sent_unacked: # messages sent by socket, and not yet acked by peer
76 * @rcv_unacked: # messages read by user, but not yet acked back to peer
77 */
78struct tipc_sock {
79 struct sock sk;
80 int connected;
81 u32 conn_type;
82 u32 conn_instance;
83 int published;
84 u32 max_pkt;
85 u32 ref;
86 struct tipc_msg phdr;
87 struct list_head sock_list;
88 struct list_head publications;
89 u32 pub_count;
90 u32 probing_state;
91 u32 probing_interval;
92 struct timer_list timer;
93 uint conn_timeout;
94 atomic_t dupl_rcvcnt;
95 bool link_cong;
96 uint sent_unacked;
97 uint rcv_unacked;
98};
49 99
50static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb); 100static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
51static void tipc_data_ready(struct sock *sk); 101static void tipc_data_ready(struct sock *sk);
@@ -53,6 +103,16 @@ static void tipc_write_space(struct sock *sk);
53static int tipc_release(struct socket *sock); 103static int tipc_release(struct socket *sock);
54static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags); 104static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
55static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p); 105static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
106static void tipc_sk_timeout(unsigned long ref);
107static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
108 struct tipc_name_seq const *seq);
109static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
110 struct tipc_name_seq const *seq);
111static u32 tipc_sk_ref_acquire(struct tipc_sock *tsk);
112static void tipc_sk_ref_discard(u32 ref);
113static struct tipc_sock *tipc_sk_get(u32 ref);
114static struct tipc_sock *tipc_sk_get_next(u32 *ref);
115static void tipc_sk_put(struct tipc_sock *tsk);
56 116
57static const struct proto_ops packet_ops; 117static const struct proto_ops packet_ops;
58static const struct proto_ops stream_ops; 118static const struct proto_ops stream_ops;
@@ -61,6 +121,14 @@ static const struct proto_ops msg_ops;
61static struct proto tipc_proto; 121static struct proto tipc_proto;
62static struct proto tipc_proto_kern; 122static struct proto tipc_proto_kern;
63 123
124static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
125 [TIPC_NLA_SOCK_UNSPEC] = { .type = NLA_UNSPEC },
126 [TIPC_NLA_SOCK_ADDR] = { .type = NLA_U32 },
127 [TIPC_NLA_SOCK_REF] = { .type = NLA_U32 },
128 [TIPC_NLA_SOCK_CON] = { .type = NLA_NESTED },
129 [TIPC_NLA_SOCK_HAS_PUBL] = { .type = NLA_FLAG }
130};
131
64/* 132/*
65 * Revised TIPC socket locking policy: 133 * Revised TIPC socket locking policy:
66 * 134 *
@@ -106,34 +174,117 @@ static struct proto tipc_proto_kern;
106 * - port reference 174 * - port reference
107 */ 175 */
108 176
109#include "socket.h" 177static u32 tsk_peer_node(struct tipc_sock *tsk)
178{
179 return msg_destnode(&tsk->phdr);
180}
181
182static u32 tsk_peer_port(struct tipc_sock *tsk)
183{
184 return msg_destport(&tsk->phdr);
185}
186
187static bool tsk_unreliable(struct tipc_sock *tsk)
188{
189 return msg_src_droppable(&tsk->phdr) != 0;
190}
191
192static void tsk_set_unreliable(struct tipc_sock *tsk, bool unreliable)
193{
194 msg_set_src_droppable(&tsk->phdr, unreliable ? 1 : 0);
195}
196
197static bool tsk_unreturnable(struct tipc_sock *tsk)
198{
199 return msg_dest_droppable(&tsk->phdr) != 0;
200}
201
202static void tsk_set_unreturnable(struct tipc_sock *tsk, bool unreturnable)
203{
204 msg_set_dest_droppable(&tsk->phdr, unreturnable ? 1 : 0);
205}
206
207static int tsk_importance(struct tipc_sock *tsk)
208{
209 return msg_importance(&tsk->phdr);
210}
211
212static int tsk_set_importance(struct tipc_sock *tsk, int imp)
213{
214 if (imp > TIPC_CRITICAL_IMPORTANCE)
215 return -EINVAL;
216 msg_set_importance(&tsk->phdr, (u32)imp);
217 return 0;
218}
219
220static struct tipc_sock *tipc_sk(const struct sock *sk)
221{
222 return container_of(sk, struct tipc_sock, sk);
223}
224
225static int tsk_conn_cong(struct tipc_sock *tsk)
226{
227 return tsk->sent_unacked >= TIPC_FLOWCTRL_WIN;
228}
110 229
111/** 230/**
112 * advance_rx_queue - discard first buffer in socket receive queue 231 * tsk_advance_rx_queue - discard first buffer in socket receive queue
113 * 232 *
114 * Caller must hold socket lock 233 * Caller must hold socket lock
115 */ 234 */
116static void advance_rx_queue(struct sock *sk) 235static void tsk_advance_rx_queue(struct sock *sk)
117{ 236{
118 kfree_skb(__skb_dequeue(&sk->sk_receive_queue)); 237 kfree_skb(__skb_dequeue(&sk->sk_receive_queue));
119} 238}
120 239
121/** 240/**
122 * reject_rx_queue - reject all buffers in socket receive queue 241 * tsk_rej_rx_queue - reject all buffers in socket receive queue
123 * 242 *
124 * Caller must hold socket lock 243 * Caller must hold socket lock
125 */ 244 */
126static void reject_rx_queue(struct sock *sk) 245static void tsk_rej_rx_queue(struct sock *sk)
127{ 246{
128 struct sk_buff *buf; 247 struct sk_buff *skb;
129 u32 dnode; 248 u32 dnode;
130 249
131 while ((buf = __skb_dequeue(&sk->sk_receive_queue))) { 250 while ((skb = __skb_dequeue(&sk->sk_receive_queue))) {
132 if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT)) 251 if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT))
133 tipc_link_xmit(buf, dnode, 0); 252 tipc_link_xmit_skb(skb, dnode, 0);
134 } 253 }
135} 254}
136 255
256/* tsk_peer_msg - verify if message was sent by connected port's peer
257 *
258 * Handles cases where the node's network address has changed from
259 * the default of <0.0.0> to its configured setting.
260 */
261static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
262{
263 u32 peer_port = tsk_peer_port(tsk);
264 u32 orig_node;
265 u32 peer_node;
266
267 if (unlikely(!tsk->connected))
268 return false;
269
270 if (unlikely(msg_origport(msg) != peer_port))
271 return false;
272
273 orig_node = msg_orignode(msg);
274 peer_node = tsk_peer_node(tsk);
275
276 if (likely(orig_node == peer_node))
277 return true;
278
279 if (!orig_node && (peer_node == tipc_own_addr))
280 return true;
281
282 if (!peer_node && (orig_node == tipc_own_addr))
283 return true;
284
285 return false;
286}
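Spelled out, the case this handles: a connection to a peer on the same node, set up before the node address was configured, records peer_node == 0 in the preformatted header; once the address is assigned, the peer's messages arrive with orig_node == tipc_own_addr, which the last test above accepts (the preceding test covers the mirrored situation, where the message carries 0 and the header holds the configured address).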
287
137/** 288/**
138 * tipc_sk_create - create a TIPC socket 289 * tipc_sk_create - create a TIPC socket
139 * @net: network namespace (must be default network) 290 * @net: network namespace (must be default network)
@@ -153,7 +304,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
153 socket_state state; 304 socket_state state;
154 struct sock *sk; 305 struct sock *sk;
155 struct tipc_sock *tsk; 306 struct tipc_sock *tsk;
156 struct tipc_port *port; 307 struct tipc_msg *msg;
157 u32 ref; 308 u32 ref;
158 309
159 /* Validate arguments */ 310 /* Validate arguments */
@@ -188,20 +339,24 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
188 return -ENOMEM; 339 return -ENOMEM;
189 340
190 tsk = tipc_sk(sk); 341 tsk = tipc_sk(sk);
191 port = &tsk->port; 342 ref = tipc_sk_ref_acquire(tsk);
192
193 ref = tipc_port_init(port, TIPC_LOW_IMPORTANCE);
194 if (!ref) { 343 if (!ref) {
195 pr_warn("Socket registration failed, ref. table exhausted\n"); 344 pr_warn("Socket create failed; reference table exhausted\n");
196 sk_free(sk);
197 return -ENOMEM; 345 return -ENOMEM;
198 } 346 }
347 tsk->max_pkt = MAX_PKT_DEFAULT;
348 tsk->ref = ref;
349 INIT_LIST_HEAD(&tsk->publications);
350 msg = &tsk->phdr;
351 tipc_msg_init(msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
352 NAMED_H_SIZE, 0);
353 msg_set_origport(msg, ref);
199 354
200 /* Finish initializing socket data structures */ 355 /* Finish initializing socket data structures */
201 sock->ops = ops; 356 sock->ops = ops;
202 sock->state = state; 357 sock->state = state;
203
204 sock_init_data(sock, sk); 358 sock_init_data(sock, sk);
359 k_init_timer(&tsk->timer, (Handler)tipc_sk_timeout, ref);
205 sk->sk_backlog_rcv = tipc_backlog_rcv; 360 sk->sk_backlog_rcv = tipc_backlog_rcv;
206 sk->sk_rcvbuf = sysctl_tipc_rmem[1]; 361 sk->sk_rcvbuf = sysctl_tipc_rmem[1];
207 sk->sk_data_ready = tipc_data_ready; 362 sk->sk_data_ready = tipc_data_ready;
@@ -209,12 +364,11 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
209 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT; 364 tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
210 tsk->sent_unacked = 0; 365 tsk->sent_unacked = 0;
211 atomic_set(&tsk->dupl_rcvcnt, 0); 366 atomic_set(&tsk->dupl_rcvcnt, 0);
212 tipc_port_unlock(port);
213 367
214 if (sock->state == SS_READY) { 368 if (sock->state == SS_READY) {
215 tipc_port_set_unreturnable(port, true); 369 tsk_set_unreturnable(tsk, true);
216 if (sock->type == SOCK_DGRAM) 370 if (sock->type == SOCK_DGRAM)
217 tipc_port_set_unreliable(port, true); 371 tsk_set_unreliable(tsk, true);
218 } 372 }
219 return 0; 373 return 0;
220} 374}
@@ -308,8 +462,7 @@ static int tipc_release(struct socket *sock)
308{ 462{
309 struct sock *sk = sock->sk; 463 struct sock *sk = sock->sk;
310 struct tipc_sock *tsk; 464 struct tipc_sock *tsk;
311 struct tipc_port *port; 465 struct sk_buff *skb;
312 struct sk_buff *buf;
313 u32 dnode; 466 u32 dnode;
314 467
315 /* 468 /*
@@ -320,34 +473,44 @@ static int tipc_release(struct socket *sock)
320 return 0; 473 return 0;
321 474
322 tsk = tipc_sk(sk); 475 tsk = tipc_sk(sk);
323 port = &tsk->port;
324 lock_sock(sk); 476 lock_sock(sk);
325 477
326 /* 478 /*
327 * Reject all unreceived messages, except on an active connection 479 * Reject all unreceived messages, except on an active connection
328 * (which disconnects locally & sends a 'FIN+' to peer) 480 * (which disconnects locally & sends a 'FIN+' to peer)
329 */ 481 */
482 dnode = tsk_peer_node(tsk);
330 while (sock->state != SS_DISCONNECTING) { 483 while (sock->state != SS_DISCONNECTING) {
331 buf = __skb_dequeue(&sk->sk_receive_queue); 484 skb = __skb_dequeue(&sk->sk_receive_queue);
332 if (buf == NULL) 485 if (skb == NULL)
333 break; 486 break;
334 if (TIPC_SKB_CB(buf)->handle != NULL) 487 if (TIPC_SKB_CB(skb)->handle != NULL)
335 kfree_skb(buf); 488 kfree_skb(skb);
336 else { 489 else {
337 if ((sock->state == SS_CONNECTING) || 490 if ((sock->state == SS_CONNECTING) ||
338 (sock->state == SS_CONNECTED)) { 491 (sock->state == SS_CONNECTED)) {
339 sock->state = SS_DISCONNECTING; 492 sock->state = SS_DISCONNECTING;
340 tipc_port_disconnect(port->ref); 493 tsk->connected = 0;
494 tipc_node_remove_conn(dnode, tsk->ref);
341 } 495 }
342 if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT)) 496 if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT))
343 tipc_link_xmit(buf, dnode, 0); 497 tipc_link_xmit_skb(skb, dnode, 0);
344 } 498 }
345 } 499 }
346 500
347 /* Destroy TIPC port; also disconnects an active connection and 501 tipc_sk_withdraw(tsk, 0, NULL);
348 * sends a 'FIN-' to peer. 502 tipc_sk_ref_discard(tsk->ref);
349 */ 503 k_cancel_timer(&tsk->timer);
350 tipc_port_destroy(port); 504 if (tsk->connected) {
505 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
506 SHORT_H_SIZE, 0, dnode, tipc_own_addr,
507 tsk_peer_port(tsk),
508 tsk->ref, TIPC_ERR_NO_PORT);
509 if (skb)
510 tipc_link_xmit_skb(skb, dnode, tsk->ref);
511 tipc_node_remove_conn(dnode, tsk->ref);
512 }
513 k_term_timer(&tsk->timer);
351 514
352 /* Discard any remaining (connection-based) messages in receive queue */ 515 /* Discard any remaining (connection-based) messages in receive queue */
353 __skb_queue_purge(&sk->sk_receive_queue); 516 __skb_queue_purge(&sk->sk_receive_queue);
@@ -355,7 +518,6 @@ static int tipc_release(struct socket *sock)
355 /* Reject any messages that accumulated in backlog queue */ 518 /* Reject any messages that accumulated in backlog queue */
356 sock->state = SS_DISCONNECTING; 519 sock->state = SS_DISCONNECTING;
357 release_sock(sk); 520 release_sock(sk);
358
359 sock_put(sk); 521 sock_put(sk);
360 sock->sk = NULL; 522 sock->sk = NULL;
361 523
@@ -387,7 +549,7 @@ static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
387 549
388 lock_sock(sk); 550 lock_sock(sk);
389 if (unlikely(!uaddr_len)) { 551 if (unlikely(!uaddr_len)) {
390 res = tipc_withdraw(&tsk->port, 0, NULL); 552 res = tipc_sk_withdraw(tsk, 0, NULL);
391 goto exit; 553 goto exit;
392 } 554 }
393 555
@@ -415,8 +577,8 @@ static int tipc_bind(struct socket *sock, struct sockaddr *uaddr,
415 } 577 }
416 578
417 res = (addr->scope > 0) ? 579 res = (addr->scope > 0) ?
418 tipc_publish(&tsk->port, addr->scope, &addr->addr.nameseq) : 580 tipc_sk_publish(tsk, addr->scope, &addr->addr.nameseq) :
419 tipc_withdraw(&tsk->port, -addr->scope, &addr->addr.nameseq); 581 tipc_sk_withdraw(tsk, -addr->scope, &addr->addr.nameseq);
420exit: 582exit:
421 release_sock(sk); 583 release_sock(sk);
422 return res; 584 return res;
@@ -446,10 +608,10 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
446 if ((sock->state != SS_CONNECTED) && 608 if ((sock->state != SS_CONNECTED) &&
447 ((peer != 2) || (sock->state != SS_DISCONNECTING))) 609 ((peer != 2) || (sock->state != SS_DISCONNECTING)))
448 return -ENOTCONN; 610 return -ENOTCONN;
449 addr->addr.id.ref = tipc_port_peerport(&tsk->port); 611 addr->addr.id.ref = tsk_peer_port(tsk);
450 addr->addr.id.node = tipc_port_peernode(&tsk->port); 612 addr->addr.id.node = tsk_peer_node(tsk);
451 } else { 613 } else {
452 addr->addr.id.ref = tsk->port.ref; 614 addr->addr.id.ref = tsk->ref;
453 addr->addr.id.node = tipc_own_addr; 615 addr->addr.id.node = tipc_own_addr;
454 } 616 }
455 617
@@ -518,7 +680,7 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
518 break; 680 break;
519 case SS_READY: 681 case SS_READY:
520 case SS_CONNECTED: 682 case SS_CONNECTED:
521 if (!tsk->link_cong && !tipc_sk_conn_cong(tsk)) 683 if (!tsk->link_cong && !tsk_conn_cong(tsk))
522 mask |= POLLOUT; 684 mask |= POLLOUT;
523 /* fall thru' */ 685 /* fall thru' */
524 case SS_CONNECTING: 686 case SS_CONNECTING:
@@ -538,7 +700,7 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
538 * tipc_sendmcast - send multicast message 700 * tipc_sendmcast - send multicast message
539 * @sock: socket structure 701 * @sock: socket structure
540 * @seq: destination address 702 * @seq: destination address
541 * @iov: message data to send 703 * @msg: message to send
542 * @dsz: total length of message data 704 * @dsz: total length of message data
543 * @timeo: timeout to wait for wakeup 705 * @timeo: timeout to wait for wakeup
544 * 706 *
@@ -546,11 +708,11 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
546 * Returns the number of bytes sent on success, or errno 708 * Returns the number of bytes sent on success, or errno
547 */ 709 */
548static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq, 710static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
549 struct iovec *iov, size_t dsz, long timeo) 711 struct msghdr *msg, size_t dsz, long timeo)
550{ 712{
551 struct sock *sk = sock->sk; 713 struct sock *sk = sock->sk;
552 struct tipc_msg *mhdr = &tipc_sk(sk)->port.phdr; 714 struct tipc_msg *mhdr = &tipc_sk(sk)->phdr;
553 struct sk_buff *buf; 715 struct sk_buff_head head;
554 uint mtu; 716 uint mtu;
555 int rc; 717 int rc;
556 718
@@ -565,12 +727,13 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
565 727
566new_mtu: 728new_mtu:
567 mtu = tipc_bclink_get_mtu(); 729 mtu = tipc_bclink_get_mtu();
568 rc = tipc_msg_build(mhdr, iov, 0, dsz, mtu, &buf); 730 __skb_queue_head_init(&head);
731 rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &head);
569 if (unlikely(rc < 0)) 732 if (unlikely(rc < 0))
570 return rc; 733 return rc;
571 734
572 do { 735 do {
573 rc = tipc_bclink_xmit(buf); 736 rc = tipc_bclink_xmit(&head);
574 if (likely(rc >= 0)) { 737 if (likely(rc >= 0)) {
575 rc = dsz; 738 rc = dsz;
576 break; 739 break;
@@ -579,9 +742,10 @@ new_mtu:
579 goto new_mtu; 742 goto new_mtu;
580 if (rc != -ELINKCONG) 743 if (rc != -ELINKCONG)
581 break; 744 break;
745 tipc_sk(sk)->link_cong = 1;
582 rc = tipc_wait_for_sndmsg(sock, &timeo); 746 rc = tipc_wait_for_sndmsg(sock, &timeo);
583 if (rc) 747 if (rc)
584 kfree_skb_list(buf); 748 __skb_queue_purge(&head);
585 } while (!rc); 749 } while (!rc);
586 return rc; 750 return rc;
587} 751}
@@ -638,20 +802,19 @@ static int tipc_sk_proto_rcv(struct tipc_sock *tsk, u32 *dnode,
638 struct sk_buff *buf) 802 struct sk_buff *buf)
639{ 803{
640 struct tipc_msg *msg = buf_msg(buf); 804 struct tipc_msg *msg = buf_msg(buf);
641 struct tipc_port *port = &tsk->port;
642 int conn_cong; 805 int conn_cong;
643 806
644 /* Ignore if connection cannot be validated: */ 807 /* Ignore if connection cannot be validated: */
645 if (!port->connected || !tipc_port_peer_msg(port, msg)) 808 if (!tsk_peer_msg(tsk, msg))
646 goto exit; 809 goto exit;
647 810
648 port->probing_state = TIPC_CONN_OK; 811 tsk->probing_state = TIPC_CONN_OK;
649 812
650 if (msg_type(msg) == CONN_ACK) { 813 if (msg_type(msg) == CONN_ACK) {
651 conn_cong = tipc_sk_conn_cong(tsk); 814 conn_cong = tsk_conn_cong(tsk);
652 tsk->sent_unacked -= msg_msgcnt(msg); 815 tsk->sent_unacked -= msg_msgcnt(msg);
653 if (conn_cong) 816 if (conn_cong)
654 tipc_sock_wakeup(tsk); 817 tsk->sk.sk_write_space(&tsk->sk);
655 } else if (msg_type(msg) == CONN_PROBE) { 818 } else if (msg_type(msg) == CONN_PROBE) {
656 if (!tipc_msg_reverse(buf, dnode, TIPC_OK)) 819 if (!tipc_msg_reverse(buf, dnode, TIPC_OK))
657 return TIPC_OK; 820 return TIPC_OK;
@@ -664,39 +827,6 @@ exit:
664 return TIPC_OK; 827 return TIPC_OK;
665} 828}
666 829
667/**
668 * dest_name_check - verify user is permitted to send to specified port name
669 * @dest: destination address
670 * @m: descriptor for message to be sent
671 *
672 * Prevents restricted configuration commands from being issued by
673 * unauthorized users.
674 *
675 * Returns 0 if permission is granted, otherwise errno
676 */
677static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
678{
679 struct tipc_cfg_msg_hdr hdr;
680
681 if (unlikely(dest->addrtype == TIPC_ADDR_ID))
682 return 0;
683 if (likely(dest->addr.name.name.type >= TIPC_RESERVED_TYPES))
684 return 0;
685 if (likely(dest->addr.name.name.type == TIPC_TOP_SRV))
686 return 0;
687 if (likely(dest->addr.name.name.type != TIPC_CFG_SRV))
688 return -EACCES;
689
690 if (!m->msg_iovlen || (m->msg_iov[0].iov_len < sizeof(hdr)))
691 return -EMSGSIZE;
692 if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
693 return -EFAULT;
694 if ((ntohs(hdr.tcm_type) & 0xC000) && (!capable(CAP_NET_ADMIN)))
695 return -EACCES;
696
697 return 0;
698}
699
700static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p) 830static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
701{ 831{
702 struct sock *sk = sock->sk; 832 struct sock *sk = sock->sk;
@@ -742,15 +872,14 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
742 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 872 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
743 struct sock *sk = sock->sk; 873 struct sock *sk = sock->sk;
744 struct tipc_sock *tsk = tipc_sk(sk); 874 struct tipc_sock *tsk = tipc_sk(sk);
745 struct tipc_port *port = &tsk->port; 875 struct tipc_msg *mhdr = &tsk->phdr;
746 struct tipc_msg *mhdr = &port->phdr;
747 struct iovec *iov = m->msg_iov;
748 u32 dnode, dport; 876 u32 dnode, dport;
749 struct sk_buff *buf; 877 struct sk_buff_head head;
878 struct sk_buff *skb;
750 struct tipc_name_seq *seq = &dest->addr.nameseq; 879 struct tipc_name_seq *seq = &dest->addr.nameseq;
751 u32 mtu; 880 u32 mtu;
752 long timeo; 881 long timeo;
753 int rc = -EINVAL; 882 int rc;
754 883
755 if (unlikely(!dest)) 884 if (unlikely(!dest))
756 return -EDESTADDRREQ; 885 return -EDESTADDRREQ;
@@ -774,23 +903,20 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
774 rc = -EISCONN; 903 rc = -EISCONN;
775 goto exit; 904 goto exit;
776 } 905 }
777 if (tsk->port.published) { 906 if (tsk->published) {
778 rc = -EOPNOTSUPP; 907 rc = -EOPNOTSUPP;
779 goto exit; 908 goto exit;
780 } 909 }
781 if (dest->addrtype == TIPC_ADDR_NAME) { 910 if (dest->addrtype == TIPC_ADDR_NAME) {
782 tsk->port.conn_type = dest->addr.name.name.type; 911 tsk->conn_type = dest->addr.name.name.type;
783 tsk->port.conn_instance = dest->addr.name.name.instance; 912 tsk->conn_instance = dest->addr.name.name.instance;
784 } 913 }
785 } 914 }
786 rc = dest_name_check(dest, m);
787 if (rc)
788 goto exit;
789 915
790 timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); 916 timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
791 917
792 if (dest->addrtype == TIPC_ADDR_MCAST) { 918 if (dest->addrtype == TIPC_ADDR_MCAST) {
793 rc = tipc_sendmcast(sock, seq, iov, dsz, timeo); 919 rc = tipc_sendmcast(sock, seq, m, dsz, timeo);
794 goto exit; 920 goto exit;
795 } else if (dest->addrtype == TIPC_ADDR_NAME) { 921 } else if (dest->addrtype == TIPC_ADDR_NAME) {
796 u32 type = dest->addr.name.name.type; 922 u32 type = dest->addr.name.name.type;
@@ -820,13 +946,16 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
820 } 946 }
821 947
822new_mtu: 948new_mtu:
823 mtu = tipc_node_get_mtu(dnode, tsk->port.ref); 949 mtu = tipc_node_get_mtu(dnode, tsk->ref);
824 rc = tipc_msg_build(mhdr, iov, 0, dsz, mtu, &buf); 950 __skb_queue_head_init(&head);
951 rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &head);
825 if (rc < 0) 952 if (rc < 0)
826 goto exit; 953 goto exit;
827 954
828 do { 955 do {
829 rc = tipc_link_xmit(buf, dnode, tsk->port.ref); 956 skb = skb_peek(&head);
957 TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
958 rc = tipc_link_xmit(&head, dnode, tsk->ref);
830 if (likely(rc >= 0)) { 959 if (likely(rc >= 0)) {
831 if (sock->state != SS_READY) 960 if (sock->state != SS_READY)
832 sock->state = SS_CONNECTING; 961 sock->state = SS_CONNECTING;
@@ -835,13 +964,12 @@ new_mtu:
835 } 964 }
836 if (rc == -EMSGSIZE) 965 if (rc == -EMSGSIZE)
837 goto new_mtu; 966 goto new_mtu;
838
839 if (rc != -ELINKCONG) 967 if (rc != -ELINKCONG)
840 break; 968 break;
841 969 tsk->link_cong = 1;
842 rc = tipc_wait_for_sndmsg(sock, &timeo); 970 rc = tipc_wait_for_sndmsg(sock, &timeo);
843 if (rc) 971 if (rc)
844 kfree_skb_list(buf); 972 __skb_queue_purge(&head);
845 } while (!rc); 973 } while (!rc);
846exit: 974exit:
847 if (iocb) 975 if (iocb)
@@ -873,8 +1001,8 @@ static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
873 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 1001 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
874 done = sk_wait_event(sk, timeo_p, 1002 done = sk_wait_event(sk, timeo_p,
875 (!tsk->link_cong && 1003 (!tsk->link_cong &&
876 !tipc_sk_conn_cong(tsk)) || 1004 !tsk_conn_cong(tsk)) ||
877 !tsk->port.connected); 1005 !tsk->connected);
878 finish_wait(sk_sleep(sk), &wait); 1006 finish_wait(sk_sleep(sk), &wait);
879 } while (!done); 1007 } while (!done);
880 return 0; 1008 return 0;
@@ -897,11 +1025,10 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
897{ 1025{
898 struct sock *sk = sock->sk; 1026 struct sock *sk = sock->sk;
899 struct tipc_sock *tsk = tipc_sk(sk); 1027 struct tipc_sock *tsk = tipc_sk(sk);
900 struct tipc_port *port = &tsk->port; 1028 struct tipc_msg *mhdr = &tsk->phdr;
901 struct tipc_msg *mhdr = &port->phdr; 1029 struct sk_buff_head head;
902 struct sk_buff *buf;
903 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 1030 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
904 u32 ref = port->ref; 1031 u32 ref = tsk->ref;
905 int rc = -EINVAL; 1032 int rc = -EINVAL;
906 long timeo; 1033 long timeo;
907 u32 dnode; 1034 u32 dnode;
@@ -929,17 +1056,18 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
929 } 1056 }
930 1057
931 timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT); 1058 timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
932 dnode = tipc_port_peernode(port); 1059 dnode = tsk_peer_node(tsk);
933 1060
934next: 1061next:
935 mtu = port->max_pkt; 1062 mtu = tsk->max_pkt;
936 send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE); 1063 send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
937 rc = tipc_msg_build(mhdr, m->msg_iov, sent, send, mtu, &buf); 1064 __skb_queue_head_init(&head);
1065 rc = tipc_msg_build(mhdr, m, sent, send, mtu, &head);
938 if (unlikely(rc < 0)) 1066 if (unlikely(rc < 0))
939 goto exit; 1067 goto exit;
940 do { 1068 do {
941 if (likely(!tipc_sk_conn_cong(tsk))) { 1069 if (likely(!tsk_conn_cong(tsk))) {
942 rc = tipc_link_xmit(buf, dnode, ref); 1070 rc = tipc_link_xmit(&head, dnode, ref);
943 if (likely(!rc)) { 1071 if (likely(!rc)) {
944 tsk->sent_unacked++; 1072 tsk->sent_unacked++;
945 sent += send; 1073 sent += send;
@@ -948,15 +1076,16 @@ next:
948 goto next; 1076 goto next;
949 } 1077 }
950 if (rc == -EMSGSIZE) { 1078 if (rc == -EMSGSIZE) {
951 port->max_pkt = tipc_node_get_mtu(dnode, ref); 1079 tsk->max_pkt = tipc_node_get_mtu(dnode, ref);
952 goto next; 1080 goto next;
953 } 1081 }
954 if (rc != -ELINKCONG) 1082 if (rc != -ELINKCONG)
955 break; 1083 break;
1084 tsk->link_cong = 1;
956 } 1085 }
957 rc = tipc_wait_for_sndpkt(sock, &timeo); 1086 rc = tipc_wait_for_sndpkt(sock, &timeo);
958 if (rc) 1087 if (rc)
959 kfree_skb_list(buf); 1088 __skb_queue_purge(&head);
960 } while (!rc); 1089 } while (!rc);
961exit: 1090exit:
962 if (iocb) 1091 if (iocb)
@@ -984,29 +1113,25 @@ static int tipc_send_packet(struct kiocb *iocb, struct socket *sock,
984 return tipc_send_stream(iocb, sock, m, dsz); 1113 return tipc_send_stream(iocb, sock, m, dsz);
985} 1114}
986 1115
987/** 1116/* tipc_sk_finish_conn - complete the setup of a connection
988 * auto_connect - complete connection setup to a remote port
989 * @tsk: tipc socket structure
990 * @msg: peer's response message
991 *
992 * Returns 0 on success, errno otherwise
993 */ 1117 */
994static int auto_connect(struct tipc_sock *tsk, struct tipc_msg *msg) 1118static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1119 u32 peer_node)
995{ 1120{
996 struct tipc_port *port = &tsk->port; 1121 struct tipc_msg *msg = &tsk->phdr;
997 struct socket *sock = tsk->sk.sk_socket; 1122
998 struct tipc_portid peer; 1123 msg_set_destnode(msg, peer_node);
999 1124 msg_set_destport(msg, peer_port);
1000 peer.ref = msg_origport(msg); 1125 msg_set_type(msg, TIPC_CONN_MSG);
1001 peer.node = msg_orignode(msg); 1126 msg_set_lookup_scope(msg, 0);
1002 1127 msg_set_hdr_sz(msg, SHORT_H_SIZE);
1003 __tipc_port_connect(port->ref, port, &peer); 1128
1004 1129 tsk->probing_interval = CONN_PROBING_INTERVAL;
1005 if (msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE) 1130 tsk->probing_state = TIPC_CONN_OK;
1006 return -EINVAL; 1131 tsk->connected = 1;
1007 msg_set_importance(&port->phdr, (u32)msg_importance(msg)); 1132 k_start_timer(&tsk->timer, tsk->probing_interval);
1008 sock->state = SS_CONNECTED; 1133 tipc_node_add_conn(peer_node, tsk->ref, peer_port);
1009 return 0; 1134 tsk->max_pkt = tipc_node_get_mtu(peer_node, tsk->ref);
1010} 1135}
1011 1136
1012/** 1137/**
@@ -1033,17 +1158,17 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
1033} 1158}
1034 1159
1035/** 1160/**
1036 * anc_data_recv - optionally capture ancillary data for received message 1161 * tipc_sk_anc_data_recv - optionally capture ancillary data for received message
1037 * @m: descriptor for message info 1162 * @m: descriptor for message info
1038 * @msg: received message header 1163 * @msg: received message header
1039 * @tport: TIPC port associated with message 1164 * @tsk: TIPC port associated with message
1040 * 1165 *
1041 * Note: Ancillary data is not captured if not requested by receiver. 1166 * Note: Ancillary data is not captured if not requested by receiver.
1042 * 1167 *
1043 * Returns 0 if successful, otherwise errno 1168 * Returns 0 if successful, otherwise errno
1044 */ 1169 */
1045static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg, 1170static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
1046 struct tipc_port *tport) 1171 struct tipc_sock *tsk)
1047{ 1172{
1048 u32 anc_data[3]; 1173 u32 anc_data[3];
1049 u32 err; 1174 u32 err;
@@ -1086,10 +1211,10 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
1086 anc_data[2] = msg_nameupper(msg); 1211 anc_data[2] = msg_nameupper(msg);
1087 break; 1212 break;
1088 case TIPC_CONN_MSG: 1213 case TIPC_CONN_MSG:
1089 has_name = (tport->conn_type != 0); 1214 has_name = (tsk->conn_type != 0);
1090 anc_data[0] = tport->conn_type; 1215 anc_data[0] = tsk->conn_type;
1091 anc_data[1] = tport->conn_instance; 1216 anc_data[1] = tsk->conn_instance;
1092 anc_data[2] = tport->conn_instance; 1217 anc_data[2] = tsk->conn_instance;
1093 break; 1218 break;
1094 default: 1219 default:
1095 has_name = 0; 1220 has_name = 0;
@@ -1103,6 +1228,24 @@ static int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
1103 return 0; 1228 return 0;
1104} 1229}
1105 1230
1231static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
1232{
1233 struct sk_buff *skb = NULL;
1234 struct tipc_msg *msg;
1235 u32 peer_port = tsk_peer_port(tsk);
1236 u32 dnode = tsk_peer_node(tsk);
1237
1238 if (!tsk->connected)
1239 return;
1240 skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode,
1241 tipc_own_addr, peer_port, tsk->ref, TIPC_OK);
1242 if (!skb)
1243 return;
1244 msg = buf_msg(skb);
1245 msg_set_msgcnt(msg, ack);
1246 tipc_link_xmit_skb(skb, dnode, msg_link_selector(msg));
1247}
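This is the receive half of the connection-level flow control: the reader batches acknowledgements (one CONN_ACK per TIPC_CONNACK_INTV consumed messages, see tipc_recvmsg() and tipc_recv_stream() below), while on the sending side tipc_sk_proto_rcv() above subtracts msg_msgcnt() from sent_unacked and wakes the writer once tsk_conn_cong() clears.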
1248
1106static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) 1249static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
1107{ 1250{
1108 struct sock *sk = sock->sk; 1251 struct sock *sk = sock->sk;
@@ -1153,7 +1296,6 @@ static int tipc_recvmsg(struct kiocb *iocb, struct socket *sock,
1153{ 1296{
1154 struct sock *sk = sock->sk; 1297 struct sock *sk = sock->sk;
1155 struct tipc_sock *tsk = tipc_sk(sk); 1298 struct tipc_sock *tsk = tipc_sk(sk);
1156 struct tipc_port *port = &tsk->port;
1157 struct sk_buff *buf; 1299 struct sk_buff *buf;
1158 struct tipc_msg *msg; 1300 struct tipc_msg *msg;
1159 long timeo; 1301 long timeo;
@@ -1188,7 +1330,7 @@ restart:
1188 1330
1189 /* Discard an empty non-errored message & try again */ 1331 /* Discard an empty non-errored message & try again */
1190 if ((!sz) && (!err)) { 1332 if ((!sz) && (!err)) {
1191 advance_rx_queue(sk); 1333 tsk_advance_rx_queue(sk);
1192 goto restart; 1334 goto restart;
1193 } 1335 }
1194 1336
@@ -1196,7 +1338,7 @@ restart:
1196 set_orig_addr(m, msg); 1338 set_orig_addr(m, msg);
1197 1339
1198 /* Capture ancillary data (optional) */ 1340 /* Capture ancillary data (optional) */
1199 res = anc_data_recv(m, msg, port); 1341 res = tipc_sk_anc_data_recv(m, msg, tsk);
1200 if (res) 1342 if (res)
1201 goto exit; 1343 goto exit;
1202 1344
@@ -1206,8 +1348,7 @@ restart:
1206 sz = buf_len; 1348 sz = buf_len;
1207 m->msg_flags |= MSG_TRUNC; 1349 m->msg_flags |= MSG_TRUNC;
1208 } 1350 }
1209 res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg), 1351 res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg), m, sz);
1210 m->msg_iov, sz);
1211 if (res) 1352 if (res)
1212 goto exit; 1353 goto exit;
1213 res = sz; 1354 res = sz;
@@ -1223,10 +1364,10 @@ restart:
1223 if (likely(!(flags & MSG_PEEK))) { 1364 if (likely(!(flags & MSG_PEEK))) {
1224 if ((sock->state != SS_READY) && 1365 if ((sock->state != SS_READY) &&
1225 (++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) { 1366 (++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
1226 tipc_acknowledge(port->ref, tsk->rcv_unacked); 1367 tipc_sk_send_ack(tsk, tsk->rcv_unacked);
1227 tsk->rcv_unacked = 0; 1368 tsk->rcv_unacked = 0;
1228 } 1369 }
1229 advance_rx_queue(sk); 1370 tsk_advance_rx_queue(sk);
1230 } 1371 }
1231exit: 1372exit:
1232 release_sock(sk); 1373 release_sock(sk);
@@ -1250,7 +1391,6 @@ static int tipc_recv_stream(struct kiocb *iocb, struct socket *sock,
1250{ 1391{
1251 struct sock *sk = sock->sk; 1392 struct sock *sk = sock->sk;
1252 struct tipc_sock *tsk = tipc_sk(sk); 1393 struct tipc_sock *tsk = tipc_sk(sk);
1253 struct tipc_port *port = &tsk->port;
1254 struct sk_buff *buf; 1394 struct sk_buff *buf;
1255 struct tipc_msg *msg; 1395 struct tipc_msg *msg;
1256 long timeo; 1396 long timeo;
@@ -1288,14 +1428,14 @@ restart:
1288 1428
1289 /* Discard an empty non-errored message & try again */ 1429 /* Discard an empty non-errored message & try again */
1290 if ((!sz) && (!err)) { 1430 if ((!sz) && (!err)) {
1291 advance_rx_queue(sk); 1431 tsk_advance_rx_queue(sk);
1292 goto restart; 1432 goto restart;
1293 } 1433 }
1294 1434
1295 /* Optionally capture sender's address & ancillary data of first msg */ 1435 /* Optionally capture sender's address & ancillary data of first msg */
1296 if (sz_copied == 0) { 1436 if (sz_copied == 0) {
1297 set_orig_addr(m, msg); 1437 set_orig_addr(m, msg);
1298 res = anc_data_recv(m, msg, port); 1438 res = tipc_sk_anc_data_recv(m, msg, tsk);
1299 if (res) 1439 if (res)
1300 goto exit; 1440 goto exit;
1301 } 1441 }
@@ -1308,8 +1448,8 @@ restart:
1308 needed = (buf_len - sz_copied); 1448 needed = (buf_len - sz_copied);
1309 sz_to_copy = (sz <= needed) ? sz : needed; 1449 sz_to_copy = (sz <= needed) ? sz : needed;
1310 1450
1311 res = skb_copy_datagram_iovec(buf, msg_hdr_sz(msg) + offset, 1451 res = skb_copy_datagram_msg(buf, msg_hdr_sz(msg) + offset,
1312 m->msg_iov, sz_to_copy); 1452 m, sz_to_copy);
1313 if (res) 1453 if (res)
1314 goto exit; 1454 goto exit;
1315 1455
@@ -1334,10 +1474,10 @@ restart:
1334 /* Consume received message (optional) */ 1474 /* Consume received message (optional) */
1335 if (likely(!(flags & MSG_PEEK))) { 1475 if (likely(!(flags & MSG_PEEK))) {
1336 if (unlikely(++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) { 1476 if (unlikely(++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
1337 tipc_acknowledge(port->ref, tsk->rcv_unacked); 1477 tipc_sk_send_ack(tsk, tsk->rcv_unacked);
1338 tsk->rcv_unacked = 0; 1478 tsk->rcv_unacked = 0;
1339 } 1479 }
1340 advance_rx_queue(sk); 1480 tsk_advance_rx_queue(sk);
1341 } 1481 }
1342 1482
1343 /* Loop around if more data is required */ 1483 /* Loop around if more data is required */
@@ -1391,17 +1531,14 @@ static void tipc_data_ready(struct sock *sk)
1391 * @tsk: TIPC socket 1531 * @tsk: TIPC socket
1392 * @msg: message 1532 * @msg: message
1393 * 1533 *
1394 * Returns 0 (TIPC_OK) if everyting ok, -TIPC_ERR_NO_PORT otherwise 1534 * Returns 0 (TIPC_OK) if everything ok, -TIPC_ERR_NO_PORT otherwise
1395 */ 1535 */
1396static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf) 1536static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
1397{ 1537{
1398 struct sock *sk = &tsk->sk; 1538 struct sock *sk = &tsk->sk;
1399 struct tipc_port *port = &tsk->port;
1400 struct socket *sock = sk->sk_socket; 1539 struct socket *sock = sk->sk_socket;
1401 struct tipc_msg *msg = buf_msg(*buf); 1540 struct tipc_msg *msg = buf_msg(*buf);
1402
1403 int retval = -TIPC_ERR_NO_PORT; 1541 int retval = -TIPC_ERR_NO_PORT;
1404 int res;
1405 1542
1406 if (msg_mcast(msg)) 1543 if (msg_mcast(msg))
1407 return retval; 1544 return retval;
@@ -1409,16 +1546,23 @@ static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
1409 switch ((int)sock->state) { 1546 switch ((int)sock->state) {
1410 case SS_CONNECTED: 1547 case SS_CONNECTED:
1411 /* Accept only connection-based messages sent by peer */ 1548 /* Accept only connection-based messages sent by peer */
1412 if (msg_connected(msg) && tipc_port_peer_msg(port, msg)) { 1549 if (tsk_peer_msg(tsk, msg)) {
1413 if (unlikely(msg_errcode(msg))) { 1550 if (unlikely(msg_errcode(msg))) {
1414 sock->state = SS_DISCONNECTING; 1551 sock->state = SS_DISCONNECTING;
1415 __tipc_port_disconnect(port); 1552 tsk->connected = 0;
 1553 /* let timer expire on its own */
1554 tipc_node_remove_conn(tsk_peer_node(tsk),
1555 tsk->ref);
1416 } 1556 }
1417 retval = TIPC_OK; 1557 retval = TIPC_OK;
1418 } 1558 }
1419 break; 1559 break;
1420 case SS_CONNECTING: 1560 case SS_CONNECTING:
1421 /* Accept only ACK or NACK message */ 1561 /* Accept only ACK or NACK message */
1562
1563 if (unlikely(!msg_connected(msg)))
1564 break;
1565
1422 if (unlikely(msg_errcode(msg))) { 1566 if (unlikely(msg_errcode(msg))) {
1423 sock->state = SS_DISCONNECTING; 1567 sock->state = SS_DISCONNECTING;
1424 sk->sk_err = ECONNREFUSED; 1568 sk->sk_err = ECONNREFUSED;
@@ -1426,17 +1570,17 @@ static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
1426 break; 1570 break;
1427 } 1571 }
1428 1572
1429 if (unlikely(!msg_connected(msg))) 1573 if (unlikely(msg_importance(msg) > TIPC_CRITICAL_IMPORTANCE)) {
1430 break;
1431
1432 res = auto_connect(tsk, msg);
1433 if (res) {
1434 sock->state = SS_DISCONNECTING; 1574 sock->state = SS_DISCONNECTING;
1435 sk->sk_err = -res; 1575 sk->sk_err = EINVAL;
1436 retval = TIPC_OK; 1576 retval = TIPC_OK;
1437 break; 1577 break;
1438 } 1578 }
1439 1579
1580 tipc_sk_finish_conn(tsk, msg_origport(msg), msg_orignode(msg));
1581 msg_set_importance(&tsk->phdr, msg_importance(msg));
1582 sock->state = SS_CONNECTED;
1583
1440 /* If an incoming message is an 'ACK-', it should be 1584 /* If an incoming message is an 'ACK-', it should be
1441 * discarded here because it doesn't contain useful 1585 * discarded here because it doesn't contain useful
1442 * data. In addition, we should try to wake up 1586 * data. In addition, we should try to wake up
@@ -1518,6 +1662,13 @@ static int filter_rcv(struct sock *sk, struct sk_buff *buf)
1518 if (unlikely(msg_user(msg) == CONN_MANAGER)) 1662 if (unlikely(msg_user(msg) == CONN_MANAGER))
1519 return tipc_sk_proto_rcv(tsk, &onode, buf); 1663 return tipc_sk_proto_rcv(tsk, &onode, buf);
1520 1664
1665 if (unlikely(msg_user(msg) == SOCK_WAKEUP)) {
1666 kfree_skb(buf);
1667 tsk->link_cong = 0;
1668 sk->sk_write_space(sk);
1669 return TIPC_OK;
1670 }
1671
1521 /* Reject message if it is wrong sort of message for socket */ 1672 /* Reject message if it is wrong sort of message for socket */
1522 if (msg_type(msg) > TIPC_DIRECT_MSG) 1673 if (msg_type(msg) > TIPC_DIRECT_MSG)
1523 return -TIPC_ERR_NO_PORT; 1674 return -TIPC_ERR_NO_PORT;
@@ -1547,20 +1698,20 @@ static int filter_rcv(struct sock *sk, struct sk_buff *buf)
1547/** 1698/**
1548 * tipc_backlog_rcv - handle incoming message from backlog queue 1699 * tipc_backlog_rcv - handle incoming message from backlog queue
1549 * @sk: socket 1700 * @sk: socket
1550 * @buf: message 1701 * @skb: message
1551 * 1702 *
1552 * Caller must hold socket lock, but not port lock. 1703 * Caller must hold socket lock, but not port lock.
1553 * 1704 *
1554 * Returns 0 1705 * Returns 0
1555 */ 1706 */
1556static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf) 1707static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
1557{ 1708{
1558 int rc; 1709 int rc;
1559 u32 onode; 1710 u32 onode;
1560 struct tipc_sock *tsk = tipc_sk(sk); 1711 struct tipc_sock *tsk = tipc_sk(sk);
1561 uint truesize = buf->truesize; 1712 uint truesize = skb->truesize;
1562 1713
1563 rc = filter_rcv(sk, buf); 1714 rc = filter_rcv(sk, skb);
1564 1715
1565 if (likely(!rc)) { 1716 if (likely(!rc)) {
1566 if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT) 1717 if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT)
@@ -1568,62 +1719,58 @@ static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf)
1568 return 0; 1719 return 0;
1569 } 1720 }
1570 1721
1571 if ((rc < 0) && !tipc_msg_reverse(buf, &onode, -rc)) 1722 if ((rc < 0) && !tipc_msg_reverse(skb, &onode, -rc))
1572 return 0; 1723 return 0;
1573 1724
1574 tipc_link_xmit(buf, onode, 0); 1725 tipc_link_xmit_skb(skb, onode, 0);
1575 1726
1576 return 0; 1727 return 0;
1577} 1728}
1578 1729
1579/** 1730/**
1580 * tipc_sk_rcv - handle incoming message 1731 * tipc_sk_rcv - handle incoming message
1581 * @buf: buffer containing arriving message 1732 * @skb: buffer containing arriving message
1582 * Consumes buffer 1733 * Consumes buffer
1583 * Returns 0 if success, or errno: -EHOSTUNREACH 1734 * Returns 0 if success, or errno: -EHOSTUNREACH
1584 */ 1735 */
1585int tipc_sk_rcv(struct sk_buff *buf) 1736int tipc_sk_rcv(struct sk_buff *skb)
1586{ 1737{
1587 struct tipc_sock *tsk; 1738 struct tipc_sock *tsk;
1588 struct tipc_port *port;
1589 struct sock *sk; 1739 struct sock *sk;
1590 u32 dport = msg_destport(buf_msg(buf)); 1740 u32 dport = msg_destport(buf_msg(skb));
1591 int rc = TIPC_OK; 1741 int rc = TIPC_OK;
1592 uint limit; 1742 uint limit;
1593 u32 dnode; 1743 u32 dnode;
1594 1744
1595 /* Validate destination and message */ 1745 /* Validate destination and message */
1596 port = tipc_port_lock(dport); 1746 tsk = tipc_sk_get(dport);
1597 if (unlikely(!port)) { 1747 if (unlikely(!tsk)) {
1598 rc = tipc_msg_eval(buf, &dnode); 1748 rc = tipc_msg_eval(skb, &dnode);
1599 goto exit; 1749 goto exit;
1600 } 1750 }
1601
1602 tsk = tipc_port_to_sock(port);
1603 sk = &tsk->sk; 1751 sk = &tsk->sk;
1604 1752
1605 /* Queue message */ 1753 /* Queue message */
1606 bh_lock_sock(sk); 1754 spin_lock_bh(&sk->sk_lock.slock);
1607 1755
1608 if (!sock_owned_by_user(sk)) { 1756 if (!sock_owned_by_user(sk)) {
1609 rc = filter_rcv(sk, buf); 1757 rc = filter_rcv(sk, skb);
1610 } else { 1758 } else {
1611 if (sk->sk_backlog.len == 0) 1759 if (sk->sk_backlog.len == 0)
1612 atomic_set(&tsk->dupl_rcvcnt, 0); 1760 atomic_set(&tsk->dupl_rcvcnt, 0);
1613 limit = rcvbuf_limit(sk, buf) + atomic_read(&tsk->dupl_rcvcnt); 1761 limit = rcvbuf_limit(sk, skb) + atomic_read(&tsk->dupl_rcvcnt);
1614 if (sk_add_backlog(sk, buf, limit)) 1762 if (sk_add_backlog(sk, skb, limit))
1615 rc = -TIPC_ERR_OVERLOAD; 1763 rc = -TIPC_ERR_OVERLOAD;
1616 } 1764 }
1617 bh_unlock_sock(sk); 1765 spin_unlock_bh(&sk->sk_lock.slock);
1618 tipc_port_unlock(port); 1766 tipc_sk_put(tsk);
1619
1620 if (likely(!rc)) 1767 if (likely(!rc))
1621 return 0; 1768 return 0;
1622exit: 1769exit:
1623 if ((rc < 0) && !tipc_msg_reverse(buf, &dnode, -rc)) 1770 if ((rc < 0) && !tipc_msg_reverse(skb, &dnode, -rc))
1624 return -EHOSTUNREACH; 1771 return -EHOSTUNREACH;
1625 1772
1626 tipc_link_xmit(buf, dnode, 0); 1773 tipc_link_xmit_skb(skb, dnode, 0);
1627 return (rc < 0) ? -EHOSTUNREACH : 0; 1774 return (rc < 0) ? -EHOSTUNREACH : 0;
1628} 1775}
1629 1776
@@ -1803,10 +1950,8 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
1803{ 1950{
1804 struct sock *new_sk, *sk = sock->sk; 1951 struct sock *new_sk, *sk = sock->sk;
1805 struct sk_buff *buf; 1952 struct sk_buff *buf;
1806 struct tipc_port *new_port; 1953 struct tipc_sock *new_tsock;
1807 struct tipc_msg *msg; 1954 struct tipc_msg *msg;
1808 struct tipc_portid peer;
1809 u32 new_ref;
1810 long timeo; 1955 long timeo;
1811 int res; 1956 int res;
1812 1957
@@ -1828,8 +1973,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
1828 goto exit; 1973 goto exit;
1829 1974
1830 new_sk = new_sock->sk; 1975 new_sk = new_sock->sk;
1831 new_port = &tipc_sk(new_sk)->port; 1976 new_tsock = tipc_sk(new_sk);
1832 new_ref = new_port->ref;
1833 msg = buf_msg(buf); 1977 msg = buf_msg(buf);
1834 1978
1835 /* we lock on new_sk; but lockdep sees the lock on sk */ 1979 /* we lock on new_sk; but lockdep sees the lock on sk */
@@ -1839,18 +1983,16 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
1839 * Reject any stray messages received by new socket 1983 * Reject any stray messages received by new socket
1840 * before the socket lock was taken (very, very unlikely) 1984 * before the socket lock was taken (very, very unlikely)
1841 */ 1985 */
1842 reject_rx_queue(new_sk); 1986 tsk_rej_rx_queue(new_sk);
1843 1987
 1844 /* Connect new socket to its peer */ 1988 /* Connect new socket to its peer */
1845 peer.ref = msg_origport(msg); 1989 tipc_sk_finish_conn(new_tsock, msg_origport(msg), msg_orignode(msg));
1846 peer.node = msg_orignode(msg);
1847 tipc_port_connect(new_ref, &peer);
1848 new_sock->state = SS_CONNECTED; 1990 new_sock->state = SS_CONNECTED;
1849 1991
1850 tipc_port_set_importance(new_port, msg_importance(msg)); 1992 tsk_set_importance(new_tsock, msg_importance(msg));
1851 if (msg_named(msg)) { 1993 if (msg_named(msg)) {
1852 new_port->conn_type = msg_nametype(msg); 1994 new_tsock->conn_type = msg_nametype(msg);
1853 new_port->conn_instance = msg_nameinst(msg); 1995 new_tsock->conn_instance = msg_nameinst(msg);
1854 } 1996 }
1855 1997
1856 /* 1998 /*
@@ -1860,7 +2002,7 @@ static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags)
1860 if (!msg_data_sz(msg)) { 2002 if (!msg_data_sz(msg)) {
1861 struct msghdr m = {NULL,}; 2003 struct msghdr m = {NULL,};
1862 2004
1863 advance_rx_queue(sk); 2005 tsk_advance_rx_queue(sk);
1864 tipc_send_packet(NULL, new_sock, &m, 0); 2006 tipc_send_packet(NULL, new_sock, &m, 0);
1865 } else { 2007 } else {
1866 __skb_dequeue(&sk->sk_receive_queue); 2008 __skb_dequeue(&sk->sk_receive_queue);
@@ -1886,9 +2028,8 @@ static int tipc_shutdown(struct socket *sock, int how)
1886{ 2028{
1887 struct sock *sk = sock->sk; 2029 struct sock *sk = sock->sk;
1888 struct tipc_sock *tsk = tipc_sk(sk); 2030 struct tipc_sock *tsk = tipc_sk(sk);
1889 struct tipc_port *port = &tsk->port; 2031 struct sk_buff *skb;
1890 struct sk_buff *buf; 2032 u32 dnode;
1891 u32 peer;
1892 int res; 2033 int res;
1893 2034
1894 if (how != SHUT_RDWR) 2035 if (how != SHUT_RDWR)
@@ -1902,21 +2043,27 @@ static int tipc_shutdown(struct socket *sock, int how)
1902 2043
1903restart: 2044restart:
1904 /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */ 2045 /* Disconnect and send a 'FIN+' or 'FIN-' message to peer */
1905 buf = __skb_dequeue(&sk->sk_receive_queue); 2046 skb = __skb_dequeue(&sk->sk_receive_queue);
1906 if (buf) { 2047 if (skb) {
1907 if (TIPC_SKB_CB(buf)->handle != NULL) { 2048 if (TIPC_SKB_CB(skb)->handle != NULL) {
1908 kfree_skb(buf); 2049 kfree_skb(skb);
1909 goto restart; 2050 goto restart;
1910 } 2051 }
1911 tipc_port_disconnect(port->ref); 2052 if (tipc_msg_reverse(skb, &dnode, TIPC_CONN_SHUTDOWN))
1912 if (tipc_msg_reverse(buf, &peer, TIPC_CONN_SHUTDOWN)) 2053 tipc_link_xmit_skb(skb, dnode, tsk->ref);
1913 tipc_link_xmit(buf, peer, 0); 2054 tipc_node_remove_conn(dnode, tsk->ref);
1914 } else { 2055 } else {
1915 tipc_port_shutdown(port->ref); 2056 dnode = tsk_peer_node(tsk);
2057 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
2058 TIPC_CONN_MSG, SHORT_H_SIZE,
2059 0, dnode, tipc_own_addr,
2060 tsk_peer_port(tsk),
2061 tsk->ref, TIPC_CONN_SHUTDOWN);
2062 tipc_link_xmit_skb(skb, dnode, tsk->ref);
1916 } 2063 }
1917 2064 tsk->connected = 0;
1918 sock->state = SS_DISCONNECTING; 2065 sock->state = SS_DISCONNECTING;
1919 2066 tipc_node_remove_conn(dnode, tsk->ref);
1920 /* fall through */ 2067 /* fall through */
1921 2068
1922 case SS_DISCONNECTING: 2069 case SS_DISCONNECTING:
@@ -1937,6 +2084,432 @@ restart:
1937 return res; 2084 return res;
1938} 2085}
1939 2086
2087static void tipc_sk_timeout(unsigned long ref)
2088{
2089 struct tipc_sock *tsk;
2090 struct sock *sk;
2091 struct sk_buff *skb = NULL;
2092 u32 peer_port, peer_node;
2093
2094 tsk = tipc_sk_get(ref);
2095 if (!tsk)
2096 return;
2097
2098 sk = &tsk->sk;
2099 bh_lock_sock(sk);
2100 if (!tsk->connected) {
2101 bh_unlock_sock(sk);
2102 goto exit;
2103 }
2104 peer_port = tsk_peer_port(tsk);
2105 peer_node = tsk_peer_node(tsk);
2106
2107 if (tsk->probing_state == TIPC_CONN_PROBING) {
2108 /* Previous probe not answered -> self abort */
2109 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
2110 SHORT_H_SIZE, 0, tipc_own_addr,
2111 peer_node, ref, peer_port,
2112 TIPC_ERR_NO_PORT);
2113 } else {
2114 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE,
2115 0, peer_node, tipc_own_addr,
2116 peer_port, ref, TIPC_OK);
2117 tsk->probing_state = TIPC_CONN_PROBING;
2118 k_start_timer(&tsk->timer, tsk->probing_interval);
2119 }
2120 bh_unlock_sock(sk);
2121 if (skb)
2122 tipc_link_xmit_skb(skb, peer_node, ref);
2123exit:
2124 tipc_sk_put(tsk);
2125}
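The timer thus implements a two-state probe machine: from TIPC_CONN_OK it emits a CONN_PROBE, enters TIPC_CONN_PROBING and re-arms; if it fires again while still in TIPC_CONN_PROBING (the probe answer, handled in tipc_sk_proto_rcv() above, would have reset the state to TIPC_CONN_OK), it aborts the connection by delivering a TIPC_ERR_NO_PORT message to its own socket ("self abort").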
2126
2127static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
2128 struct tipc_name_seq const *seq)
2129{
2130 struct publication *publ;
2131 u32 key;
2132
2133 if (tsk->connected)
2134 return -EINVAL;
2135 key = tsk->ref + tsk->pub_count + 1;
2136 if (key == tsk->ref)
2137 return -EADDRINUSE;
2138
2139 publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
2140 scope, tsk->ref, key);
2141 if (unlikely(!publ))
2142 return -EINVAL;
2143
2144 list_add(&publ->pport_list, &tsk->publications);
2145 tsk->pub_count++;
2146 tsk->published = 1;
2147 return 0;
2148}
2149
2150static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
2151 struct tipc_name_seq const *seq)
2152{
2153 struct publication *publ;
2154 struct publication *safe;
2155 int rc = -EINVAL;
2156
2157 list_for_each_entry_safe(publ, safe, &tsk->publications, pport_list) {
2158 if (seq) {
2159 if (publ->scope != scope)
2160 continue;
2161 if (publ->type != seq->type)
2162 continue;
2163 if (publ->lower != seq->lower)
2164 continue;
2165 if (publ->upper != seq->upper)
2166 break;
2167 tipc_nametbl_withdraw(publ->type, publ->lower,
2168 publ->ref, publ->key);
2169 rc = 0;
2170 break;
2171 }
2172 tipc_nametbl_withdraw(publ->type, publ->lower,
2173 publ->ref, publ->key);
2174 rc = 0;
2175 }
2176 if (list_empty(&tsk->publications))
2177 tsk->published = 0;
2178 return rc;
2179}
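Note the two call patterns: with seq == NULL (as in tipc_release() and a zero-length bind()) every publication on the socket is withdrawn, whereas a non-NULL seq withdraws at most the single publication whose scope, type and range match exactly.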
2180
2181static int tipc_sk_show(struct tipc_sock *tsk, char *buf,
2182 int len, int full_id)
2183{
2184 struct publication *publ;
2185 int ret;
2186
2187 if (full_id)
2188 ret = tipc_snprintf(buf, len, "<%u.%u.%u:%u>:",
2189 tipc_zone(tipc_own_addr),
2190 tipc_cluster(tipc_own_addr),
2191 tipc_node(tipc_own_addr), tsk->ref);
2192 else
2193 ret = tipc_snprintf(buf, len, "%-10u:", tsk->ref);
2194
2195 if (tsk->connected) {
2196 u32 dport = tsk_peer_port(tsk);
2197 u32 destnode = tsk_peer_node(tsk);
2198
2199 ret += tipc_snprintf(buf + ret, len - ret,
2200 " connected to <%u.%u.%u:%u>",
2201 tipc_zone(destnode),
2202 tipc_cluster(destnode),
2203 tipc_node(destnode), dport);
2204 if (tsk->conn_type != 0)
2205 ret += tipc_snprintf(buf + ret, len - ret,
2206 " via {%u,%u}", tsk->conn_type,
2207 tsk->conn_instance);
2208 } else if (tsk->published) {
2209 ret += tipc_snprintf(buf + ret, len - ret, " bound to");
2210 list_for_each_entry(publ, &tsk->publications, pport_list) {
2211 if (publ->lower == publ->upper)
2212 ret += tipc_snprintf(buf + ret, len - ret,
2213 " {%u,%u}", publ->type,
2214 publ->lower);
2215 else
2216 ret += tipc_snprintf(buf + ret, len - ret,
2217 " {%u,%u,%u}", publ->type,
2218 publ->lower, publ->upper);
2219 }
2220 }
2221 ret += tipc_snprintf(buf + ret, len - ret, "\n");
2222 return ret;
2223}
2224
2225struct sk_buff *tipc_sk_socks_show(void)
2226{
2227 struct sk_buff *buf;
2228 struct tlv_desc *rep_tlv;
2229 char *pb;
2230 int pb_len;
2231 struct tipc_sock *tsk;
2232 int str_len = 0;
2233 u32 ref = 0;
2234
2235 buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
2236 if (!buf)
2237 return NULL;
2238 rep_tlv = (struct tlv_desc *)buf->data;
2239 pb = TLV_DATA(rep_tlv);
2240 pb_len = ULTRA_STRING_MAX_LEN;
2241
2242 tsk = tipc_sk_get_next(&ref);
2243 for (; tsk; tsk = tipc_sk_get_next(&ref)) {
2244 lock_sock(&tsk->sk);
2245 str_len += tipc_sk_show(tsk, pb + str_len,
2246 pb_len - str_len, 0);
2247 release_sock(&tsk->sk);
2248 tipc_sk_put(tsk);
2249 }
2250 str_len += 1; /* for "\0" */
2251 skb_put(buf, TLV_SPACE(str_len));
2252 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
2253
2254 return buf;
2255}
2256
2257/* tipc_sk_reinit: set non-zero address in all existing sockets
2258 * when we go from standalone to network mode.
2259 */
2260void tipc_sk_reinit(void)
2261{
2262 struct tipc_msg *msg;
2263 u32 ref = 0;
2264 struct tipc_sock *tsk = tipc_sk_get_next(&ref);
2265
2266 for (; tsk; tsk = tipc_sk_get_next(&ref)) {
2267 lock_sock(&tsk->sk);
2268 msg = &tsk->phdr;
2269 msg_set_prevnode(msg, tipc_own_addr);
2270 msg_set_orignode(msg, tipc_own_addr);
2271 release_sock(&tsk->sk);
2272 tipc_sk_put(tsk);
2273 }
2274}
2275
2276/**
2277 * struct reference - TIPC socket reference entry
2278 * @tsk: pointer to socket associated with reference entry
2279 * @ref: reference value for socket (combines instance & array index info)
2280 */
2281struct reference {
2282 struct tipc_sock *tsk;
2283 u32 ref;
2284};
2285
2286/**
2287 * struct ref_table - table of TIPC socket reference entries
2288 * @entries: pointer to array of reference entries
2289 * @capacity: array index of first unusable entry
2290 * @init_point: array index of first uninitialized entry
2291 * @first_free: array index of first unused socket reference entry
2292 * @last_free: array index of last unused socket reference entry
2293 * @index_mask: bitmask for array index portion of reference values
2294 * @start_mask: initial value for instance value portion of reference values
2295 */
2296struct ref_table {
2297 struct reference *entries;
2298 u32 capacity;
2299 u32 init_point;
2300 u32 first_free;
2301 u32 last_free;
2302 u32 index_mask;
2303 u32 start_mask;
2304};
2305
2306/* Socket reference table consists of 2**N entries.
2307 *
2308 * State Socket ptr Reference
2309 * ----- ---------- ---------
2310 * In use non-NULL XXXX|own index
2311 * (XXXX changes each time entry is acquired)
2312 * Free NULL YYYY|next free index
2313 * (YYYY is one more than last used XXXX)
2314 * Uninitialized NULL 0
2315 *
2316 * Entry 0 is not used; this allows index 0 to denote the end of the free list.
2317 *
2318 * Note that a reference value of 0 does not necessarily indicate that an
2319 * entry is uninitialized, since the last entry in the free list could also
2320 * have a reference value of 0 (although this is unlikely).
2321 */
2322
2323static struct ref_table tipc_ref_table;
2324
2325static DEFINE_RWLOCK(ref_table_lock);
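
[Editor's note] The state table above relies on every reference value being the concatenation of an instance counter in the upper bits and an array index in the low log2(table-size) bits. A stand-alone sketch of that arithmetic, assuming a hypothetical 1024-entry table (so index_mask == 0x3ff):

	#include <stdio.h>

	int main(void)
	{
		unsigned int index_mask = 1024 - 1;	   /* 2**10 entries */
		unsigned int ref = (0x12345u << 10) | 37;  /* instance | index */

		printf("index:    %u\n", ref & index_mask);	/* -> 37 */
		printf("instance: 0x%x\n", ref & ~index_mask);	/* upper bits */
		return 0;
	}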
2326
2327/**
2328 * tipc_sk_ref_table_init - create reference table for sockets
2329 */
2330int tipc_sk_ref_table_init(u32 req_sz, u32 start)
2331{
2332 struct reference *table;
2333 u32 actual_sz;
2334
2335 /* account for unused entry, then round up size to a power of 2 */
2336
2337 req_sz++;
2338 for (actual_sz = 16; actual_sz < req_sz; actual_sz <<= 1) {
2339 /* do nothing */
2340 }
2341
2342 /* allocate table & mark all entries as uninitialized */
2343 table = vzalloc(actual_sz * sizeof(struct reference));
2344 if (table == NULL)
2345 return -ENOMEM;
2346
2347 tipc_ref_table.entries = table;
2348 tipc_ref_table.capacity = req_sz;
2349 tipc_ref_table.init_point = 1;
2350 tipc_ref_table.first_free = 0;
2351 tipc_ref_table.last_free = 0;
2352 tipc_ref_table.index_mask = actual_sz - 1;
2353 tipc_ref_table.start_mask = start & ~tipc_ref_table.index_mask;
2354
2355 return 0;
2356}
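
[Editor's note] The sizing loop above first reserves entry 0 (req_sz++) and then rounds up to a power of two with a floor of 16 entries, which is what lets index_mask be computed as actual_sz - 1. For example, a request for 100 usable entries becomes req_sz = 101 and actual_sz = 128, giving index_mask = 0x7f. A user-space restatement of the same loop:

	/* Round a requested table size up to a power of two, minimum 16;
	 * mirrors the sizing loop in tipc_sk_ref_table_init().
	 */
	static unsigned int ref_table_size(unsigned int req_sz)
	{
		unsigned int actual_sz;

		for (actual_sz = 16; actual_sz < req_sz; actual_sz <<= 1)
			;
		return actual_sz;	/* ref_table_size(101) == 128 */
	}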
2357
2358/**
2359 * tipc_sk_ref_table_stop - destroy reference table for sockets
2360 */
2361void tipc_sk_ref_table_stop(void)
2362{
2363 if (!tipc_ref_table.entries)
2364 return;
2365 vfree(tipc_ref_table.entries);
2366 tipc_ref_table.entries = NULL;
2367}
2368
2369/* tipc_sk_ref_acquire - create reference to a socket
2370 *
2371 * Register a socket pointer in the reference table.
2372 * Returns a unique reference value that is used from then on to retrieve the
2373 * socket pointer, or to determine if the socket has been deregistered.
2374 */
2375u32 tipc_sk_ref_acquire(struct tipc_sock *tsk)
2376{
2377 u32 index;
2378 u32 index_mask;
2379 u32 next_plus_upper;
2380 u32 ref = 0;
2381 struct reference *entry;
2382
2383 if (unlikely(!tsk)) {
2384 pr_err("Attempt to acquire ref. to non-existent obj\n");
2385 return 0;
2386 }
2387 if (unlikely(!tipc_ref_table.entries)) {
2388 pr_err("Ref. table not found in acquisition attempt\n");
2389 return 0;
2390 }
2391
2392 /* Take a free entry, if available; otherwise initialize a new one */
2393 write_lock_bh(&ref_table_lock);
2394 index = tipc_ref_table.first_free;
2395 entry = &tipc_ref_table.entries[index];
2396
2397 if (likely(index)) {
2398 index = tipc_ref_table.first_free;
2399 entry = &tipc_ref_table.entries[index];
2400 index_mask = tipc_ref_table.index_mask;
2401 next_plus_upper = entry->ref;
2402 tipc_ref_table.first_free = next_plus_upper & index_mask;
2403 ref = (next_plus_upper & ~index_mask) + index;
2404 entry->tsk = tsk;
2405 } else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
2406 index = tipc_ref_table.init_point++;
2407 entry = &tipc_ref_table.entries[index];
2408 ref = tipc_ref_table.start_mask + index;
2409 }
2410
2411 if (ref) {
2412 entry->ref = ref;
2413 entry->tsk = tsk;
2414 }
2415 write_unlock_bh(&ref_table_lock);
2416 return ref;
2417}
2418
2419/* tipc_sk_ref_discard - invalidate reference to a socket
2420 *
2421 * Disallow future references to a socket and free up the entry for re-use.
2422 */
2423void tipc_sk_ref_discard(u32 ref)
2424{
2425 struct reference *entry;
2426 u32 index;
2427 u32 index_mask;
2428
2429 if (unlikely(!tipc_ref_table.entries)) {
2430 pr_err("Ref. table not found during discard attempt\n");
2431 return;
2432 }
2433
2434 index_mask = tipc_ref_table.index_mask;
2435 index = ref & index_mask;
2436 entry = &tipc_ref_table.entries[index];
2437
2438 write_lock_bh(&ref_table_lock);
2439
2440 if (unlikely(!entry->tsk)) {
2441 pr_err("Attempt to discard ref. to non-existent socket\n");
2442 goto exit;
2443 }
2444 if (unlikely(entry->ref != ref)) {
2445 pr_err("Attempt to discard non-existent reference\n");
2446 goto exit;
2447 }
2448
2449 /* Mark entry as unused; increment instance part of entry's
2450 * reference to invalidate any subsequent references
2451 */
2452
2453 entry->tsk = NULL;
2454 entry->ref = (ref & ~index_mask) + (index_mask + 1);
2455
2456 /* Append entry to free entry list */
2457 if (unlikely(tipc_ref_table.first_free == 0))
2458 tipc_ref_table.first_free = index;
2459 else
2460 tipc_ref_table.entries[tipc_ref_table.last_free].ref |= index;
2461 tipc_ref_table.last_free = index;
2462exit:
2463 write_unlock_bh(&ref_table_lock);
2464}
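
[Editor's note] Bumping the instance part by index_mask + 1 on discard is what makes stale references detectable: a reference handed out before the discard keeps the old instance bits, so the full-value comparison fails even after the slot has been recycled. Restating the validity test used by tipc_sk_get() on its own:

	/* A reference is current only if the slot is occupied and the stored
	 * value (instance | index) matches the caller's reference bit for
	 * bit; a stale reference differs in the instance bits.
	 */
	static int ref_is_current(const struct reference *entry, u32 ref)
	{
		return entry->tsk && entry->ref == ref;
	}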
2465
2466/* tipc_sk_get - find referenced socket and return pointer to it
2467 */
2468struct tipc_sock *tipc_sk_get(u32 ref)
2469{
2470 struct reference *entry;
2471 struct tipc_sock *tsk;
2472
2473 if (unlikely(!tipc_ref_table.entries))
2474 return NULL;
2475 read_lock_bh(&ref_table_lock);
2476 entry = &tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
2477 tsk = entry->tsk;
2478 if (likely(tsk && (entry->ref == ref)))
2479 sock_hold(&tsk->sk);
2480 else
2481 tsk = NULL;
2482 read_unlock_bh(&ref_table_lock);
2483 return tsk;
2484}
2485
2486/* tipc_sk_get_next - find next registered socket and hold a reference to it
2487*/
2488struct tipc_sock *tipc_sk_get_next(u32 *ref)
2489{
2490 struct reference *entry;
2491 struct tipc_sock *tsk = NULL;
2492 uint index = *ref & tipc_ref_table.index_mask;
2493
2494 read_lock_bh(&ref_table_lock);
2495 while (++index < tipc_ref_table.capacity) {
2496 entry = &tipc_ref_table.entries[index];
2497 if (!entry->tsk)
2498 continue;
2499 tsk = entry->tsk;
2500 sock_hold(&tsk->sk);
2501 *ref = entry->ref;
2502 break;
2503 }
2504 read_unlock_bh(&ref_table_lock);
2505 return tsk;
2506}
2507
2508static void tipc_sk_put(struct tipc_sock *tsk)
2509{
2510 sock_put(&tsk->sk);
2511}
2512
1940/** 2513/**
1941 * tipc_setsockopt - set socket option 2514 * tipc_setsockopt - set socket option
1942 * @sock: socket structure 2515 * @sock: socket structure
@@ -1955,7 +2528,6 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
1955{ 2528{
1956 struct sock *sk = sock->sk; 2529 struct sock *sk = sock->sk;
1957 struct tipc_sock *tsk = tipc_sk(sk); 2530 struct tipc_sock *tsk = tipc_sk(sk);
1958 struct tipc_port *port = &tsk->port;
1959 u32 value; 2531 u32 value;
1960 int res; 2532 int res;
1961 2533
@@ -1973,16 +2545,16 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
1973 2545
1974 switch (opt) { 2546 switch (opt) {
1975 case TIPC_IMPORTANCE: 2547 case TIPC_IMPORTANCE:
1976 res = tipc_port_set_importance(port, value); 2548 res = tsk_set_importance(tsk, value);
1977 break; 2549 break;
1978 case TIPC_SRC_DROPPABLE: 2550 case TIPC_SRC_DROPPABLE:
1979 if (sock->type != SOCK_STREAM) 2551 if (sock->type != SOCK_STREAM)
1980 tipc_port_set_unreliable(port, value); 2552 tsk_set_unreliable(tsk, value);
1981 else 2553 else
1982 res = -ENOPROTOOPT; 2554 res = -ENOPROTOOPT;
1983 break; 2555 break;
1984 case TIPC_DEST_DROPPABLE: 2556 case TIPC_DEST_DROPPABLE:
1985 tipc_port_set_unreturnable(port, value); 2557 tsk_set_unreturnable(tsk, value);
1986 break; 2558 break;
1987 case TIPC_CONN_TIMEOUT: 2559 case TIPC_CONN_TIMEOUT:
1988 tipc_sk(sk)->conn_timeout = value; 2560 tipc_sk(sk)->conn_timeout = value;
@@ -2015,7 +2587,6 @@ static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
2015{ 2587{
2016 struct sock *sk = sock->sk; 2588 struct sock *sk = sock->sk;
2017 struct tipc_sock *tsk = tipc_sk(sk); 2589 struct tipc_sock *tsk = tipc_sk(sk);
2018 struct tipc_port *port = &tsk->port;
2019 int len; 2590 int len;
2020 u32 value; 2591 u32 value;
2021 int res; 2592 int res;
@@ -2032,16 +2603,16 @@ static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
2032 2603
2033 switch (opt) { 2604 switch (opt) {
2034 case TIPC_IMPORTANCE: 2605 case TIPC_IMPORTANCE:
2035 value = tipc_port_importance(port); 2606 value = tsk_importance(tsk);
2036 break; 2607 break;
2037 case TIPC_SRC_DROPPABLE: 2608 case TIPC_SRC_DROPPABLE:
2038 value = tipc_port_unreliable(port); 2609 value = tsk_unreliable(tsk);
2039 break; 2610 break;
2040 case TIPC_DEST_DROPPABLE: 2611 case TIPC_DEST_DROPPABLE:
2041 value = tipc_port_unreturnable(port); 2612 value = tsk_unreturnable(tsk);
2042 break; 2613 break;
2043 case TIPC_CONN_TIMEOUT: 2614 case TIPC_CONN_TIMEOUT:
2044 value = tipc_sk(sk)->conn_timeout; 2615 value = tsk->conn_timeout;
2045 /* no need to set "res", since already 0 at this point */ 2616 /* no need to set "res", since already 0 at this point */
2046 break; 2617 break;
2047 case TIPC_NODE_RECVQ_DEPTH: 2618 case TIPC_NODE_RECVQ_DEPTH:
@@ -2077,7 +2648,7 @@ static int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg)
2077 case SIOCGETLINKNAME: 2648 case SIOCGETLINKNAME:
2078 if (copy_from_user(&lnr, argp, sizeof(lnr))) 2649 if (copy_from_user(&lnr, argp, sizeof(lnr)))
2079 return -EFAULT; 2650 return -EFAULT;
2080 if (!tipc_node_get_linkname(lnr.bearer_id, lnr.peer, 2651 if (!tipc_node_get_linkname(lnr.bearer_id & 0xffff, lnr.peer,
2081 lnr.linkname, TIPC_MAX_LINK_NAME)) { 2652 lnr.linkname, TIPC_MAX_LINK_NAME)) {
2082 if (copy_to_user(argp, &lnr, sizeof(lnr))) 2653 if (copy_to_user(argp, &lnr, sizeof(lnr)))
2083 return -EFAULT; 2654 return -EFAULT;
@@ -2206,3 +2777,233 @@ void tipc_socket_stop(void)
2206 sock_unregister(tipc_family_ops.family); 2777 sock_unregister(tipc_family_ops.family);
2207 proto_unregister(&tipc_proto); 2778 proto_unregister(&tipc_proto);
2208} 2779}
2780
2781/* Caller should hold socket lock for the passed tipc socket. */
2782static int __tipc_nl_add_sk_con(struct sk_buff *skb, struct tipc_sock *tsk)
2783{
2784 u32 peer_node;
2785 u32 peer_port;
2786 struct nlattr *nest;
2787
2788 peer_node = tsk_peer_node(tsk);
2789 peer_port = tsk_peer_port(tsk);
2790
2791 nest = nla_nest_start(skb, TIPC_NLA_SOCK_CON);
2792
2793 if (nla_put_u32(skb, TIPC_NLA_CON_NODE, peer_node))
2794 goto msg_full;
2795 if (nla_put_u32(skb, TIPC_NLA_CON_SOCK, peer_port))
2796 goto msg_full;
2797
2798 if (tsk->conn_type != 0) {
2799 if (nla_put_flag(skb, TIPC_NLA_CON_FLAG))
2800 goto msg_full;
2801 if (nla_put_u32(skb, TIPC_NLA_CON_TYPE, tsk->conn_type))
2802 goto msg_full;
2803 if (nla_put_u32(skb, TIPC_NLA_CON_INST, tsk->conn_instance))
2804 goto msg_full;
2805 }
2806 nla_nest_end(skb, nest);
2807
2808 return 0;
2809
2810msg_full:
2811 nla_nest_cancel(skb, nest);
2812
2813 return -EMSGSIZE;
2814}
2815
2816/* Caller should hold socket lock for the passed tipc socket. */
2817static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
2818 struct tipc_sock *tsk)
2819{
2820 int err;
2821 void *hdr;
2822 struct nlattr *attrs;
2823
2824 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
2825 &tipc_genl_v2_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
2826 if (!hdr)
2827 goto msg_cancel;
2828
2829 attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
2830 if (!attrs)
2831 goto genlmsg_cancel;
2832 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->ref))
2833 goto attr_msg_cancel;
2834 if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr))
2835 goto attr_msg_cancel;
2836
2837 if (tsk->connected) {
2838 err = __tipc_nl_add_sk_con(skb, tsk);
2839 if (err)
2840 goto attr_msg_cancel;
2841 } else if (!list_empty(&tsk->publications)) {
2842 if (nla_put_flag(skb, TIPC_NLA_SOCK_HAS_PUBL))
2843 goto attr_msg_cancel;
2844 }
2845 nla_nest_end(skb, attrs);
2846 genlmsg_end(skb, hdr);
2847
2848 return 0;
2849
2850attr_msg_cancel:
2851 nla_nest_cancel(skb, attrs);
2852genlmsg_cancel:
2853 genlmsg_cancel(skb, hdr);
2854msg_cancel:
2855 return -EMSGSIZE;
2856}
2857
2858int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
2859{
2860 int err;
2861 struct tipc_sock *tsk;
2862 u32 prev_ref = cb->args[0];
2863 u32 ref = prev_ref;
2864
2865 tsk = tipc_sk_get_next(&ref);
2866 for (; tsk; tsk = tipc_sk_get_next(&ref)) {
2867 lock_sock(&tsk->sk);
2868 err = __tipc_nl_add_sk(skb, cb, tsk);
2869 release_sock(&tsk->sk);
2870 tipc_sk_put(tsk);
2871 if (err)
2872 break;
2873
2874 prev_ref = ref;
2875 }
2876
2877 cb->args[0] = prev_ref;
2878
2879 return skb->len;
2880}
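
[Editor's note] tipc_nl_sk_dump() is the kernel half of a netlink dump: user space sends TIPC_NL_SOCK_GET with NLM_F_DUMP and receives one TIPC_NLA_SOCK nest per socket, the kernel resuming from cb->args[0] each time the skb fills. A hedged user-space sketch using libnl-3; the generic netlink family name ("TIPCv2") and version (1) are assumed from the TIPC uapi headers, and error handling is elided:

	#include <stdio.h>
	#include <netlink/netlink.h>
	#include <netlink/genl/genl.h>
	#include <netlink/genl/ctrl.h>
	#include <linux/tipc_netlink.h>

	static int show_sock(struct nl_msg *msg, void *arg)
	{
		struct nlattr *attrs[TIPC_NLA_MAX + 1];
		struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];

		genlmsg_parse(nlmsg_hdr(msg), 0, attrs, TIPC_NLA_MAX, NULL);
		if (!attrs[TIPC_NLA_SOCK])
			return NL_SKIP;
		nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
				 attrs[TIPC_NLA_SOCK], NULL);
		printf("socket ref %u\n",
		       nla_get_u32(sock[TIPC_NLA_SOCK_REF]));
		return NL_OK;
	}

	int dump_tipc_sockets(void)
	{
		struct nl_sock *sk = nl_socket_alloc();
		int family;

		genl_connect(sk);
		family = genl_ctrl_resolve(sk, "TIPCv2");
		nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM,
				    show_sock, NULL);
		genl_send_simple(sk, family, TIPC_NL_SOCK_GET, 1, NLM_F_DUMP);
		return nl_recvmsgs_default(sk);
	}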
2881
2882/* Caller should hold socket lock for the passed tipc socket. */
2883static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
2884 struct netlink_callback *cb,
2885 struct publication *publ)
2886{
2887 void *hdr;
2888 struct nlattr *attrs;
2889
2890 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
2891 &tipc_genl_v2_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
2892 if (!hdr)
2893 goto msg_cancel;
2894
2895 attrs = nla_nest_start(skb, TIPC_NLA_PUBL);
2896 if (!attrs)
2897 goto genlmsg_cancel;
2898
2899 if (nla_put_u32(skb, TIPC_NLA_PUBL_KEY, publ->key))
2900 goto attr_msg_cancel;
2901 if (nla_put_u32(skb, TIPC_NLA_PUBL_TYPE, publ->type))
2902 goto attr_msg_cancel;
2903 if (nla_put_u32(skb, TIPC_NLA_PUBL_LOWER, publ->lower))
2904 goto attr_msg_cancel;
2905 if (nla_put_u32(skb, TIPC_NLA_PUBL_UPPER, publ->upper))
2906 goto attr_msg_cancel;
2907
2908 nla_nest_end(skb, attrs);
2909 genlmsg_end(skb, hdr);
2910
2911 return 0;
2912
2913attr_msg_cancel:
2914 nla_nest_cancel(skb, attrs);
2915genlmsg_cancel:
2916 genlmsg_cancel(skb, hdr);
2917msg_cancel:
2918 return -EMSGSIZE;
2919}
2920
2921/* Caller should hold socket lock for the passed tipc socket. */
2922static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
2923 struct netlink_callback *cb,
2924 struct tipc_sock *tsk, u32 *last_publ)
2925{
2926 int err;
2927 struct publication *p;
2928
2929 if (*last_publ) {
2930 list_for_each_entry(p, &tsk->publications, pport_list) {
2931 if (p->key == *last_publ)
2932 break;
2933 }
2934 if (p->key != *last_publ) {
2935 /* We never set seq or call nl_dump_check_consistent(),
2936 * so setting prev_seq here will cause the consistency
2937 * check in the netlink callback handler to fail,
2938 * resulting in the last NLMSG_DONE message having
2939 * the NLM_F_DUMP_INTR flag set.
2940 */
2941 cb->prev_seq = 1;
2942 *last_publ = 0;
2943 return -EPIPE;
2944 }
2945 } else {
2946 p = list_first_entry(&tsk->publications, struct publication,
2947 pport_list);
2948 }
2949
2950 list_for_each_entry_from(p, &tsk->publications, pport_list) {
2951 err = __tipc_nl_add_sk_publ(skb, cb, p);
2952 if (err) {
2953 *last_publ = p->key;
2954 return err;
2955 }
2956 }
2957 *last_publ = 0;
2958
2959 return 0;
2960}
2961
2962int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
2963{
2964 int err;
2965 u32 tsk_ref = cb->args[0];
2966 u32 last_publ = cb->args[1];
2967 u32 done = cb->args[2];
2968 struct tipc_sock *tsk;
2969
2970 if (!tsk_ref) {
2971 struct nlattr **attrs;
2972 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
2973
2974 err = tipc_nlmsg_parse(cb->nlh, &attrs);
2975 if (err)
2976 return err;
2977
2978 err = nla_parse_nested(sock, TIPC_NLA_SOCK_MAX,
2979 attrs[TIPC_NLA_SOCK],
2980 tipc_nl_sock_policy);
2981 if (err)
2982 return err;
2983
2984 if (!sock[TIPC_NLA_SOCK_REF])
2985 return -EINVAL;
2986
2987 tsk_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
2988 }
2989
2990 if (done)
2991 return 0;
2992
2993 tsk = tipc_sk_get(tsk_ref);
2994 if (!tsk)
2995 return -EINVAL;
2996
2997 lock_sock(&tsk->sk);
2998 err = __tipc_nl_list_sk_publ(skb, cb, tsk, &last_publ);
2999 if (!err)
3000 done = 1;
3001 release_sock(&tsk->sk);
3002 tipc_sk_put(tsk);
3003
3004 cb->args[0] = tsk_ref;
3005 cb->args[1] = last_publ;
3006 cb->args[2] = done;
3007
3008 return skb->len;
3009}
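
[Editor's note] The three cb->args slots form the resume state for this dump: args[0] pins the socket reference parsed from the request on the first pass, args[1] remembers the last publication key written before the skb filled up, and args[2] flags completion so a final invocation emits only NLMSG_DONE. A dump of socket ref R interrupted after publication key K therefore re-enters with {R, K, 0} and continues from the entry after K.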
diff --git a/net/tipc/socket.h b/net/tipc/socket.h
index 43b75b3ceced..d34089387006 100644
--- a/net/tipc/socket.h
+++ b/net/tipc/socket.h
@@ -35,56 +35,20 @@
35#ifndef _TIPC_SOCK_H 35#ifndef _TIPC_SOCK_H
36#define _TIPC_SOCK_H 36#define _TIPC_SOCK_H
37 37
38#include "port.h"
39#include <net/sock.h> 38#include <net/sock.h>
39#include <net/genetlink.h>
40 40
41#define TIPC_CONN_OK 0 41#define TIPC_CONNACK_INTV 256
42#define TIPC_CONN_PROBING 1 42#define TIPC_FLOWCTRL_WIN (TIPC_CONNACK_INTV * 2)
43 43#define TIPC_CONN_OVERLOAD_LIMIT ((TIPC_FLOWCTRL_WIN * 2 + 1) * \
44/** 44 SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
45 * struct tipc_sock - TIPC socket structure
46 * @sk: socket - interacts with 'port' and with user via the socket API
47 * @port: port - interacts with 'sk' and with the rest of the TIPC stack
48 * @peer_name: the peer of the connection, if any
49 * @conn_timeout: the time we can wait for an unresponded setup request
50 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
51 * @link_cong: non-zero if owner must sleep because of link congestion
52 * @sent_unacked: # messages sent by socket, and not yet acked by peer
53 * @rcv_unacked: # messages read by user, but not yet acked back to peer
54 */
55
56struct tipc_sock {
57 struct sock sk;
58 struct tipc_port port;
59 unsigned int conn_timeout;
60 atomic_t dupl_rcvcnt;
61 int link_cong;
62 uint sent_unacked;
63 uint rcv_unacked;
64};
65
66static inline struct tipc_sock *tipc_sk(const struct sock *sk)
67{
68 return container_of(sk, struct tipc_sock, sk);
69}
70
71static inline struct tipc_sock *tipc_port_to_sock(const struct tipc_port *port)
72{
73 return container_of(port, struct tipc_sock, port);
74}
75
76static inline void tipc_sock_wakeup(struct tipc_sock *tsk)
77{
78 tsk->sk.sk_write_space(&tsk->sk);
79}
80
81static inline int tipc_sk_conn_cong(struct tipc_sock *tsk)
82{
83 return tsk->sent_unacked >= TIPC_FLOWCTRL_WIN;
84}
85
86int tipc_sk_rcv(struct sk_buff *buf); 45int tipc_sk_rcv(struct sk_buff *buf);
87 46struct sk_buff *tipc_sk_socks_show(void);
88void tipc_sk_mcast_rcv(struct sk_buff *buf); 47void tipc_sk_mcast_rcv(struct sk_buff *buf);
48void tipc_sk_reinit(void);
49int tipc_sk_ref_table_init(u32 requested_size, u32 start);
50void tipc_sk_ref_table_stop(void);
51int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb);
52int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb);
89 53
90#endif 54#endif
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 642437231ad5..0344206b984f 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -36,7 +36,6 @@
36 36
37#include "core.h" 37#include "core.h"
38#include "name_table.h" 38#include "name_table.h"
39#include "port.h"
40#include "subscr.h" 39#include "subscr.h"
41 40
42/** 41/**
@@ -306,7 +305,6 @@ static int subscr_subscribe(struct tipc_subscr *s,
306 kfree(sub); 305 kfree(sub);
307 return -EINVAL; 306 return -EINVAL;
308 } 307 }
309 INIT_LIST_HEAD(&sub->nameseq_list);
310 list_add(&sub->subscription_list, &subscriber->subscription_list); 308 list_add(&sub->subscription_list, &subscriber->subscription_list);
311 sub->subscriber = subscriber; 309 sub->subscriber = subscriber;
312 sub->swap = swap; 310 sub->swap = swap;
diff --git a/net/tipc/sysctl.c b/net/tipc/sysctl.c
index f3fef93325a8..1a779b1e8510 100644
--- a/net/tipc/sysctl.c
+++ b/net/tipc/sysctl.c
@@ -47,6 +47,13 @@ static struct ctl_table tipc_table[] = {
47 .mode = 0644, 47 .mode = 0644,
48 .proc_handler = proc_dointvec, 48 .proc_handler = proc_dointvec,
49 }, 49 },
50 {
51 .procname = "named_timeout",
52 .data = &sysctl_tipc_named_timeout,
53 .maxlen = sizeof(sysctl_tipc_named_timeout),
54 .mode = 0644,
55 .proc_handler = proc_dointvec,
56 },
50 {} 57 {}
51}; 58};
52 59
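
[Editor's note] Once the table above is registered under the usual net/tipc sysctl root, the new knob appears as /proc/sys/net/tipc/named_timeout (an assumption about the registration path, consistent with the existing entries in this table). A hedged sketch of adjusting it from user space; the 2000 ms value is arbitrary:

	#include <stdio.h>

	int set_named_timeout(int ms)
	{
		FILE *f = fopen("/proc/sys/net/tipc/named_timeout", "w");

		if (!f)
			return -1;
		fprintf(f, "%d\n", ms);	/* e.g. set_named_timeout(2000) */
		return fclose(f);
	}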