path: root/net/tipc/link.c
Diffstat (limited to 'net/tipc/link.c')
-rw-r--r--	net/tipc/link.c	313
1 file changed, 141 insertions(+), 172 deletions(-)
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 43a515dc97b0..eaa9fe54b4ae 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -86,7 +86,7 @@ static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
  */
 #define STARTING_EVT    856384768	/* link processing trigger */
 #define TRAFFIC_MSG_EVT 560815u	/* rx'd ??? */
-#define TIMEOUT_EVT     560817u	/* link timer expired */
+#define SILENCE_EVT     560817u	/* timer discovered silence from peer */
 
 /*
  * State value stored in 'failover_pkts'
@@ -106,6 +106,7 @@ static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
 static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
 static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);
+static void link_set_timer(struct tipc_link *link, unsigned long time);
 /*
  * Simple link routines
  */
@@ -197,11 +198,12 @@ static void link_timeout(unsigned long data)
 	}
 
 	/* do all other link processing performed on a periodic basis */
-	link_state_event(l_ptr, TIMEOUT_EVT);
-
+	if (l_ptr->silent_intv_cnt || tipc_bclink_acks_missing(l_ptr->owner))
+		link_state_event(l_ptr, SILENCE_EVT);
+	l_ptr->silent_intv_cnt++;
 	if (skb_queue_len(&l_ptr->backlogq))
 		tipc_link_push_packets(l_ptr);
-
+	link_set_timer(l_ptr, l_ptr->keepalive_intv);
 	tipc_node_unlock(l_ptr->owner);
 	tipc_link_put(l_ptr);
 }
@@ -233,8 +235,8 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
 
 	if (n_ptr->link_cnt >= MAX_BEARERS) {
 		tipc_addr_string_fill(addr_string, n_ptr->addr);
-		pr_err("Attempt to establish %uth link to %s. Max %u allowed.\n",
+		pr_err("Cannot establish %uth link to %s. Max %u allowed.\n",
 		       n_ptr->link_cnt, addr_string, MAX_BEARERS);
 		return NULL;
 	}
 
@@ -261,7 +263,6 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
 	/* note: peer i/f name is updated by reset/activate message */
 	memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
 	l_ptr->owner = n_ptr;
-	l_ptr->checkpoint = 1;
 	l_ptr->peer_session = INVALID_SESSION;
 	l_ptr->bearer_id = b_ptr->identity;
 	link_set_supervision_props(l_ptr, b_ptr->tolerance);
@@ -280,7 +281,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
 	l_ptr->mtu = l_ptr->advertised_mtu;
 	l_ptr->priority = b_ptr->priority;
 	tipc_link_set_queue_limits(l_ptr, b_ptr->window);
-	l_ptr->next_out_no = 1;
+	l_ptr->snd_nxt = 1;
 	__skb_queue_head_init(&l_ptr->transmq);
 	__skb_queue_head_init(&l_ptr->backlogq);
 	__skb_queue_head_init(&l_ptr->deferdq);
@@ -311,8 +312,7 @@ void tipc_link_delete(struct tipc_link *l)
 	tipc_link_put(l);
 }
 
-void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
-			   bool shutting_down)
+void tipc_link_delete_list(struct net *net, unsigned int bearer_id)
 {
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	struct tipc_link *link;
@@ -404,7 +404,7 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr)
 	l_ptr->reasm_buf = NULL;
 }
 
-static void tipc_link_purge_backlog(struct tipc_link *l)
+void tipc_link_purge_backlog(struct tipc_link *l)
 {
 	__skb_queue_purge(&l->backlogq);
 	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
@@ -451,9 +451,9 @@ void tipc_link_reset(struct tipc_link *l_ptr)
 
 	if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) {
 		l_ptr->flags |= LINK_FAILINGOVER;
-		l_ptr->failover_checkpt = l_ptr->next_in_no;
+		l_ptr->failover_checkpt = l_ptr->rcv_nxt;
 		pl->failover_pkts = FIRST_FAILOVER;
-		pl->failover_checkpt = l_ptr->next_in_no;
+		pl->failover_checkpt = l_ptr->rcv_nxt;
 		pl->failover_skb = l_ptr->reasm_buf;
 	} else {
 		kfree_skb(l_ptr->reasm_buf);
@@ -469,36 +469,19 @@ void tipc_link_reset(struct tipc_link *l_ptr)
 	tipc_link_purge_backlog(l_ptr);
 	l_ptr->reasm_buf = NULL;
 	l_ptr->rcv_unacked = 0;
-	l_ptr->checkpoint = 1;
-	l_ptr->next_out_no = 1;
-	l_ptr->fsm_msg_cnt = 0;
+	l_ptr->snd_nxt = 1;
+	l_ptr->silent_intv_cnt = 0;
 	l_ptr->stale_count = 0;
 	link_reset_statistics(l_ptr);
 }
 
-void tipc_link_reset_list(struct net *net, unsigned int bearer_id)
-{
-	struct tipc_net *tn = net_generic(net, tipc_net_id);
-	struct tipc_link *l_ptr;
-	struct tipc_node *n_ptr;
-
-	rcu_read_lock();
-	list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
-		tipc_node_lock(n_ptr);
-		l_ptr = n_ptr->links[bearer_id];
-		if (l_ptr)
-			tipc_link_reset(l_ptr);
-		tipc_node_unlock(n_ptr);
-	}
-	rcu_read_unlock();
-}
-
 static void link_activate(struct tipc_link *link)
 {
 	struct tipc_node *node = link->owner;
 
-	link->next_in_no = 1;
+	link->rcv_nxt = 1;
 	link->stats.recv_info = 1;
+	link->silent_intv_cnt = 0;
 	tipc_node_link_up(node, link);
 	tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
 }
@@ -511,7 +494,7 @@ static void link_activate(struct tipc_link *link)
 static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 {
 	struct tipc_link *other;
-	unsigned long cont_intv = l_ptr->cont_intv;
+	unsigned long timer_intv = l_ptr->keepalive_intv;
 
 	if (l_ptr->flags & LINK_STOPPED)
 		return;
@@ -519,45 +502,33 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 	if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
 		return;	/* Not yet. */
 
-	if (l_ptr->flags & LINK_FAILINGOVER) {
-		if (event == TIMEOUT_EVT)
-			link_set_timer(l_ptr, cont_intv);
+	if (l_ptr->flags & LINK_FAILINGOVER)
 		return;
-	}
 
 	switch (l_ptr->state) {
 	case WORKING_WORKING:
 		switch (event) {
 		case TRAFFIC_MSG_EVT:
 		case ACTIVATE_MSG:
+			l_ptr->silent_intv_cnt = 0;
 			break;
-		case TIMEOUT_EVT:
-			if (l_ptr->next_in_no != l_ptr->checkpoint) {
-				l_ptr->checkpoint = l_ptr->next_in_no;
-				if (tipc_bclink_acks_missing(l_ptr->owner)) {
+		case SILENCE_EVT:
+			if (!l_ptr->silent_intv_cnt) {
+				if (tipc_bclink_acks_missing(l_ptr->owner))
 					tipc_link_proto_xmit(l_ptr, STATE_MSG,
 							     0, 0, 0, 0);
-					l_ptr->fsm_msg_cnt++;
-				}
-				link_set_timer(l_ptr, cont_intv);
 				break;
 			}
 			l_ptr->state = WORKING_UNKNOWN;
-			l_ptr->fsm_msg_cnt = 0;
 			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
-			l_ptr->fsm_msg_cnt++;
-			link_set_timer(l_ptr, cont_intv / 4);
 			break;
 		case RESET_MSG:
 			pr_debug("%s<%s>, requested by peer\n",
 				 link_rst_msg, l_ptr->name);
 			tipc_link_reset(l_ptr);
 			l_ptr->state = RESET_RESET;
-			l_ptr->fsm_msg_cnt = 0;
 			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
 					     0, 0, 0, 0);
-			l_ptr->fsm_msg_cnt++;
-			link_set_timer(l_ptr, cont_intv);
 			break;
 		default:
 			pr_debug("%s%u in WW state\n", link_unk_evt, event);
@@ -568,46 +539,33 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 		case TRAFFIC_MSG_EVT:
 		case ACTIVATE_MSG:
 			l_ptr->state = WORKING_WORKING;
-			l_ptr->fsm_msg_cnt = 0;
-			link_set_timer(l_ptr, cont_intv);
+			l_ptr->silent_intv_cnt = 0;
 			break;
 		case RESET_MSG:
 			pr_debug("%s<%s>, requested by peer while probing\n",
 				 link_rst_msg, l_ptr->name);
 			tipc_link_reset(l_ptr);
 			l_ptr->state = RESET_RESET;
-			l_ptr->fsm_msg_cnt = 0;
 			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
 					     0, 0, 0, 0);
-			l_ptr->fsm_msg_cnt++;
-			link_set_timer(l_ptr, cont_intv);
 			break;
-		case TIMEOUT_EVT:
-			if (l_ptr->next_in_no != l_ptr->checkpoint) {
+		case SILENCE_EVT:
+			if (!l_ptr->silent_intv_cnt) {
 				l_ptr->state = WORKING_WORKING;
-				l_ptr->fsm_msg_cnt = 0;
-				l_ptr->checkpoint = l_ptr->next_in_no;
-				if (tipc_bclink_acks_missing(l_ptr->owner)) {
+				if (tipc_bclink_acks_missing(l_ptr->owner))
 					tipc_link_proto_xmit(l_ptr, STATE_MSG,
 							     0, 0, 0, 0);
-					l_ptr->fsm_msg_cnt++;
-				}
-				link_set_timer(l_ptr, cont_intv);
-			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
+			} else if (l_ptr->silent_intv_cnt <
+				   l_ptr->abort_limit) {
 				tipc_link_proto_xmit(l_ptr, STATE_MSG,
 						     1, 0, 0, 0);
-				l_ptr->fsm_msg_cnt++;
-				link_set_timer(l_ptr, cont_intv / 4);
 			} else {	/* Link has failed */
 				pr_debug("%s<%s>, peer not responding\n",
 					 link_rst_msg, l_ptr->name);
 				tipc_link_reset(l_ptr);
 				l_ptr->state = RESET_UNKNOWN;
-				l_ptr->fsm_msg_cnt = 0;
 				tipc_link_proto_xmit(l_ptr, RESET_MSG,
 						     0, 0, 0, 0);
-				l_ptr->fsm_msg_cnt++;
-				link_set_timer(l_ptr, cont_intv);
 			}
 			break;
 		default:
@@ -623,31 +581,22 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 			if (other && link_working_unknown(other))
 				break;
 			l_ptr->state = WORKING_WORKING;
-			l_ptr->fsm_msg_cnt = 0;
 			link_activate(l_ptr);
 			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
-			l_ptr->fsm_msg_cnt++;
 			if (l_ptr->owner->working_links == 1)
 				tipc_link_sync_xmit(l_ptr);
-			link_set_timer(l_ptr, cont_intv);
 			break;
 		case RESET_MSG:
 			l_ptr->state = RESET_RESET;
-			l_ptr->fsm_msg_cnt = 0;
 			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
 					     1, 0, 0, 0);
-			l_ptr->fsm_msg_cnt++;
-			link_set_timer(l_ptr, cont_intv);
 			break;
 		case STARTING_EVT:
 			l_ptr->flags |= LINK_STARTED;
-			l_ptr->fsm_msg_cnt++;
-			link_set_timer(l_ptr, cont_intv);
+			link_set_timer(l_ptr, timer_intv);
 			break;
-		case TIMEOUT_EVT:
+		case SILENCE_EVT:
 			tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0);
-			l_ptr->fsm_msg_cnt++;
-			link_set_timer(l_ptr, cont_intv);
 			break;
 		default:
 			pr_err("%s%u in RU state\n", link_unk_evt, event);
@@ -661,21 +610,16 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 			if (other && link_working_unknown(other))
 				break;
 			l_ptr->state = WORKING_WORKING;
-			l_ptr->fsm_msg_cnt = 0;
 			link_activate(l_ptr);
 			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
-			l_ptr->fsm_msg_cnt++;
 			if (l_ptr->owner->working_links == 1)
 				tipc_link_sync_xmit(l_ptr);
-			link_set_timer(l_ptr, cont_intv);
 			break;
 		case RESET_MSG:
 			break;
-		case TIMEOUT_EVT:
+		case SILENCE_EVT:
 			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
 					     0, 0, 0, 0);
-			l_ptr->fsm_msg_cnt++;
-			link_set_timer(l_ptr, cont_intv);
 			break;
 		default:
 			pr_err("%s%u in RR state\n", link_unk_evt, event);
@@ -701,53 +645,58 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link,
 {
 	struct tipc_msg *msg = buf_msg(skb_peek(list));
 	unsigned int maxwin = link->window;
-	unsigned int imp = msg_importance(msg);
+	unsigned int i, imp = msg_importance(msg);
 	uint mtu = link->mtu;
-	uint ack = mod(link->next_in_no - 1);
-	uint seqno = link->next_out_no;
-	uint bc_last_in = link->owner->bclink.last_in;
+	u16 ack = mod(link->rcv_nxt - 1);
+	u16 seqno = link->snd_nxt;
+	u16 bc_last_in = link->owner->bclink.last_in;
 	struct tipc_media_addr *addr = &link->media_addr;
 	struct sk_buff_head *transmq = &link->transmq;
 	struct sk_buff_head *backlogq = &link->backlogq;
-	struct sk_buff *skb, *tmp;
-
-	/* Match backlog limit against msg importance: */
-	if (unlikely(link->backlog[imp].len >= link->backlog[imp].limit))
-		return link_schedule_user(link, list);
+	struct sk_buff *skb, *bskb;
 
+	/* Match msg importance against this and all higher backlog limits: */
+	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
+		if (unlikely(link->backlog[i].len >= link->backlog[i].limit))
+			return link_schedule_user(link, list);
+	}
 	if (unlikely(msg_size(msg) > mtu)) {
 		__skb_queue_purge(list);
 		return -EMSGSIZE;
 	}
 	/* Prepare each packet for sending, and add to relevant queue: */
-	skb_queue_walk_safe(list, skb, tmp) {
-		__skb_unlink(skb, list);
+	while (skb_queue_len(list)) {
+		skb = skb_peek(list);
 		msg = buf_msg(skb);
 		msg_set_seqno(msg, seqno);
 		msg_set_ack(msg, ack);
 		msg_set_bcast_ack(msg, bc_last_in);
 
 		if (likely(skb_queue_len(transmq) < maxwin)) {
+			__skb_dequeue(list);
 			__skb_queue_tail(transmq, skb);
 			tipc_bearer_send(net, link->bearer_id, skb, addr);
 			link->rcv_unacked = 0;
 			seqno++;
 			continue;
 		}
-		if (tipc_msg_bundle(skb_peek_tail(backlogq), skb, mtu)) {
+		if (tipc_msg_bundle(skb_peek_tail(backlogq), msg, mtu)) {
+			kfree_skb(__skb_dequeue(list));
 			link->stats.sent_bundled++;
 			continue;
 		}
-		if (tipc_msg_make_bundle(&skb, mtu, link->addr)) {
+		if (tipc_msg_make_bundle(&bskb, msg, mtu, link->addr)) {
+			kfree_skb(__skb_dequeue(list));
+			__skb_queue_tail(backlogq, bskb);
+			link->backlog[msg_importance(buf_msg(bskb))].len++;
 			link->stats.sent_bundled++;
 			link->stats.sent_bundles++;
-			imp = msg_importance(buf_msg(skb));
+			continue;
 		}
-		__skb_queue_tail(backlogq, skb);
-		link->backlog[imp].len++;
-		seqno++;
+		link->backlog[imp].len += skb_queue_len(list);
+		skb_queue_splice_tail_init(list, backlogq);
 	}
-	link->next_out_no = seqno;
+	link->snd_nxt = seqno;
 	return 0;
 }
 
@@ -877,7 +826,8 @@ void tipc_link_push_packets(struct tipc_link *link)
 {
 	struct sk_buff *skb;
 	struct tipc_msg *msg;
-	unsigned int ack = mod(link->next_in_no - 1);
+	u16 seqno = link->snd_nxt;
+	u16 ack = mod(link->rcv_nxt - 1);
 
 	while (skb_queue_len(&link->transmq) < link->window) {
 		skb = __skb_dequeue(&link->backlogq);
@@ -886,12 +836,15 @@ void tipc_link_push_packets(struct tipc_link *link)
 		msg = buf_msg(skb);
 		link->backlog[msg_importance(msg)].len--;
 		msg_set_ack(msg, ack);
+		msg_set_seqno(msg, seqno);
+		seqno = mod(seqno + 1);
 		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
 		link->rcv_unacked = 0;
 		__skb_queue_tail(&link->transmq, skb);
 		tipc_bearer_send(link->owner->net, link->bearer_id,
 				 skb, &link->media_addr);
 	}
+	link->snd_nxt = seqno;
 }
 
 void tipc_link_reset_all(struct tipc_node *node)
@@ -964,13 +917,13 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
 	msg = buf_msg(skb);
 
 	/* Detect repeated retransmit failures */
-	if (l_ptr->last_retransmitted == msg_seqno(msg)) {
+	if (l_ptr->last_retransm == msg_seqno(msg)) {
 		if (++l_ptr->stale_count > 100) {
 			link_retransmit_failure(l_ptr, skb);
 			return;
 		}
 	} else {
-		l_ptr->last_retransmitted = msg_seqno(msg);
+		l_ptr->last_retransm = msg_seqno(msg);
 		l_ptr->stale_count = 1;
 	}
 
@@ -978,7 +931,7 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
 		if (!retransmits)
 			break;
 		msg = buf_msg(skb);
-		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
+		msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
 		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
 		tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
 				 &l_ptr->media_addr);
@@ -1001,11 +954,11 @@ static bool link_synch(struct tipc_link *l)
 		goto synched;
 
 	/* Was last pre-synch packet added to input queue ? */
-	if (less_eq(pl->next_in_no, l->synch_point))
+	if (less_eq(pl->rcv_nxt, l->synch_point))
 		return false;
 
 	/* Is it still in the input queue ? */
-	post_synch = mod(pl->next_in_no - l->synch_point) - 1;
+	post_synch = mod(pl->rcv_nxt - l->synch_point) - 1;
 	if (skb_queue_len(&pl->inputq) > post_synch)
 		return false;
 synched:
@@ -1016,13 +969,13 @@ synched:
 static void link_retrieve_defq(struct tipc_link *link,
 			       struct sk_buff_head *list)
 {
-	u32 seq_no;
+	u16 seq_no;
 
 	if (skb_queue_empty(&link->deferdq))
 		return;
 
 	seq_no = buf_seqno(skb_peek(&link->deferdq));
-	if (seq_no == mod(link->next_in_no))
+	if (seq_no == link->rcv_nxt)
 		skb_queue_splice_tail_init(&link->deferdq, list);
 }
 
@@ -1043,8 +996,8 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
 	struct tipc_link *l_ptr;
 	struct sk_buff *skb1, *tmp;
 	struct tipc_msg *msg;
-	u32 seq_no;
-	u32 ackd;
+	u16 seq_no;
+	u16 ackd;
 	u32 released;
 
 	skb2list(skb, &head);
@@ -1137,18 +1090,20 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
 		}
 
 		/* Link is now in state WORKING_WORKING */
-		if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
+		if (unlikely(seq_no != l_ptr->rcv_nxt)) {
 			link_handle_out_of_seq_msg(l_ptr, skb);
 			link_retrieve_defq(l_ptr, &head);
 			skb = NULL;
 			goto unlock;
 		}
+		l_ptr->silent_intv_cnt = 0;
+
 		/* Synchronize with parallel link if applicable */
 		if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) {
 			if (!link_synch(l_ptr))
 				goto unlock;
 		}
-		l_ptr->next_in_no++;
+		l_ptr->rcv_nxt++;
 		if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
 			link_retrieve_defq(l_ptr, &head);
 		if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
@@ -1268,7 +1223,7 @@ static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
 u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
 {
 	struct sk_buff *skb1;
-	u32 seq_no = buf_seqno(skb);
+	u16 seq_no = buf_seqno(skb);
 
 	/* Empty queue ? */
 	if (skb_queue_empty(list)) {
@@ -1284,7 +1239,7 @@ u32 tipc_link_defer_pkt(struct sk_buff_head *list, struct sk_buff *skb)
 
 	/* Locate insertion point in queue, then insert; discard if duplicate */
 	skb_queue_walk(list, skb1) {
-		u32 curr_seqno = buf_seqno(skb1);
+		u16 curr_seqno = buf_seqno(skb1);
 
 		if (seq_no == curr_seqno) {
 			kfree_skb(skb);
@@ -1312,14 +1267,14 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
 		return;
 	}
 
-	/* Record OOS packet arrival (force mismatch on next timeout) */
-	l_ptr->checkpoint--;
+	/* Record OOS packet arrival */
+	l_ptr->silent_intv_cnt = 0;
 
 	/*
 	 * Discard packet if a duplicate; otherwise add it to deferred queue
 	 * and notify peer of gap as per protocol specification
 	 */
-	if (less(seq_no, mod(l_ptr->next_in_no))) {
+	if (less(seq_no, l_ptr->rcv_nxt)) {
 		l_ptr->stats.duplicates++;
 		kfree_skb(buf);
 		return;
@@ -1344,6 +1299,7 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
 	struct tipc_msg *msg = l_ptr->pmsg;
 	u32 msg_size = sizeof(l_ptr->proto_msg);
 	int r_flag;
+	u16 last_rcv;
 
 	/* Don't send protocol message during link failover */
 	if (l_ptr->flags & LINK_FAILINGOVER)
@@ -1360,16 +1316,14 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
 	msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));
 
 	if (msg_typ == STATE_MSG) {
-		u32 next_sent = mod(l_ptr->next_out_no);
+		u16 next_sent = l_ptr->snd_nxt;
 
 		if (!tipc_link_is_up(l_ptr))
 			return;
-		if (skb_queue_len(&l_ptr->backlogq))
-			next_sent = buf_seqno(skb_peek(&l_ptr->backlogq));
 		msg_set_next_sent(msg, next_sent);
 		if (!skb_queue_empty(&l_ptr->deferdq)) {
-			u32 rec = buf_seqno(skb_peek(&l_ptr->deferdq));
-			gap = mod(rec - mod(l_ptr->next_in_no));
+			last_rcv = buf_seqno(skb_peek(&l_ptr->deferdq));
+			gap = mod(last_rcv - l_ptr->rcv_nxt);
 		}
 		msg_set_seq_gap(msg, gap);
 		if (gap)
@@ -1377,7 +1331,7 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
 		msg_set_link_tolerance(msg, tolerance);
 		msg_set_linkprio(msg, priority);
 		msg_set_max_pkt(msg, l_ptr->mtu);
-		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
+		msg_set_ack(msg, mod(l_ptr->rcv_nxt - 1));
 		msg_set_probe(msg, probe_msg != 0);
 		if (probe_msg)
 			l_ptr->stats.sent_probes++;
@@ -1397,7 +1351,7 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
 	msg_set_linkprio(msg, l_ptr->priority);
 	msg_set_size(msg, msg_size);
 
-	msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
+	msg_set_seqno(msg, mod(l_ptr->snd_nxt + (0xffff / 2)));
 
 	buf = tipc_buf_acquire(msg_size);
 	if (!buf)
@@ -1496,17 +1450,15 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
 		}
 
 		/* Record reception; force mismatch at next timeout: */
-		l_ptr->checkpoint--;
+		l_ptr->silent_intv_cnt = 0;
 
 		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
 		l_ptr->stats.recv_states++;
 		if (link_reset_unknown(l_ptr))
 			break;
 
-		if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
-			rec_gap = mod(msg_next_sent(msg) -
-				      mod(l_ptr->next_in_no));
-		}
+		if (less_eq(l_ptr->rcv_nxt, msg_next_sent(msg)))
+			rec_gap = mod(msg_next_sent(msg) - l_ptr->rcv_nxt);
 
 		if (msg_probe(msg))
 			l_ptr->stats.recv_probes++;
@@ -1580,6 +1532,11 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
 
 	tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL,
 		      FAILOVER_MSG, INT_H_SIZE, l_ptr->addr);
+
+	skb_queue_walk(&l_ptr->backlogq, skb) {
+		msg_set_seqno(buf_msg(skb), l_ptr->snd_nxt);
+		l_ptr->snd_nxt = mod(l_ptr->snd_nxt + 1);
+	}
 	skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
 	tipc_link_purge_backlog(l_ptr);
 	msgcount = skb_queue_len(&l_ptr->transmq);
@@ -1640,6 +1597,7 @@ void tipc_link_dup_queue_xmit(struct tipc_link *link,
 	struct tipc_msg tnl_hdr;
 	struct sk_buff_head *queue = &link->transmq;
 	int mcnt;
+	u16 seqno;
 
 	tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL,
 		      SYNCH_MSG, INT_H_SIZE, link->addr);
@@ -1653,7 +1611,7 @@ tunnel_queue:
 		struct tipc_msg *msg = buf_msg(skb);
 		u32 len = msg_size(msg);
 
-		msg_set_ack(msg, mod(link->next_in_no - 1));
+		msg_set_ack(msg, mod(link->rcv_nxt - 1));
 		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
 		msg_set_size(&tnl_hdr, len + INT_H_SIZE);
 		outskb = tipc_buf_acquire(len + INT_H_SIZE);
@@ -1671,6 +1629,11 @@ tunnel_queue:
 	}
 	if (queue == &link->backlogq)
 		return;
+	seqno = link->snd_nxt;
+	skb_queue_walk(&link->backlogq, skb) {
+		msg_set_seqno(buf_msg(skb), seqno);
+		seqno = mod(seqno + 1);
+	}
 	queue = &link->backlogq;
 	goto tunnel_queue;
 }
@@ -1742,8 +1705,8 @@ static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
 		return;
 
 	l_ptr->tolerance = tol;
-	l_ptr->cont_intv = msecs_to_jiffies(intv);
-	l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4);
+	l_ptr->keepalive_intv = msecs_to_jiffies(intv);
+	l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->keepalive_intv));
 }
 
 void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
@@ -1803,8 +1766,8 @@ static struct tipc_node *tipc_link_find_owner(struct net *net,
 static void link_reset_statistics(struct tipc_link *l_ptr)
 {
 	memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
-	l_ptr->stats.sent_info = l_ptr->next_out_no;
-	l_ptr->stats.recv_info = l_ptr->next_in_no;
+	l_ptr->stats.sent_info = l_ptr->snd_nxt;
+	l_ptr->stats.recv_info = l_ptr->rcv_nxt;
 }
 
 static void link_print(struct tipc_link *l_ptr, const char *str)
@@ -1893,6 +1856,9 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
 
 	name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
 
+	if (strcmp(name, tipc_bclink_name) == 0)
+		return tipc_nl_bc_link_set(net, attrs);
+
 	node = tipc_link_find_owner(net, name, &bearer_id);
 	if (!node)
 		return -EINVAL;
@@ -2034,9 +2000,9 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
 		goto attr_msg_full;
 	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
 		goto attr_msg_full;
-	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no))
+	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
 		goto attr_msg_full;
-	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->next_out_no))
+	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
 		goto attr_msg_full;
 
 	if (tipc_link_is_up(link))
@@ -2175,50 +2141,53 @@ out:
 int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
 {
 	struct net *net = genl_info_net(info);
-	struct sk_buff *ans_skb;
 	struct tipc_nl_msg msg;
-	struct tipc_link *link;
-	struct tipc_node *node;
 	char *name;
-	int bearer_id;
 	int err;
 
+	msg.portid = info->snd_portid;
+	msg.seq = info->snd_seq;
+
 	if (!info->attrs[TIPC_NLA_LINK_NAME])
 		return -EINVAL;
-
 	name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
-	node = tipc_link_find_owner(net, name, &bearer_id);
-	if (!node)
-		return -EINVAL;
 
-	ans_skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
-	if (!ans_skb)
+	msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+	if (!msg.skb)
 		return -ENOMEM;
 
-	msg.skb = ans_skb;
-	msg.portid = info->snd_portid;
-	msg.seq = info->snd_seq;
-
-	tipc_node_lock(node);
-	link = node->links[bearer_id];
-	if (!link) {
-		err = -EINVAL;
-		goto err_out;
-	}
-
-	err = __tipc_nl_add_link(net, &msg, link, 0);
-	if (err)
-		goto err_out;
-
-	tipc_node_unlock(node);
-
-	return genlmsg_reply(ans_skb, info);
-
-err_out:
-	tipc_node_unlock(node);
-	nlmsg_free(ans_skb);
+	if (strcmp(name, tipc_bclink_name) == 0) {
+		err = tipc_nl_add_bc_link(net, &msg);
+		if (err) {
+			nlmsg_free(msg.skb);
+			return err;
+		}
+	} else {
+		int bearer_id;
+		struct tipc_node *node;
+		struct tipc_link *link;
+
+		node = tipc_link_find_owner(net, name, &bearer_id);
+		if (!node)
+			return -EINVAL;
+
+		tipc_node_lock(node);
+		link = node->links[bearer_id];
+		if (!link) {
+			tipc_node_unlock(node);
+			nlmsg_free(msg.skb);
+			return -EINVAL;
+		}
+
+		err = __tipc_nl_add_link(net, &msg, link, 0);
+		tipc_node_unlock(node);
+		if (err) {
+			nlmsg_free(msg.skb);
+			return err;
+		}
+	}
 
-	return err;
+	return genlmsg_reply(msg.skb, info);
 }
 
 int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)