-rw-r--r--	net/tipc/bcast.c	  2
-rw-r--r--	net/tipc/link.c		207
-rw-r--r--	net/tipc/link.h		  9
-rw-r--r--	net/tipc/msg.h		 36
4 files changed, 157 insertions, 97 deletions
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 52307397e0b1..79355531c3e2 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -831,7 +831,7 @@ int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
 	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
 	if (!prop)
 		goto attr_msg_full;
-	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->queue_limit[0]))
+	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
 		goto prop_msg_full;
 	nla_nest_end(msg->skb, prop);
 
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 8c98c4d00ad6..1287161e9424 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -139,6 +139,13 @@ static void tipc_link_put(struct tipc_link *l_ptr)
 	kref_put(&l_ptr->ref, tipc_link_release);
 }
 
+static struct tipc_link *tipc_parallel_link(struct tipc_link *l)
+{
+	if (l->owner->active_links[0] != l)
+		return l->owner->active_links[0];
+	return l->owner->active_links[1];
+}
+
 static void link_init_max_pkt(struct tipc_link *l_ptr)
 {
 	struct tipc_node *node = l_ptr->owner;
@@ -310,7 +317,6 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
 	link_init_max_pkt(l_ptr);
 	l_ptr->priority = b_ptr->priority;
 	tipc_link_set_queue_limits(l_ptr, b_ptr->window);
-
 	l_ptr->next_out_no = 1;
 	__skb_queue_head_init(&l_ptr->transmq);
 	__skb_queue_head_init(&l_ptr->backlogq);
@@ -368,28 +374,43 @@ void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
 }
 
 /**
- * link_schedule_user - schedule user for wakeup after congestion
+ * link_schedule_user - schedule a message sender for wakeup after congestion
  * @link: congested link
- * @oport: sending port
- * @chain_sz: size of buffer chain that was attempted sent
- * @imp: importance of message attempted sent
+ * @list: message that was attempted sent
  * Create pseudo msg to send back to user when congestion abates
+ * Only consumes message if there is an error
  */
-static bool link_schedule_user(struct tipc_link *link, u32 oport,
-			       uint chain_sz, uint imp)
+static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
 {
-	struct sk_buff *buf;
+	struct tipc_msg *msg = buf_msg(skb_peek(list));
+	int imp = msg_importance(msg);
+	u32 oport = msg_origport(msg);
+	u32 addr = link_own_addr(link);
+	struct sk_buff *skb;
 
-	buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
-			      link_own_addr(link), link_own_addr(link),
-			      oport, 0, 0);
-	if (!buf)
-		return false;
-	TIPC_SKB_CB(buf)->chain_sz = chain_sz;
-	TIPC_SKB_CB(buf)->chain_imp = imp;
-	skb_queue_tail(&link->wakeupq, buf);
+	/* This really cannot happen...  */
+	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
+		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
+		tipc_link_reset(link);
+		goto err;
+	}
+	/* Non-blocking sender: */
+	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
+		return -ELINKCONG;
+
+	/* Create and schedule wakeup pseudo message */
+	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
+			      addr, addr, oport, 0, 0);
+	if (!skb)
+		goto err;
+	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
+	TIPC_SKB_CB(skb)->chain_imp = imp;
+	skb_queue_tail(&link->wakeupq, skb);
 	link->stats.link_congs++;
-	return true;
+	return -ELINKCONG;
+err:
+	__skb_queue_purge(list);
+	return -ENOBUFS;
 }
 
 /**
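
The rewritten link_schedule_user() now carries the whole congestion contract: -ELINKCONG means the chain was kept and a SOCK_WAKEUP pseudo message will arrive once the backlog drains, while -ENOBUFS means the chain was purged. A minimal sketch of a blocking caller built on that contract; tipc_wait_for_wakeup() is a hypothetical stand-in, not an existing kernel function (the real callers use the socket layer's wait helpers, cf. the tipc_wait_for_sendpkt() TODO further down):

	/* Sketch only: retry loop over the -ELINKCONG/-ENOBUFS contract above. */
	extern int tipc_wait_for_wakeup(struct sock *sk);	/* hypothetical */

	static int blocking_send(struct net *net, struct tipc_link *link,
				 struct sk_buff_head *list, struct sock *sk)
	{
		int rc;

		for (;;) {
			rc = __tipc_link_xmit(net, link, list);
			if (rc != -ELINKCONG)
				return rc;	/* 0, -EMSGSIZE or -ENOBUFS: chain consumed */
			rc = tipc_wait_for_wakeup(sk);	/* sleep until SOCK_WAKEUP arrives */
			if (rc)
				return rc;	/* interrupted or timed out */
		}
	}
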
@@ -398,19 +419,22 @@ static bool link_schedule_user(struct tipc_link *link, u32 oport,
  * Move a number of waiting users, as permitted by available space in
  * the send queue, from link wait queue to node wait queue for wakeup
  */
-void link_prepare_wakeup(struct tipc_link *link)
+void link_prepare_wakeup(struct tipc_link *l)
 {
-	uint pend_qsz = skb_queue_len(&link->backlogq);
+	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
+	int imp, lim;
 	struct sk_buff *skb, *tmp;
 
-	skb_queue_walk_safe(&link->wakeupq, skb, tmp) {
-		if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(skb)->chain_imp])
+	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
+		imp = TIPC_SKB_CB(skb)->chain_imp;
+		lim = l->window + l->backlog[imp].limit;
+		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
+		if ((pnd[imp] + l->backlog[imp].len) >= lim)
 			break;
-		pend_qsz += TIPC_SKB_CB(skb)->chain_sz;
-		skb_unlink(skb, &link->wakeupq);
-		skb_queue_tail(&link->inputq, skb);
-		link->owner->inputq = &link->inputq;
-		link->owner->action_flags |= TIPC_MSG_EVT;
+		skb_unlink(skb, &l->wakeupq);
+		skb_queue_tail(&l->inputq, skb);
+		l->owner->inputq = &l->inputq;
+		l->owner->action_flags |= TIPC_MSG_EVT;
 	}
 }
 
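
Each importance level now gets its own wakeup budget: the link window plus that level's backlog limit, consumed both by packets already backlogged and by the chain sizes of earlier waiters in the queue. A self-contained user-space rendering of just this arithmetic, with made-up queue values:

	#include <stdio.h>

	enum { TIPC_LOW_IMPORTANCE, TIPC_MEDIUM_IMPORTANCE, TIPC_HIGH_IMPORTANCE,
	       TIPC_CRITICAL_IMPORTANCE, TIPC_SYSTEM_IMPORTANCE };

	int main(void)
	{
		int window = 50;
		int backlog_len[5]   = { 20, 60, 10, 0, 0 };	/* packets queued now */
		int backlog_limit[5] = { 25, 50, 75, 100, 100 };
		int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = { 0 };
		/* pending wakeups as {importance, chain_sz}, in queue order */
		int waiters[][2] = { {TIPC_LOW_IMPORTANCE, 10},
				     {TIPC_MEDIUM_IMPORTANCE, 45},
				     {TIPC_LOW_IMPORTANCE, 50} };
		int i, imp, lim;

		for (i = 0; i < 3; i++) {
			imp = waiters[i][0];
			lim = window + backlog_limit[imp];
			pnd[imp] += waiters[i][1];
			if (pnd[imp] + backlog_len[imp] >= lim) {
				printf("waiter %d (imp %d): budget exhausted, stop\n", i, imp);
				break;
			}
			printf("waiter %d (imp %d): woken up\n", i, imp);
		}
		return 0;
	}
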
@@ -424,6 +448,16 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr)
 	l_ptr->reasm_buf = NULL;
 }
 
+static void tipc_link_purge_backlog(struct tipc_link *l)
+{
+	__skb_queue_purge(&l->backlogq);
+	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
+	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
+	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
+	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
+	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
+}
+
 /**
  * tipc_link_purge_queues - purge all pkt queues associated with link
  * @l_ptr: pointer to link
@@ -432,7 +466,7 @@ void tipc_link_purge_queues(struct tipc_link *l_ptr)
 {
 	__skb_queue_purge(&l_ptr->deferdq);
 	__skb_queue_purge(&l_ptr->transmq);
-	__skb_queue_purge(&l_ptr->backlogq);
+	tipc_link_purge_backlog(l_ptr);
 	tipc_link_reset_fragments(l_ptr);
 }
 
@@ -466,13 +500,13 @@ void tipc_link_reset(struct tipc_link *l_ptr)
 
 	/* Clean up all queues, except inputq: */
 	__skb_queue_purge(&l_ptr->transmq);
-	__skb_queue_purge(&l_ptr->backlogq);
 	__skb_queue_purge(&l_ptr->deferdq);
 	if (!owner->inputq)
 		owner->inputq = &l_ptr->inputq;
 	skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
 	if (!skb_queue_empty(owner->inputq))
 		owner->action_flags |= TIPC_MSG_EVT;
+	tipc_link_purge_backlog(l_ptr);
 	l_ptr->rcv_unacked = 0;
 	l_ptr->checkpoint = 1;
 	l_ptr->next_out_no = 1;
@@ -696,48 +730,15 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 	}
 }
 
-/* tipc_link_cong: determine return value and how to treat the
- * sent buffer during link congestion.
- * - For plain, errorless user data messages we keep the buffer and
- *   return -ELINKONG.
- * - For all other messages we discard the buffer and return -EHOSTUNREACH
- * - For TIPC internal messages we also reset the link
- */
-static int tipc_link_cong(struct tipc_link *link, struct sk_buff_head *list)
-{
-	struct sk_buff *skb = skb_peek(list);
-	struct tipc_msg *msg = buf_msg(skb);
-	int imp = msg_importance(msg);
-	u32 oport = msg_tot_origport(msg);
-
-	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
-		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
-		tipc_link_reset(link);
-		goto drop;
-	}
-	if (unlikely(msg_errcode(msg)))
-		goto drop;
-	if (unlikely(msg_reroute_cnt(msg)))
-		goto drop;
-	if (TIPC_SKB_CB(skb)->wakeup_pending)
-		return -ELINKCONG;
-	if (link_schedule_user(link, oport, skb_queue_len(list), imp))
-		return -ELINKCONG;
-drop:
-	__skb_queue_purge(list);
-	return -EHOSTUNREACH;
-}
-
 /**
  * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
  * @link: link to use
  * @list: chain of buffers containing message
  *
- * Consumes the buffer chain, except when returning -ELINKCONG
- * Returns 0 if success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket
- * user data messages) or -EHOSTUNREACH (all other messages/senders)
- * Only the socket functions tipc_send_stream() and tipc_send_packet() need
- * to act on the return value, since they may need to do more send attempts.
+ * Consumes the buffer chain, except when returning -ELINKCONG,
+ * since the caller then may want to make more send attempts.
+ * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
+ * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
  */
 int __tipc_link_xmit(struct net *net, struct tipc_link *link,
 		     struct sk_buff_head *list)
@@ -754,16 +755,14 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link,
 	struct sk_buff_head *backlogq = &link->backlogq;
 	struct sk_buff *skb, *tmp;
 
-	/* Match queue limit against msg importance: */
-	if (unlikely(skb_queue_len(backlogq) >= link->queue_limit[imp]))
-		return tipc_link_cong(link, list);
+	/* Match backlog limit against msg importance: */
+	if (unlikely(link->backlog[imp].len >= link->backlog[imp].limit))
+		return link_schedule_user(link, list);
 
-	/* Has valid packet limit been used ? */
 	if (unlikely(msg_size(msg) > mtu)) {
 		__skb_queue_purge(list);
 		return -EMSGSIZE;
 	}
-
 	/* Prepare each packet for sending, and add to relevant queue: */
 	skb_queue_walk_safe(list, skb, tmp) {
 		__skb_unlink(skb, list);
@@ -786,8 +785,10 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link,
 		if (tipc_msg_make_bundle(&skb, mtu, link->addr)) {
 			link->stats.sent_bundled++;
 			link->stats.sent_bundles++;
+			imp = msg_importance(buf_msg(skb));
 		}
 		__skb_queue_tail(backlogq, skb);
+		link->backlog[imp].len++;
 		seqno++;
 	}
 	link->next_out_no = seqno;
@@ -808,13 +809,25 @@ static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
 	return __tipc_link_xmit(link->owner->net, link, &head);
 }
 
+/* tipc_link_xmit_skb(): send single buffer to destination
+ * Buffers sent via this function are generally TIPC_SYSTEM_IMPORTANCE
+ * messages, which will not be rejected
+ * The only exception is datagram messages rerouted after secondary
+ * lookup, which are rare and safe to dispose of anyway.
+ * TODO: Return real return value, and let callers use
+ * tipc_wait_for_sendpkt() where applicable
+ */
 int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
 		       u32 selector)
 {
 	struct sk_buff_head head;
+	int rc;
 
 	skb2list(skb, &head);
-	return tipc_link_xmit(net, &head, dnode, selector);
+	rc = tipc_link_xmit(net, &head, dnode, selector);
+	if (rc == -ELINKCONG)
+		kfree_skb(skb);
+	return 0;
 }
 
 /**
@@ -914,6 +927,7 @@ void tipc_link_push_packets(struct tipc_link *link)
 		if (!skb)
 			break;
 		msg = buf_msg(skb);
+		link->backlog[msg_importance(msg)].len--;
 		msg_set_ack(msg, ack);
 		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
 		link->rcv_unacked = 0;
@@ -1019,6 +1033,32 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
 	}
 }
 
+/* link_synch(): check if all packets arrived before the synch
+ * point have been consumed
+ * Returns true if the parallel links are synched, otherwise false
+ */
+static bool link_synch(struct tipc_link *l)
+{
+	unsigned int post_synch;
+	struct tipc_link *pl;
+
+	pl = tipc_parallel_link(l);
+	if (pl == l)
+		goto synched;
+
+	/* Was last pre-synch packet added to input queue ? */
+	if (less_eq(pl->next_in_no, l->synch_point))
+		return false;
+
+	/* Is it still in the input queue ? */
+	post_synch = mod(pl->next_in_no - l->synch_point) - 1;
+	if (skb_queue_len(&pl->inputq) > post_synch)
+		return false;
+synched:
+	l->flags &= ~LINK_SYNCHING;
+	return true;
+}
+
 static void link_retrieve_defq(struct tipc_link *link,
 			       struct sk_buff_head *list)
 {
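
link_synch() depends on TIPC's wrapping sequence arithmetic: assuming the usual helpers from net/tipc/link.h, where mod(x) masks to the 16-bit sequence space and less_eq() compares within half of it, both tests survive sequence-number wraparound. A standalone sketch of the two checks:

	#include <stdio.h>
	#include <stdbool.h>

	typedef unsigned int u32;

	/* Assumed to match the TIPC helpers in net/tipc/link.h */
	static u32 mod(u32 x)             { return x & 0xffff; }
	static bool less_eq(u32 l, u32 r) { return mod(r - l) < 32768u; }

	/* The two link_synch() tests, with the link state passed in directly */
	static bool synched(u32 pl_next_in_no, u32 synch_point, u32 pl_inputq_len)
	{
		u32 post_synch;

		/* Was the last pre-synch packet added to the input queue? */
		if (less_eq(pl_next_in_no, synch_point))
			return false;
		/* Is it still sitting in the input queue? */
		post_synch = mod(pl_next_in_no - synch_point) - 1;
		return pl_inputq_len <= post_synch;
	}

	int main(void)
	{
		/* wraparound case: synch point 65530, peer now at seqno 3 */
		printf("%d\n", synched(3, 65530, 2));		/* 1: 8 post-synch pkts, 2 queued */
		printf("%d\n", synched(65531, 65530, 0));	/* 1 */
		printf("%d\n", synched(65530, 65530, 0));	/* 0: pre-synch not yet received */
		return 0;
	}
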
@@ -1149,6 +1189,14 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
 		skb = NULL;
 		goto unlock;
 	}
+	/* Synchronize with parallel link if applicable */
+	if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) {
+		link_handle_out_of_seq_msg(l_ptr, skb);
+		if (link_synch(l_ptr))
+			link_retrieve_defq(l_ptr, &head);
+		skb = NULL;
+		goto unlock;
+	}
 	l_ptr->next_in_no++;
 	if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
 		link_retrieve_defq(l_ptr, &head);
@@ -1224,6 +1272,10 @@ static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
 
 	switch (msg_user(msg)) {
 	case CHANGEOVER_PROTOCOL:
+		if (msg_dup(msg)) {
+			link->flags |= LINK_SYNCHING;
+			link->synch_point = msg_seqno(msg_get_wrapped(msg));
+		}
 		if (!tipc_link_tunnel_rcv(node, &skb))
 			break;
 		if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
@@ -1610,6 +1662,7 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
 	tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL,
 		      ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
 	skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
+	tipc_link_purge_backlog(l_ptr);
 	msgcount = skb_queue_len(&l_ptr->transmq);
 	msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
 	msg_set_msgcnt(&tunnel_hdr, msgcount);
@@ -1817,11 +1870,11 @@ void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
 	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->max_pkt / ITEM_SIZE);
 
 	l->window = win;
-	l->queue_limit[TIPC_LOW_IMPORTANCE] = win / 2;
-	l->queue_limit[TIPC_MEDIUM_IMPORTANCE] = win;
-	l->queue_limit[TIPC_HIGH_IMPORTANCE] = win / 2 * 3;
-	l->queue_limit[TIPC_CRITICAL_IMPORTANCE] = win * 2;
-	l->queue_limit[TIPC_SYSTEM_IMPORTANCE] = max_bulk;
+	l->backlog[TIPC_LOW_IMPORTANCE].limit = win / 2;
+	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = win;
+	l->backlog[TIPC_HIGH_IMPORTANCE].limit = win / 2 * 3;
+	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
+	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
 }
 
 /* tipc_link_find_owner - locate owner node of link by link's name
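
The netlink window attribute now reports l->window directly, and the backlog limits above are fixed ratios of it. A standalone check of those ratios, assuming the default link window of 50; the max_bulk inputs (publication count, packet and item sizes) are illustrative only:

	#include <stdio.h>

	int main(void)
	{
		unsigned int win = 50;			/* assumed default link window */
		unsigned int max_pkt = 1500, item_size = 20, max_publ = 65535;
		unsigned int max_bulk = max_publ / (max_pkt / item_size);

		printf("low:      %u\n", win / 2);	/* 25 */
		printf("medium:   %u\n", win);		/* 50 */
		printf("high:     %u\n", win / 2 * 3);	/* 75 */
		printf("critical: %u\n", win * 2);	/* 100 */
		printf("system:   %u\n", max_bulk);	/* room for bulk name-table bundles */
		return 0;
	}
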
@@ -2120,7 +2173,7 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
2120 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance)) 2173 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
2121 goto prop_msg_full; 2174 goto prop_msg_full;
2122 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, 2175 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
2123 link->queue_limit[TIPC_LOW_IMPORTANCE])) 2176 link->window))
2124 goto prop_msg_full; 2177 goto prop_msg_full;
2125 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority)) 2178 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
2126 goto prop_msg_full; 2179 goto prop_msg_full;
diff --git a/net/tipc/link.h b/net/tipc/link.h
index eec3ecf2d450..d2b5663643da 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -60,6 +60,7 @@
  */
 #define LINK_STARTED     0x0001
 #define LINK_STOPPED     0x0002
+#define LINK_SYNCHING    0x0004
 
 /* Starting value for maximum packet size negotiation on unicast links
  * (unless bearer MTU is less)
@@ -118,7 +119,7 @@ struct tipc_stats {
  * @pmsg: convenience pointer to "proto_msg" field
  * @priority: current link priority
  * @net_plane: current link network plane ('A' through 'H')
- * @queue_limit: outbound message queue congestion thresholds (indexed by user)
+ * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
  * @exp_msg_count: # of tunnelled messages expected during link changeover
  * @reset_checkpoint: seq # of last acknowledged message at time of link reset
  * @max_pkt: current maximum packet size for this link
@@ -166,11 +167,11 @@ struct tipc_link {
 	struct tipc_msg *pmsg;
 	u32 priority;
 	char net_plane;
-	u32 queue_limit[15];	/* queue_limit[0]==window limit */
 
 	/* Changeover */
 	u32 exp_msg_count;
 	u32 reset_checkpoint;
+	u32 synch_point;
 
 	/* Max packet negotiation */
 	u32 max_pkt;
@@ -180,6 +181,10 @@ struct tipc_link {
 	/* Sending */
 	struct sk_buff_head transmq;
 	struct sk_buff_head backlogq;
+	struct {
+		u16 len;
+		u16 limit;
+	} backlog[5];
 	u32 next_out_no;
 	u32 window;
 	u32 last_retransmitted;
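
The five-slot backlog[] array replaces the old fifteen-slot queue_limit[]: one {len, limit} pair per importance level, with len incremented in __tipc_link_xmit() and decremented in tipc_link_push_packets(). A minimal standalone sketch of that counting discipline:

	#include <assert.h>

	enum { TIPC_LOW_IMPORTANCE, TIPC_MEDIUM_IMPORTANCE, TIPC_HIGH_IMPORTANCE,
	       TIPC_CRITICAL_IMPORTANCE, TIPC_SYSTEM_IMPORTANCE };

	struct backlog { unsigned short len, limit; };

	/* enqueue on backlogq: accept only below the per-importance limit */
	static int backlog_add(struct backlog *b, int imp)
	{
		if (b[imp].len >= b[imp].limit)
			return -1;	/* caller schedules a wakeup instead */
		b[imp].len++;
		return 0;
	}

	/* dequeue towards transmq: the counter must mirror the queue */
	static void backlog_del(struct backlog *b, int imp)
	{
		assert(b[imp].len > 0);
		b[imp].len--;
	}

	int main(void)
	{
		struct backlog b[5] = { {0, 25}, {0, 50}, {0, 75}, {0, 100}, {0, 100} };

		assert(backlog_add(b, TIPC_LOW_IMPORTANCE) == 0);
		backlog_del(b, TIPC_LOW_IMPORTANCE);
		assert(b[TIPC_LOW_IMPORTANCE].len == 0);
		return 0;
	}
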
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index bd3969a80dd4..d273207ede28 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -240,6 +240,15 @@ static inline void msg_set_size(struct tipc_msg *m, u32 sz)
 	m->hdr[0] = htonl((msg_word(m, 0) & ~0x1ffff) | sz);
 }
 
+static inline unchar *msg_data(struct tipc_msg *m)
+{
+	return ((unchar *)m) + msg_hdr_sz(m);
+}
+
+static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
+{
+	return (struct tipc_msg *)msg_data(m);
+}
+
 /*
  * Word 1
@@ -372,6 +381,8 @@ static inline void msg_set_prevnode(struct tipc_msg *m, u32 a)
 
 static inline u32 msg_origport(struct tipc_msg *m)
 {
+	if (msg_user(m) == MSG_FRAGMENTER)
+		m = msg_get_wrapped(m);
 	return msg_word(m, 4);
 }
 
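
Only the first fragment of a fragmented message carries the original header, wrapped as payload behind the fragment header, so msg_origport() must unwrap before reading word 4. A toy, self-contained illustration; the field layout is simplified and does not match the real TIPC wire format:

	#include <stdio.h>
	#include <stddef.h>
	#include <string.h>

	/* Toy header: 'user' and 'origport' words followed by payload. */
	struct toy_msg {
		unsigned int user;		/* stands in for msg_user() */
		unsigned int origport;
		unsigned char data[64];		/* wrapped inner header lives here */
	};

	static unsigned int toy_origport(const struct toy_msg *m, unsigned int frag_user)
	{
		unsigned int inner;

		if (m->user != frag_user)
			return m->origport;
		/* unwrap: the original header starts at the data offset */
		memcpy(&inner, m->data + offsetof(struct toy_msg, origport), sizeof(inner));
		return inner;
	}

	int main(void)
	{
		struct toy_msg inner = { .user = 0, .origport = 4242 };
		struct toy_msg frag  = { .user = 12, .origport = 0 };	/* fragmenter */

		memcpy(frag.data, &inner, 8);	/* wrap just the two header words */
		printf("%u\n", toy_origport(&frag, 12));	/* prints 4242 */
		return 0;
	}
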
@@ -467,16 +478,6 @@ static inline void msg_set_nameupper(struct tipc_msg *m, u32 n)
 	msg_set_word(m, 10, n);
 }
 
-static inline unchar *msg_data(struct tipc_msg *m)
-{
-	return ((unchar *)m) + msg_hdr_sz(m);
-}
-
-static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
-{
-	return (struct tipc_msg *)msg_data(m);
-}
-
 /*
  * Constants and routines used to read and write TIPC internal message headers
  */
@@ -553,6 +554,14 @@ static inline void msg_set_node_capabilities(struct tipc_msg *m, u32 n)
 	msg_set_bits(m, 1, 15, 0x1fff, n);
 }
 
+static inline bool msg_dup(struct tipc_msg *m)
+{
+	if (likely(msg_user(m) != CHANGEOVER_PROTOCOL))
+		return false;
+	if (msg_type(m) != DUPLICATE_MSG)
+		return false;
+	return true;
+}
+
 /*
  * Word 2
@@ -753,13 +762,6 @@ static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n)
 	msg_set_bits(m, 9, 0, 0xffff, n);
 }
 
-static inline u32 msg_tot_origport(struct tipc_msg *m)
-{
-	if ((msg_user(m) == MSG_FRAGMENTER) && (msg_type(m) == FIRST_FRAGMENT))
-		return msg_origport(msg_get_wrapped(m));
-	return msg_origport(m);
-}
-
 struct sk_buff *tipc_buf_acquire(u32 size);
 bool tipc_msg_validate(struct sk_buff *skb);
 bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode,