author	Jon Paul Maloy <jon.maloy@ericsson.com>	2015-05-14 10:46:18 -0400
committer	David S. Miller <davem@davemloft.net>	2015-05-14 12:24:46 -0400
commit	dd3f9e70f59f43a5712eba9cf3ee4f1e6999540c (patch)
tree	89be13ccd5e7dc4c904f28fde9ebf9ee17f1bb0a /net/tipc/link.c
parent	f21e897eccb5a236f4191ecc1b4391eda895d6ed (diff)
tipc: add packet sequence number at instant of transmission
Currently, the packet sequence number is updated and added to each packet at the moment the packet is added to the link backlog queue. This is wasteful, since it forces the code to traverse the send packet list packet by packet while adding them to the backlog queue. It would be better to just splice the whole packet list into the backlog queue when that is the right action to do.

In this commit, we make this change. Since the sequence numbers can now no longer be assigned to the packets at the moment they are added to the backlog queue, we instead calculate and add them at the moment of transmission, when the backlog queue has to be traversed anyway. We do this in the function tipc_link_push_packets().

Reviewed-by: Erik Hugne <erik.hugne@ericsson.com>
Reviewed-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
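To make the two mechanics concrete outside the kernel, here is a minimal userspace C sketch; it is NOT kernel code. The pkt, queue and link types and the xmit()/push_packets() helpers are hypothetical stand-ins for sk_buff, sk_buff_head and struct tipc_link, not TIPC's real data structures. It models (1) splicing the surplus send list into the backlog in one O(1) operation, leaving those packets unnumbered, and (2) stamping sequence numbers only at the instant of transmission.

/* Minimal userspace sketch, NOT kernel code: pkt, queue and link are
 * toy stand-ins for sk_buff, sk_buff_head and struct tipc_link. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt { uint16_t seqno; struct pkt *next; };
struct queue { struct pkt *head, *tail; int len; };
struct link { struct queue transmq, backlogq; int window; uint16_t snd_nxt; };

static void q_tail(struct queue *q, struct pkt *p)
{
	p->next = NULL;
	if (q->tail)
		q->tail->next = p;
	else
		q->head = p;
	q->tail = p;
	q->len++;
}

static struct pkt *q_dequeue(struct queue *q)
{
	struct pkt *p = q->head;

	if (!p)
		return NULL;
	if (!(q->head = p->next))
		q->tail = NULL;
	q->len--;
	return p;
}

/* O(1) splice of all of @from onto the tail of @to, in the spirit of
 * skb_queue_splice_tail_init(): no per-packet walk. */
static void q_splice_tail_init(struct queue *from, struct queue *to)
{
	if (!from->head)
		return;
	if (to->tail)
		to->tail->next = from->head;
	else
		to->head = from->head;
	to->tail = from->tail;
	to->len += from->len;
	from->head = from->tail = NULL;
	from->len = 0;
}

/* Send what fits into the send window; splice the surplus into the
 * backlog as one unit, with no sequence numbers assigned yet. */
static void xmit(struct link *l, struct queue *list)
{
	struct pkt *p;

	while (l->transmq.len < l->window && (p = q_dequeue(list))) {
		p->seqno = l->snd_nxt++;	/* numbered only when sent */
		q_tail(&l->transmq, p);
	}
	q_splice_tail_init(list, &l->backlogq);
}

/* When window space opens up, number backlogged packets at the
 * instant of transmission, as tipc_link_push_packets() now does. */
static void push_packets(struct link *l)
{
	struct pkt *p;

	while (l->transmq.len < l->window &&
	       (p = q_dequeue(&l->backlogq))) {
		p->seqno = l->snd_nxt++;	/* u16 wraps like mod() */
		q_tail(&l->transmq, p);
	}
}

int main(void)
{
	struct link l = { .window = 2 };
	struct queue list = { 0 };

	for (int i = 0; i < 5; i++)
		q_tail(&list, calloc(1, sizeof(struct pkt)));
	xmit(&l, &list);		/* sends seqnos 0-1, backlogs 3 packets */
	l.transmq = (struct queue){ 0 };	/* pretend the window drained */
	push_packets(&l);		/* stamps seqnos 2-3 at transmission */
	printf("next seqno: %u, backlogged: %d\n", l.snd_nxt, l.backlogq.len);
	return 0;
}

Because snd_nxt is a u16, it wraps modulo 2^16 on its own, matching the effect of the mod() arithmetic in the patch; and because the surplus is spliced rather than walked, backlogging a send list costs the same regardless of its length.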
Diffstat (limited to 'net/tipc/link.c')
-rw-r--r--	net/tipc/link.c	37
1 file changed, 28 insertions(+), 9 deletions(-)
diff --git a/net/tipc/link.c b/net/tipc/link.c
index c1aba697776f..fb2a003c8e6d 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -653,7 +653,7 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link,
 	struct tipc_media_addr *addr = &link->media_addr;
 	struct sk_buff_head *transmq = &link->transmq;
 	struct sk_buff_head *backlogq = &link->backlogq;
-	struct sk_buff *skb, *tmp;
+	struct sk_buff *skb, *bskb;
 
 	/* Match msg importance against this and all higher backlog limits: */
 	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
@@ -665,32 +665,36 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link,
 		return -EMSGSIZE;
 	}
 	/* Prepare each packet for sending, and add to relevant queue: */
-	skb_queue_walk_safe(list, skb, tmp) {
-		__skb_unlink(skb, list);
+	while (skb_queue_len(list)) {
+		skb = skb_peek(list);
 		msg = buf_msg(skb);
 		msg_set_seqno(msg, seqno);
 		msg_set_ack(msg, ack);
 		msg_set_bcast_ack(msg, bc_last_in);
 
 		if (likely(skb_queue_len(transmq) < maxwin)) {
+			__skb_dequeue(list);
 			__skb_queue_tail(transmq, skb);
 			tipc_bearer_send(net, link->bearer_id, skb, addr);
 			link->rcv_unacked = 0;
 			seqno++;
 			continue;
 		}
-		if (tipc_msg_bundle(skb_peek_tail(backlogq), skb, mtu)) {
+		if (tipc_msg_bundle(skb_peek_tail(backlogq), msg, mtu)) {
+			kfree_skb(__skb_dequeue(list));
 			link->stats.sent_bundled++;
 			continue;
 		}
-		if (tipc_msg_make_bundle(&skb, mtu, link->addr)) {
+		if (tipc_msg_make_bundle(&bskb, msg, mtu, link->addr)) {
+			kfree_skb(__skb_dequeue(list));
+			__skb_queue_tail(backlogq, bskb);
+			link->backlog[msg_importance(buf_msg(bskb))].len++;
 			link->stats.sent_bundled++;
 			link->stats.sent_bundles++;
-			imp = msg_importance(buf_msg(skb));
+			continue;
 		}
-		__skb_queue_tail(backlogq, skb);
-		link->backlog[imp].len++;
-		seqno++;
+		link->backlog[imp].len += skb_queue_len(list);
+		skb_queue_splice_tail_init(list, backlogq);
 	}
 	link->snd_nxt = seqno;
 	return 0;
@@ -822,6 +826,7 @@ void tipc_link_push_packets(struct tipc_link *link)
 {
 	struct sk_buff *skb;
 	struct tipc_msg *msg;
+	u16 seqno = link->snd_nxt;
 	u16 ack = mod(link->rcv_nxt - 1);
 
 	while (skb_queue_len(&link->transmq) < link->window) {
@@ -831,12 +836,15 @@ void tipc_link_push_packets(struct tipc_link *link)
 		msg = buf_msg(skb);
 		link->backlog[msg_importance(msg)].len--;
 		msg_set_ack(msg, ack);
+		msg_set_seqno(msg, seqno);
+		seqno = mod(seqno + 1);
 		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
 		link->rcv_unacked = 0;
 		__skb_queue_tail(&link->transmq, skb);
 		tipc_bearer_send(link->owner->net, link->bearer_id,
 				 skb, &link->media_addr);
 	}
+	link->snd_nxt = seqno;
 }
 
 void tipc_link_reset_all(struct tipc_node *node)
@@ -1526,6 +1534,11 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
 
 	tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL,
 		      FAILOVER_MSG, INT_H_SIZE, l_ptr->addr);
+
+	skb_queue_walk(&l_ptr->backlogq, skb) {
+		msg_set_seqno(buf_msg(skb), l_ptr->snd_nxt);
+		l_ptr->snd_nxt = mod(l_ptr->snd_nxt + 1);
+	}
 	skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
 	tipc_link_purge_backlog(l_ptr);
 	msgcount = skb_queue_len(&l_ptr->transmq);
@@ -1586,6 +1599,7 @@ void tipc_link_dup_queue_xmit(struct tipc_link *link,
 	struct tipc_msg tnl_hdr;
 	struct sk_buff_head *queue = &link->transmq;
 	int mcnt;
+	u16 seqno;
 
 	tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL,
 		      SYNCH_MSG, INT_H_SIZE, link->addr);
@@ -1617,6 +1631,11 @@ tunnel_queue:
 	}
 	if (queue == &link->backlogq)
 		return;
+	seqno = link->snd_nxt;
+	skb_queue_walk(&link->backlogq, skb) {
+		msg_set_seqno(buf_msg(skb), seqno);
+		seqno = mod(seqno + 1);
+	}
 	queue = &link->backlogq;
 	goto tunnel_queue;
 }