summaryrefslogtreecommitdiffstats
path: root/net/tipc
diff options
context:
space:
mode:
author	Jon Paul Maloy <jon.maloy@ericsson.com>	2015-05-14 10:46:18 -0400
committer	David S. Miller <davem@davemloft.net>	2015-05-14 12:24:46 -0400
commit	dd3f9e70f59f43a5712eba9cf3ee4f1e6999540c (patch)
tree	89be13ccd5e7dc4c904f28fde9ebf9ee17f1bb0a /net/tipc
parent	f21e897eccb5a236f4191ecc1b4391eda895d6ed (diff)
tipc: add packet sequence number at instant of transmission
Currently, the packet sequence number is updated and added to each packet at the moment a packet is added to the link backlog queue. This is wasteful, since it forces the code to traverse the send packet list packet by packet when adding them to the backlog queue. It would be better to just splice the whole packet list into the backlog queue when that is the right action to do.

In this commit, we do this change. Also, since the sequence numbers cannot now be assigned to the packets at the moment they are added to the backlog queue, we instead calculate and add them at the moment of transmission, when the backlog queue has to be traversed anyway. We do this in the function tipc_link_push_packet().

Reviewed-by: Erik Hugne <erik.hugne@ericsson.com>
Reviewed-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/tipc')
-rw-r--r--	net/tipc/bcast.c	6
-rw-r--r--	net/tipc/link.c	37
-rw-r--r--	net/tipc/msg.c	44
-rw-r--r--	net/tipc/msg.h	6
-rw-r--r--	net/tipc/node.c	2
5 files changed, 54 insertions(+), 41 deletions(-)
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 842e19f6abf6..4906ca3c0f3a 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -115,12 +115,8 @@ static void bclink_set_last_sent(struct net *net)
115{ 115{
116 struct tipc_net *tn = net_generic(net, tipc_net_id); 116 struct tipc_net *tn = net_generic(net, tipc_net_id);
117 struct tipc_link *bcl = tn->bcl; 117 struct tipc_link *bcl = tn->bcl;
118 struct sk_buff *skb = skb_peek(&bcl->backlogq);
119 118
120 if (skb) 119 bcl->silent_intv_cnt = mod(bcl->snd_nxt - 1);
121 bcl->silent_intv_cnt = mod(buf_seqno(skb) - 1);
122 else
123 bcl->silent_intv_cnt = mod(bcl->snd_nxt - 1);
124} 120}
125 121
126u32 tipc_bclink_get_last_sent(struct net *net) 122u32 tipc_bclink_get_last_sent(struct net *net)
diff --git a/net/tipc/link.c b/net/tipc/link.c
index c1aba697776f..fb2a003c8e6d 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -653,7 +653,7 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link,
653 struct tipc_media_addr *addr = &link->media_addr; 653 struct tipc_media_addr *addr = &link->media_addr;
654 struct sk_buff_head *transmq = &link->transmq; 654 struct sk_buff_head *transmq = &link->transmq;
655 struct sk_buff_head *backlogq = &link->backlogq; 655 struct sk_buff_head *backlogq = &link->backlogq;
656 struct sk_buff *skb, *tmp; 656 struct sk_buff *skb, *bskb;
657 657
658 /* Match msg importance against this and all higher backlog limits: */ 658 /* Match msg importance against this and all higher backlog limits: */
659 for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) { 659 for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
@@ -665,32 +665,36 @@ int __tipc_link_xmit(struct net *net, struct tipc_link *link,
665 return -EMSGSIZE; 665 return -EMSGSIZE;
666 } 666 }
667 /* Prepare each packet for sending, and add to relevant queue: */ 667 /* Prepare each packet for sending, and add to relevant queue: */
668 skb_queue_walk_safe(list, skb, tmp) { 668 while (skb_queue_len(list)) {
669 __skb_unlink(skb, list); 669 skb = skb_peek(list);
670 msg = buf_msg(skb); 670 msg = buf_msg(skb);
671 msg_set_seqno(msg, seqno); 671 msg_set_seqno(msg, seqno);
672 msg_set_ack(msg, ack); 672 msg_set_ack(msg, ack);
673 msg_set_bcast_ack(msg, bc_last_in); 673 msg_set_bcast_ack(msg, bc_last_in);
674 674
675 if (likely(skb_queue_len(transmq) < maxwin)) { 675 if (likely(skb_queue_len(transmq) < maxwin)) {
676 __skb_dequeue(list);
676 __skb_queue_tail(transmq, skb); 677 __skb_queue_tail(transmq, skb);
677 tipc_bearer_send(net, link->bearer_id, skb, addr); 678 tipc_bearer_send(net, link->bearer_id, skb, addr);
678 link->rcv_unacked = 0; 679 link->rcv_unacked = 0;
679 seqno++; 680 seqno++;
680 continue; 681 continue;
681 } 682 }
682 if (tipc_msg_bundle(skb_peek_tail(backlogq), skb, mtu)) { 683 if (tipc_msg_bundle(skb_peek_tail(backlogq), msg, mtu)) {
684 kfree_skb(__skb_dequeue(list));
683 link->stats.sent_bundled++; 685 link->stats.sent_bundled++;
684 continue; 686 continue;
685 } 687 }
686 if (tipc_msg_make_bundle(&skb, mtu, link->addr)) { 688 if (tipc_msg_make_bundle(&bskb, msg, mtu, link->addr)) {
689 kfree_skb(__skb_dequeue(list));
690 __skb_queue_tail(backlogq, bskb);
691 link->backlog[msg_importance(buf_msg(bskb))].len++;
687 link->stats.sent_bundled++; 692 link->stats.sent_bundled++;
688 link->stats.sent_bundles++; 693 link->stats.sent_bundles++;
689 imp = msg_importance(buf_msg(skb)); 694 continue;
690 } 695 }
691 __skb_queue_tail(backlogq, skb); 696 link->backlog[imp].len += skb_queue_len(list);
692 link->backlog[imp].len++; 697 skb_queue_splice_tail_init(list, backlogq);
693 seqno++;
694 } 698 }
695 link->snd_nxt = seqno; 699 link->snd_nxt = seqno;
696 return 0; 700 return 0;
@@ -822,6 +826,7 @@ void tipc_link_push_packets(struct tipc_link *link)
822{ 826{
823 struct sk_buff *skb; 827 struct sk_buff *skb;
824 struct tipc_msg *msg; 828 struct tipc_msg *msg;
829 u16 seqno = link->snd_nxt;
825 u16 ack = mod(link->rcv_nxt - 1); 830 u16 ack = mod(link->rcv_nxt - 1);
826 831
827 while (skb_queue_len(&link->transmq) < link->window) { 832 while (skb_queue_len(&link->transmq) < link->window) {
@@ -831,12 +836,15 @@ void tipc_link_push_packets(struct tipc_link *link)
831 msg = buf_msg(skb); 836 msg = buf_msg(skb);
832 link->backlog[msg_importance(msg)].len--; 837 link->backlog[msg_importance(msg)].len--;
833 msg_set_ack(msg, ack); 838 msg_set_ack(msg, ack);
839 msg_set_seqno(msg, seqno);
840 seqno = mod(seqno + 1);
834 msg_set_bcast_ack(msg, link->owner->bclink.last_in); 841 msg_set_bcast_ack(msg, link->owner->bclink.last_in);
835 link->rcv_unacked = 0; 842 link->rcv_unacked = 0;
836 __skb_queue_tail(&link->transmq, skb); 843 __skb_queue_tail(&link->transmq, skb);
837 tipc_bearer_send(link->owner->net, link->bearer_id, 844 tipc_bearer_send(link->owner->net, link->bearer_id,
838 skb, &link->media_addr); 845 skb, &link->media_addr);
839 } 846 }
847 link->snd_nxt = seqno;
840} 848}
841 849
842void tipc_link_reset_all(struct tipc_node *node) 850void tipc_link_reset_all(struct tipc_node *node)
@@ -1526,6 +1534,11 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
1526 1534
1527 tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL, 1535 tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL,
1528 FAILOVER_MSG, INT_H_SIZE, l_ptr->addr); 1536 FAILOVER_MSG, INT_H_SIZE, l_ptr->addr);
1537
1538 skb_queue_walk(&l_ptr->backlogq, skb) {
1539 msg_set_seqno(buf_msg(skb), l_ptr->snd_nxt);
1540 l_ptr->snd_nxt = mod(l_ptr->snd_nxt + 1);
1541 }
1529 skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq); 1542 skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq);
1530 tipc_link_purge_backlog(l_ptr); 1543 tipc_link_purge_backlog(l_ptr);
1531 msgcount = skb_queue_len(&l_ptr->transmq); 1544 msgcount = skb_queue_len(&l_ptr->transmq);
@@ -1586,6 +1599,7 @@ void tipc_link_dup_queue_xmit(struct tipc_link *link,
1586 struct tipc_msg tnl_hdr; 1599 struct tipc_msg tnl_hdr;
1587 struct sk_buff_head *queue = &link->transmq; 1600 struct sk_buff_head *queue = &link->transmq;
1588 int mcnt; 1601 int mcnt;
1602 u16 seqno;
1589 1603
1590 tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL, 1604 tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL,
1591 SYNCH_MSG, INT_H_SIZE, link->addr); 1605 SYNCH_MSG, INT_H_SIZE, link->addr);
@@ -1617,6 +1631,11 @@ tunnel_queue:
1617 } 1631 }
1618 if (queue == &link->backlogq) 1632 if (queue == &link->backlogq)
1619 return; 1633 return;
1634 seqno = link->snd_nxt;
1635 skb_queue_walk(&link->backlogq, skb) {
1636 msg_set_seqno(buf_msg(skb), seqno);
1637 seqno = mod(seqno + 1);
1638 }
1620 queue = &link->backlogq; 1639 queue = &link->backlogq;
1621 goto tunnel_queue; 1640 goto tunnel_queue;
1622} 1641}
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index ff7362d40cb3..08b4cc7d496d 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -331,16 +331,15 @@ error:
331 331
332/** 332/**
333 * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one 333 * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
334 * @bskb: the buffer to append to ("bundle") 334 * @skb: the buffer to append to ("bundle")
335 * @skb: buffer to be appended 335 * @msg: message to be appended
336 * @mtu: max allowable size for the bundle buffer 336 * @mtu: max allowable size for the bundle buffer
337 * Consumes buffer if successful 337 * Consumes buffer if successful
338 * Returns true if bundling could be performed, otherwise false 338 * Returns true if bundling could be performed, otherwise false
339 */ 339 */
340bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu) 340bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu)
341{ 341{
342 struct tipc_msg *bmsg; 342 struct tipc_msg *bmsg;
343 struct tipc_msg *msg = buf_msg(skb);
344 unsigned int bsz; 343 unsigned int bsz;
345 unsigned int msz = msg_size(msg); 344 unsigned int msz = msg_size(msg);
346 u32 start, pad; 345 u32 start, pad;
@@ -348,9 +347,9 @@ bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu)
348 347
349 if (likely(msg_user(msg) == MSG_FRAGMENTER)) 348 if (likely(msg_user(msg) == MSG_FRAGMENTER))
350 return false; 349 return false;
351 if (!bskb) 350 if (!skb)
352 return false; 351 return false;
353 bmsg = buf_msg(bskb); 352 bmsg = buf_msg(skb);
354 bsz = msg_size(bmsg); 353 bsz = msg_size(bmsg);
355 start = align(bsz); 354 start = align(bsz);
356 pad = start - bsz; 355 pad = start - bsz;
@@ -359,9 +358,9 @@ bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu)
359 return false; 358 return false;
360 if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) 359 if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
361 return false; 360 return false;
362 if (likely(msg_user(bmsg) != MSG_BUNDLER)) 361 if (unlikely(msg_user(bmsg) != MSG_BUNDLER))
363 return false; 362 return false;
364 if (unlikely(skb_tailroom(bskb) < (pad + msz))) 363 if (unlikely(skb_tailroom(skb) < (pad + msz)))
365 return false; 364 return false;
366 if (unlikely(max < (start + msz))) 365 if (unlikely(max < (start + msz)))
367 return false; 366 return false;
@@ -369,11 +368,10 @@ bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu)
369 (msg_importance(bmsg) == TIPC_SYSTEM_IMPORTANCE)) 368 (msg_importance(bmsg) == TIPC_SYSTEM_IMPORTANCE))
370 return false; 369 return false;
371 370
372 skb_put(bskb, pad + msz); 371 skb_put(skb, pad + msz);
373 skb_copy_to_linear_data_offset(bskb, start, skb->data, msz); 372 skb_copy_to_linear_data_offset(skb, start, msg, msz);
374 msg_set_size(bmsg, start + msz); 373 msg_set_size(bmsg, start + msz);
375 msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1); 374 msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
376 kfree_skb(skb);
377 return true; 375 return true;
378} 376}
379 377
@@ -419,18 +417,18 @@ none:
419 417
420/** 418/**
421 * tipc_msg_make_bundle(): Create bundle buf and append message to its tail 419 * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
422 * @list: the buffer chain 420 * @list: the buffer chain, where head is the buffer to replace/append
423 * @skb: buffer to be appended and replaced 421 * @skb: buffer to be created, appended to and returned in case of success
422 * @msg: message to be appended
424 * @mtu: max allowable size for the bundle buffer, inclusive header 423 * @mtu: max allowable size for the bundle buffer, inclusive header
425 * @dnode: destination node for message. (Not always present in header) 424 * @dnode: destination node for message. (Not always present in header)
426 * Replaces buffer if successful
427 * Returns true if success, otherwise false 425 * Returns true if success, otherwise false
428 */ 426 */
429bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode) 427bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
428 u32 mtu, u32 dnode)
430{ 429{
431 struct sk_buff *bskb; 430 struct sk_buff *_skb;
432 struct tipc_msg *bmsg; 431 struct tipc_msg *bmsg;
433 struct tipc_msg *msg = buf_msg(*skb);
434 u32 msz = msg_size(msg); 432 u32 msz = msg_size(msg);
435 u32 max = mtu - INT_H_SIZE; 433 u32 max = mtu - INT_H_SIZE;
436 434
@@ -443,12 +441,12 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode)
443 if (msz > (max / 2)) 441 if (msz > (max / 2))
444 return false; 442 return false;
445 443
446 bskb = tipc_buf_acquire(max); 444 _skb = tipc_buf_acquire(max);
447 if (!bskb) 445 if (!_skb)
448 return false; 446 return false;
449 447
450 skb_trim(bskb, INT_H_SIZE); 448 skb_trim(_skb, INT_H_SIZE);
451 bmsg = buf_msg(bskb); 449 bmsg = buf_msg(_skb);
452 tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0, 450 tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
453 INT_H_SIZE, dnode); 451 INT_H_SIZE, dnode);
454 if (msg_isdata(msg)) 452 if (msg_isdata(msg))
@@ -458,8 +456,8 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode)
458 msg_set_seqno(bmsg, msg_seqno(msg)); 456 msg_set_seqno(bmsg, msg_seqno(msg));
459 msg_set_ack(bmsg, msg_ack(msg)); 457 msg_set_ack(bmsg, msg_ack(msg));
460 msg_set_bcast_ack(bmsg, msg_bcast_ack(msg)); 458 msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
461 tipc_msg_bundle(bskb, *skb, mtu); 459 tipc_msg_bundle(_skb, msg, mtu);
462 *skb = bskb; 460 *skb = _skb;
463 return true; 461 return true;
464} 462}
465 463
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 6caf16c475e0..19c45fb66238 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -776,9 +776,9 @@ struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
776 uint data_sz, u32 dnode, u32 onode, 776 uint data_sz, u32 dnode, u32 onode,
777 u32 dport, u32 oport, int errcode); 777 u32 dport, u32 oport, int errcode);
778int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf); 778int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
779bool tipc_msg_bundle(struct sk_buff *bskb, struct sk_buff *skb, u32 mtu); 779bool tipc_msg_bundle(struct sk_buff *skb, struct tipc_msg *msg, u32 mtu);
780 780bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
781bool tipc_msg_make_bundle(struct sk_buff **skb, u32 mtu, u32 dnode); 781 u32 mtu, u32 dnode);
782bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos); 782bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos);
783int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, 783int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
784 int offset, int dsz, int mtu, struct sk_buff_head *list); 784 int offset, int dsz, int mtu, struct sk_buff_head *list);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index eb3856bb8c5a..0b1d61a5f853 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/node.c: TIPC node management routines 2 * net/tipc/node.c: TIPC node management routines
3 * 3 *
4 * Copyright (c) 2000-2006, 2012-2014, Ericsson AB 4 * Copyright (c) 2000-2006, 2012-2015, Ericsson AB
5 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems 5 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *