aboutsummaryrefslogtreecommitdiffstats
path: root/net/tipc
diff options
context:
space:
mode:
authorJon Paul Maloy <jon.maloy@ericsson.com>2016-08-16 11:53:51 -0400
committerDavid S. Miller <davem@davemloft.net>2016-08-19 00:14:37 -0400
commit5a0950c27236dc6f6a3e9d13259c1a2e89fd1cf7 (patch)
tree5be9dff97c6e466c0956df3a5bc1f60581f2758c /net/tipc
parent0d051bf93c0640483788db56dfc118d307f8893b (diff)
tipc: ensure that link congestion and wakeup use same criteria
When a link is attempted woken up after congestion, it uses a different, more generous criteria than when it was originally declared congested. This has the effect that the link, and the sending process, sometimes will be woken up unnecessarily, just to immediately return to congestion when it turns out there is not enough space in its send queue to host the pending message. This is a waste of CPU cycles. We now change the function link_prepare_wakeup() to use exactly the same criteria as tipc_link_xmit(). However, since we are now excluding the window limit from the wakeup calculation, and the current backlog limit for the lowest level is too small to house even a single maximum-size message, we have to expand this limit. We do this by evaluating an alternative, minimum value during the setting of the importance limits. Acked-by: Ying Xue <ying.xue@windriver.com> Signed-off-by: Jon Maloy <jon.maloy@ericsson.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/tipc')
-rw-r--r--net/tipc/link.c18
1 file changed, 10 insertions, 8 deletions
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 877d94f34814..2c6e1b9e024b 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -807,7 +807,7 @@ void link_prepare_wakeup(struct tipc_link *l)
 
 	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
 		imp = TIPC_SKB_CB(skb)->chain_imp;
-		lim = l->window + l->backlog[imp].limit;
+		lim = l->backlog[imp].limit;
 		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
 		if ((pnd[imp] + l->backlog[imp].len) >= lim)
 			break;
@@ -873,9 +873,11 @@ int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
 	struct sk_buff *skb, *_skb, *bskb;
 
 	/* Match msg importance against this and all higher backlog limits: */
-	for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
-		if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
-			return link_schedule_user(l, list);
+	if (!skb_queue_empty(backlogq)) {
+		for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
+			if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
+				return link_schedule_user(l, list);
+		}
 	}
 	if (unlikely(msg_size(hdr) > mtu)) {
 		skb_queue_purge(list);
@@ -1692,10 +1694,10 @@ void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
 	int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);
 
 	l->window = win;
-	l->backlog[TIPC_LOW_IMPORTANCE].limit      = win / 2;
-	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = win;
-	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = win / 2 * 3;
-	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
+	l->backlog[TIPC_LOW_IMPORTANCE].limit      = max_t(u16, 50, win);
+	l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = max_t(u16, 100, win * 2);
+	l->backlog[TIPC_HIGH_IMPORTANCE].limit     = max_t(u16, 150, win * 3);
+	l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
 	l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
 }
 