author	Jon Paul Maloy <jon.maloy@ericsson.com>	2015-03-13 16:08:10 -0400
committer	David S. Miller <davem@davemloft.net>	2015-03-14 14:38:32 -0400
commit	05dcc5aa4dcced4f59f925625cea669e82b75519 (patch)
tree	0a516e1012ee7e9b7eee037d8e31278a425e7d68 /net/tipc/bcast.c
parent	2cdf3918e47e98c8f34f7a64455ea9fd433756e7 (diff)
tipc: split link outqueue
struct tipc_link contains a single queue for outgoing packets, where both
transmitted and waiting packets are queued. This infrastructure is hard to
maintain, because we need a number of fields to keep track of which packets
are sent or unsent, and of the number of packets in each category. A lot of
code becomes simpler if we split this queue into a transmission queue, where
sent/unacknowledged packets are kept, and a backlog queue, where we keep the
not yet sent packets.

In this commit we do this separation.

Reviewed-by: Erik Hugne <erik.hugne@ericsson.com>
Reviewed-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Jon Maloy <jon.maloy@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
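The core of the change, sketched below as a minimal illustration (the queue
field names are taken from the patch itself; the illustrative struct names
and everything else in struct tipc_link are elided): the single outqueue
with its next_out cursor becomes two queues whose membership alone says
whether a packet has been sent.

	#include <linux/skbuff.h>

	/* Before: one queue, with a cursor separating sent from unsent */
	struct link_queues_before {
		struct sk_buff_head outqueue;	/* sent and unsent packets */
		struct sk_buff *next_out;	/* first unsent packet, if any */
	};

	/* After: two queues; no cursor or bookkeeping counters needed */
	struct link_queues_after {
		struct sk_buff_head transmq;	/* sent, awaiting acknowledgment */
		struct sk_buff_head backlogq;	/* not yet sent */
	};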
Diffstat (limited to 'net/tipc/bcast.c')
 net/tipc/bcast.c | 48 ++++++++++++++++++++----------------------------
 1 file changed, 20 insertions(+), 28 deletions(-)
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 5ee5076a8b27..17cb0ff5f344 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -135,9 +135,10 @@ static void bclink_set_last_sent(struct net *net)
 {
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
 	struct tipc_link *bcl = tn->bcl;
+	struct sk_buff *skb = skb_peek(&bcl->backlogq);
 
-	if (bcl->next_out)
-		bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
+	if (skb)
+		bcl->fsm_msg_cnt = mod(buf_seqno(skb) - 1);
 	else
 		bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
 }
@@ -180,7 +181,7 @@ static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
 	struct sk_buff *skb;
 	struct tipc_link *bcl = tn->bcl;
 
-	skb_queue_walk(&bcl->outqueue, skb) {
+	skb_queue_walk(&bcl->transmq, skb) {
 		if (more(buf_seqno(skb), after)) {
 			tipc_link_retransmit(bcl, skb, mod(to - after));
 			break;
@@ -210,7 +211,6 @@ void tipc_bclink_wakeup_users(struct net *net)
 void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
 {
 	struct sk_buff *skb, *tmp;
-	struct sk_buff *next;
 	unsigned int released = 0;
 	struct net *net = n_ptr->net;
 	struct tipc_net *tn = net_generic(net, tipc_net_id);
@@ -221,7 +221,7 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
 	tipc_bclink_lock(net);
 
 	/* Bail out if tx queue is empty (no clean up is required) */
-	skb = skb_peek(&tn->bcl->outqueue);
+	skb = skb_peek(&tn->bcl->transmq);
 	if (!skb)
 		goto exit;
 
@@ -248,27 +248,19 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
 	}
 
 	/* Skip over packets that node has previously acknowledged */
-	skb_queue_walk(&tn->bcl->outqueue, skb) {
+	skb_queue_walk(&tn->bcl->transmq, skb) {
 		if (more(buf_seqno(skb), n_ptr->bclink.acked))
 			break;
 	}
 
 	/* Update packets that node is now acknowledging */
-	skb_queue_walk_from_safe(&tn->bcl->outqueue, skb, tmp) {
+	skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) {
 		if (more(buf_seqno(skb), acked))
 			break;
-
-		next = tipc_skb_queue_next(&tn->bcl->outqueue, skb);
-		if (skb != tn->bcl->next_out) {
-			bcbuf_decr_acks(skb);
-		} else {
-			bcbuf_set_acks(skb, 0);
-			tn->bcl->next_out = next;
-			bclink_set_last_sent(net);
-		}
-
+		bcbuf_decr_acks(skb);
+		bclink_set_last_sent(net);
 		if (bcbuf_acks(skb) == 0) {
-			__skb_unlink(skb, &tn->bcl->outqueue);
+			__skb_unlink(skb, &tn->bcl->transmq);
 			kfree_skb(skb);
 			released = 1;
 		}
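This hunk is where the split pays off most. In the old loop, the walk could
run past next_out into packets that had never been transmitted, so those
needed different treatment (reset the ack count, advance the cursor). With
transmq holding only transmitted packets, every entry can be handled
uniformly. A condensed, commented restatement of the new loop (a sketch;
the released flag and the per-iteration bclink_set_last_sent() call are
omitted):

	/* skb starts at the first packet not yet acked by this node */
	skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;				/* beyond the new ack */
		bcbuf_decr_acks(skb);			/* one fewer node waiting */
		if (bcbuf_acks(skb) == 0) {		/* all nodes have acked */
			__skb_unlink(skb, &tn->bcl->transmq);
			kfree_skb(skb);			/* packet can be freed */
		}
	}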
@@ -276,7 +268,7 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
 	n_ptr->bclink.acked = acked;
 
 	/* Try resolving broadcast link congestion, if necessary */
-	if (unlikely(tn->bcl->next_out)) {
+	if (unlikely(skb_peek(&tn->bcl->backlogq))) {
 		tipc_link_push_packets(tn->bcl);
 		bclink_set_last_sent(net);
 	}
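tipc_link_push_packets() itself lives in link.c and is outside this diff. A
hedged sketch of what resolving congestion plausibly amounts to after the
split (the window parameter and the omitted bearer send are assumptions of
this sketch, not shown by the patch): packets migrate from backlogq to
transmq as they are sent.

	/* Sketch only, not the link.c implementation: drain the backlog
	 * into the transmission queue, up to an assumed send window.
	 */
	static void push_backlog_sketch(struct tipc_link *l, u32 window)
	{
		struct sk_buff *skb;

		while (skb_queue_len(&l->transmq) < window) {
			skb = __skb_dequeue(&l->backlogq);
			if (!skb)
				break;			/* backlog drained */
			__skb_queue_tail(&l->transmq, skb);
			/* actual transmission via the bearer omitted */
		}
	}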
@@ -323,7 +315,7 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
 	buf = tipc_buf_acquire(INT_H_SIZE);
 	if (buf) {
 		struct tipc_msg *msg = buf_msg(buf);
-		struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferred_queue);
+		struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq);
 		u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;
 
 		tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
@@ -398,7 +390,7 @@ int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
 	if (likely(bclink->bcast_nodes.count)) {
 		rc = __tipc_link_xmit(net, bcl, list);
 		if (likely(!rc)) {
-			u32 len = skb_queue_len(&bcl->outqueue);
+			u32 len = skb_queue_len(&bcl->transmq);
 
 			bclink_set_last_sent(net);
 			bcl->stats.queue_sz_counts++;
@@ -563,25 +555,25 @@ receive:
 		if (node->bclink.last_in == node->bclink.last_sent)
 			goto unlock;
 
-		if (skb_queue_empty(&node->bclink.deferred_queue)) {
+		if (skb_queue_empty(&node->bclink.deferdq)) {
 			node->bclink.oos_state = 1;
 			goto unlock;
 		}
 
-		msg = buf_msg(skb_peek(&node->bclink.deferred_queue));
+		msg = buf_msg(skb_peek(&node->bclink.deferdq));
 		seqno = msg_seqno(msg);
 		next_in = mod(next_in + 1);
 		if (seqno != next_in)
 			goto unlock;
 
 		/* Take in-sequence message from deferred queue & deliver it */
-		buf = __skb_dequeue(&node->bclink.deferred_queue);
+		buf = __skb_dequeue(&node->bclink.deferdq);
 		goto receive;
 	}
 
 	/* Handle out-of-sequence broadcast message */
 	if (less(next_in, seqno)) {
-		deferred = tipc_link_defer_pkt(&node->bclink.deferred_queue,
+		deferred = tipc_link_defer_pkt(&node->bclink.deferdq,
 					       buf);
 		bclink_update_last_sent(node, seqno);
 		buf = NULL;
@@ -638,7 +630,6 @@ static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
 	msg_set_non_seq(msg, 1);
 	msg_set_mc_netid(msg, tn->net_id);
 	tn->bcl->stats.sent_info++;
-
 	if (WARN_ON(!bclink->bcast_nodes.count)) {
 		dump_stack();
 		return 0;
@@ -917,8 +908,9 @@ int tipc_bclink_init(struct net *net)
 	sprintf(bcbearer->media.name, "tipc-broadcast");
 
 	spin_lock_init(&bclink->lock);
-	__skb_queue_head_init(&bcl->outqueue);
-	__skb_queue_head_init(&bcl->deferred_queue);
+	__skb_queue_head_init(&bcl->transmq);
+	__skb_queue_head_init(&bcl->backlogq);
+	__skb_queue_head_init(&bcl->deferdq);
 	skb_queue_head_init(&bcl->wakeupq);
 	bcl->next_out_no = 1;
 	spin_lock_init(&bclink->node.lock);
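A note on the initialization idiom in this last hunk, with a minimal
self-contained example (this is general sk_buff_head API behavior, not
something the patch changes): the three link queues use
__skb_queue_head_init(), which sets up only the list head because the
broadcast link is serialized by bclink->lock initialized just above, while
wakeupq uses skb_queue_head_init(), which also initializes the queue's
built-in spinlock.

	#include <linux/skbuff.h>

	static struct sk_buff_head externally_locked_q;
	static struct sk_buff_head self_locked_q;

	static void queue_init_example(void)
	{
		/* list head only; the caller provides its own locking */
		__skb_queue_head_init(&externally_locked_q);
		/* list head plus the queue's internal spinlock */
		skb_queue_head_init(&self_locked_q);
	}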