author		Ying Xue <ying.xue@windriver.com>	2012-11-14 22:34:45 -0500
committer	Paul Gortmaker <paul.gortmaker@windriver.com>	2012-11-21 20:07:25 -0500
commit		3c294cb374bf7ad6f5c2763f994d75935fb7814d (patch)
tree		f85f567d4c69ae8dbbbdce32022d6f306cf7fc35
parent		7503115107e5862870eaf5133627051b2e23ac0a (diff)
tipc: remove the bearer congestion mechanism
Currently the TIPC bearer layer has the following congestion mechanism: once sending packets via a bearer fails, the bearer is immediately flagged as congested. While the bearer is congested, all packets arriving at a link are queued in the link's outgoing buffer. When we detect that the bearer congestion has eased (e.g. some packets are received from the bearer), we try our best to push the packets in the link's outgoing buffer until the buffer is empty or the bearer becomes congested again.

However, the TIPC bearer never receives any feedback from the device layer about whether a send was successful or not, so it must always assume it was successful. The bearer congestion mechanism as it exists today is therefore of no value.

The bearer blocking state is still useful, though. For example, when the physical media goes down or up, we need to change the state of the links bound to the bearer, so the code maintaining that state information is kept.

Signed-off-by: Ying Xue <ying.xue@windriver.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
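To make the change concrete, here is a minimal user-space sketch (not kernel code; the struct and function names are simplified stand-ins) of what the send path looks like once the congestion machinery is gone: the only remaining question a caller asks is whether the bearer is blocked, and a send is otherwise assumed to succeed.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for struct tipc_bearer: only the blocked flag remains. */
struct bearer {
	bool blocked;
};

/* Stand-in for b->media->send_msg(): it reports nothing back, mirroring the
 * fact that the real device layer gives TIPC no success/failure feedback. */
static void bearer_send(struct bearer *b, const char *pkt)
{
	(void)b;	/* media address etc. omitted in this sketch */
	printf("sent: %s\n", pkt);
}

/* Post-patch model of the link-level decision: check the blocked state,
 * otherwise send unconditionally; undelivered data is the link layer's
 * problem (retransmission), not the bearer's. */
static bool link_send(struct bearer *b, const char *pkt)
{
	if (b->blocked)
		return false;	/* caller keeps pkt on the link's out queue */
	bearer_send(b, pkt);
	return true;
}

int main(void)
{
	struct bearer eth = { .blocked = false };

	if (!link_send(&eth, "example packet"))
		printf("queued until tipc_continue() clears the blocked state\n");
	return 0;
}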
-rw-r--r--	net/tipc/bcast.c	21
-rw-r--r--	net/tipc/bearer.c	110
-rw-r--r--	net/tipc/bearer.h	24
-rw-r--r--	net/tipc/discover.c	2
-rw-r--r--	net/tipc/link.c		117
-rw-r--r--	net/tipc/link.h		4
6 files changed, 61 insertions, 217 deletions
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index e4e6d8cd47e6..40da098eeb39 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -619,16 +619,14 @@ static int tipc_bcbearer_send(struct sk_buff *buf,
 		if (bcbearer->remains_new.count == bcbearer->remains.count)
 			continue;	/* bearer pair doesn't add anything */
 
-		if (p->blocked ||
-		    p->media->send_msg(buf, p, &p->media->bcast_addr)) {
+		if (!tipc_bearer_blocked(p))
+			tipc_bearer_send(p, buf, &p->media->bcast_addr);
+		else if (s && !tipc_bearer_blocked(s))
 			/* unable to send on primary bearer */
-			if (!s || s->blocked ||
-			    s->media->send_msg(buf, s,
-					       &s->media->bcast_addr)) {
-				/* unable to send on either bearer */
-				continue;
-			}
-		}
+			tipc_bearer_send(s, buf, &s->media->bcast_addr);
+		else
+			/* unable to send on either bearer */
+			continue;
 
 		if (s) {
 			bcbearer->bpairs[bp_index].primary = s;
@@ -731,8 +729,8 @@ int tipc_bclink_stats(char *buf, const u32 buf_size)
731 " TX naks:%u acks:%u dups:%u\n", 729 " TX naks:%u acks:%u dups:%u\n",
732 s->sent_nacks, s->sent_acks, s->retransmitted); 730 s->sent_nacks, s->sent_acks, s->retransmitted);
733 ret += tipc_snprintf(buf + ret, buf_size - ret, 731 ret += tipc_snprintf(buf + ret, buf_size - ret,
734 " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n", 732 " Congestion link:%u Send queue max:%u avg:%u\n",
735 s->bearer_congs, s->link_congs, s->max_queue_sz, 733 s->link_congs, s->max_queue_sz,
736 s->queue_sz_counts ? 734 s->queue_sz_counts ?
737 (s->accu_queue_sz / s->queue_sz_counts) : 0); 735 (s->accu_queue_sz / s->queue_sz_counts) : 0);
738 736
@@ -766,7 +764,6 @@ int tipc_bclink_set_queue_limits(u32 limit)
 
 void tipc_bclink_init(void)
 {
-	INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
 	bcbearer->bearer.media = &bcbearer->media;
 	bcbearer->media.send_msg = tipc_bcbearer_send;
 	sprintf(bcbearer->media.name, "tipc-broadcast");
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 4ec5c80e8a7c..aa62f93a9127 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -279,116 +279,31 @@ void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest)
 }
 
 /*
- * bearer_push(): Resolve bearer congestion. Force the waiting
- * links to push out their unsent packets, one packet per link
- * per iteration, until all packets are gone or congestion reoccurs.
- * 'tipc_net_lock' is read_locked when this function is called
- * bearer.lock must be taken before calling
- * Returns binary true(1) ore false(0)
- */
-static int bearer_push(struct tipc_bearer *b_ptr)
-{
-	u32 res = 0;
-	struct tipc_link *ln, *tln;
-
-	if (b_ptr->blocked)
-		return 0;
-
-	while (!list_empty(&b_ptr->cong_links) && (res != PUSH_FAILED)) {
-		list_for_each_entry_safe(ln, tln, &b_ptr->cong_links, link_list) {
-			res = tipc_link_push_packet(ln);
-			if (res == PUSH_FAILED)
-				break;
-			if (res == PUSH_FINISHED)
-				list_move_tail(&ln->link_list, &b_ptr->links);
-		}
-	}
-	return list_empty(&b_ptr->cong_links);
-}
-
-void tipc_bearer_lock_push(struct tipc_bearer *b_ptr)
-{
-	spin_lock_bh(&b_ptr->lock);
-	bearer_push(b_ptr);
-	spin_unlock_bh(&b_ptr->lock);
-}
-
-
-/*
- * Interrupt enabling new requests after bearer congestion or blocking:
+ * Interrupt enabling new requests after bearer blocking:
  * See bearer_send().
  */
-void tipc_continue(struct tipc_bearer *b_ptr)
+void tipc_continue(struct tipc_bearer *b)
 {
-	spin_lock_bh(&b_ptr->lock);
-	if (!list_empty(&b_ptr->cong_links))
-		tipc_k_signal((Handler)tipc_bearer_lock_push, (unsigned long)b_ptr);
-	b_ptr->blocked = 0;
-	spin_unlock_bh(&b_ptr->lock);
+	spin_lock_bh(&b->lock);
+	b->blocked = 0;
+	spin_unlock_bh(&b->lock);
 }
 
 /*
- * Schedule link for sending of messages after the bearer
- * has been deblocked by 'continue()'. This method is called
- * when somebody tries to send a message via this link while
- * the bearer is congested. 'tipc_net_lock' is in read_lock here
- * bearer.lock is busy
+ * tipc_bearer_blocked - determines if bearer is currently blocked
  */
-static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr,
-					  struct tipc_link *l_ptr)
+int tipc_bearer_blocked(struct tipc_bearer *b)
 {
-	list_move_tail(&l_ptr->link_list, &b_ptr->cong_links);
-}
-
-/*
- * Schedule link for sending of messages after the bearer
- * has been deblocked by 'continue()'. This method is called
- * when somebody tries to send a message via this link while
- * the bearer is congested. 'tipc_net_lock' is in read_lock here,
- * bearer.lock is free
- */
-void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr)
-{
-	spin_lock_bh(&b_ptr->lock);
-	tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
-	spin_unlock_bh(&b_ptr->lock);
-}
-
+	int res;
 
-/*
- * tipc_bearer_resolve_congestion(): Check if there is bearer congestion,
- * and if there is, try to resolve it before returning.
- * 'tipc_net_lock' is read_locked when this function is called
- */
-int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr,
-				   struct tipc_link *l_ptr)
-{
-	int res = 1;
+	spin_lock_bh(&b->lock);
+	res = b->blocked;
+	spin_unlock_bh(&b->lock);
 
-	if (list_empty(&b_ptr->cong_links))
-		return 1;
-	spin_lock_bh(&b_ptr->lock);
-	if (!bearer_push(b_ptr)) {
-		tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
-		res = 0;
-	}
-	spin_unlock_bh(&b_ptr->lock);
 	return res;
 }
 
 /**
- * tipc_bearer_congested - determines if bearer is currently congested
- */
-int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr)
-{
-	if (unlikely(b_ptr->blocked))
-		return 1;
-	if (likely(list_empty(&b_ptr->cong_links)))
-		return 0;
-	return !tipc_bearer_resolve_congestion(b_ptr, l_ptr);
-}
-
-/**
  * tipc_enable_bearer - enable bearer with the given name
  */
 int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
@@ -489,7 +404,6 @@ restart:
 	b_ptr->net_plane = bearer_id + 'A';
 	b_ptr->active = 1;
 	b_ptr->priority = priority;
-	INIT_LIST_HEAD(&b_ptr->cong_links);
 	INIT_LIST_HEAD(&b_ptr->links);
 	spin_lock_init(&b_ptr->lock);
 
@@ -528,7 +442,6 @@ int tipc_block_bearer(const char *name)
 	pr_info("Blocking bearer <%s>\n", name);
 	spin_lock_bh(&b_ptr->lock);
 	b_ptr->blocked = 1;
-	list_splice_init(&b_ptr->cong_links, &b_ptr->links);
 	list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
 		struct tipc_node *n_ptr = l_ptr->owner;
 
@@ -555,7 +468,6 @@ static void bearer_disable(struct tipc_bearer *b_ptr)
 	spin_lock_bh(&b_ptr->lock);
 	b_ptr->blocked = 1;
 	b_ptr->media->disable_bearer(b_ptr);
-	list_splice_init(&b_ptr->cong_links, &b_ptr->links);
 	list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
 		tipc_link_delete(l_ptr);
 	}
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index dd4c2abf08e7..39f1192d04bf 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -120,7 +120,6 @@ struct tipc_media {
  * @identity: array index of this bearer within TIPC bearer array
  * @link_req: ptr to (optional) structure making periodic link setup requests
  * @links: list of non-congested links associated with bearer
- * @cong_links: list of congested links associated with bearer
  * @active: non-zero if bearer structure is represents a bearer
  * @net_plane: network plane ('A' through 'H') currently associated with bearer
  * @nodes: indicates which nodes in cluster can be reached through bearer
@@ -143,7 +142,6 @@ struct tipc_bearer {
 	u32 identity;
 	struct tipc_link_req *link_req;
 	struct list_head links;
-	struct list_head cong_links;
 	int active;
 	char net_plane;
 	struct tipc_node_map nodes;
@@ -185,39 +183,23 @@ struct sk_buff *tipc_media_get_names(void);
 struct sk_buff *tipc_bearer_get_names(void);
 void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest);
 void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest);
-void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr);
 struct tipc_bearer *tipc_bearer_find(const char *name);
 struct tipc_bearer *tipc_bearer_find_interface(const char *if_name);
 struct tipc_media *tipc_media_find(const char *name);
-int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr,
-				   struct tipc_link *l_ptr);
-int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct tipc_link *l_ptr);
+int tipc_bearer_blocked(struct tipc_bearer *b_ptr);
 void tipc_bearer_stop(void);
-void tipc_bearer_lock_push(struct tipc_bearer *b_ptr);
-
 
 /**
  * tipc_bearer_send- sends buffer to destination over bearer
  *
- * Returns true (1) if successful, or false (0) if unable to send
- *
  * IMPORTANT:
  * The media send routine must not alter the buffer being passed in
  * as it may be needed for later retransmission!
- *
- * If the media send routine returns a non-zero value (indicating that
- * it was unable to send the buffer), it must:
- * 1) mark the bearer as blocked,
- * 2) call tipc_continue() once the bearer is able to send again.
- * Media types that are unable to meet these two critera must ensure their
- * send routine always returns success -- even if the buffer was not sent --
- * and let TIPC's link code deal with the undelivered message.
  */
-static inline int tipc_bearer_send(struct tipc_bearer *b_ptr,
-				   struct sk_buff *buf,
+static inline void tipc_bearer_send(struct tipc_bearer *b, struct sk_buff *buf,
 				    struct tipc_media_addr *dest)
 {
-	return !b_ptr->media->send_msg(buf, b_ptr, dest);
+	b->media->send_msg(buf, b, dest);
 }
 
 #endif /* _TIPC_BEARER_H */
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 50eaa403eb6e..1074b9587e81 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -243,7 +243,7 @@ void tipc_disc_recv_msg(struct sk_buff *buf, struct tipc_bearer *b_ptr)
 	if ((type == DSC_REQ_MSG) && !link_fully_up && !b_ptr->blocked) {
 		rbuf = tipc_disc_init_msg(DSC_RESP_MSG, orig, b_ptr);
 		if (rbuf) {
-			b_ptr->media->send_msg(rbuf, b_ptr, &media_addr);
+			tipc_bearer_send(b_ptr, rbuf, &media_addr);
 			kfree_skb(rbuf);
 		}
 	}
diff --git a/net/tipc/link.c b/net/tipc/link.c
index a79c755cb417..0cc64800ab93 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -872,17 +872,12 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
 		return link_send_long_buf(l_ptr, buf);
 
 	/* Packet can be queued or sent. */
-	if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) &&
+	if (likely(!tipc_bearer_blocked(l_ptr->b_ptr) &&
 		   !link_congested(l_ptr))) {
 		link_add_to_outqueue(l_ptr, buf, msg);
 
-		if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
-			l_ptr->unacked_window = 0;
-		} else {
-			tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
-			l_ptr->stats.bearer_congs++;
-			l_ptr->next_out = buf;
-		}
+		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
+		l_ptr->unacked_window = 0;
 		return dsz;
 	}
 	/* Congestion: can message be bundled ? */
@@ -891,10 +886,8 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
 
 	/* Try adding message to an existing bundle */
 	if (l_ptr->next_out &&
-	    link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
-		tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
+	    link_bundle_buf(l_ptr, l_ptr->last_out, buf))
 		return dsz;
-	}
 
 	/* Try creating a new bundle */
 	if (size <= max_packet * 2 / 3) {
@@ -917,7 +910,6 @@ int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf)
 	if (!l_ptr->next_out)
 		l_ptr->next_out = buf;
 	link_add_to_outqueue(l_ptr, buf, msg);
-	tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
 	return dsz;
 }
 
@@ -1006,16 +998,11 @@ static int link_send_buf_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
 
 	if (likely(!link_congested(l_ptr))) {
 		if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
-			if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
+			if (likely(!tipc_bearer_blocked(l_ptr->b_ptr))) {
 				link_add_to_outqueue(l_ptr, buf, msg);
-				if (likely(tipc_bearer_send(l_ptr->b_ptr, buf,
-							    &l_ptr->media_addr))) {
-					l_ptr->unacked_window = 0;
-					return res;
-				}
-				tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
-				l_ptr->stats.bearer_congs++;
-				l_ptr->next_out = buf;
+				tipc_bearer_send(l_ptr->b_ptr, buf,
+						 &l_ptr->media_addr);
+				l_ptr->unacked_window = 0;
 				return res;
 			}
 		} else
@@ -1106,7 +1093,7 @@ exit:
 
 			/* Exit if link (or bearer) is congested */
 			if (link_congested(l_ptr) ||
-			    !list_empty(&l_ptr->b_ptr->cong_links)) {
+			    tipc_bearer_blocked(l_ptr->b_ptr)) {
 				res = link_schedule_port(l_ptr,
 							 sender->ref, res);
 				goto exit;
@@ -1329,15 +1316,11 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr)
 	if (r_q_size && buf) {
 		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
 		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
-		if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
-			l_ptr->retransm_queue_head = mod(++r_q_head);
-			l_ptr->retransm_queue_size = --r_q_size;
-			l_ptr->stats.retransmitted++;
-			return 0;
-		} else {
-			l_ptr->stats.bearer_congs++;
-			return PUSH_FAILED;
-		}
+		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
+		l_ptr->retransm_queue_head = mod(++r_q_head);
+		l_ptr->retransm_queue_size = --r_q_size;
+		l_ptr->stats.retransmitted++;
+		return 0;
 	}
 
 	/* Send deferred protocol message, if any: */
@@ -1345,15 +1328,11 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr)
 	if (buf) {
 		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
 		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
-		if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
-			l_ptr->unacked_window = 0;
-			kfree_skb(buf);
-			l_ptr->proto_msg_queue = NULL;
-			return 0;
-		} else {
-			l_ptr->stats.bearer_congs++;
-			return PUSH_FAILED;
-		}
+		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
+		l_ptr->unacked_window = 0;
+		kfree_skb(buf);
+		l_ptr->proto_msg_queue = NULL;
+		return 0;
 	}
 
 	/* Send one deferred data message, if send window not full: */
@@ -1366,18 +1345,14 @@ u32 tipc_link_push_packet(struct tipc_link *l_ptr)
 		if (mod(next - first) < l_ptr->queue_limit[0]) {
 			msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
 			msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
-			if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
-				if (msg_user(msg) == MSG_BUNDLER)
-					msg_set_type(msg, CLOSED_MSG);
-				l_ptr->next_out = buf->next;
-				return 0;
-			} else {
-				l_ptr->stats.bearer_congs++;
-				return PUSH_FAILED;
-			}
+			tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
+			if (msg_user(msg) == MSG_BUNDLER)
+				msg_set_type(msg, CLOSED_MSG);
+			l_ptr->next_out = buf->next;
+			return 0;
 		}
 	}
-	return PUSH_FINISHED;
+	return 1;
 }
 
 /*
@@ -1388,15 +1363,12 @@ void tipc_link_push_queue(struct tipc_link *l_ptr)
 {
 	u32 res;
 
-	if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr))
+	if (tipc_bearer_blocked(l_ptr->b_ptr))
 		return;
 
 	do {
 		res = tipc_link_push_packet(l_ptr);
 	} while (!res);
-
-	if (res == PUSH_FAILED)
-		tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
 }
 
 static void link_reset_all(unsigned long addr)
@@ -1481,7 +1453,7 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
 
 	msg = buf_msg(buf);
 
-	if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
+	if (tipc_bearer_blocked(l_ptr->b_ptr)) {
 		if (l_ptr->retransm_queue_size == 0) {
 			l_ptr->retransm_queue_head = msg_seqno(msg);
 			l_ptr->retransm_queue_size = retransmits;
@@ -1491,7 +1463,7 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
 		}
 		return;
 	} else {
-		/* Detect repeated retransmit failures on uncongested bearer */
+		/* Detect repeated retransmit failures on unblocked bearer */
 		if (l_ptr->last_retransmitted == msg_seqno(msg)) {
 			if (++l_ptr->stale_count > 100) {
 				link_retransmit_failure(l_ptr, buf);
@@ -1507,17 +1479,10 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *buf,
 		msg = buf_msg(buf);
 		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
 		msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
-		if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
-			buf = buf->next;
-			retransmits--;
-			l_ptr->stats.retransmitted++;
-		} else {
-			tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
-			l_ptr->stats.bearer_congs++;
-			l_ptr->retransm_queue_head = buf_seqno(buf);
-			l_ptr->retransm_queue_size = retransmits;
-			return;
-		}
+		tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
+		buf = buf->next;
+		retransmits--;
+		l_ptr->stats.retransmitted++;
 	}
 
 	l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
@@ -1972,21 +1937,13 @@ void tipc_link_send_proto_msg(struct tipc_link *l_ptr, u32 msg_typ,
 
 	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
 
-	/* Defer message if bearer is already congested */
-	if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
-		l_ptr->proto_msg_queue = buf;
-		return;
-	}
-
-	/* Defer message if attempting to send results in bearer congestion */
-	if (!tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
-		tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
+	/* Defer message if bearer is already blocked */
+	if (tipc_bearer_blocked(l_ptr->b_ptr)) {
 		l_ptr->proto_msg_queue = buf;
-		l_ptr->stats.bearer_congs++;
 		return;
 	}
 
-	/* Discard message if it was sent successfully */
+	tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr);
 	l_ptr->unacked_window = 0;
 	kfree_skb(buf);
 }
@@ -2937,8 +2894,8 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
 			     s->sent_nacks, s->sent_acks, s->retransmitted);
 
 	ret += tipc_snprintf(buf + ret, buf_size - ret,
-			     " Congestion bearer:%u link:%u Send queue"
-			     " max:%u avg:%u\n", s->bearer_congs, s->link_congs,
+			     " Congestion link:%u Send queue"
+			     " max:%u avg:%u\n", s->link_congs,
 			     s->max_queue_sz, s->queue_sz_counts ?
 			     (s->accu_queue_sz / s->queue_sz_counts) : 0);
 
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 6e921121be06..c048ed1cbd76 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -40,9 +40,6 @@
 #include "msg.h"
 #include "node.h"
 
-#define PUSH_FAILED   1
-#define PUSH_FINISHED 2
-
 /*
  * Out-of-range value for link sequence numbers
  */
@@ -82,7 +79,6 @@ struct tipc_stats {
 	u32 recv_fragmented;
 	u32 recv_fragments;
 	u32 link_congs;		/* # port sends blocked by congestion */
-	u32 bearer_congs;
 	u32 deferred_recv;
 	u32 duplicates;
 	u32 max_queue_sz;	/* send queue size high water mark */