Diffstat (limited to 'net/tipc/link.c')

-rw-r--r--  net/tipc/link.c  |  869
1 file changed, 356 insertions, 513 deletions
diff --git a/net/tipc/link.c b/net/tipc/link.c
index a4cf364316de..a6b30df6ec02 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1,7 +1,7 @@
 /*
  * net/tipc/link.c: TIPC link code
  *
- * Copyright (c) 1996-2007, 2012-2014, Ericsson AB
+ * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
  * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
  * All rights reserved.
  *
@@ -35,6 +35,7 @@
  */
 
 #include "core.h"
+#include "subscr.h"
 #include "link.h"
 #include "bcast.h"
 #include "socket.h"
@@ -88,24 +89,14 @@ static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
 #define TIMEOUT_EVT     560817u	/* link timer expired */
 
 /*
- * The following two 'message types' is really just implementation
- * data conveniently stored in the message header.
- * They must not be considered part of the protocol
+ * State value stored in 'failover_pkts'
  */
-#define OPEN_MSG   0
-#define CLOSED_MSG 1
-
-/*
- * State value stored in 'exp_msg_count'
- */
-#define START_CHANGEOVER 100000u
+#define FIRST_FAILOVER 0xffffu
 
 static void link_handle_out_of_seq_msg(struct tipc_link *link,
				       struct sk_buff *skb);
 static void tipc_link_proto_rcv(struct tipc_link *link,
				struct sk_buff *skb);
-static int  tipc_link_tunnel_rcv(struct tipc_node *node,
-				 struct sk_buff **skb);
 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
 static void link_state_event(struct tipc_link *l_ptr, u32 event);
 static void link_reset_statistics(struct tipc_link *l_ptr);
@@ -114,7 +105,7 @@ static void tipc_link_sync_xmit(struct tipc_link *l);
 static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
 static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
-
+static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);
 /*
  * Simple link routines
  */
@@ -138,32 +129,11 @@ static void tipc_link_put(struct tipc_link *l_ptr)
 	kref_put(&l_ptr->ref, tipc_link_release);
 }
 
-static void link_init_max_pkt(struct tipc_link *l_ptr)
+static struct tipc_link *tipc_parallel_link(struct tipc_link *l)
 {
-	struct tipc_node *node = l_ptr->owner;
-	struct tipc_net *tn = net_generic(node->net, tipc_net_id);
-	struct tipc_bearer *b_ptr;
-	u32 max_pkt;
-
-	rcu_read_lock();
-	b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
-	if (!b_ptr) {
-		rcu_read_unlock();
-		return;
-	}
-	max_pkt = (b_ptr->mtu & ~3);
-	rcu_read_unlock();
-
-	if (max_pkt > MAX_MSG_SIZE)
-		max_pkt = MAX_MSG_SIZE;
-
-	l_ptr->max_pkt_target = max_pkt;
-	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
-		l_ptr->max_pkt = l_ptr->max_pkt_target;
-	else
-		l_ptr->max_pkt = MAX_PKT_DEFAULT;
-
-	l_ptr->max_pkt_probes = 0;
+	if (l->owner->active_links[0] != l)
+		return l->owner->active_links[0];
+	return l->owner->active_links[1];
 }
 
 /*
@@ -194,10 +164,10 @@ static void link_timeout(unsigned long data)
 	tipc_node_lock(l_ptr->owner);
 
 	/* update counters used in statistical profiling of send traffic */
-	l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->outqueue);
+	l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->transmq);
 	l_ptr->stats.queue_sz_counts++;
 
-	skb = skb_peek(&l_ptr->outqueue);
+	skb = skb_peek(&l_ptr->transmq);
 	if (skb) {
 		struct tipc_msg *msg = buf_msg(skb);
 		u32 length = msg_size(msg);
@@ -229,7 +199,7 @@ static void link_timeout(unsigned long data)
 	/* do all other link processing performed on a periodic basis */
 	link_state_event(l_ptr, TIMEOUT_EVT);
 
-	if (l_ptr->next_out)
+	if (skb_queue_len(&l_ptr->backlogq))
 		tipc_link_push_packets(l_ptr);
 
 	tipc_node_unlock(l_ptr->owner);
@@ -305,16 +275,15 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
 	msg_set_session(msg, (tn->random & 0xffff));
 	msg_set_bearer_id(msg, b_ptr->identity);
 	strcpy((char *)msg_data(msg), if_name);
-
+	l_ptr->net_plane = b_ptr->net_plane;
+	l_ptr->advertised_mtu = b_ptr->mtu;
+	l_ptr->mtu = l_ptr->advertised_mtu;
 	l_ptr->priority = b_ptr->priority;
 	tipc_link_set_queue_limits(l_ptr, b_ptr->window);
-
-	l_ptr->net_plane = b_ptr->net_plane;
-	link_init_max_pkt(l_ptr);
-
 	l_ptr->next_out_no = 1;
-	__skb_queue_head_init(&l_ptr->outqueue);
-	__skb_queue_head_init(&l_ptr->deferred_queue);
+	__skb_queue_head_init(&l_ptr->transmq);
+	__skb_queue_head_init(&l_ptr->backlogq);
+	__skb_queue_head_init(&l_ptr->deferdq);
 	skb_queue_head_init(&l_ptr->wakeupq);
 	skb_queue_head_init(&l_ptr->inputq);
 	skb_queue_head_init(&l_ptr->namedq);
@@ -327,15 +296,19 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
 }
 
 /**
- * link_delete - Conditional deletion of link.
- * If timer still running, real delete is done when it expires
- * @link: link to be deleted
+ * tipc_link_delete - Delete a link
+ * @l: link to be deleted
  */
-void tipc_link_delete(struct tipc_link *link)
+void tipc_link_delete(struct tipc_link *l)
 {
-	tipc_link_reset_fragments(link);
-	tipc_node_detach_link(link->owner, link);
-	tipc_link_put(link);
+	tipc_link_reset(l);
+	if (del_timer(&l->timer))
+		tipc_link_put(l);
+	l->flags |= LINK_STOPPED;
+	/* Delete link now, or when timer is finished: */
+	tipc_link_reset_fragments(l);
+	tipc_node_detach_link(l->owner, l);
+	tipc_link_put(l);
 }
 
 void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
@@ -349,16 +322,7 @@ void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
 	list_for_each_entry_rcu(node, &tn->node_list, list) {
 		tipc_node_lock(node);
 		link = node->links[bearer_id];
-		if (!link) {
-			tipc_node_unlock(node);
-			continue;
-		}
-		tipc_link_reset(link);
-		if (del_timer(&link->timer))
-			tipc_link_put(link);
-		link->flags |= LINK_STOPPED;
-		/* Delete link now, or when failover is finished: */
-		if (shutting_down || !tipc_node_is_up(node))
+		if (link)
 			tipc_link_delete(link);
 		tipc_node_unlock(node);
 	}
@@ -366,28 +330,43 @@ void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
 }
 
 /**
- * link_schedule_user - schedule user for wakeup after congestion
+ * link_schedule_user - schedule a message sender for wakeup after congestion
  * @link: congested link
- * @oport: sending port
- * @chain_sz: size of buffer chain that was attempted sent
- * @imp: importance of message attempted sent
+ * @list: message that was attempted sent
  * Create pseudo msg to send back to user when congestion abates
+ * Only consumes message if there is an error
  */
-static bool link_schedule_user(struct tipc_link *link, u32 oport,
-			       uint chain_sz, uint imp)
+static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
 {
-	struct sk_buff *buf;
+	struct tipc_msg *msg = buf_msg(skb_peek(list));
+	int imp = msg_importance(msg);
+	u32 oport = msg_origport(msg);
+	u32 addr = link_own_addr(link);
+	struct sk_buff *skb;
 
-	buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
-			      link_own_addr(link), link_own_addr(link),
-			      oport, 0, 0);
-	if (!buf)
-		return false;
-	TIPC_SKB_CB(buf)->chain_sz = chain_sz;
-	TIPC_SKB_CB(buf)->chain_imp = imp;
-	skb_queue_tail(&link->wakeupq, buf);
+	/* This really cannot happen...  */
+	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
+		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
+		tipc_link_reset(link);
+		goto err;
+	}
+	/* Non-blocking sender: */
+	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
+		return -ELINKCONG;
+
+	/* Create and schedule wakeup pseudo message */
+	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
+			      addr, addr, oport, 0, 0);
+	if (!skb)
+		goto err;
+	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
+	TIPC_SKB_CB(skb)->chain_imp = imp;
+	skb_queue_tail(&link->wakeupq, skb);
 	link->stats.link_congs++;
-	return true;
+	return -ELINKCONG;
+err:
+	__skb_queue_purge(list);
+	return -ENOBUFS;
 }
 
 /**
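The rewritten link_schedule_user() above also changes the congestion contract: the caller now gets -ELINKCONG with its buffer chain left intact (sender sleeps until the SOCK_WAKEUP pseudo message arrives), and the chain is consumed only on a real error, where -ENOBUFS is returned. A minimal userspace model of that "consumes only on error" contract follows; the function and the errno stand-ins are invented for illustration, not code from this patch.

/* Userspace model of the contract above; not kernel code. */
#include <stdio.h>

#define ELINKCONG 1000	/* stand-in for the TIPC-private errno */
#define ENOBUFS_  1001	/* stand-in, to avoid clashing with <errno.h> */

struct chain { int len; };

static int schedule_user(struct chain *c, int can_schedule)
{
	if (can_schedule)
		return -ELINKCONG;	/* chain untouched, sender sleeps */
	c->len = 0;			/* error path: chain is purged */
	return -ENOBUFS_;
}

int main(void)
{
	struct chain c = { .len = 3 };
	int rc = schedule_user(&c, 1);

	printf("rc=%d len=%d (kept, retry after wakeup)\n", rc, c.len);
	rc = schedule_user(&c, 0);
	printf("rc=%d len=%d (consumed)\n", rc, c.len);
	return 0;
}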
@@ -396,19 +375,22 @@ static bool link_schedule_user(struct tipc_link *link, u32 oport,
  * Move a number of waiting users, as permitted by available space in
  * the send queue, from link wait queue to node wait queue for wakeup
  */
-void link_prepare_wakeup(struct tipc_link *link)
+void link_prepare_wakeup(struct tipc_link *l)
 {
-	uint pend_qsz = skb_queue_len(&link->outqueue);
+	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
+	int imp, lim;
 	struct sk_buff *skb, *tmp;
 
-	skb_queue_walk_safe(&link->wakeupq, skb, tmp) {
-		if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(skb)->chain_imp])
+	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
+		imp = TIPC_SKB_CB(skb)->chain_imp;
+		lim = l->window + l->backlog[imp].limit;
+		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
+		if ((pnd[imp] + l->backlog[imp].len) >= lim)
			break;
-		pend_qsz += TIPC_SKB_CB(skb)->chain_sz;
-		skb_unlink(skb, &link->wakeupq);
-		skb_queue_tail(&link->inputq, skb);
-		link->owner->inputq = &link->inputq;
-		link->owner->action_flags |= TIPC_MSG_EVT;
+		skb_unlink(skb, &l->wakeupq);
+		skb_queue_tail(&l->inputq, skb);
+		l->owner->inputq = &l->inputq;
+		l->owner->action_flags |= TIPC_MSG_EVT;
 	}
 }
 
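link_prepare_wakeup() now admits sleeping senders per importance level: for each queued wakeup message it adds that sender's pending chain size to a per-level counter and stops once counter plus current backlog would reach window plus backlog limit. The same arithmetic, reduced to a standalone model (the limits, window, and chain sizes are invented for the example; only the check mirrors the patch):

/* Userspace model of the per-importance wakeup admission check above. */
#include <stdio.h>

#define TIPC_SYSTEM_IMPORTANCE 4

struct backlog { int len; int limit; };

int main(void)
{
	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0};
	struct backlog backlog[TIPC_SYSTEM_IMPORTANCE + 1] = {
		{ .len = 90, .limit = 50 },	/* TIPC_LOW_IMPORTANCE */
	};
	int window = 50;
	/* Two sleeping senders, both low importance, chains of 8 packets */
	int chain_imp[] = { 0, 0 };
	int chain_sz[]  = { 8, 8 };

	for (int i = 0; i < 2; i++) {
		int imp = chain_imp[i];
		int lim = window + backlog[imp].limit;

		pnd[imp] += chain_sz[i];
		if (pnd[imp] + backlog[imp].len >= lim) {
			printf("sender %d stays asleep\n", i);
			break;
		}
		printf("sender %d woken up\n", i);
	}
	return 0;
}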
@@ -422,31 +404,42 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr)
 	l_ptr->reasm_buf = NULL;
 }
 
+static void tipc_link_purge_backlog(struct tipc_link *l)
+{
+	__skb_queue_purge(&l->backlogq);
+	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
+	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
+	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
+	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
+	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
+}
+
 /**
  * tipc_link_purge_queues - purge all pkt queues associated with link
  * @l_ptr: pointer to link
  */
 void tipc_link_purge_queues(struct tipc_link *l_ptr)
 {
-	__skb_queue_purge(&l_ptr->deferred_queue);
-	__skb_queue_purge(&l_ptr->outqueue);
+	__skb_queue_purge(&l_ptr->deferdq);
+	__skb_queue_purge(&l_ptr->transmq);
+	tipc_link_purge_backlog(l_ptr);
 	tipc_link_reset_fragments(l_ptr);
 }
 
 void tipc_link_reset(struct tipc_link *l_ptr)
 {
 	u32 prev_state = l_ptr->state;
-	u32 checkpoint = l_ptr->next_in_no;
 	int was_active_link = tipc_link_is_active(l_ptr);
 	struct tipc_node *owner = l_ptr->owner;
+	struct tipc_link *pl = tipc_parallel_link(l_ptr);
 
 	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));
 
 	/* Link is down, accept any session */
 	l_ptr->peer_session = INVALID_SESSION;
 
-	/* Prepare for max packet size negotiation */
-	link_init_max_pkt(l_ptr);
+	/* Prepare for renewed mtu size negotiation */
+	l_ptr->mtu = l_ptr->advertised_mtu;
 
 	l_ptr->state = RESET_UNKNOWN;
 
@@ -456,20 +449,26 @@ void tipc_link_reset(struct tipc_link *l_ptr)
 	tipc_node_link_down(l_ptr->owner, l_ptr);
 	tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);
 
-	if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
-		l_ptr->reset_checkpoint = checkpoint;
-		l_ptr->exp_msg_count = START_CHANGEOVER;
+	if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) {
+		l_ptr->flags |= LINK_FAILINGOVER;
+		l_ptr->failover_checkpt = l_ptr->next_in_no;
+		pl->failover_pkts = FIRST_FAILOVER;
+		pl->failover_checkpt = l_ptr->next_in_no;
+		pl->failover_skb = l_ptr->reasm_buf;
+	} else {
+		kfree_skb(l_ptr->reasm_buf);
 	}
-
 	/* Clean up all queues, except inputq: */
-	__skb_queue_purge(&l_ptr->outqueue);
-	__skb_queue_purge(&l_ptr->deferred_queue);
-	skb_queue_splice_init(&l_ptr->wakeupq, &l_ptr->inputq);
-	if (!skb_queue_empty(&l_ptr->inputq))
+	__skb_queue_purge(&l_ptr->transmq);
+	__skb_queue_purge(&l_ptr->deferdq);
+	if (!owner->inputq)
+		owner->inputq = &l_ptr->inputq;
+	skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
+	if (!skb_queue_empty(owner->inputq))
 		owner->action_flags |= TIPC_MSG_EVT;
-	owner->inputq = &l_ptr->inputq;
-	l_ptr->next_out = NULL;
-	l_ptr->unacked_window = 0;
+	tipc_link_purge_backlog(l_ptr);
+	l_ptr->reasm_buf = NULL;
+	l_ptr->rcv_unacked = 0;
 	l_ptr->checkpoint = 1;
 	l_ptr->next_out_no = 1;
 	l_ptr->fsm_msg_cnt = 0;
@@ -520,8 +519,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 	if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
 		return;	/* Not yet. */
 
-	/* Check whether changeover is going on */
-	if (l_ptr->exp_msg_count) {
+	if (l_ptr->flags & LINK_FAILINGOVER) {
 		if (event == TIMEOUT_EVT)
 			link_set_timer(l_ptr, cont_intv);
 		return;
@@ -538,11 +536,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 			l_ptr->checkpoint = l_ptr->next_in_no;
 			if (tipc_bclink_acks_missing(l_ptr->owner)) {
 				tipc_link_proto_xmit(l_ptr, STATE_MSG,
-						     0, 0, 0, 0, 0);
-				l_ptr->fsm_msg_cnt++;
-			} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
-				tipc_link_proto_xmit(l_ptr, STATE_MSG,
-						     1, 0, 0, 0, 0);
+						     0, 0, 0, 0);
 				l_ptr->fsm_msg_cnt++;
 			}
 			link_set_timer(l_ptr, cont_intv);
@@ -550,7 +544,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 		}
 		l_ptr->state = WORKING_UNKNOWN;
 		l_ptr->fsm_msg_cnt = 0;
-		tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+		tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
 		l_ptr->fsm_msg_cnt++;
 		link_set_timer(l_ptr, cont_intv / 4);
 		break;
@@ -561,7 +555,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 		l_ptr->state = RESET_RESET;
 		l_ptr->fsm_msg_cnt = 0;
 		tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-				     0, 0, 0, 0, 0);
+				     0, 0, 0, 0);
 		l_ptr->fsm_msg_cnt++;
 		link_set_timer(l_ptr, cont_intv);
 		break;
@@ -584,7 +578,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 		l_ptr->state = RESET_RESET;
 		l_ptr->fsm_msg_cnt = 0;
 		tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-				     0, 0, 0, 0, 0);
+				     0, 0, 0, 0);
 		l_ptr->fsm_msg_cnt++;
 		link_set_timer(l_ptr, cont_intv);
 		break;
@@ -595,13 +589,13 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 			l_ptr->checkpoint = l_ptr->next_in_no;
 			if (tipc_bclink_acks_missing(l_ptr->owner)) {
 				tipc_link_proto_xmit(l_ptr, STATE_MSG,
-						     0, 0, 0, 0, 0);
+						     0, 0, 0, 0);
 				l_ptr->fsm_msg_cnt++;
 			}
 			link_set_timer(l_ptr, cont_intv);
 		} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
 			tipc_link_proto_xmit(l_ptr, STATE_MSG,
-					     1, 0, 0, 0, 0);
+					     1, 0, 0, 0);
 			l_ptr->fsm_msg_cnt++;
 			link_set_timer(l_ptr, cont_intv / 4);
 		} else {	/* Link has failed */
@@ -611,7 +605,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 			l_ptr->state = RESET_UNKNOWN;
 			l_ptr->fsm_msg_cnt = 0;
 			tipc_link_proto_xmit(l_ptr, RESET_MSG,
-					     0, 0, 0, 0, 0);
+					     0, 0, 0, 0);
 			l_ptr->fsm_msg_cnt++;
 			link_set_timer(l_ptr, cont_intv);
 		}
@@ -631,7 +625,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 		l_ptr->state = WORKING_WORKING;
 		l_ptr->fsm_msg_cnt = 0;
 		link_activate(l_ptr);
-		tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+		tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
 		l_ptr->fsm_msg_cnt++;
 		if (l_ptr->owner->working_links == 1)
 			tipc_link_sync_xmit(l_ptr);
@@ -641,7 +635,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 		l_ptr->state = RESET_RESET;
 		l_ptr->fsm_msg_cnt = 0;
 		tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-				     1, 0, 0, 0, 0);
+				     1, 0, 0, 0);
 		l_ptr->fsm_msg_cnt++;
 		link_set_timer(l_ptr, cont_intv);
 		break;
@@ -651,7 +645,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 		link_set_timer(l_ptr, cont_intv);
 		break;
 	case TIMEOUT_EVT:
-		tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
+		tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0);
 		l_ptr->fsm_msg_cnt++;
 		link_set_timer(l_ptr, cont_intv);
 		break;
@@ -669,7 +663,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 		l_ptr->state = WORKING_WORKING;
 		l_ptr->fsm_msg_cnt = 0;
 		link_activate(l_ptr);
-		tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+		tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
 		l_ptr->fsm_msg_cnt++;
 		if (l_ptr->owner->working_links == 1)
 			tipc_link_sync_xmit(l_ptr);
@@ -679,7 +673,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 		break;
 	case TIMEOUT_EVT:
 		tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-				     0, 0, 0, 0, 0);
+				     0, 0, 0, 0);
 		l_ptr->fsm_msg_cnt++;
 		link_set_timer(l_ptr, cont_intv);
 		break;
@@ -692,101 +686,65 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
 	}
 }
 
-/* tipc_link_cong: determine return value and how to treat the
- * sent buffer during link congestion.
- * - For plain, errorless user data messages we keep the buffer and
- *   return -ELINKONG.
- * - For all other messages we discard the buffer and return -EHOSTUNREACH
- * - For TIPC internal messages we also reset the link
- */
-static int tipc_link_cong(struct tipc_link *link, struct sk_buff_head *list)
-{
-	struct sk_buff *skb = skb_peek(list);
-	struct tipc_msg *msg = buf_msg(skb);
-	uint imp = tipc_msg_tot_importance(msg);
-	u32 oport = msg_tot_origport(msg);
-
-	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
-		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
-		tipc_link_reset(link);
-		goto drop;
-	}
-	if (unlikely(msg_errcode(msg)))
-		goto drop;
-	if (unlikely(msg_reroute_cnt(msg)))
-		goto drop;
-	if (TIPC_SKB_CB(skb)->wakeup_pending)
-		return -ELINKCONG;
-	if (link_schedule_user(link, oport, skb_queue_len(list), imp))
-		return -ELINKCONG;
-drop:
-	__skb_queue_purge(list);
-	return -EHOSTUNREACH;
-}
-
 /**
  * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
  * @link: link to use
  * @list: chain of buffers containing message
  *
- * Consumes the buffer chain, except when returning -ELINKCONG
- * Returns 0 if success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket
- * user data messages) or -EHOSTUNREACH (all other messages/senders)
- * Only the socket functions tipc_send_stream() and tipc_send_packet() need
- * to act on the return value, since they may need to do more send attempts.
+ * Consumes the buffer chain, except when returning -ELINKCONG,
+ * since the caller then may want to make more send attempts.
+ * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
+ * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
 int __tipc_link_xmit(struct net *net, struct tipc_link *link,
		     struct sk_buff_head *list)
 {
 	struct tipc_msg *msg = buf_msg(skb_peek(list));
-	uint psz = msg_size(msg);
-	uint sndlim = link->queue_limit[0];
-	uint imp = tipc_msg_tot_importance(msg);
-	uint mtu = link->max_pkt;
+	unsigned int maxwin = link->window;
+	unsigned int imp = msg_importance(msg);
+	uint mtu = link->mtu;
 	uint ack = mod(link->next_in_no - 1);
 	uint seqno = link->next_out_no;
 	uint bc_last_in = link->owner->bclink.last_in;
 	struct tipc_media_addr *addr = &link->media_addr;
-	struct sk_buff_head *outqueue = &link->outqueue;
+	struct sk_buff_head *transmq = &link->transmq;
+	struct sk_buff_head *backlogq = &link->backlogq;
 	struct sk_buff *skb, *tmp;
 
-	/* Match queue limits against msg importance: */
-	if (unlikely(skb_queue_len(outqueue) >= link->queue_limit[imp]))
-		return tipc_link_cong(link, list);
+	/* Match backlog limit against msg importance: */
+	if (unlikely(link->backlog[imp].len >= link->backlog[imp].limit))
+		return link_schedule_user(link, list);
 
-	/* Has valid packet limit been used ? */
-	if (unlikely(psz > mtu)) {
+	if (unlikely(msg_size(msg) > mtu)) {
		__skb_queue_purge(list);
		return -EMSGSIZE;
 	}
-
-	/* Prepare each packet for sending, and add to outqueue: */
+	/* Prepare each packet for sending, and add to relevant queue: */
 	skb_queue_walk_safe(list, skb, tmp) {
		__skb_unlink(skb, list);
		msg = buf_msg(skb);
-		msg_set_word(msg, 2, ((ack << 16) | mod(seqno)));
+		msg_set_seqno(msg, seqno);
+		msg_set_ack(msg, ack);
		msg_set_bcast_ack(msg, bc_last_in);
 
-		if (skb_queue_len(outqueue) < sndlim) {
-			__skb_queue_tail(outqueue, skb);
-			tipc_bearer_send(net, link->bearer_id,
-					 skb, addr);
-			link->next_out = NULL;
-			link->unacked_window = 0;
-		} else if (tipc_msg_bundle(outqueue, skb, mtu)) {
+		if (likely(skb_queue_len(transmq) < maxwin)) {
+			__skb_queue_tail(transmq, skb);
+			tipc_bearer_send(net, link->bearer_id, skb, addr);
+			link->rcv_unacked = 0;
+			seqno++;
+			continue;
+		}
+		if (tipc_msg_bundle(skb_peek_tail(backlogq), skb, mtu)) {
			link->stats.sent_bundled++;
			continue;
-		} else if (tipc_msg_make_bundle(outqueue, skb, mtu,
-						link->addr)) {
+		}
+		if (tipc_msg_make_bundle(&skb, mtu, link->addr)) {
			link->stats.sent_bundled++;
			link->stats.sent_bundles++;
-			if (!link->next_out)
-				link->next_out = skb_peek_tail(outqueue);
-		} else {
-			__skb_queue_tail(outqueue, skb);
-			if (!link->next_out)
-				link->next_out = skb;
+			imp = msg_importance(buf_msg(skb));
		}
+		__skb_queue_tail(backlogq, skb);
+		link->backlog[imp].len++;
		seqno++;
 	}
 	link->next_out_no = seqno;
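The rewritten __tipc_link_xmit() above replaces the single outqueue with two queues: packets go straight to the transmit queue (and onto the bearer) while its length is below the link window, and spill into a FIFO backlog, accounted per importance level, once the window is full. A compact userspace model of that admission path follows; the WINDOW value and limits are assumptions for the example, and bundling is left out.

/* Userspace model of the transmq/backlogq split above; not kernel code. */
#include <stdio.h>

#define WINDOW 3
#define IMP_LEVELS 5

static int transmq_len;			/* sent, awaiting ack */
static int backlogq_len;		/* unsent FIFO */
static int backlog_len[IMP_LEVELS];	/* per-importance accounting */
static int backlog_limit[IMP_LEVELS] = { 5, 5, 10, 10, 30 };

static int xmit(int imp)
{
	/* Admission check, as in __tipc_link_xmit() */
	if (backlog_len[imp] >= backlog_limit[imp])
		return -1;	/* congested: sender gets scheduled for wakeup */

	if (transmq_len < WINDOW) {
		transmq_len++;	/* the real code also sends to the bearer here */
		printf("imp=%d: sent\n", imp);
	} else {
		backlogq_len++;
		backlog_len[imp]++;
		printf("imp=%d: backlogged\n", imp);
	}
	return 0;
}

/* An ack opens the window; push moves backlog packets to the
 * transmit queue, as tipc_link_push_packets() does. */
static void ack_and_push(int imp_of_head)
{
	if (transmq_len)
		transmq_len--;
	if (backlogq_len && transmq_len < WINDOW) {
		backlogq_len--;
		backlog_len[imp_of_head]--;
		transmq_len++;
		printf("imp=%d: pushed from backlog\n", imp_of_head);
	}
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		xmit(0);
	ack_and_push(0);
	return 0;
}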
@@ -807,13 +765,25 @@ static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
 	return __tipc_link_xmit(link->owner->net, link, &head);
 }
 
+/* tipc_link_xmit_skb(): send single buffer to destination
+ * Buffers sent via this functon are generally TIPC_SYSTEM_IMPORTANCE
+ * messages, which will not be rejected
+ * The only exception is datagram messages rerouted after secondary
+ * lookup, which are rare and safe to dispose of anyway.
+ * TODO: Return real return value, and let callers use
+ * tipc_wait_for_sendpkt() where applicable
+ */
 int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
 {
 	struct sk_buff_head head;
+	int rc;
 
 	skb2list(skb, &head);
-	return tipc_link_xmit(net, &head, dnode, selector);
+	rc = tipc_link_xmit(net, &head, dnode, selector);
+	if (rc == -ELINKCONG)
+		kfree_skb(skb);
+	return 0;
 }
 
 /**
@@ -840,12 +810,15 @@ int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
 		if (link)
 			rc = __tipc_link_xmit(net, link, list);
 		tipc_node_unlock(node);
+		tipc_node_put(node);
 	}
 	if (link)
 		return rc;
 
-	if (likely(in_own_node(net, dnode)))
-		return tipc_sk_rcv(net, list);
+	if (likely(in_own_node(net, dnode))) {
+		tipc_sk_rcv(net, list);
+		return 0;
+	}
 
 	__skb_queue_purge(list);
 	return rc;
@@ -892,14 +865,6 @@ static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
 	kfree_skb(buf);
 }
 
-struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list,
-				    const struct sk_buff *skb)
-{
-	if (skb_queue_is_last(list, skb))
-		return NULL;
-	return skb->next;
-}
-
 /*
  * tipc_link_push_packets - push unsent packets to bearer
  *
@@ -908,30 +873,24 @@ struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list,
  *
  * Called with node locked
  */
-void tipc_link_push_packets(struct tipc_link *l_ptr)
+void tipc_link_push_packets(struct tipc_link *link)
 {
-	struct sk_buff_head *outqueue = &l_ptr->outqueue;
-	struct sk_buff *skb = l_ptr->next_out;
+	struct sk_buff *skb;
 	struct tipc_msg *msg;
-	u32 next, first;
+	unsigned int ack = mod(link->next_in_no - 1);
 
-	skb_queue_walk_from(outqueue, skb) {
-		msg = buf_msg(skb);
-		next = msg_seqno(msg);
-		first = buf_seqno(skb_peek(outqueue));
-
-		if (mod(next - first) < l_ptr->queue_limit[0]) {
-			msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
-			msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
-			if (msg_user(msg) == MSG_BUNDLER)
-				TIPC_SKB_CB(skb)->bundling = false;
-			tipc_bearer_send(l_ptr->owner->net,
-					 l_ptr->bearer_id, skb,
-					 &l_ptr->media_addr);
-			l_ptr->next_out = tipc_skb_queue_next(outqueue, skb);
-		} else {
+	while (skb_queue_len(&link->transmq) < link->window) {
+		skb = __skb_dequeue(&link->backlogq);
+		if (!skb)
			break;
-		}
+		msg = buf_msg(skb);
+		link->backlog[msg_importance(msg)].len--;
+		msg_set_ack(msg, ack);
+		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
+		link->rcv_unacked = 0;
+		__skb_queue_tail(&link->transmq, skb);
+		tipc_bearer_send(link->owner->net, link->bearer_id,
+				 skb, &link->media_addr);
 	}
 }
 
@@ -978,7 +937,6 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
			(unsigned long) TIPC_SKB_CB(buf)->handle);
 
		n_ptr = tipc_bclink_retransmit_to(net);
-		tipc_node_lock(n_ptr);
 
		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
@@ -990,9 +948,7 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);
 
-		tipc_node_unlock(n_ptr);
-
-		tipc_bclink_set_flags(net, TIPC_BCLINK_RESET);
+		n_ptr->action_flags |= TIPC_BCAST_RESET;
		l_ptr->stale_count = 0;
 	}
 }
@@ -1018,8 +974,8 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
		l_ptr->stale_count = 1;
 	}
 
-	skb_queue_walk_from(&l_ptr->outqueue, skb) {
-		if (!retransmits || skb == l_ptr->next_out)
+	skb_queue_walk_from(&l_ptr->transmq, skb) {
+		if (!retransmits)
			break;
		msg = buf_msg(skb);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
@@ -1031,72 +987,43 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
 		}
 	}
 }
 
-static void link_retrieve_defq(struct tipc_link *link,
-			       struct sk_buff_head *list)
-{
-	u32 seq_no;
-
-	if (skb_queue_empty(&link->deferred_queue))
-		return;
-
-	seq_no = buf_seqno(skb_peek(&link->deferred_queue));
-	if (seq_no == mod(link->next_in_no))
-		skb_queue_splice_tail_init(&link->deferred_queue, list);
-}
-
-/**
- * link_recv_buf_validate - validate basic format of received message
- *
- * This routine ensures a TIPC message has an acceptable header, and at least
- * as much data as the header indicates it should. The routine also ensures
- * that the entire message header is stored in the main fragment of the message
- * buffer, to simplify future access to message header fields.
- *
- * Note: Having extra info present in the message header or data areas is OK.
- * TIPC will ignore the excess, under the assumption that it is optional info
- * introduced by a later release of the protocol.
+/* link_synch(): check if all packets arrived before the synch
+ * point have been consumed
+ * Returns true if the parallel links are synched, otherwise false
  */
-static int link_recv_buf_validate(struct sk_buff *buf)
+static bool link_synch(struct tipc_link *l)
 {
-	static u32 min_data_hdr_size[8] = {
-		SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
-		MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
-	};
+	unsigned int post_synch;
+	struct tipc_link *pl;
 
-	struct tipc_msg *msg;
-	u32 tipc_hdr[2];
-	u32 size;
-	u32 hdr_size;
-	u32 min_hdr_size;
+	pl = tipc_parallel_link(l);
+	if (pl == l)
+		goto synched;
 
-	/* If this packet comes from the defer queue, the skb has already
-	 * been validated
-	 */
-	if (unlikely(TIPC_SKB_CB(buf)->deferred))
-		return 1;
-
-	if (unlikely(buf->len < MIN_H_SIZE))
-		return 0;
-
-	msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
-	if (msg == NULL)
-		return 0;
+	/* Was last pre-synch packet added to input queue ? */
+	if (less_eq(pl->next_in_no, l->synch_point))
+		return false;
 
-	if (unlikely(msg_version(msg) != TIPC_VERSION))
-		return 0;
+	/* Is it still in the input queue ? */
+	post_synch = mod(pl->next_in_no - l->synch_point) - 1;
+	if (skb_queue_len(&pl->inputq) > post_synch)
+		return false;
+synched:
+	l->flags &= ~LINK_SYNCHING;
+	return true;
+}
 
-	size = msg_size(msg);
-	hdr_size = msg_hdr_sz(msg);
-	min_hdr_size = msg_isdata(msg) ?
-		min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;
+static void link_retrieve_defq(struct tipc_link *link,
+			       struct sk_buff_head *list)
+{
+	u32 seq_no;
 
-	if (unlikely((hdr_size < min_hdr_size) ||
-		     (size < hdr_size) ||
-		     (buf->len < size) ||
-		     (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
-		return 0;
+	if (skb_queue_empty(&link->deferdq))
+		return;
 
-	return pskb_may_pull(buf, hdr_size);
+	seq_no = buf_seqno(skb_peek(&link->deferdq));
+	if (seq_no == mod(link->next_in_no))
+		skb_queue_splice_tail_init(&link->deferdq, list);
 }
 
 /**
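The new link_synch() above decides whether the two parallel links are back in step by pure sequence-number arithmetic: once the parallel link's delivery counter has passed the recorded synch point, and no pre-synch packet is still sitting in its input queue, the SYNCHING flag can be dropped. The same test, reduced to plain integers (the values are invented, and less_eq() is simplified to a plain comparison, ignoring the wraparound handling the kernel helper provides):

/* Standalone model of the link_synch() test above; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

#define mod(x) ((x) & 0xffffu)

static bool synched(unsigned pl_next_in_no, unsigned synch_point,
		    unsigned pl_inputq_len)
{
	/* Was the last pre-synch packet delivered at all? */
	if (mod(pl_next_in_no) <= mod(synch_point))
		return false;
	/* Is it still queued, i.e. not yet consumed? */
	unsigned post_synch = mod(pl_next_in_no - synch_point) - 1;
	return pl_inputq_len <= post_synch;
}

int main(void)
{
	printf("%d\n", synched(105, 100, 4));	/* 1: pre-synch consumed */
	printf("%d\n", synched(105, 100, 6));	/* 0: pre-synch still queued */
	printf("%d\n", synched(100, 100, 0));	/* 0: synch point not reached */
	return 0;
}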
@@ -1124,16 +1051,11 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
 
 	while ((skb = __skb_dequeue(&head))) {
		/* Ensure message is well-formed */
-		if (unlikely(!link_recv_buf_validate(skb)))
-			goto discard;
-
-		/* Ensure message data is a single contiguous unit */
-		if (unlikely(skb_linearize(skb)))
+		if (unlikely(!tipc_msg_validate(skb)))
			goto discard;
 
		/* Handle arrival of a non-unicast link message */
		msg = buf_msg(skb);
-
		if (unlikely(msg_non_seq(msg))) {
			if (msg_user(msg) == LINK_CONFIG)
				tipc_disc_rcv(net, skb, b_ptr);
@@ -1151,8 +1073,8 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
		n_ptr = tipc_node_find(net, msg_prevnode(msg));
		if (unlikely(!n_ptr))
			goto discard;
-		tipc_node_lock(n_ptr);
 
+		tipc_node_lock(n_ptr);
		/* Locate unicast link endpoint that should handle message */
		l_ptr = n_ptr->links[b_ptr->identity];
		if (unlikely(!l_ptr))
@@ -1174,21 +1096,20 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
		ackd = msg_ack(msg);
 
		/* Release acked messages */
-		if (n_ptr->bclink.recv_permitted)
+		if (unlikely(n_ptr->bclink.acked != msg_bcast_ack(msg)))
			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
 
		released = 0;
-		skb_queue_walk_safe(&l_ptr->outqueue, skb1, tmp) {
-			if (skb1 == l_ptr->next_out ||
-			    more(buf_seqno(skb1), ackd))
+		skb_queue_walk_safe(&l_ptr->transmq, skb1, tmp) {
+			if (more(buf_seqno(skb1), ackd))
				break;
-			__skb_unlink(skb1, &l_ptr->outqueue);
+			__skb_unlink(skb1, &l_ptr->transmq);
			kfree_skb(skb1);
			released = 1;
		}
 
		/* Try sending any messages link endpoint has pending */
-		if (unlikely(l_ptr->next_out))
+		if (unlikely(skb_queue_len(&l_ptr->backlogq)))
			tipc_link_push_packets(l_ptr);
 
		if (released && !skb_queue_empty(&l_ptr->wakeupq))
@@ -1222,18 +1143,26 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
			skb = NULL;
			goto unlock;
		}
+		/* Synchronize with parallel link if applicable */
+		if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) {
+			link_handle_out_of_seq_msg(l_ptr, skb);
+			if (link_synch(l_ptr))
+				link_retrieve_defq(l_ptr, &head);
+			skb = NULL;
+			goto unlock;
+		}
		l_ptr->next_in_no++;
-		if (unlikely(!skb_queue_empty(&l_ptr->deferred_queue)))
+		if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
			link_retrieve_defq(l_ptr, &head);
-
-		if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
+		if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
			l_ptr->stats.sent_acks++;
-			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
		}
		tipc_link_input(l_ptr, skb);
		skb = NULL;
 unlock:
		tipc_node_unlock(n_ptr);
+		tipc_node_put(n_ptr);
 discard:
		if (unlikely(skb))
			kfree_skb(skb);
@@ -1270,7 +1199,7 @@ static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
		node->action_flags |= TIPC_NAMED_MSG_EVT;
		return true;
	case MSG_BUNDLER:
-	case CHANGEOVER_PROTOCOL:
+	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
@@ -1297,8 +1226,14 @@ static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
		return;
 
	switch (msg_user(msg)) {
-	case CHANGEOVER_PROTOCOL:
-		if (!tipc_link_tunnel_rcv(node, &skb))
+	case TUNNEL_PROTOCOL:
+		if (msg_dup(msg)) {
+			link->flags |= LINK_SYNCHING;
+			link->synch_point = msg_seqno(msg_get_wrapped(msg));
+			kfree_skb(skb);
+			break;
+		}
+		if (!tipc_link_failover_rcv(link, &skb))
			break;
		if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
			tipc_data_input(link, skb);
@@ -1393,11 +1328,10 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
		return;
	}
 
-	if (tipc_link_defer_pkt(&l_ptr->deferred_queue, buf)) {
+	if (tipc_link_defer_pkt(&l_ptr->deferdq, buf)) {
		l_ptr->stats.deferred_recv++;
-		TIPC_SKB_CB(buf)->deferred = true;
-		if ((skb_queue_len(&l_ptr->deferred_queue) % 16) == 1)
-			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+		if ((skb_queue_len(&l_ptr->deferdq) % TIPC_MIN_LINK_WIN) == 1)
+			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
	} else {
		l_ptr->stats.duplicates++;
	}
@@ -1407,15 +1341,15 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
  * Send protocol message to the other endpoint.
  */
 void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
-			  u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
+			  u32 gap, u32 tolerance, u32 priority)
 {
	struct sk_buff *buf = NULL;
	struct tipc_msg *msg = l_ptr->pmsg;
	u32 msg_size = sizeof(l_ptr->proto_msg);
	int r_flag;
 
-	/* Don't send protocol message during link changeover */
-	if (l_ptr->exp_msg_count)
+	/* Don't send protocol message during link failover */
+	if (l_ptr->flags & LINK_FAILINGOVER)
		return;
 
	/* Abort non-RESET send if communication with node is prohibited */
@@ -1433,11 +1367,11 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
 
		if (!tipc_link_is_up(l_ptr))
			return;
-		if (l_ptr->next_out)
-			next_sent = buf_seqno(l_ptr->next_out);
+		if (skb_queue_len(&l_ptr->backlogq))
+			next_sent = buf_seqno(skb_peek(&l_ptr->backlogq));
		msg_set_next_sent(msg, next_sent);
-		if (!skb_queue_empty(&l_ptr->deferred_queue)) {
-			u32 rec = buf_seqno(skb_peek(&l_ptr->deferred_queue));
+		if (!skb_queue_empty(&l_ptr->deferdq)) {
+			u32 rec = buf_seqno(skb_peek(&l_ptr->deferdq));
			gap = mod(rec - mod(l_ptr->next_in_no));
		}
		msg_set_seq_gap(msg, gap);
@@ -1445,35 +1379,20 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
			l_ptr->stats.sent_nacks++;
		msg_set_link_tolerance(msg, tolerance);
		msg_set_linkprio(msg, priority);
-		msg_set_max_pkt(msg, ack_mtu);
+		msg_set_max_pkt(msg, l_ptr->mtu);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_probe(msg, probe_msg != 0);
-		if (probe_msg) {
-			u32 mtu = l_ptr->max_pkt;
-
-			if ((mtu < l_ptr->max_pkt_target) &&
-			    link_working_working(l_ptr) &&
-			    l_ptr->fsm_msg_cnt) {
-				msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
-				if (l_ptr->max_pkt_probes == 10) {
-					l_ptr->max_pkt_target = (msg_size - 4);
-					l_ptr->max_pkt_probes = 0;
-					msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
-				}
-				l_ptr->max_pkt_probes++;
-			}
-
+		if (probe_msg)
			l_ptr->stats.sent_probes++;
-		}
		l_ptr->stats.sent_states++;
	} else {		/* RESET_MSG or ACTIVATE_MSG */
-		msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
+		msg_set_ack(msg, mod(l_ptr->failover_checkpt - 1));
		msg_set_seq_gap(msg, 0);
		msg_set_next_sent(msg, 1);
		msg_set_probe(msg, 0);
		msg_set_link_tolerance(msg, l_ptr->tolerance);
		msg_set_linkprio(msg, l_ptr->priority);
-		msg_set_max_pkt(msg, l_ptr->max_pkt_target);
+		msg_set_max_pkt(msg, l_ptr->advertised_mtu);
	}
 
	r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr));
@@ -1489,10 +1408,9 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
 
	skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
	buf->priority = TC_PRIO_CONTROL;
-
	tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf,
			 &l_ptr->media_addr);
-	l_ptr->unacked_window = 0;
+	l_ptr->rcv_unacked = 0;
	kfree_skb(buf);
 }
 
@@ -1505,13 +1423,10 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
				struct sk_buff *buf)
 {
	u32 rec_gap = 0;
-	u32 max_pkt_info;
-	u32 max_pkt_ack;
	u32 msg_tol;
	struct tipc_msg *msg = buf_msg(buf);
 
-	/* Discard protocol message during link changeover */
-	if (l_ptr->exp_msg_count)
+	if (l_ptr->flags & LINK_FAILINGOVER)
		goto exit;
 
	if (l_ptr->net_plane != msg_net_plane(msg))
@@ -1550,15 +1465,8 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
		if (msg_linkprio(msg) > l_ptr->priority)
			l_ptr->priority = msg_linkprio(msg);
 
-		max_pkt_info = msg_max_pkt(msg);
-		if (max_pkt_info) {
-			if (max_pkt_info < l_ptr->max_pkt_target)
-				l_ptr->max_pkt_target = max_pkt_info;
-			if (l_ptr->max_pkt > l_ptr->max_pkt_target)
-				l_ptr->max_pkt = l_ptr->max_pkt_target;
-		} else {
-			l_ptr->max_pkt = l_ptr->max_pkt_target;
-		}
+		if (l_ptr->mtu > msg_max_pkt(msg))
+			l_ptr->mtu = msg_max_pkt(msg);
 
		/* Synchronize broadcast link info, if not done previously */
		if (!tipc_node_is_up(l_ptr->owner)) {
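With the probing machinery deleted above, MTU negotiation collapses to a min() exchange: each endpoint advertises its bearer MTU in RESET/ACTIVATE messages, the receiver simply clamps its link MTU downward, and a link reset restores the advertised value so negotiation starts over. A sketch of the resulting agreement between two endpoints (the struct and the MTU values are illustrative only):

/* Userspace sketch of the simplified min()-style MTU agreement above. */
#include <stdio.h>

struct link { unsigned advertised_mtu, mtu; };

/* Receiving a RESET/ACTIVATE carrying the peer's advertised MTU */
static void proto_rcv(struct link *l, unsigned peer_max_pkt)
{
	if (l->mtu > peer_max_pkt)
		l->mtu = peer_max_pkt;
}

static void reset(struct link *l)
{
	l->mtu = l->advertised_mtu;	/* renegotiate from scratch */
}

int main(void)
{
	struct link a = { .advertised_mtu = 1500, .mtu = 1500 };
	struct link b = { .advertised_mtu = 9000, .mtu = 9000 };

	proto_rcv(&a, b.advertised_mtu);	/* a keeps 1500 */
	proto_rcv(&b, a.advertised_mtu);	/* b drops to 1500 */
	printf("a=%u b=%u\n", a.mtu, b.mtu);

	reset(&b);
	printf("after reset: b=%u\n", b.mtu);
	return 0;
}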
@@ -1603,18 +1511,8 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, | |||
1603 | mod(l_ptr->next_in_no)); | 1511 | mod(l_ptr->next_in_no)); |
1604 | } | 1512 | } |
1605 | 1513 | ||
1606 | max_pkt_ack = msg_max_pkt(msg); | 1514 | if (msg_probe(msg)) |
1607 | if (max_pkt_ack > l_ptr->max_pkt) { | ||
1608 | l_ptr->max_pkt = max_pkt_ack; | ||
1609 | l_ptr->max_pkt_probes = 0; | ||
1610 | } | ||
1611 | |||
1612 | max_pkt_ack = 0; | ||
1613 | if (msg_probe(msg)) { | ||
1614 | l_ptr->stats.recv_probes++; | 1515 | l_ptr->stats.recv_probes++; |
1615 | if (msg_size(msg) > sizeof(l_ptr->proto_msg)) | ||
1616 | max_pkt_ack = msg_size(msg); | ||
1617 | } | ||
1618 | 1516 | ||
1619 | /* Protocol message before retransmits, reduce loss risk */ | 1517 | /* Protocol message before retransmits, reduce loss risk */ |
1620 | if (l_ptr->owner->bclink.recv_permitted) | 1518 | if (l_ptr->owner->bclink.recv_permitted) |
@@ -1622,12 +1520,12 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, | |||
1622 | msg_last_bcast(msg)); | 1520 | msg_last_bcast(msg)); |
1623 | 1521 | ||
1624 | if (rec_gap || (msg_probe(msg))) { | 1522 | if (rec_gap || (msg_probe(msg))) { |
1625 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0, | 1523 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, |
1626 | 0, max_pkt_ack); | 1524 | rec_gap, 0, 0); |
1627 | } | 1525 | } |
1628 | if (msg_seq_gap(msg)) { | 1526 | if (msg_seq_gap(msg)) { |
1629 | l_ptr->stats.recv_nacks++; | 1527 | l_ptr->stats.recv_nacks++; |
1630 | tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->outqueue), | 1528 | tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->transmq), |
1631 | msg_seq_gap(msg)); | 1529 | msg_seq_gap(msg)); |
1632 | } | 1530 | } |
1633 | break; | 1531 | break; |
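
The tail of the STATE branch above is the feedback half of the protocol: a
local receive gap or an incoming probe forces an immediate STATE reply
carrying the gap, and a non-zero seq_gap from the peer acts as a NACK that
restarts transmission from the head of transmq. A compressed sketch of just
that decision logic (the function pointers stand in for the real xmit and
retransmit calls):

struct state_info { unsigned int rec_gap, seq_gap, probe; };

static void react(const struct state_info *s,
		  void (*send_state)(unsigned int gap),
		  void (*retransmit)(unsigned int cnt))
{
	if (s->rec_gap || s->probe)
		send_state(s->rec_gap);  /* report the gap / answer the probe */
	if (s->seq_gap)
		retransmit(s->seq_gap);  /* peer NACKed: resend that many */
}
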
@@ -1674,7 +1572,7 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr, | |||
1674 | */ | 1572 | */ |
1675 | void tipc_link_failover_send_queue(struct tipc_link *l_ptr) | 1573 | void tipc_link_failover_send_queue(struct tipc_link *l_ptr) |
1676 | { | 1574 | { |
1677 | u32 msgcount = skb_queue_len(&l_ptr->outqueue); | 1575 | int msgcount; |
1678 | struct tipc_link *tunnel = l_ptr->owner->active_links[0]; | 1576 | struct tipc_link *tunnel = l_ptr->owner->active_links[0]; |
1679 | struct tipc_msg tunnel_hdr; | 1577 | struct tipc_msg tunnel_hdr; |
1680 | struct sk_buff *skb; | 1578 | struct sk_buff *skb; |
@@ -1683,12 +1581,15 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) | |||
1683 | if (!tunnel) | 1581 | if (!tunnel) |
1684 | return; | 1582 | return; |
1685 | 1583 | ||
1686 | tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL, | 1584 | tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL, |
1687 | ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr); | 1585 | FAILOVER_MSG, INT_H_SIZE, l_ptr->addr); |
1586 | skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq); | ||
1587 | tipc_link_purge_backlog(l_ptr); | ||
1588 | msgcount = skb_queue_len(&l_ptr->transmq); | ||
1688 | msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); | 1589 | msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); |
1689 | msg_set_msgcnt(&tunnel_hdr, msgcount); | 1590 | msg_set_msgcnt(&tunnel_hdr, msgcount); |
1690 | 1591 | ||
1691 | if (skb_queue_empty(&l_ptr->outqueue)) { | 1592 | if (skb_queue_empty(&l_ptr->transmq)) { |
1692 | skb = tipc_buf_acquire(INT_H_SIZE); | 1593 | skb = tipc_buf_acquire(INT_H_SIZE); |
1693 | if (skb) { | 1594 | if (skb) { |
1694 | skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE); | 1595 | skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE); |
@@ -1704,7 +1605,7 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) | |||
1704 | split_bundles = (l_ptr->owner->active_links[0] != | 1605 | split_bundles = (l_ptr->owner->active_links[0] != |
1705 | l_ptr->owner->active_links[1]); | 1606 | l_ptr->owner->active_links[1]); |
1706 | 1607 | ||
1707 | skb_queue_walk(&l_ptr->outqueue, skb) { | 1608 | skb_queue_walk(&l_ptr->transmq, skb) { |
1708 | struct tipc_msg *msg = buf_msg(skb); | 1609 | struct tipc_msg *msg = buf_msg(skb); |
1709 | 1610 | ||
1710 | if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) { | 1611 | if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) { |
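
Since this patch splits the old outqueue into transmq (sent but unacked) and
backlogq (not yet sent), failover first merges the two: the splice near the
top of tipc_link_failover_send_queue() pulls the whole backlog onto the tail
of the transmit queue so that msgcount covers every packet the peer must
expect, and tipc_link_purge_backlog() then resets the per-importance
counters. A toy splice under that assumption (a singly linked queue instead
of sk_buff_head):

struct pkt { struct pkt *next; };
struct queue { struct pkt *head, *tail; int len; };

/* Toy equivalent of skb_queue_splice_tail_init(): move all of 'from'
 * onto the tail of 'to' and leave 'from' empty. */
static void splice_tail_init(struct queue *from, struct queue *to)
{
	if (!from->head)
		return;
	if (to->tail)
		to->tail->next = from->head;
	else
		to->head = from->head;
	to->tail = from->tail;
	to->len += from->len;
	from->head = from->tail = NULL;
	from->len = 0;
}
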
@@ -1735,157 +1636,105 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) | |||
1735 | * and sequence order is preserved per sender/receiver socket pair. | 1636 | * and sequence order is preserved per sender/receiver socket pair. |
1736 | * Owner node is locked. | 1637 | * Owner node is locked. |
1737 | */ | 1638 | */ |
1738 | void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, | 1639 | void tipc_link_dup_queue_xmit(struct tipc_link *link, |
1739 | struct tipc_link *tunnel) | 1640 | struct tipc_link *tnl) |
1740 | { | 1641 | { |
1741 | struct sk_buff *skb; | 1642 | struct sk_buff *skb; |
1742 | struct tipc_msg tunnel_hdr; | 1643 | struct tipc_msg tnl_hdr; |
1743 | 1644 | struct sk_buff_head *queue = &link->transmq; | |
1744 | tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL, | 1645 | int mcnt; |
1745 | DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr); | 1646 | |
1746 | msg_set_msgcnt(&tunnel_hdr, skb_queue_len(&l_ptr->outqueue)); | 1647 | tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL, |
1747 | msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); | 1648 | SYNCH_MSG, INT_H_SIZE, link->addr); |
1748 | skb_queue_walk(&l_ptr->outqueue, skb) { | 1649 | mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq); |
1650 | msg_set_msgcnt(&tnl_hdr, mcnt); | ||
1651 | msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id); | ||
1652 | |||
1653 | tunnel_queue: | ||
1654 | skb_queue_walk(queue, skb) { | ||
1749 | struct sk_buff *outskb; | 1655 | struct sk_buff *outskb; |
1750 | struct tipc_msg *msg = buf_msg(skb); | 1656 | struct tipc_msg *msg = buf_msg(skb); |
1751 | u32 length = msg_size(msg); | 1657 | u32 len = msg_size(msg); |
1752 | 1658 | ||
1753 | if (msg_user(msg) == MSG_BUNDLER) | 1659 | msg_set_ack(msg, mod(link->next_in_no - 1)); |
1754 | msg_set_type(msg, CLOSED_MSG); | 1660 | msg_set_bcast_ack(msg, link->owner->bclink.last_in); |
1755 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */ | 1661 | msg_set_size(&tnl_hdr, len + INT_H_SIZE); |
1756 | msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); | 1662 | outskb = tipc_buf_acquire(len + INT_H_SIZE); |
1757 | msg_set_size(&tunnel_hdr, length + INT_H_SIZE); | ||
1758 | outskb = tipc_buf_acquire(length + INT_H_SIZE); | ||
1759 | if (outskb == NULL) { | 1663 | if (outskb == NULL) { |
1760 | pr_warn("%sunable to send duplicate msg\n", | 1664 | pr_warn("%sunable to send duplicate msg\n", |
1761 | link_co_err); | 1665 | link_co_err); |
1762 | return; | 1666 | return; |
1763 | } | 1667 | } |
1764 | skb_copy_to_linear_data(outskb, &tunnel_hdr, INT_H_SIZE); | 1668 | skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE); |
1765 | skb_copy_to_linear_data_offset(outskb, INT_H_SIZE, skb->data, | 1669 | skb_copy_to_linear_data_offset(outskb, INT_H_SIZE, |
1766 | length); | 1670 | skb->data, len); |
1767 | __tipc_link_xmit_skb(tunnel, outskb); | 1671 | __tipc_link_xmit_skb(tnl, outskb); |
1768 | if (!tipc_link_is_up(l_ptr)) | 1672 | if (!tipc_link_is_up(link)) |
1769 | return; | 1673 | return; |
1770 | } | 1674 | } |
1771 | } | 1675 | if (queue == &link->backlogq) |
1772 | |||
1773 | /** | ||
1774 | * buf_extract - extracts embedded TIPC message from another message | ||
1775 | * @skb: encapsulating message buffer | ||
1776 | * @from_pos: offset to extract from | ||
1777 | * | ||
1778 | * Returns a new message buffer containing an embedded message. The | ||
1779 | * encapsulating buffer is left unchanged. | ||
1780 | */ | ||
1781 | static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos) | ||
1782 | { | ||
1783 | struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos); | ||
1784 | u32 size = msg_size(msg); | ||
1785 | struct sk_buff *eb; | ||
1786 | |||
1787 | eb = tipc_buf_acquire(size); | ||
1788 | if (eb) | ||
1789 | skb_copy_to_linear_data(eb, msg, size); | ||
1790 | return eb; | ||
1791 | } | ||
1792 | |||
1793 | /* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet. | ||
1794 | * Owner node is locked. | ||
1795 | */ | ||
1796 | static void tipc_link_dup_rcv(struct tipc_link *l_ptr, | ||
1797 | struct sk_buff *t_buf) | ||
1798 | { | ||
1799 | struct sk_buff *buf; | ||
1800 | |||
1801 | if (!tipc_link_is_up(l_ptr)) | ||
1802 | return; | 1676 | return; |
1803 | 1677 | queue = &link->backlogq; | |
1804 | buf = buf_extract(t_buf, INT_H_SIZE); | 1678 | goto tunnel_queue; |
1805 | if (buf == NULL) { | ||
1806 | pr_warn("%sfailed to extract inner dup pkt\n", link_co_err); | ||
1807 | return; | ||
1808 | } | ||
1809 | |||
1810 | /* Add buffer to deferred queue, if applicable: */ | ||
1811 | link_handle_out_of_seq_msg(l_ptr, buf); | ||
1812 | } | 1679 | } |
1813 | 1680 | ||
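
The rewritten tipc_link_dup_queue_xmit() above uses the tunnel_queue label to
make one loop body serve two queues: first every packet in transmq, then,
unless the link went down mid-walk, every packet in backlogq, each wrapped in
a SYNCH_MSG header and pushed onto the new link. The same shape without the
goto, as a sketch over toy queue types (wrap_and_send stands in for the
header wrapping plus __tipc_link_xmit_skb() on the tunnel link):

struct m { struct m *next; };
struct mq { struct m *head; };

static void tunnel_all(struct mq *transmq, struct mq *backlogq,
		       void (*wrap_and_send)(struct m *))
{
	struct mq *qs[2] = { transmq, backlogq };
	struct m *p;
	int i;

	for (i = 0; i < 2; i++)
		for (p = qs[i]->head; p; p = p->next)
			wrap_and_send(p);
}
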
1814 | /* tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet | 1681 | /* tipc_link_failover_rcv(): Receive a tunnelled FAILOVER_MSG packet |
1815 | * Owner node is locked. | 1682 | * Owner node is locked. |
1816 | */ | 1683 | */ |
1817 | static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr, | 1684 | static bool tipc_link_failover_rcv(struct tipc_link *link, |
1818 | struct sk_buff *t_buf) | 1685 | struct sk_buff **skb) |
1819 | { | 1686 | { |
1820 | struct tipc_msg *t_msg = buf_msg(t_buf); | 1687 | struct tipc_msg *msg = buf_msg(*skb); |
1821 | struct sk_buff *buf = NULL; | 1688 | struct sk_buff *iskb = NULL; |
1822 | struct tipc_msg *msg; | 1689 | struct tipc_link *pl = NULL; |
1823 | 1690 | int bearer_id = msg_bearer_id(msg); | |
1824 | if (tipc_link_is_up(l_ptr)) | 1691 | int pos = 0; |
1825 | tipc_link_reset(l_ptr); | ||
1826 | |||
1827 | /* First failover packet? */ | ||
1828 | if (l_ptr->exp_msg_count == START_CHANGEOVER) | ||
1829 | l_ptr->exp_msg_count = msg_msgcnt(t_msg); | ||
1830 | |||
1831 | /* Should there be an inner packet? */ | ||
1832 | if (l_ptr->exp_msg_count) { | ||
1833 | l_ptr->exp_msg_count--; | ||
1834 | buf = buf_extract(t_buf, INT_H_SIZE); | ||
1835 | if (buf == NULL) { | ||
1836 | pr_warn("%sno inner failover pkt\n", link_co_err); | ||
1837 | goto exit; | ||
1838 | } | ||
1839 | msg = buf_msg(buf); | ||
1840 | 1692 | ||
1841 | if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) { | 1693 | if (msg_type(msg) != FAILOVER_MSG) { |
1842 | kfree_skb(buf); | 1694 | pr_warn("%sunknown tunnel pkt received\n", link_co_err); |
1843 | buf = NULL; | 1695 | goto exit; |
1844 | goto exit; | ||
1845 | } | ||
1846 | if (msg_user(msg) == MSG_FRAGMENTER) { | ||
1847 | l_ptr->stats.recv_fragments++; | ||
1848 | tipc_buf_append(&l_ptr->reasm_buf, &buf); | ||
1849 | } | ||
1850 | } | 1696 | } |
1851 | exit: | 1697 | if (bearer_id >= MAX_BEARERS) |
1852 | if ((!l_ptr->exp_msg_count) && (l_ptr->flags & LINK_STOPPED)) | 1698 | goto exit; |
1853 | tipc_link_delete(l_ptr); | ||
1854 | return buf; | ||
1855 | } | ||
1856 | 1699 | ||
1857 | /* tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent | 1700 | if (bearer_id == link->bearer_id) |
1858 | * via other link as result of a failover (ORIGINAL_MSG) or | 1701 | goto exit; |
1859 | * a new active link (DUPLICATE_MSG). Failover packets are | ||
1860 | * returned to the active link for delivery upwards. | ||
1861 | * Owner node is locked. | ||
1862 | */ | ||
1863 | static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr, | ||
1864 | struct sk_buff **buf) | ||
1865 | { | ||
1866 | struct sk_buff *t_buf = *buf; | ||
1867 | struct tipc_link *l_ptr; | ||
1868 | struct tipc_msg *t_msg = buf_msg(t_buf); | ||
1869 | u32 bearer_id = msg_bearer_id(t_msg); | ||
1870 | 1702 | ||
1871 | *buf = NULL; | 1703 | pl = link->owner->links[bearer_id]; |
1704 | if (pl && tipc_link_is_up(pl)) | ||
1705 | tipc_link_reset(pl); | ||
1872 | 1706 | ||
1873 | if (bearer_id >= MAX_BEARERS) | 1707 | if (link->failover_pkts == FIRST_FAILOVER) |
1708 | link->failover_pkts = msg_msgcnt(msg); | ||
1709 | |||
1710 | /* Should we expect an inner packet? */ | ||
1711 | if (!link->failover_pkts) | ||
1874 | goto exit; | 1712 | goto exit; |
1875 | 1713 | ||
1876 | l_ptr = n_ptr->links[bearer_id]; | 1714 | if (!tipc_msg_extract(*skb, &iskb, &pos)) { |
1877 | if (!l_ptr) | 1715 | pr_warn("%sno inner failover pkt\n", link_co_err); |
1716 | *skb = NULL; | ||
1878 | goto exit; | 1717 | goto exit; |
1718 | } | ||
1719 | link->failover_pkts--; | ||
1720 | *skb = NULL; | ||
1879 | 1721 | ||
1880 | if (msg_type(t_msg) == DUPLICATE_MSG) | 1722 | /* Was this packet already delivered? */ |
1881 | tipc_link_dup_rcv(l_ptr, t_buf); | 1723 | if (less(buf_seqno(iskb), link->failover_checkpt)) { |
1882 | else if (msg_type(t_msg) == ORIGINAL_MSG) | 1724 | kfree_skb(iskb); |
1883 | *buf = tipc_link_failover_rcv(l_ptr, t_buf); | 1725 | iskb = NULL; |
1884 | else | 1726 | goto exit; |
1885 | pr_warn("%sunknown tunnel pkt received\n", link_co_err); | 1727 | } |
1728 | if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) { | ||
1729 | link->stats.recv_fragments++; | ||
1730 | tipc_buf_append(&link->failover_skb, &iskb); | ||
1731 | } | ||
1886 | exit: | 1732 | exit: |
1887 | kfree_skb(t_buf); | 1733 | if (!link->failover_pkts && pl) |
1888 | return *buf != NULL; | 1734 | pl->flags &= ~LINK_FAILINGOVER; |
1735 | kfree_skb(*skb); | ||
1736 | *skb = iskb; | ||
1737 | return *skb; | ||
1889 | } | 1738 | } |
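
Two details in the receive path above are easy to miss: the parallel link pl
is reset as soon as the first tunnelled packet arrives, but its
LINK_FAILINGOVER flag is cleared only once failover_pkts reaches zero; and
any inner packet whose sequence number lies before failover_checkpt was
already delivered on the failed link and is dropped. A sketch of that
duplicate filter, assuming wrap-safe 16-bit sequence arithmetic in the spirit
of the kernel's less() helper:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-in, not the kernel's less(): wrap-safe
 * "a comes before b" for 16-bit sequence numbers. */
static bool seq_less(uint16_t a, uint16_t b)
{
	return (int16_t)(a - b) < 0;
}

static bool already_delivered(uint16_t seqno, uint16_t checkpoint)
{
	return seq_less(seqno, checkpoint);
}
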
1890 | 1739 | ||
1891 | static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol) | 1740 | static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol) |
@@ -1900,23 +1749,16 @@ static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol) | |||
1900 | l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4); | 1749 | l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4); |
1901 | } | 1750 | } |
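
The abort_limit assignment above is plain integer arithmetic: the link
tolerance in milliseconds divided by a quarter of the continuity interval
gives the number of unanswered supervision rounds the link survives before it
is declared failed. A restatement with made-up values (1500 ms tolerance,
375 ms interval):

/* 1500 / (375 / 4) = 1500 / 93 = 16 with integer division. */
static unsigned int abort_rounds(unsigned int tol_ms, unsigned int intv_ms)
{
	return tol_ms / (intv_ms / 4);
}
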
1902 | 1751 | ||
1903 | void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window) | 1752 | void tipc_link_set_queue_limits(struct tipc_link *l, u32 win) |
1904 | { | 1753 | { |
1905 | /* Data messages from this node, inclusive FIRST_FRAGM */ | 1754 | int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE); |
1906 | l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window; | 1755 | |
1907 | l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4; | 1756 | l->window = win; |
1908 | l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5; | 1757 | l->backlog[TIPC_LOW_IMPORTANCE].limit = win / 2; |
1909 | l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6; | 1758 | l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = win; |
1910 | /* Transiting data messages,inclusive FIRST_FRAGM */ | 1759 | l->backlog[TIPC_HIGH_IMPORTANCE].limit = win / 2 * 3; |
1911 | l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300; | 1760 | l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2; |
1912 | l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600; | 1761 | l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk; |
1913 | l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900; | ||
1914 | l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200; | ||
1915 | l_ptr->queue_limit[CONN_MANAGER] = 1200; | ||
1916 | l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500; | ||
1917 | l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000; | ||
1918 | /* FRAGMENT and LAST_FRAGMENT packets */ | ||
1919 | l_ptr->queue_limit[MSG_FRAGMENTER] = 4000; | ||
1920 | } | 1762 | } |
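
The new limits above are simple multiples of the send window: half a window
for LOW, one for MEDIUM, one and a half for HIGH and two for CRITICAL, while
TIPC_SYSTEM_IMPORTANCE gets max_bulk, enough backlog to carry one complete
name-table bulk distribution (total publications divided by how many
distribution items fit in one MTU). A sketch of that last computation; the
two constants are assumed stand-ins for TIPC_MAX_PUBLICATIONS and ITEM_SIZE,
not their real values:

#define MAX_PUBLICATIONS 65535  /* assumed, for illustration */
#define DISTR_ITEM_SIZE  20     /* assumed, for illustration */

static unsigned int bulk_backlog_limit(unsigned int mtu)
{
	unsigned int items_per_pkt = mtu / DISTR_ITEM_SIZE;

	return MAX_PUBLICATIONS / items_per_pkt;
}
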
1921 | 1763 | ||
1922 | /* tipc_link_find_owner - locate owner node of link by link's name | 1764 | /* tipc_link_find_owner - locate owner node of link by link's name |
@@ -2081,14 +1923,14 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info) | |||
2081 | 1923 | ||
2082 | tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]); | 1924 | tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]); |
2083 | link_set_supervision_props(link, tol); | 1925 | link_set_supervision_props(link, tol); |
2084 | tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0, 0); | 1926 | tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0); |
2085 | } | 1927 | } |
2086 | if (props[TIPC_NLA_PROP_PRIO]) { | 1928 | if (props[TIPC_NLA_PROP_PRIO]) { |
2087 | u32 prio; | 1929 | u32 prio; |
2088 | 1930 | ||
2089 | prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); | 1931 | prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); |
2090 | link->priority = prio; | 1932 | link->priority = prio; |
2091 | tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio, 0); | 1933 | tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio); |
2092 | } | 1934 | } |
2093 | if (props[TIPC_NLA_PROP_WIN]) { | 1935 | if (props[TIPC_NLA_PROP_WIN]) { |
2094 | u32 win; | 1936 | u32 win; |
@@ -2193,7 +2035,7 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg, | |||
2193 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, | 2035 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, |
2194 | tipc_cluster_mask(tn->own_addr))) | 2036 | tipc_cluster_mask(tn->own_addr))) |
2195 | goto attr_msg_full; | 2037 | goto attr_msg_full; |
2196 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt)) | 2038 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu)) |
2197 | goto attr_msg_full; | 2039 | goto attr_msg_full; |
2198 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no)) | 2040 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no)) |
2199 | goto attr_msg_full; | 2041 | goto attr_msg_full; |
@@ -2215,7 +2057,7 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg, | |||
2215 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance)) | 2057 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance)) |
2216 | goto prop_msg_full; | 2058 | goto prop_msg_full; |
2217 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, | 2059 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, |
2218 | link->queue_limit[TIPC_LOW_IMPORTANCE])) | 2060 | link->window)) |
2219 | goto prop_msg_full; | 2061 | goto prop_msg_full; |
2220 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority)) | 2062 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority)) |
2221 | goto prop_msg_full; | 2063 | goto prop_msg_full; |
@@ -2281,7 +2123,6 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
2281 | msg.seq = cb->nlh->nlmsg_seq; | 2123 | msg.seq = cb->nlh->nlmsg_seq; |
2282 | 2124 | ||
2283 | rcu_read_lock(); | 2125 | rcu_read_lock(); |
2284 | |||
2285 | if (prev_node) { | 2126 | if (prev_node) { |
2286 | node = tipc_node_find(net, prev_node); | 2127 | node = tipc_node_find(net, prev_node); |
2287 | if (!node) { | 2128 | if (!node) { |
@@ -2294,6 +2135,7 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
2294 | cb->prev_seq = 1; | 2135 | cb->prev_seq = 1; |
2295 | goto out; | 2136 | goto out; |
2296 | } | 2137 | } |
2138 | tipc_node_put(node); | ||
2297 | 2139 | ||
2298 | list_for_each_entry_continue_rcu(node, &tn->node_list, | 2140 | list_for_each_entry_continue_rcu(node, &tn->node_list, |
2299 | list) { | 2141 | list) { |
@@ -2301,6 +2143,7 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
2301 | err = __tipc_nl_add_node_links(net, &msg, node, | 2143 | err = __tipc_nl_add_node_links(net, &msg, node, |
2302 | &prev_link); | 2144 | &prev_link); |
2303 | tipc_node_unlock(node); | 2145 | tipc_node_unlock(node); |
2146 | tipc_node_put(node); | ||
2304 | if (err) | 2147 | if (err) |
2305 | goto out; | 2148 | goto out; |
2306 | 2149 | ||
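
The two tipc_node_put() calls added above pair with the node references taken
by tipc_node_find() and the other lookups introduced in this patch, so a node
can no longer be freed while a resumable netlink dump still points at it. A
toy version of the get/put discipline (plain atomic counter standing in for
the kref inside tipc_node):

#include <stdatomic.h>
#include <stdlib.h>

struct node { atomic_int refcnt; /* ... */ };

static void node_get(struct node *n)
{
	atomic_fetch_add(&n->refcnt, 1);
}

static void node_put(struct node *n)
{
	if (atomic_fetch_sub(&n->refcnt, 1) == 1)
		free(n);  /* last reference dropped */
}
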