Diffstat (limited to 'net/tipc/link.c')

 -rw-r--r--  net/tipc/link.c | 862
 1 file changed, 352 insertions(+), 510 deletions(-)
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 14f09b3cb87c..a6b30df6ec02 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1,7 +1,7 @@
 /*
  * net/tipc/link.c: TIPC link code
  *
- * Copyright (c) 1996-2007, 2012-2014, Ericsson AB
+ * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
  * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
  * All rights reserved.
  *
@@ -35,6 +35,7 @@
  */

 #include "core.h"
+#include "subscr.h"
 #include "link.h"
 #include "bcast.h"
 #include "socket.h"
@@ -88,24 +89,14 @@ static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
 #define TIMEOUT_EVT     560817u	/* link timer expired */

 /*
- * The following two 'message types' is really just implementation
- * data conveniently stored in the message header.
- * They must not be considered part of the protocol
+ * State value stored in 'failover_pkts'
  */
-#define OPEN_MSG   0
-#define CLOSED_MSG 1
-
-/*
- * State value stored in 'exp_msg_count'
- */
-#define START_CHANGEOVER 100000u
+#define FIRST_FAILOVER 0xffffu

 static void link_handle_out_of_seq_msg(struct tipc_link *link,
				       struct sk_buff *skb);
 static void tipc_link_proto_rcv(struct tipc_link *link,
				struct sk_buff *skb);
-static int  tipc_link_tunnel_rcv(struct tipc_node *node,
-				 struct sk_buff **skb);
 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
 static void link_state_event(struct tipc_link *l_ptr, u32 event);
 static void link_reset_statistics(struct tipc_link *l_ptr);
@@ -114,7 +105,7 @@ static void tipc_link_sync_xmit(struct tipc_link *l);
 static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
 static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
-
+static bool tipc_link_failover_rcv(struct tipc_link *l, struct sk_buff **skb);
 /*
  * Simple link routines
  */
@@ -138,32 +129,11 @@ static void tipc_link_put(struct tipc_link *l_ptr)
	kref_put(&l_ptr->ref, tipc_link_release);
 }

-static void link_init_max_pkt(struct tipc_link *l_ptr)
+static struct tipc_link *tipc_parallel_link(struct tipc_link *l)
 {
-	struct tipc_node *node = l_ptr->owner;
-	struct tipc_net *tn = net_generic(node->net, tipc_net_id);
-	struct tipc_bearer *b_ptr;
-	u32 max_pkt;
-
-	rcu_read_lock();
-	b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
-	if (!b_ptr) {
-		rcu_read_unlock();
-		return;
-	}
-	max_pkt = (b_ptr->mtu & ~3);
-	rcu_read_unlock();
-
-	if (max_pkt > MAX_MSG_SIZE)
-		max_pkt = MAX_MSG_SIZE;
-
-	l_ptr->max_pkt_target = max_pkt;
-	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
-		l_ptr->max_pkt = l_ptr->max_pkt_target;
-	else
-		l_ptr->max_pkt = MAX_PKT_DEFAULT;
-
-	l_ptr->max_pkt_probes = 0;
+	if (l->owner->active_links[0] != l)
+		return l->owner->active_links[0];
+	return l->owner->active_links[1];
 }

 /*
@@ -194,10 +164,10 @@ static void link_timeout(unsigned long data)
	tipc_node_lock(l_ptr->owner);

	/* update counters used in statistical profiling of send traffic */
-	l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->outqueue);
+	l_ptr->stats.accu_queue_sz += skb_queue_len(&l_ptr->transmq);
	l_ptr->stats.queue_sz_counts++;

-	skb = skb_peek(&l_ptr->outqueue);
+	skb = skb_peek(&l_ptr->transmq);
	if (skb) {
		struct tipc_msg *msg = buf_msg(skb);
		u32 length = msg_size(msg);
@@ -229,7 +199,7 @@ static void link_timeout(unsigned long data)
	/* do all other link processing performed on a periodic basis */
	link_state_event(l_ptr, TIMEOUT_EVT);

-	if (l_ptr->next_out)
+	if (skb_queue_len(&l_ptr->backlogq))
		tipc_link_push_packets(l_ptr);

	tipc_node_unlock(l_ptr->owner);
@@ -305,16 +275,15 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
	msg_set_session(msg, (tn->random & 0xffff));
	msg_set_bearer_id(msg, b_ptr->identity);
	strcpy((char *)msg_data(msg), if_name);
-
+	l_ptr->net_plane = b_ptr->net_plane;
+	l_ptr->advertised_mtu = b_ptr->mtu;
+	l_ptr->mtu = l_ptr->advertised_mtu;
	l_ptr->priority = b_ptr->priority;
	tipc_link_set_queue_limits(l_ptr, b_ptr->window);
-
-	l_ptr->net_plane = b_ptr->net_plane;
-	link_init_max_pkt(l_ptr);
-
	l_ptr->next_out_no = 1;
-	__skb_queue_head_init(&l_ptr->outqueue);
-	__skb_queue_head_init(&l_ptr->deferred_queue);
+	__skb_queue_head_init(&l_ptr->transmq);
+	__skb_queue_head_init(&l_ptr->backlogq);
+	__skb_queue_head_init(&l_ptr->deferdq);
	skb_queue_head_init(&l_ptr->wakeupq);
	skb_queue_head_init(&l_ptr->inputq);
	skb_queue_head_init(&l_ptr->namedq);
@@ -327,15 +296,19 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
 }

 /**
- * link_delete - Conditional deletion of link.
- * If timer still running, real delete is done when it expires
- * @link: link to be deleted
+ * tipc_link_delete - Delete a link
+ * @l: link to be deleted
  */
-void tipc_link_delete(struct tipc_link *link)
+void tipc_link_delete(struct tipc_link *l)
 {
-	tipc_link_reset_fragments(link);
-	tipc_node_detach_link(link->owner, link);
-	tipc_link_put(link);
+	tipc_link_reset(l);
+	if (del_timer(&l->timer))
+		tipc_link_put(l);
+	l->flags |= LINK_STOPPED;
+	/* Delete link now, or when timer is finished: */
+	tipc_link_reset_fragments(l);
+	tipc_node_detach_link(l->owner, l);
+	tipc_link_put(l);
 }

 void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
@@ -349,16 +322,7 @@ void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
	list_for_each_entry_rcu(node, &tn->node_list, list) {
		tipc_node_lock(node);
		link = node->links[bearer_id];
-		if (!link) {
-			tipc_node_unlock(node);
-			continue;
-		}
-		tipc_link_reset(link);
-		if (del_timer(&link->timer))
-			tipc_link_put(link);
-		link->flags |= LINK_STOPPED;
-		/* Delete link now, or when failover is finished: */
-		if (shutting_down || !tipc_node_is_up(node))
+		if (link)
			tipc_link_delete(link);
		tipc_node_unlock(node);
	}
@@ -366,28 +330,43 @@
 }

 /**
- * link_schedule_user - schedule user for wakeup after congestion
+ * link_schedule_user - schedule a message sender for wakeup after congestion
  * @link: congested link
- * @oport: sending port
- * @chain_sz: size of buffer chain that was attempted sent
- * @imp: importance of message attempted sent
+ * @list: message that was attempted sent
  * Create pseudo msg to send back to user when congestion abates
+ * Only consumes message if there is an error
  */
-static bool link_schedule_user(struct tipc_link *link, u32 oport,
-			       uint chain_sz, uint imp)
+static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
 {
-	struct sk_buff *buf;
+	struct tipc_msg *msg = buf_msg(skb_peek(list));
+	int imp = msg_importance(msg);
+	u32 oport = msg_origport(msg);
+	u32 addr = link_own_addr(link);
+	struct sk_buff *skb;

-	buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
-			      link_own_addr(link), link_own_addr(link),
-			      oport, 0, 0);
-	if (!buf)
-		return false;
-	TIPC_SKB_CB(buf)->chain_sz = chain_sz;
-	TIPC_SKB_CB(buf)->chain_imp = imp;
-	skb_queue_tail(&link->wakeupq, buf);
+	/* This really cannot happen... */
+	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
+		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
+		tipc_link_reset(link);
+		goto err;
+	}
+	/* Non-blocking sender: */
+	if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
+		return -ELINKCONG;
+
+	/* Create and schedule wakeup pseudo message */
+	skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
+			      addr, addr, oport, 0, 0);
+	if (!skb)
+		goto err;
+	TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
+	TIPC_SKB_CB(skb)->chain_imp = imp;
+	skb_queue_tail(&link->wakeupq, skb);
	link->stats.link_congs++;
-	return true;
+	return -ELINKCONG;
+err:
+	__skb_queue_purge(list);
+	return -ENOBUFS;
 }

 /**
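The hunk above changes link_schedule_user() to derive port and importance from the message itself and to report congestion through errno-style return values. A minimal, self-contained userspace sketch of that return-value contract follows; ELINKCONG is TIPC-private, so a stand-in value is used, and all names are illustrative assumptions, not kernel API:

    /* Sketch only (not kernel code): return-value contract of the new
     * link_schedule_user(). */
    #include <errno.h>
    #include <stdio.h>

    #define ELINKCONG 413               /* stand-in for the kernel's value */

    enum { TIPC_LOW, TIPC_MEDIUM, TIPC_HIGH, TIPC_CRITICAL, TIPC_SYSTEM };

    static int schedule_user(int imp, int wakeup_pending, int alloc_ok)
    {
        if (imp > TIPC_CRITICAL)        /* "cannot happen": link is reset */
            return -ENOBUFS;            /* and the buffer chain purged */
        if (wakeup_pending)             /* non-blocking sender */
            return -ELINKCONG;
        if (!alloc_ok)                  /* SOCK_WAKEUP pseudo msg failed */
            return -ENOBUFS;
        return -ELINKCONG;              /* sender sleeps until wakeup */
    }

    int main(void)
    {
        printf("%d\n", schedule_user(TIPC_LOW, 0, 1));    /* -ELINKCONG */
        printf("%d\n", schedule_user(TIPC_SYSTEM, 0, 1)); /* -ENOBUFS */
        return 0;
    }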
@@ -396,19 +375,22 @@ static bool link_schedule_user(struct tipc_link *link, u32 oport,
  * Move a number of waiting users, as permitted by available space in
  * the send queue, from link wait queue to node wait queue for wakeup
  */
-void link_prepare_wakeup(struct tipc_link *link)
+void link_prepare_wakeup(struct tipc_link *l)
 {
-	uint pend_qsz = skb_queue_len(&link->outqueue);
+	int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
+	int imp, lim;
	struct sk_buff *skb, *tmp;

-	skb_queue_walk_safe(&link->wakeupq, skb, tmp) {
-		if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(skb)->chain_imp])
+	skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
+		imp = TIPC_SKB_CB(skb)->chain_imp;
+		lim = l->window + l->backlog[imp].limit;
+		pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
+		if ((pnd[imp] + l->backlog[imp].len) >= lim)
			break;
-		pend_qsz += TIPC_SKB_CB(skb)->chain_sz;
-		skb_unlink(skb, &link->wakeupq);
-		skb_queue_tail(&link->inputq, skb);
-		link->owner->inputq = &link->inputq;
-		link->owner->action_flags |= TIPC_MSG_EVT;
+		skb_unlink(skb, &l->wakeupq);
+		skb_queue_tail(&l->inputq, skb);
+		l->owner->inputq = &l->inputq;
+		l->owner->action_flags |= TIPC_MSG_EVT;
	}
 }

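The reworked link_prepare_wakeup() above replaces the single queue-size check with per-importance accounting. A compact sketch of the arithmetic, assuming the five TIPC importance levels; names are illustrative:

    /* Sketch only (not kernel code): a blocked sender of importance 'imp'
     * whose chain held 'chain_sz' packets is woken only while the
     * projected backlog stays below window + per-level limit. */
    #include <stdbool.h>

    #define LEVELS 5                    /* TIPC_LOW .. TIPC_SYSTEM */

    struct backlog { int len, limit; };

    static bool may_wake(int imp, int chain_sz, int window,
                         const struct backlog bl[LEVELS], int pnd[LEVELS])
    {
        int lim = window + bl[imp].limit;

        pnd[imp] += chain_sz;           /* accumulate over the wakeup walk */
        return (pnd[imp] + bl[imp].len) < lim;
    }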
@@ -422,31 +404,42 @@ void tipc_link_reset_fragments(struct tipc_link *l_ptr)
	l_ptr->reasm_buf = NULL;
 }

+static void tipc_link_purge_backlog(struct tipc_link *l)
+{
+	__skb_queue_purge(&l->backlogq);
+	l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
+	l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
+	l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
+	l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
+	l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
+}
+
 /**
  * tipc_link_purge_queues - purge all pkt queues associated with link
  * @l_ptr: pointer to link
  */
 void tipc_link_purge_queues(struct tipc_link *l_ptr)
 {
-	__skb_queue_purge(&l_ptr->deferred_queue);
-	__skb_queue_purge(&l_ptr->outqueue);
+	__skb_queue_purge(&l_ptr->deferdq);
+	__skb_queue_purge(&l_ptr->transmq);
+	tipc_link_purge_backlog(l_ptr);
	tipc_link_reset_fragments(l_ptr);
 }

 void tipc_link_reset(struct tipc_link *l_ptr)
 {
	u32 prev_state = l_ptr->state;
-	u32 checkpoint = l_ptr->next_in_no;
	int was_active_link = tipc_link_is_active(l_ptr);
	struct tipc_node *owner = l_ptr->owner;
+	struct tipc_link *pl = tipc_parallel_link(l_ptr);

	msg_set_session(l_ptr->pmsg, ((msg_session(l_ptr->pmsg) + 1) & 0xffff));

	/* Link is down, accept any session */
	l_ptr->peer_session = INVALID_SESSION;

-	/* Prepare for max packet size negotiation */
-	link_init_max_pkt(l_ptr);
+	/* Prepare for renewed mtu size negotiation */
+	l_ptr->mtu = l_ptr->advertised_mtu;

	l_ptr->state = RESET_UNKNOWN;

@@ -456,21 +449,26 @@ void tipc_link_reset(struct tipc_link *l_ptr)
	tipc_node_link_down(l_ptr->owner, l_ptr);
	tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);

-	if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
-		l_ptr->reset_checkpoint = checkpoint;
-		l_ptr->exp_msg_count = START_CHANGEOVER;
+	if (was_active_link && tipc_node_is_up(l_ptr->owner) && (pl != l_ptr)) {
+		l_ptr->flags |= LINK_FAILINGOVER;
+		l_ptr->failover_checkpt = l_ptr->next_in_no;
+		pl->failover_pkts = FIRST_FAILOVER;
+		pl->failover_checkpt = l_ptr->next_in_no;
+		pl->failover_skb = l_ptr->reasm_buf;
+	} else {
+		kfree_skb(l_ptr->reasm_buf);
	}
-
	/* Clean up all queues, except inputq: */
-	__skb_queue_purge(&l_ptr->outqueue);
-	__skb_queue_purge(&l_ptr->deferred_queue);
+	__skb_queue_purge(&l_ptr->transmq);
+	__skb_queue_purge(&l_ptr->deferdq);
	if (!owner->inputq)
		owner->inputq = &l_ptr->inputq;
	skb_queue_splice_init(&l_ptr->wakeupq, owner->inputq);
	if (!skb_queue_empty(owner->inputq))
		owner->action_flags |= TIPC_MSG_EVT;
-	l_ptr->next_out = NULL;
-	l_ptr->unacked_window = 0;
+	tipc_link_purge_backlog(l_ptr);
+	l_ptr->reasm_buf = NULL;
+	l_ptr->rcv_unacked = 0;
	l_ptr->checkpoint = 1;
	l_ptr->next_out_no = 1;
	l_ptr->fsm_msg_cnt = 0;
@@ -521,8 +519,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
	if (!(l_ptr->flags & LINK_STARTED) && (event != STARTING_EVT))
		return;		/* Not yet. */

-	/* Check whether changeover is going on */
-	if (l_ptr->exp_msg_count) {
+	if (l_ptr->flags & LINK_FAILINGOVER) {
		if (event == TIMEOUT_EVT)
			link_set_timer(l_ptr, cont_intv);
		return;
@@ -539,11 +536,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
			l_ptr->checkpoint = l_ptr->next_in_no;
			if (tipc_bclink_acks_missing(l_ptr->owner)) {
				tipc_link_proto_xmit(l_ptr, STATE_MSG,
-						     0, 0, 0, 0, 0);
-				l_ptr->fsm_msg_cnt++;
-			} else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
-				tipc_link_proto_xmit(l_ptr, STATE_MSG,
-						     1, 0, 0, 0, 0);
+						     0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
			}
			link_set_timer(l_ptr, cont_intv);
@@ -551,7 +544,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
		}
		l_ptr->state = WORKING_UNKNOWN;
		l_ptr->fsm_msg_cnt = 0;
-		tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+		tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
		l_ptr->fsm_msg_cnt++;
		link_set_timer(l_ptr, cont_intv / 4);
		break;
@@ -562,7 +555,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-					     0, 0, 0, 0, 0);
+					     0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
@@ -585,7 +578,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-					     0, 0, 0, 0, 0);
+					     0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
@@ -596,13 +589,13 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
				l_ptr->checkpoint = l_ptr->next_in_no;
				if (tipc_bclink_acks_missing(l_ptr->owner)) {
					tipc_link_proto_xmit(l_ptr, STATE_MSG,
-							     0, 0, 0, 0, 0);
+							     0, 0, 0, 0);
					l_ptr->fsm_msg_cnt++;
				}
				link_set_timer(l_ptr, cont_intv);
			} else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
				tipc_link_proto_xmit(l_ptr, STATE_MSG,
-						     1, 0, 0, 0, 0);
+						     1, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv / 4);
			} else {	/* Link has failed */
@@ -612,7 +605,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
				l_ptr->state = RESET_UNKNOWN;
				l_ptr->fsm_msg_cnt = 0;
				tipc_link_proto_xmit(l_ptr, RESET_MSG,
-						     0, 0, 0, 0, 0);
+						     0, 0, 0, 0);
				l_ptr->fsm_msg_cnt++;
				link_set_timer(l_ptr, cont_intv);
			}
@@ -632,7 +625,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
-			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_sync_xmit(l_ptr);
@@ -642,7 +635,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
			l_ptr->state = RESET_RESET;
			l_ptr->fsm_msg_cnt = 0;
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-					     1, 0, 0, 0, 0);
+					     1, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
@@ -652,7 +645,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
			link_set_timer(l_ptr, cont_intv);
			break;
		case TIMEOUT_EVT:
-			tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
+			tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
@@ -670,7 +663,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
			l_ptr->state = WORKING_WORKING;
			l_ptr->fsm_msg_cnt = 0;
			link_activate(l_ptr);
-			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
+			tipc_link_proto_xmit(l_ptr, STATE_MSG, 1, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			if (l_ptr->owner->working_links == 1)
				tipc_link_sync_xmit(l_ptr);
@@ -680,7 +673,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
			break;
		case TIMEOUT_EVT:
			tipc_link_proto_xmit(l_ptr, ACTIVATE_MSG,
-					     0, 0, 0, 0, 0);
+					     0, 0, 0, 0);
			l_ptr->fsm_msg_cnt++;
			link_set_timer(l_ptr, cont_intv);
			break;
@@ -693,101 +686,65 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
	}
 }

-/* tipc_link_cong: determine return value and how to treat the
- * sent buffer during link congestion.
- * - For plain, errorless user data messages we keep the buffer and
- *   return -ELINKONG.
- * - For all other messages we discard the buffer and return -EHOSTUNREACH
- * - For TIPC internal messages we also reset the link
- */
-static int tipc_link_cong(struct tipc_link *link, struct sk_buff_head *list)
-{
-	struct sk_buff *skb = skb_peek(list);
-	struct tipc_msg *msg = buf_msg(skb);
-	uint imp = tipc_msg_tot_importance(msg);
-	u32 oport = msg_tot_origport(msg);
-
-	if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
-		pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
-		tipc_link_reset(link);
-		goto drop;
-	}
-	if (unlikely(msg_errcode(msg)))
-		goto drop;
-	if (unlikely(msg_reroute_cnt(msg)))
-		goto drop;
-	if (TIPC_SKB_CB(skb)->wakeup_pending)
-		return -ELINKCONG;
-	if (link_schedule_user(link, oport, skb_queue_len(list), imp))
-		return -ELINKCONG;
-drop:
-	__skb_queue_purge(list);
-	return -EHOSTUNREACH;
-}
-
 /**
  * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
  * @link: link to use
  * @list: chain of buffers containing message
  *
- * Consumes the buffer chain, except when returning -ELINKCONG
- * Returns 0 if success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket
- * user data messages) or -EHOSTUNREACH (all other messages/senders)
- * Only the socket functions tipc_send_stream() and tipc_send_packet() need
- * to act on the return value, since they may need to do more send attempts.
+ * Consumes the buffer chain, except when returning -ELINKCONG,
+ * since the caller then may want to make more send attempts.
+ * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
+ * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
  */
 int __tipc_link_xmit(struct net *net, struct tipc_link *link,
		     struct sk_buff_head *list)
 {
	struct tipc_msg *msg = buf_msg(skb_peek(list));
-	uint psz = msg_size(msg);
-	uint sndlim = link->queue_limit[0];
-	uint imp = tipc_msg_tot_importance(msg);
-	uint mtu = link->max_pkt;
+	unsigned int maxwin = link->window;
+	unsigned int imp = msg_importance(msg);
+	uint mtu = link->mtu;
	uint ack = mod(link->next_in_no - 1);
	uint seqno = link->next_out_no;
	uint bc_last_in = link->owner->bclink.last_in;
	struct tipc_media_addr *addr = &link->media_addr;
-	struct sk_buff_head *outqueue = &link->outqueue;
+	struct sk_buff_head *transmq = &link->transmq;
+	struct sk_buff_head *backlogq = &link->backlogq;
	struct sk_buff *skb, *tmp;

-	/* Match queue limits against msg importance: */
-	if (unlikely(skb_queue_len(outqueue) >= link->queue_limit[imp]))
-		return tipc_link_cong(link, list);
+	/* Match backlog limit against msg importance: */
+	if (unlikely(link->backlog[imp].len >= link->backlog[imp].limit))
+		return link_schedule_user(link, list);

-	/* Has valid packet limit been used ? */
-	if (unlikely(psz > mtu)) {
+	if (unlikely(msg_size(msg) > mtu)) {
		__skb_queue_purge(list);
		return -EMSGSIZE;
	}
-
-	/* Prepare each packet for sending, and add to outqueue: */
+	/* Prepare each packet for sending, and add to relevant queue: */
	skb_queue_walk_safe(list, skb, tmp) {
		__skb_unlink(skb, list);
		msg = buf_msg(skb);
-		msg_set_word(msg, 2, ((ack << 16) | mod(seqno)));
+		msg_set_seqno(msg, seqno);
+		msg_set_ack(msg, ack);
		msg_set_bcast_ack(msg, bc_last_in);

-		if (skb_queue_len(outqueue) < sndlim) {
-			__skb_queue_tail(outqueue, skb);
-			tipc_bearer_send(net, link->bearer_id,
-					 skb, addr);
-			link->next_out = NULL;
-			link->unacked_window = 0;
-		} else if (tipc_msg_bundle(outqueue, skb, mtu)) {
+		if (likely(skb_queue_len(transmq) < maxwin)) {
+			__skb_queue_tail(transmq, skb);
+			tipc_bearer_send(net, link->bearer_id, skb, addr);
+			link->rcv_unacked = 0;
+			seqno++;
+			continue;
+		}
+		if (tipc_msg_bundle(skb_peek_tail(backlogq), skb, mtu)) {
			link->stats.sent_bundled++;
			continue;
-		} else if (tipc_msg_make_bundle(outqueue, skb, mtu,
-						link->addr)) {
+		}
+		if (tipc_msg_make_bundle(&skb, mtu, link->addr)) {
			link->stats.sent_bundled++;
			link->stats.sent_bundles++;
-			if (!link->next_out)
-				link->next_out = skb_peek_tail(outqueue);
-		} else {
-			__skb_queue_tail(outqueue, skb);
-			if (!link->next_out)
-				link->next_out = skb;
+			imp = msg_importance(buf_msg(skb));
		}
+		__skb_queue_tail(backlogq, skb);
+		link->backlog[imp].len++;
		seqno++;
	}
	link->next_out_no = seqno;
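The rewritten __tipc_link_xmit() above splits the old single outqueue into a transmit queue bounded by the send window and a per-importance backlog queue. A self-contained sketch of that admission step, with plain counters standing in for sk_buff queues (illustrative, not kernel code):

    /* Sketch only: transmq/backlogq admission in the new send path. */
    #include <stdbool.h>

    #define LEVELS 5

    struct sketch_link {
        int transmq_len;                /* packets in flight */
        int window;                     /* link send window */
        int backlog_len[LEVELS];
        int backlog_limit[LEVELS];      /* checked before this point */
    };

    static bool xmit_one(struct sketch_link *l, int imp)
    {
        if (l->transmq_len < l->window) {
            l->transmq_len++;           /* sent to bearer immediately */
            return true;
        }
        l->backlog_len[imp]++;          /* pushed later, window permitting */
        return false;
    }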
@@ -808,13 +765,25 @@ static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
	return __tipc_link_xmit(link->owner->net, link, &head);
 }

+/* tipc_link_xmit_skb(): send single buffer to destination
+ * Buffers sent via this functon are generally TIPC_SYSTEM_IMPORTANCE
+ * messages, which will not be rejected
+ * The only exception is datagram messages rerouted after secondary
+ * lookup, which are rare and safe to dispose of anyway.
+ * TODO: Return real return value, and let callers use
+ * tipc_wait_for_sendpkt() where applicable
+ */
 int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
		       u32 selector)
 {
	struct sk_buff_head head;
+	int rc;

	skb2list(skb, &head);
-	return tipc_link_xmit(net, &head, dnode, selector);
+	rc = tipc_link_xmit(net, &head, dnode, selector);
+	if (rc == -ELINKCONG)
+		kfree_skb(skb);
+	return 0;
 }

 /**
@@ -841,12 +810,15 @@ int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
		if (link)
			rc = __tipc_link_xmit(net, link, list);
		tipc_node_unlock(node);
+		tipc_node_put(node);
	}
	if (link)
		return rc;

-	if (likely(in_own_node(net, dnode)))
-		return tipc_sk_rcv(net, list);
+	if (likely(in_own_node(net, dnode))) {
+		tipc_sk_rcv(net, list);
+		return 0;
+	}

	__skb_queue_purge(list);
	return rc;
@@ -893,14 +865,6 @@ static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
	kfree_skb(buf);
 }

-struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list,
-				    const struct sk_buff *skb)
-{
-	if (skb_queue_is_last(list, skb))
-		return NULL;
-	return skb->next;
-}
-
 /*
  * tipc_link_push_packets - push unsent packets to bearer
  *
@@ -909,30 +873,24 @@ struct sk_buff *tipc_skb_queue_next(const struct sk_buff_head *list,
  *
  * Called with node locked
  */
-void tipc_link_push_packets(struct tipc_link *l_ptr)
+void tipc_link_push_packets(struct tipc_link *link)
 {
-	struct sk_buff_head *outqueue = &l_ptr->outqueue;
-	struct sk_buff *skb = l_ptr->next_out;
+	struct sk_buff *skb;
	struct tipc_msg *msg;
-	u32 next, first;
+	unsigned int ack = mod(link->next_in_no - 1);

-	skb_queue_walk_from(outqueue, skb) {
-		msg = buf_msg(skb);
-		next = msg_seqno(msg);
-		first = buf_seqno(skb_peek(outqueue));
-
-		if (mod(next - first) < l_ptr->queue_limit[0]) {
-			msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
-			msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
-			if (msg_user(msg) == MSG_BUNDLER)
-				TIPC_SKB_CB(skb)->bundling = false;
-			tipc_bearer_send(l_ptr->owner->net,
-					 l_ptr->bearer_id, skb,
-					 &l_ptr->media_addr);
-			l_ptr->next_out = tipc_skb_queue_next(outqueue, skb);
-		} else {
+	while (skb_queue_len(&link->transmq) < link->window) {
+		skb = __skb_dequeue(&link->backlogq);
+		if (!skb)
			break;
-		}
+		msg = buf_msg(skb);
+		link->backlog[msg_importance(msg)].len--;
+		msg_set_ack(msg, ack);
+		msg_set_bcast_ack(msg, link->owner->bclink.last_in);
+		link->rcv_unacked = 0;
+		__skb_queue_tail(&link->transmq, skb);
+		tipc_bearer_send(link->owner->net, link->bearer_id,
+				 skb, &link->media_addr);
	}
 }

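tipc_link_push_packets() is correspondingly simpler after this hunk: it just refills the transmit queue from the backlog while the window allows. A tiny self-contained sketch of the refill loop (illustrative names, not kernel code):

    /* Sketch only: window-limited refill done by tipc_link_push_packets(). */
    struct refill_link { int transmq_len, window, backlog_len; };

    static void push_packets(struct refill_link *l)
    {
        while (l->transmq_len < l->window && l->backlog_len > 0) {
            l->backlog_len--;           /* __skb_dequeue(&backlogq) */
            l->transmq_len++;           /* __skb_queue_tail(&transmq) + send */
        }
    }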
@@ -979,7 +937,6 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
			(unsigned long) TIPC_SKB_CB(buf)->handle);

		n_ptr = tipc_bclink_retransmit_to(net);
-		tipc_node_lock(n_ptr);

		tipc_addr_string_fill(addr_string, n_ptr->addr);
		pr_info("Broadcast link info for %s\n", addr_string);
@@ -991,9 +948,7 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
			n_ptr->bclink.oos_state,
			n_ptr->bclink.last_sent);

-		tipc_node_unlock(n_ptr);
-
-		tipc_bclink_set_flags(net, TIPC_BCLINK_RESET);
+		n_ptr->action_flags |= TIPC_BCAST_RESET;
		l_ptr->stale_count = 0;
	}
 }
@@ -1019,8 +974,8 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
		l_ptr->stale_count = 1;
	}

-	skb_queue_walk_from(&l_ptr->outqueue, skb) {
-		if (!retransmits || skb == l_ptr->next_out)
+	skb_queue_walk_from(&l_ptr->transmq, skb) {
+		if (!retransmits)
			break;
		msg = buf_msg(skb);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
@@ -1032,72 +987,43 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
	}
 }

-static void link_retrieve_defq(struct tipc_link *link,
-			       struct sk_buff_head *list)
-{
-	u32 seq_no;
-
-	if (skb_queue_empty(&link->deferred_queue))
-		return;
-
-	seq_no = buf_seqno(skb_peek(&link->deferred_queue));
-	if (seq_no == mod(link->next_in_no))
-		skb_queue_splice_tail_init(&link->deferred_queue, list);
-}
-
-/**
- * link_recv_buf_validate - validate basic format of received message
- *
- * This routine ensures a TIPC message has an acceptable header, and at least
- * as much data as the header indicates it should. The routine also ensures
- * that the entire message header is stored in the main fragment of the message
- * buffer, to simplify future access to message header fields.
- *
- * Note: Having extra info present in the message header or data areas is OK.
- * TIPC will ignore the excess, under the assumption that it is optional info
- * introduced by a later release of the protocol.
+/* link_synch(): check if all packets arrived before the synch
+ * point have been consumed
+ * Returns true if the parallel links are synched, otherwise false
  */
-static int link_recv_buf_validate(struct sk_buff *buf)
+static bool link_synch(struct tipc_link *l)
 {
-	static u32 min_data_hdr_size[8] = {
-		SHORT_H_SIZE, MCAST_H_SIZE, NAMED_H_SIZE, BASIC_H_SIZE,
-		MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE, MAX_H_SIZE
-	};
+	unsigned int post_synch;
+	struct tipc_link *pl;

-	struct tipc_msg *msg;
-	u32 tipc_hdr[2];
-	u32 size;
-	u32 hdr_size;
-	u32 min_hdr_size;
+	pl = tipc_parallel_link(l);
+	if (pl == l)
+		goto synched;

-	/* If this packet comes from the defer queue, the skb has already
-	 * been validated
-	 */
-	if (unlikely(TIPC_SKB_CB(buf)->deferred))
-		return 1;
-
-	if (unlikely(buf->len < MIN_H_SIZE))
-		return 0;
-
-	msg = skb_header_pointer(buf, 0, sizeof(tipc_hdr), tipc_hdr);
-	if (msg == NULL)
-		return 0;
+	/* Was last pre-synch packet added to input queue ? */
+	if (less_eq(pl->next_in_no, l->synch_point))
+		return false;

-	if (unlikely(msg_version(msg) != TIPC_VERSION))
-		return 0;
+	/* Is it still in the input queue ? */
+	post_synch = mod(pl->next_in_no - l->synch_point) - 1;
+	if (skb_queue_len(&pl->inputq) > post_synch)
+		return false;
+synched:
+	l->flags &= ~LINK_SYNCHING;
+	return true;
+}

-	size = msg_size(msg);
-	hdr_size = msg_hdr_sz(msg);
-	min_hdr_size = msg_isdata(msg) ?
-		min_data_hdr_size[msg_type(msg)] : INT_H_SIZE;
-
-	if (unlikely((hdr_size < min_hdr_size) ||
-		     (size < hdr_size) ||
-		     (buf->len < size) ||
-		     (size - hdr_size > TIPC_MAX_USER_MSG_SIZE)))
-		return 0;
-
-	return pskb_may_pull(buf, hdr_size);
+static void link_retrieve_defq(struct tipc_link *link,
+			       struct sk_buff_head *list)
+{
+	u32 seq_no;
+
+	if (skb_queue_empty(&link->deferdq))
+		return;
+
+	seq_no = buf_seqno(skb_peek(&link->deferdq));
+	if (seq_no == mod(link->next_in_no))
+		skb_queue_splice_tail_init(&link->deferdq, list);
 }

 /**
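The new link_synch() above relies on TIPC's mod-2^16 circular sequence arithmetic. A minimal, runnable sketch of that arithmetic (not kernel code; the values in main() are invented for illustration):

    /* Sketch only: circular sequence check behind link_synch(). */
    #include <stdbool.h>
    #include <stdio.h>

    static unsigned int mod(unsigned int x) { return x & 0xffffu; }

    static bool less_eq(unsigned int a, unsigned int b)
    {
        return mod(b - a) < 32768u;     /* a <= b in circular order */
    }

    static bool synched(unsigned int pl_next_in, unsigned int synch_point,
                        unsigned int pl_inputq_len)
    {
        unsigned int post_synch;

        if (less_eq(pl_next_in, synch_point))
            return false;               /* pre-synch pkt not yet received */
        post_synch = mod(pl_next_in - synch_point) - 1;
        return pl_inputq_len <= post_synch;     /* and already consumed */
    }

    int main(void)
    {
        printf("%d\n", synched(101, 100, 0));   /* 1: in sync */
        printf("%d\n", synched(100, 100, 3));   /* 0: still waiting */
        return 0;
    }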
@@ -1125,16 +1051,11 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)

	while ((skb = __skb_dequeue(&head))) {
		/* Ensure message is well-formed */
-		if (unlikely(!link_recv_buf_validate(skb)))
-			goto discard;
-
-		/* Ensure message data is a single contiguous unit */
-		if (unlikely(skb_linearize(skb)))
+		if (unlikely(!tipc_msg_validate(skb)))
			goto discard;

		/* Handle arrival of a non-unicast link message */
		msg = buf_msg(skb);
-
		if (unlikely(msg_non_seq(msg))) {
			if (msg_user(msg) == LINK_CONFIG)
				tipc_disc_rcv(net, skb, b_ptr);
@@ -1152,8 +1073,8 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
		n_ptr = tipc_node_find(net, msg_prevnode(msg));
		if (unlikely(!n_ptr))
			goto discard;
-		tipc_node_lock(n_ptr);

+		tipc_node_lock(n_ptr);
		/* Locate unicast link endpoint that should handle message */
		l_ptr = n_ptr->links[b_ptr->identity];
		if (unlikely(!l_ptr))
@@ -1175,21 +1096,20 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
		ackd = msg_ack(msg);

		/* Release acked messages */
-		if (n_ptr->bclink.recv_permitted)
+		if (unlikely(n_ptr->bclink.acked != msg_bcast_ack(msg)))
			tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));

		released = 0;
-		skb_queue_walk_safe(&l_ptr->outqueue, skb1, tmp) {
-			if (skb1 == l_ptr->next_out ||
-			    more(buf_seqno(skb1), ackd))
+		skb_queue_walk_safe(&l_ptr->transmq, skb1, tmp) {
+			if (more(buf_seqno(skb1), ackd))
				break;
-			__skb_unlink(skb1, &l_ptr->outqueue);
+			__skb_unlink(skb1, &l_ptr->transmq);
			kfree_skb(skb1);
			released = 1;
		}

		/* Try sending any messages link endpoint has pending */
-		if (unlikely(l_ptr->next_out))
+		if (unlikely(skb_queue_len(&l_ptr->backlogq)))
			tipc_link_push_packets(l_ptr);

		if (released && !skb_queue_empty(&l_ptr->wakeupq))
@@ -1223,18 +1143,26 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
			skb = NULL;
			goto unlock;
		}
+		/* Synchronize with parallel link if applicable */
+		if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) {
+			link_handle_out_of_seq_msg(l_ptr, skb);
+			if (link_synch(l_ptr))
+				link_retrieve_defq(l_ptr, &head);
+			skb = NULL;
+			goto unlock;
+		}
		l_ptr->next_in_no++;
-		if (unlikely(!skb_queue_empty(&l_ptr->deferred_queue)))
+		if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
			link_retrieve_defq(l_ptr, &head);
-
-		if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
+		if (unlikely(++l_ptr->rcv_unacked >= TIPC_MIN_LINK_WIN)) {
			l_ptr->stats.sent_acks++;
-			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
		}
		tipc_link_input(l_ptr, skb);
		skb = NULL;
 unlock:
		tipc_node_unlock(n_ptr);
+		tipc_node_put(n_ptr);
 discard:
		if (unlikely(skb))
			kfree_skb(skb);
@@ -1271,7 +1199,7 @@ static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
		node->action_flags |= TIPC_NAMED_MSG_EVT;
		return true;
	case MSG_BUNDLER:
-	case CHANGEOVER_PROTOCOL:
+	case TUNNEL_PROTOCOL:
	case MSG_FRAGMENTER:
	case BCAST_PROTOCOL:
		return false;
@@ -1298,8 +1226,14 @@ static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
		return;

	switch (msg_user(msg)) {
-	case CHANGEOVER_PROTOCOL:
-		if (!tipc_link_tunnel_rcv(node, &skb))
+	case TUNNEL_PROTOCOL:
+		if (msg_dup(msg)) {
+			link->flags |= LINK_SYNCHING;
+			link->synch_point = msg_seqno(msg_get_wrapped(msg));
+			kfree_skb(skb);
+			break;
+		}
+		if (!tipc_link_failover_rcv(link, &skb))
			break;
		if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
			tipc_data_input(link, skb);
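The hunk above splits TUNNEL_PROTOCOL handling into two flavours: a duplicate (synch) message only records the synch point, while an original message carries failover payload. A self-contained sketch of that dispatch (illustrative names, not kernel code):

    /* Sketch only: tunnel-message dispatch in the new tipc_link_input(). */
    #include <stdbool.h>

    #define LINK_SYNCHING 0x1u

    struct sketch_rcv_link {
        unsigned int flags;
        unsigned int synch_point;
    };

    /* Returns true when the wrapped packet must be delivered onwards. */
    static bool tunnel_input(struct sketch_rcv_link *l, bool is_dup,
                             unsigned int wrapped_seqno)
    {
        if (is_dup) {                   /* msg_dup(): enter SYNCHING */
            l->flags |= LINK_SYNCHING;
            l->synch_point = wrapped_seqno;
            return false;               /* packet itself is dropped */
        }
        return true;                    /* failover: unwrap and deliver */
    }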
@@ -1394,11 +1328,10 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
		return;
	}

-	if (tipc_link_defer_pkt(&l_ptr->deferred_queue, buf)) {
+	if (tipc_link_defer_pkt(&l_ptr->deferdq, buf)) {
		l_ptr->stats.deferred_recv++;
-		TIPC_SKB_CB(buf)->deferred = true;
-		if ((skb_queue_len(&l_ptr->deferred_queue) % 16) == 1)
-			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
+		if ((skb_queue_len(&l_ptr->deferdq) % TIPC_MIN_LINK_WIN) == 1)
+			tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0);
	} else {
		l_ptr->stats.duplicates++;
	}
| @@ -1408,15 +1341,15 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, | |||
| 1408 | * Send protocol message to the other endpoint. | 1341 | * Send protocol message to the other endpoint. |
| 1409 | */ | 1342 | */ |
| 1410 | void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, | 1343 | void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, |
| 1411 | u32 gap, u32 tolerance, u32 priority, u32 ack_mtu) | 1344 | u32 gap, u32 tolerance, u32 priority) |
| 1412 | { | 1345 | { |
| 1413 | struct sk_buff *buf = NULL; | 1346 | struct sk_buff *buf = NULL; |
| 1414 | struct tipc_msg *msg = l_ptr->pmsg; | 1347 | struct tipc_msg *msg = l_ptr->pmsg; |
| 1415 | u32 msg_size = sizeof(l_ptr->proto_msg); | 1348 | u32 msg_size = sizeof(l_ptr->proto_msg); |
| 1416 | int r_flag; | 1349 | int r_flag; |
| 1417 | 1350 | ||
| 1418 | /* Don't send protocol message during link changeover */ | 1351 | /* Don't send protocol message during link failover */ |
| 1419 | if (l_ptr->exp_msg_count) | 1352 | if (l_ptr->flags & LINK_FAILINGOVER) |
| 1420 | return; | 1353 | return; |
| 1421 | 1354 | ||
| 1422 | /* Abort non-RESET send if communication with node is prohibited */ | 1355 | /* Abort non-RESET send if communication with node is prohibited */ |
| @@ -1434,11 +1367,11 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, | |||
| 1434 | 1367 | ||
| 1435 | if (!tipc_link_is_up(l_ptr)) | 1368 | if (!tipc_link_is_up(l_ptr)) |
| 1436 | return; | 1369 | return; |
| 1437 | if (l_ptr->next_out) | 1370 | if (skb_queue_len(&l_ptr->backlogq)) |
| 1438 | next_sent = buf_seqno(l_ptr->next_out); | 1371 | next_sent = buf_seqno(skb_peek(&l_ptr->backlogq)); |
| 1439 | msg_set_next_sent(msg, next_sent); | 1372 | msg_set_next_sent(msg, next_sent); |
| 1440 | if (!skb_queue_empty(&l_ptr->deferred_queue)) { | 1373 | if (!skb_queue_empty(&l_ptr->deferdq)) { |
| 1441 | u32 rec = buf_seqno(skb_peek(&l_ptr->deferred_queue)); | 1374 | u32 rec = buf_seqno(skb_peek(&l_ptr->deferdq)); |
| 1442 | gap = mod(rec - mod(l_ptr->next_in_no)); | 1375 | gap = mod(rec - mod(l_ptr->next_in_no)); |
| 1443 | } | 1376 | } |
| 1444 | msg_set_seq_gap(msg, gap); | 1377 | msg_set_seq_gap(msg, gap); |
| @@ -1446,35 +1379,20 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, | |||
| 1446 | l_ptr->stats.sent_nacks++; | 1379 | l_ptr->stats.sent_nacks++; |
| 1447 | msg_set_link_tolerance(msg, tolerance); | 1380 | msg_set_link_tolerance(msg, tolerance); |
| 1448 | msg_set_linkprio(msg, priority); | 1381 | msg_set_linkprio(msg, priority); |
| 1449 | msg_set_max_pkt(msg, ack_mtu); | 1382 | msg_set_max_pkt(msg, l_ptr->mtu); |
| 1450 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); | 1383 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); |
| 1451 | msg_set_probe(msg, probe_msg != 0); | 1384 | msg_set_probe(msg, probe_msg != 0); |
| 1452 | if (probe_msg) { | 1385 | if (probe_msg) |
| 1453 | u32 mtu = l_ptr->max_pkt; | ||
| 1454 | |||
| 1455 | if ((mtu < l_ptr->max_pkt_target) && | ||
| 1456 | link_working_working(l_ptr) && | ||
| 1457 | l_ptr->fsm_msg_cnt) { | ||
| 1458 | msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3; | ||
| 1459 | if (l_ptr->max_pkt_probes == 10) { | ||
| 1460 | l_ptr->max_pkt_target = (msg_size - 4); | ||
| 1461 | l_ptr->max_pkt_probes = 0; | ||
| 1462 | msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3; | ||
| 1463 | } | ||
| 1464 | l_ptr->max_pkt_probes++; | ||
| 1465 | } | ||
| 1466 | |||
| 1467 | l_ptr->stats.sent_probes++; | 1386 | l_ptr->stats.sent_probes++; |
| 1468 | } | ||
| 1469 | l_ptr->stats.sent_states++; | 1387 | l_ptr->stats.sent_states++; |
| 1470 | } else { /* RESET_MSG or ACTIVATE_MSG */ | 1388 | } else { /* RESET_MSG or ACTIVATE_MSG */ |
| 1471 | msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1)); | 1389 | msg_set_ack(msg, mod(l_ptr->failover_checkpt - 1)); |
| 1472 | msg_set_seq_gap(msg, 0); | 1390 | msg_set_seq_gap(msg, 0); |
| 1473 | msg_set_next_sent(msg, 1); | 1391 | msg_set_next_sent(msg, 1); |
| 1474 | msg_set_probe(msg, 0); | 1392 | msg_set_probe(msg, 0); |
| 1475 | msg_set_link_tolerance(msg, l_ptr->tolerance); | 1393 | msg_set_link_tolerance(msg, l_ptr->tolerance); |
| 1476 | msg_set_linkprio(msg, l_ptr->priority); | 1394 | msg_set_linkprio(msg, l_ptr->priority); |
| 1477 | msg_set_max_pkt(msg, l_ptr->max_pkt_target); | 1395 | msg_set_max_pkt(msg, l_ptr->advertised_mtu); |
| 1478 | } | 1396 | } |
| 1479 | 1397 | ||
| 1480 | r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr)); | 1398 | r_flag = (l_ptr->owner->working_links > tipc_link_is_up(l_ptr)); |
| @@ -1490,10 +1408,9 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg, | |||
| 1490 | 1408 | ||
| 1491 | skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg)); | 1409 | skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg)); |
| 1492 | buf->priority = TC_PRIO_CONTROL; | 1410 | buf->priority = TC_PRIO_CONTROL; |
| 1493 | |||
| 1494 | tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf, | 1411 | tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf, |
| 1495 | &l_ptr->media_addr); | 1412 | &l_ptr->media_addr); |
| 1496 | l_ptr->unacked_window = 0; | 1413 | l_ptr->rcv_unacked = 0; |
| 1497 | kfree_skb(buf); | 1414 | kfree_skb(buf); |
| 1498 | } | 1415 | } |
| 1499 | 1416 | ||
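The whole adaptive MTU probing block is removed from the send path: there is no longer a probe size that creeps toward max_pkt_target over successive probes. STATE messages now advertise the link's currently negotiated mtu, while RESET/ACTIVATE messages advertise the full advertised_mtu so negotiation restarts from the bearer maximum. A minimal userspace sketch of that decision, with illustrative types and names:

#include <stdio.h>

/* Toy model of the simplified advertisement: which MTU value goes into
 * msg_set_max_pkt() now depends only on the message type. */
enum toy_msg_type { TOY_STATE, TOY_RESET, TOY_ACTIVATE };

struct toy_link { unsigned int mtu, advertised_mtu; };

static unsigned int toy_adv_mtu(const struct toy_link *l, enum toy_msg_type t)
{
        return (t == TOY_STATE) ? l->mtu : l->advertised_mtu;
}

int main(void)
{
        struct toy_link l = { .mtu = 1476, .advertised_mtu = 1500 };

        printf("STATE advertises %u, RESET advertises %u\n",
               toy_adv_mtu(&l, TOY_STATE), toy_adv_mtu(&l, TOY_RESET));
        return 0;
}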
| @@ -1506,13 +1423,10 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, | |||
| 1506 | struct sk_buff *buf) | 1423 | struct sk_buff *buf) |
| 1507 | { | 1424 | { |
| 1508 | u32 rec_gap = 0; | 1425 | u32 rec_gap = 0; |
| 1509 | u32 max_pkt_info; | ||
| 1510 | u32 max_pkt_ack; | ||
| 1511 | u32 msg_tol; | 1426 | u32 msg_tol; |
| 1512 | struct tipc_msg *msg = buf_msg(buf); | 1427 | struct tipc_msg *msg = buf_msg(buf); |
| 1513 | 1428 | ||
| 1514 | /* Discard protocol message during link changeover */ | 1429 | if (l_ptr->flags & LINK_FAILINGOVER) |
| 1515 | if (l_ptr->exp_msg_count) | ||
| 1516 | goto exit; | 1430 | goto exit; |
| 1517 | 1431 | ||
| 1518 | if (l_ptr->net_plane != msg_net_plane(msg)) | 1432 | if (l_ptr->net_plane != msg_net_plane(msg)) |
| @@ -1551,15 +1465,8 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, | |||
| 1551 | if (msg_linkprio(msg) > l_ptr->priority) | 1465 | if (msg_linkprio(msg) > l_ptr->priority) |
| 1552 | l_ptr->priority = msg_linkprio(msg); | 1466 | l_ptr->priority = msg_linkprio(msg); |
| 1553 | 1467 | ||
| 1554 | max_pkt_info = msg_max_pkt(msg); | 1468 | if (l_ptr->mtu > msg_max_pkt(msg)) |
| 1555 | if (max_pkt_info) { | 1469 | l_ptr->mtu = msg_max_pkt(msg); |
| 1556 | if (max_pkt_info < l_ptr->max_pkt_target) | ||
| 1557 | l_ptr->max_pkt_target = max_pkt_info; | ||
| 1558 | if (l_ptr->max_pkt > l_ptr->max_pkt_target) | ||
| 1559 | l_ptr->max_pkt = l_ptr->max_pkt_target; | ||
| 1560 | } else { | ||
| 1561 | l_ptr->max_pkt = l_ptr->max_pkt_target; | ||
| 1562 | } | ||
| 1563 | 1470 | ||
| 1564 | /* Synchronize broadcast link info, if not done previously */ | 1471 | /* Synchronize broadcast link info, if not done previously */ |
| 1565 | if (!tipc_node_is_up(l_ptr->owner)) { | 1472 | if (!tipc_node_is_up(l_ptr->owner)) { |
| @@ -1604,18 +1511,8 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, | |||
| 1604 | mod(l_ptr->next_in_no)); | 1511 | mod(l_ptr->next_in_no)); |
| 1605 | } | 1512 | } |
| 1606 | 1513 | ||
| 1607 | max_pkt_ack = msg_max_pkt(msg); | 1514 | if (msg_probe(msg)) |
| 1608 | if (max_pkt_ack > l_ptr->max_pkt) { | ||
| 1609 | l_ptr->max_pkt = max_pkt_ack; | ||
| 1610 | l_ptr->max_pkt_probes = 0; | ||
| 1611 | } | ||
| 1612 | |||
| 1613 | max_pkt_ack = 0; | ||
| 1614 | if (msg_probe(msg)) { | ||
| 1615 | l_ptr->stats.recv_probes++; | 1515 | l_ptr->stats.recv_probes++; |
| 1616 | if (msg_size(msg) > sizeof(l_ptr->proto_msg)) | ||
| 1617 | max_pkt_ack = msg_size(msg); | ||
| 1618 | } | ||
| 1619 | 1516 | ||
| 1620 | /* Protocol message before retransmits, reduce loss risk */ | 1517 | /* Protocol message before retransmits, reduce loss risk */ |
| 1621 | if (l_ptr->owner->bclink.recv_permitted) | 1518 | if (l_ptr->owner->bclink.recv_permitted) |
| @@ -1623,12 +1520,12 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, | |||
| 1623 | msg_last_bcast(msg)); | 1520 | msg_last_bcast(msg)); |
| 1624 | 1521 | ||
| 1625 | if (rec_gap || (msg_probe(msg))) { | 1522 | if (rec_gap || (msg_probe(msg))) { |
| 1626 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, rec_gap, 0, | 1523 | tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, |
| 1627 | 0, max_pkt_ack); | 1524 | rec_gap, 0, 0); |
| 1628 | } | 1525 | } |
| 1629 | if (msg_seq_gap(msg)) { | 1526 | if (msg_seq_gap(msg)) { |
| 1630 | l_ptr->stats.recv_nacks++; | 1527 | l_ptr->stats.recv_nacks++; |
| 1631 | tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->outqueue), | 1528 | tipc_link_retransmit(l_ptr, skb_peek(&l_ptr->transmq), |
| 1632 | msg_seq_gap(msg)); | 1529 | msg_seq_gap(msg)); |
| 1633 | } | 1530 | } |
| 1634 | break; | 1531 | break; |
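On the receive side the negotiation collapses to a single clamp: the old max_pkt_info/max_pkt_ack dance, including the probe-size echo, is gone, and the link MTU can only shrink toward the peer's advertised value. A standalone sketch of the new rule, names illustrative:

#include <stdio.h>

/* Toy model of the receive-side clamp; mirrors
 *   if (l_ptr->mtu > msg_max_pkt(msg))
 *           l_ptr->mtu = msg_max_pkt(msg);
 */
struct toy_link { unsigned int mtu; };

static void toy_proto_rcv(struct toy_link *l, unsigned int peer_max_pkt)
{
        if (l->mtu > peer_max_pkt)
                l->mtu = peer_max_pkt;
}

int main(void)
{
        struct toy_link l = { .mtu = 1500 };

        toy_proto_rcv(&l, 1476); /* peer advertised a smaller MTU */
        printf("negotiated mtu: %u\n", l.mtu); /* 1476 */
        return 0;
}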
| @@ -1675,7 +1572,7 @@ static void tipc_link_tunnel_xmit(struct tipc_link *l_ptr, | |||
| 1675 | */ | 1572 | */ |
| 1676 | void tipc_link_failover_send_queue(struct tipc_link *l_ptr) | 1573 | void tipc_link_failover_send_queue(struct tipc_link *l_ptr) |
| 1677 | { | 1574 | { |
| 1678 | u32 msgcount = skb_queue_len(&l_ptr->outqueue); | 1575 | int msgcount; |
| 1679 | struct tipc_link *tunnel = l_ptr->owner->active_links[0]; | 1576 | struct tipc_link *tunnel = l_ptr->owner->active_links[0]; |
| 1680 | struct tipc_msg tunnel_hdr; | 1577 | struct tipc_msg tunnel_hdr; |
| 1681 | struct sk_buff *skb; | 1578 | struct sk_buff *skb; |
| @@ -1684,12 +1581,15 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) | |||
| 1684 | if (!tunnel) | 1581 | if (!tunnel) |
| 1685 | return; | 1582 | return; |
| 1686 | 1583 | ||
| 1687 | tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL, | 1584 | tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, TUNNEL_PROTOCOL, |
| 1688 | ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr); | 1585 | FAILOVER_MSG, INT_H_SIZE, l_ptr->addr); |
| 1586 | skb_queue_splice_tail_init(&l_ptr->backlogq, &l_ptr->transmq); | ||
| 1587 | tipc_link_purge_backlog(l_ptr); | ||
| 1588 | msgcount = skb_queue_len(&l_ptr->transmq); | ||
| 1689 | msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); | 1589 | msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); |
| 1690 | msg_set_msgcnt(&tunnel_hdr, msgcount); | 1590 | msg_set_msgcnt(&tunnel_hdr, msgcount); |
| 1691 | 1591 | ||
| 1692 | if (skb_queue_empty(&l_ptr->outqueue)) { | 1592 | if (skb_queue_empty(&l_ptr->transmq)) { |
| 1693 | skb = tipc_buf_acquire(INT_H_SIZE); | 1593 | skb = tipc_buf_acquire(INT_H_SIZE); |
| 1694 | if (skb) { | 1594 | if (skb) { |
| 1695 | skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE); | 1595 | skb_copy_to_linear_data(skb, &tunnel_hdr, INT_H_SIZE); |
| @@ -1705,7 +1605,7 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) | |||
| 1705 | split_bundles = (l_ptr->owner->active_links[0] != | 1605 | split_bundles = (l_ptr->owner->active_links[0] != |
| 1706 | l_ptr->owner->active_links[1]); | 1606 | l_ptr->owner->active_links[1]); |
| 1707 | 1607 | ||
| 1708 | skb_queue_walk(&l_ptr->outqueue, skb) { | 1608 | skb_queue_walk(&l_ptr->transmq, skb) { |
| 1709 | struct tipc_msg *msg = buf_msg(skb); | 1609 | struct tipc_msg *msg = buf_msg(skb); |
| 1710 | 1610 | ||
| 1711 | if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) { | 1611 | if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) { |
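Note the reordering in tipc_link_failover_send_queue(): the backlog queue is spliced onto the tail of the transmit queue, and only then is msgcount taken, so the FAILOVER_MSG header announces every packet the peer must still expect, not just those already on the wire. A toy sketch of why the order matters, with counters standing in for the sk_buff queues:

#include <stdio.h>

/* Toy model: splice first, count second. */
struct toy_queue { int len; };

static void toy_splice_tail_init(struct toy_queue *from, struct toy_queue *to)
{
        to->len += from->len; /* mirrors skb_queue_splice_tail_init() */
        from->len = 0;
}

int main(void)
{
        struct toy_queue transmq = { .len = 3 }, backlogq = { .len = 5 };

        toy_splice_tail_init(&backlogq, &transmq);
        printf("failover msgcnt = %d\n", transmq.len); /* 8, not 3 */
        return 0;
}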
| @@ -1736,157 +1636,105 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr) | |||
| 1736 | * and sequence order is preserved per sender/receiver socket pair. | 1636 | * and sequence order is preserved per sender/receiver socket pair. |
| 1737 | * Owner node is locked. | 1637 | * Owner node is locked. |
| 1738 | */ | 1638 | */ |
| 1739 | void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, | 1639 | void tipc_link_dup_queue_xmit(struct tipc_link *link, |
| 1740 | struct tipc_link *tunnel) | 1640 | struct tipc_link *tnl) |
| 1741 | { | 1641 | { |
| 1742 | struct sk_buff *skb; | 1642 | struct sk_buff *skb; |
| 1743 | struct tipc_msg tunnel_hdr; | 1643 | struct tipc_msg tnl_hdr; |
| 1744 | 1644 | struct sk_buff_head *queue = &link->transmq; | |
| 1745 | tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL, | 1645 | int mcnt; |
| 1746 | DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr); | 1646 | |
| 1747 | msg_set_msgcnt(&tunnel_hdr, skb_queue_len(&l_ptr->outqueue)); | 1647 | tipc_msg_init(link_own_addr(link), &tnl_hdr, TUNNEL_PROTOCOL, |
| 1748 | msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); | 1648 | SYNCH_MSG, INT_H_SIZE, link->addr); |
| 1749 | skb_queue_walk(&l_ptr->outqueue, skb) { | 1649 | mcnt = skb_queue_len(&link->transmq) + skb_queue_len(&link->backlogq); |
| 1650 | msg_set_msgcnt(&tnl_hdr, mcnt); | ||
| 1651 | msg_set_bearer_id(&tnl_hdr, link->peer_bearer_id); | ||
| 1652 | |||
| 1653 | tunnel_queue: | ||
| 1654 | skb_queue_walk(queue, skb) { | ||
| 1750 | struct sk_buff *outskb; | 1655 | struct sk_buff *outskb; |
| 1751 | struct tipc_msg *msg = buf_msg(skb); | 1656 | struct tipc_msg *msg = buf_msg(skb); |
| 1752 | u32 length = msg_size(msg); | 1657 | u32 len = msg_size(msg); |
| 1753 | 1658 | ||
| 1754 | if (msg_user(msg) == MSG_BUNDLER) | 1659 | msg_set_ack(msg, mod(link->next_in_no - 1)); |
| 1755 | msg_set_type(msg, CLOSED_MSG); | 1660 | msg_set_bcast_ack(msg, link->owner->bclink.last_in); |
| 1756 | msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */ | 1661 | msg_set_size(&tnl_hdr, len + INT_H_SIZE); |
| 1757 | msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); | 1662 | outskb = tipc_buf_acquire(len + INT_H_SIZE); |
| 1758 | msg_set_size(&tunnel_hdr, length + INT_H_SIZE); | ||
| 1759 | outskb = tipc_buf_acquire(length + INT_H_SIZE); | ||
| 1760 | if (outskb == NULL) { | 1663 | if (outskb == NULL) { |
| 1761 | pr_warn("%sunable to send duplicate msg\n", | 1664 | pr_warn("%sunable to send duplicate msg\n", |
| 1762 | link_co_err); | 1665 | link_co_err); |
| 1763 | return; | 1666 | return; |
| 1764 | } | 1667 | } |
| 1765 | skb_copy_to_linear_data(outskb, &tunnel_hdr, INT_H_SIZE); | 1668 | skb_copy_to_linear_data(outskb, &tnl_hdr, INT_H_SIZE); |
| 1766 | skb_copy_to_linear_data_offset(outskb, INT_H_SIZE, skb->data, | 1669 | skb_copy_to_linear_data_offset(outskb, INT_H_SIZE, |
| 1767 | length); | 1670 | skb->data, len); |
| 1768 | __tipc_link_xmit_skb(tunnel, outskb); | 1671 | __tipc_link_xmit_skb(tnl, outskb); |
| 1769 | if (!tipc_link_is_up(l_ptr)) | 1672 | if (!tipc_link_is_up(link)) |
| 1770 | return; | 1673 | return; |
| 1771 | } | 1674 | } |
| 1772 | } | 1675 | if (queue == &link->backlogq) |
| 1773 | |||
| 1774 | /** | ||
| 1775 | * buf_extract - extracts embedded TIPC message from another message | ||
| 1776 | * @skb: encapsulating message buffer | ||
| 1777 | * @from_pos: offset to extract from | ||
| 1778 | * | ||
| 1779 | * Returns a new message buffer containing an embedded message. The | ||
| 1780 | * encapsulating buffer is left unchanged. | ||
| 1781 | */ | ||
| 1782 | static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos) | ||
| 1783 | { | ||
| 1784 | struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos); | ||
| 1785 | u32 size = msg_size(msg); | ||
| 1786 | struct sk_buff *eb; | ||
| 1787 | |||
| 1788 | eb = tipc_buf_acquire(size); | ||
| 1789 | if (eb) | ||
| 1790 | skb_copy_to_linear_data(eb, msg, size); | ||
| 1791 | return eb; | ||
| 1792 | } | ||
| 1793 | |||
| 1794 | /* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet. | ||
| 1795 | * Owner node is locked. | ||
| 1796 | */ | ||
| 1797 | static void tipc_link_dup_rcv(struct tipc_link *l_ptr, | ||
| 1798 | struct sk_buff *t_buf) | ||
| 1799 | { | ||
| 1800 | struct sk_buff *buf; | ||
| 1801 | |||
| 1802 | if (!tipc_link_is_up(l_ptr)) | ||
| 1803 | return; | 1676 | return; |
| 1804 | 1677 | queue = &link->backlogq; | |
| 1805 | buf = buf_extract(t_buf, INT_H_SIZE); | 1678 | goto tunnel_queue; |
| 1806 | if (buf == NULL) { | ||
| 1807 | pr_warn("%sfailed to extract inner dup pkt\n", link_co_err); | ||
| 1808 | return; | ||
| 1809 | } | ||
| 1810 | |||
| 1811 | /* Add buffer to deferred queue, if applicable: */ | ||
| 1812 | link_handle_out_of_seq_msg(l_ptr, buf); | ||
| 1813 | } | 1679 | } |
| 1814 | 1680 | ||
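tipc_link_dup_queue_xmit() now tunnels both queues with one loop body: it walks the transmit queue, then jumps back through the tunnel_queue label to repeat the walk over the backlog queue, which is also why mcnt sums both queue lengths. A compilable sketch of the two-pass pattern, arrays standing in for the kernel queues:

#include <stdio.h>

int main(void)
{
        int transmq[] = { 1, 2, 3 };
        int backlogq[] = { 4, 5 };
        int *queue = transmq;
        int len = 3, i;

tunnel_queue:
        for (i = 0; i < len; i++)
                printf("tunnel pkt %d\n", queue[i]); /* one SYNCH_MSG each */
        if (queue == backlogq)
                return 0;       /* both passes done */
        queue = backlogq;       /* second pass, mirrors the kernel goto */
        len = 2;
        goto tunnel_queue;
}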
| 1815 | /* tipc_link_failover_rcv(): Receive a tunnelled ORIGINAL_MSG packet | 1681 | /* tipc_link_failover_rcv(): Receive a tunnelled FAILOVER_MSG packet |
| 1816 | * Owner node is locked. | 1682 | * Owner node is locked. |
| 1817 | */ | 1683 | */ |
| 1818 | static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr, | 1684 | static bool tipc_link_failover_rcv(struct tipc_link *link, |
| 1819 | struct sk_buff *t_buf) | 1685 | struct sk_buff **skb) |
| 1820 | { | 1686 | { |
| 1821 | struct tipc_msg *t_msg = buf_msg(t_buf); | 1687 | struct tipc_msg *msg = buf_msg(*skb); |
| 1822 | struct sk_buff *buf = NULL; | 1688 | struct sk_buff *iskb = NULL; |
| 1823 | struct tipc_msg *msg; | 1689 | struct tipc_link *pl = NULL; |
| 1824 | 1690 | int bearer_id = msg_bearer_id(msg); | |
| 1825 | if (tipc_link_is_up(l_ptr)) | 1691 | int pos = 0; |
| 1826 | tipc_link_reset(l_ptr); | ||
| 1827 | |||
| 1828 | /* First failover packet? */ | ||
| 1829 | if (l_ptr->exp_msg_count == START_CHANGEOVER) | ||
| 1830 | l_ptr->exp_msg_count = msg_msgcnt(t_msg); | ||
| 1831 | |||
| 1832 | /* Should there be an inner packet? */ | ||
| 1833 | if (l_ptr->exp_msg_count) { | ||
| 1834 | l_ptr->exp_msg_count--; | ||
| 1835 | buf = buf_extract(t_buf, INT_H_SIZE); | ||
| 1836 | if (buf == NULL) { | ||
| 1837 | pr_warn("%sno inner failover pkt\n", link_co_err); | ||
| 1838 | goto exit; | ||
| 1839 | } | ||
| 1840 | msg = buf_msg(buf); | ||
| 1841 | 1692 | ||
| 1842 | if (less(msg_seqno(msg), l_ptr->reset_checkpoint)) { | 1693 | if (msg_type(msg) != FAILOVER_MSG) { |
| 1843 | kfree_skb(buf); | 1694 | pr_warn("%sunknown tunnel pkt received\n", link_co_err); |
| 1844 | buf = NULL; | 1695 | goto exit; |
| 1845 | goto exit; | ||
| 1846 | } | ||
| 1847 | if (msg_user(msg) == MSG_FRAGMENTER) { | ||
| 1848 | l_ptr->stats.recv_fragments++; | ||
| 1849 | tipc_buf_append(&l_ptr->reasm_buf, &buf); | ||
| 1850 | } | ||
| 1851 | } | 1696 | } |
| 1852 | exit: | 1697 | if (bearer_id >= MAX_BEARERS) |
| 1853 | if ((!l_ptr->exp_msg_count) && (l_ptr->flags & LINK_STOPPED)) | 1698 | goto exit; |
| 1854 | tipc_link_delete(l_ptr); | ||
| 1855 | return buf; | ||
| 1856 | } | ||
| 1857 | 1699 | ||
| 1858 | /* tipc_link_tunnel_rcv(): Receive a tunnelled packet, sent | 1700 | if (bearer_id == link->bearer_id) |
| 1859 | * via other link as result of a failover (ORIGINAL_MSG) or | 1701 | goto exit; |
| 1860 | * a new active link (DUPLICATE_MSG). Failover packets are | ||
| 1861 | * returned to the active link for delivery upwards. | ||
| 1862 | * Owner node is locked. | ||
| 1863 | */ | ||
| 1864 | static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr, | ||
| 1865 | struct sk_buff **buf) | ||
| 1866 | { | ||
| 1867 | struct sk_buff *t_buf = *buf; | ||
| 1868 | struct tipc_link *l_ptr; | ||
| 1869 | struct tipc_msg *t_msg = buf_msg(t_buf); | ||
| 1870 | u32 bearer_id = msg_bearer_id(t_msg); | ||
| 1871 | 1702 | ||
| 1872 | *buf = NULL; | 1703 | pl = link->owner->links[bearer_id]; |
| 1704 | if (pl && tipc_link_is_up(pl)) | ||
| 1705 | tipc_link_reset(pl); | ||
| 1873 | 1706 | ||
| 1874 | if (bearer_id >= MAX_BEARERS) | 1707 | if (link->failover_pkts == FIRST_FAILOVER) |
| 1708 | link->failover_pkts = msg_msgcnt(msg); | ||
| 1709 | |||
| 1710 | /* Should we expect an inner packet? */ | ||
| 1711 | if (!link->failover_pkts) | ||
| 1875 | goto exit; | 1712 | goto exit; |
| 1876 | 1713 | ||
| 1877 | l_ptr = n_ptr->links[bearer_id]; | 1714 | if (!tipc_msg_extract(*skb, &iskb, &pos)) { |
| 1878 | if (!l_ptr) | 1715 | pr_warn("%sno inner failover pkt\n", link_co_err); |
| 1716 | *skb = NULL; | ||
| 1879 | goto exit; | 1717 | goto exit; |
| 1718 | } | ||
| 1719 | link->failover_pkts--; | ||
| 1720 | *skb = NULL; | ||
| 1880 | 1721 | ||
| 1881 | if (msg_type(t_msg) == DUPLICATE_MSG) | 1722 | /* Was this packet already delivered? */ |
| 1882 | tipc_link_dup_rcv(l_ptr, t_buf); | 1723 | if (less(buf_seqno(iskb), link->failover_checkpt)) { |
| 1883 | else if (msg_type(t_msg) == ORIGINAL_MSG) | 1724 | kfree_skb(iskb); |
| 1884 | *buf = tipc_link_failover_rcv(l_ptr, t_buf); | 1725 | iskb = NULL; |
| 1885 | else | 1726 | goto exit; |
| 1886 | pr_warn("%sunknown tunnel pkt received\n", link_co_err); | 1727 | } |
| 1728 | if (msg_user(buf_msg(iskb)) == MSG_FRAGMENTER) { | ||
| 1729 | link->stats.recv_fragments++; | ||
| 1730 | tipc_buf_append(&link->failover_skb, &iskb); | ||
| 1731 | } | ||
| 1887 | exit: | 1732 | exit: |
| 1888 | kfree_skb(t_buf); | 1733 | if (!link->failover_pkts && pl) |
| 1889 | return *buf != NULL; | 1734 | pl->flags &= ~LINK_FAILINGOVER; |
| 1735 | kfree_skb(*skb); | ||
| 1736 | *skb = iskb; | ||
| 1737 | return *skb; | ||
| 1890 | } | 1738 | } |
| 1891 | 1739 | ||
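The rewritten tipc_link_failover_rcv() folds the old tunnel dispatch into one predicate: the first FAILOVER_MSG latches the expected packet count (failover_pkts starts at the FIRST_FAILOVER sentinel), each tunnelled packet decrements it, and inner packets older than failover_checkpt are discarded as already delivered. A simplified standalone model, using a plain comparison where the kernel uses modular less():

#include <stdio.h>

#define TOY_FIRST_FAILOVER 0xffffu

struct toy_link { unsigned int failover_pkts, failover_checkpt; };

/* Returns 1 if the inner packet should be delivered, 0 if dropped. */
static int toy_failover_rcv(struct toy_link *l, unsigned int msgcnt,
                            unsigned int inner_seqno)
{
        if (l->failover_pkts == TOY_FIRST_FAILOVER)
                l->failover_pkts = msgcnt;      /* latch expected count */
        if (!l->failover_pkts)
                return 0;                       /* nothing more expected */
        l->failover_pkts--;
        return inner_seqno >= l->failover_checkpt; /* checkpoint filter */
}

int main(void)
{
        struct toy_link l = { TOY_FIRST_FAILOVER, 100 };

        printf("seq 99: %s\n", toy_failover_rcv(&l, 2, 99) ? "deliver" : "drop");
        printf("seq 101: %s\n", toy_failover_rcv(&l, 2, 101) ? "deliver" : "drop");
        printf("still expected: %u\n", l.failover_pkts); /* 0 */
        return 0;
}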
| 1892 | static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol) | 1740 | static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol) |
| @@ -1901,23 +1749,16 @@ static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol) | |||
| 1901 | l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4); | 1749 | l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4); |
| 1902 | } | 1750 | } |
| 1903 | 1751 | ||
| 1904 | void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window) | 1752 | void tipc_link_set_queue_limits(struct tipc_link *l, u32 win) |
| 1905 | { | 1753 | { |
| 1906 | /* Data messages from this node, inclusive FIRST_FRAGM */ | 1754 | int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE); |
| 1907 | l_ptr->queue_limit[TIPC_LOW_IMPORTANCE] = window; | 1755 | |
| 1908 | l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE] = (window / 3) * 4; | 1756 | l->window = win; |
| 1909 | l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE] = (window / 3) * 5; | 1757 | l->backlog[TIPC_LOW_IMPORTANCE].limit = win / 2; |
| 1910 | l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE] = (window / 3) * 6; | 1758 | l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = win; |
| 1911 | /* Transiting data messages,inclusive FIRST_FRAGM */ | 1759 | l->backlog[TIPC_HIGH_IMPORTANCE].limit = win / 2 * 3; |
| 1912 | l_ptr->queue_limit[TIPC_LOW_IMPORTANCE + 4] = 300; | 1760 | l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2; |
| 1913 | l_ptr->queue_limit[TIPC_MEDIUM_IMPORTANCE + 4] = 600; | 1761 | l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk; |
| 1914 | l_ptr->queue_limit[TIPC_HIGH_IMPORTANCE + 4] = 900; | ||
| 1915 | l_ptr->queue_limit[TIPC_CRITICAL_IMPORTANCE + 4] = 1200; | ||
| 1916 | l_ptr->queue_limit[CONN_MANAGER] = 1200; | ||
| 1917 | l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500; | ||
| 1918 | l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000; | ||
| 1919 | /* FRAGMENT and LAST_FRAGMENT packets */ | ||
| 1920 | l_ptr->queue_limit[MSG_FRAGMENTER] = 4000; | ||
| 1921 | } | 1762 | } |
| 1922 | 1763 | ||
| 1923 | /* tipc_link_find_owner - locate owner node of link by link's name | 1764 | /* tipc_link_find_owner - locate owner node of link by link's name |
| @@ -2082,14 +1923,14 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info) | |||
| 2082 | 1923 | ||
| 2083 | tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]); | 1924 | tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]); |
| 2084 | link_set_supervision_props(link, tol); | 1925 | link_set_supervision_props(link, tol); |
| 2085 | tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0, 0); | 1926 | tipc_link_proto_xmit(link, STATE_MSG, 0, 0, tol, 0); |
| 2086 | } | 1927 | } |
| 2087 | if (props[TIPC_NLA_PROP_PRIO]) { | 1928 | if (props[TIPC_NLA_PROP_PRIO]) { |
| 2088 | u32 prio; | 1929 | u32 prio; |
| 2089 | 1930 | ||
| 2090 | prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); | 1931 | prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]); |
| 2091 | link->priority = prio; | 1932 | link->priority = prio; |
| 2092 | tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio, 0); | 1933 | tipc_link_proto_xmit(link, STATE_MSG, 0, 0, 0, prio); |
| 2093 | } | 1934 | } |
| 2094 | if (props[TIPC_NLA_PROP_WIN]) { | 1935 | if (props[TIPC_NLA_PROP_WIN]) { |
| 2095 | u32 win; | 1936 | u32 win; |
| @@ -2194,7 +2035,7 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg, | |||
| 2194 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, | 2035 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, |
| 2195 | tipc_cluster_mask(tn->own_addr))) | 2036 | tipc_cluster_mask(tn->own_addr))) |
| 2196 | goto attr_msg_full; | 2037 | goto attr_msg_full; |
| 2197 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt)) | 2038 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu)) |
| 2198 | goto attr_msg_full; | 2039 | goto attr_msg_full; |
| 2199 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no)) | 2040 | if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->next_in_no)) |
| 2200 | goto attr_msg_full; | 2041 | goto attr_msg_full; |
| @@ -2216,7 +2057,7 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg, | |||
| 2216 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance)) | 2057 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance)) |
| 2217 | goto prop_msg_full; | 2058 | goto prop_msg_full; |
| 2218 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, | 2059 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, |
| 2219 | link->queue_limit[TIPC_LOW_IMPORTANCE])) | 2060 | link->window)) |
| 2220 | goto prop_msg_full; | 2061 | goto prop_msg_full; |
| 2221 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority)) | 2062 | if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority)) |
| 2222 | goto prop_msg_full; | 2063 | goto prop_msg_full; |
| @@ -2282,7 +2123,6 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 2282 | msg.seq = cb->nlh->nlmsg_seq; | 2123 | msg.seq = cb->nlh->nlmsg_seq; |
| 2283 | 2124 | ||
| 2284 | rcu_read_lock(); | 2125 | rcu_read_lock(); |
| 2285 | |||
| 2286 | if (prev_node) { | 2126 | if (prev_node) { |
| 2287 | node = tipc_node_find(net, prev_node); | 2127 | node = tipc_node_find(net, prev_node); |
| 2288 | if (!node) { | 2128 | if (!node) { |
| @@ -2295,6 +2135,7 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 2295 | cb->prev_seq = 1; | 2135 | cb->prev_seq = 1; |
| 2296 | goto out; | 2136 | goto out; |
| 2297 | } | 2137 | } |
| 2138 | tipc_node_put(node); | ||
| 2298 | 2139 | ||
| 2299 | list_for_each_entry_continue_rcu(node, &tn->node_list, | 2140 | list_for_each_entry_continue_rcu(node, &tn->node_list, |
| 2300 | list) { | 2141 | list) { |
| @@ -2302,6 +2143,7 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb) | |||
| 2302 | err = __tipc_nl_add_node_links(net, &msg, node, | 2143 | err = __tipc_nl_add_node_links(net, &msg, node, |
| 2303 | &prev_link); | 2144 | &prev_link); |
| 2304 | tipc_node_unlock(node); | 2145 | tipc_node_unlock(node); |
| 2146 | tipc_node_put(node); | ||
| 2305 | if (err) | 2147 | if (err) |
| 2306 | goto out; | 2148 | goto out; |
| 2307 | 2149 | ||
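The two tipc_node_put() additions in the dump path suggest that tipc_node_find() and the node-list iteration now hand out counted node references, which the dump loop must drop before moving on; otherwise every interrupted dump would leak a node. A toy sketch of the pairing, with the refcount semantics assumed rather than shown in this hunk:

#include <stdio.h>

struct toy_node { int refcnt; };

static struct toy_node *toy_node_find(struct toy_node *n)
{
        n->refcnt++;            /* lookup takes a reference */
        return n;
}

static void toy_node_put(struct toy_node *n)
{
        n->refcnt--;            /* caller drops it when done */
}

int main(void)
{
        struct toy_node node = { .refcnt = 1 };
        struct toy_node *n = toy_node_find(&node);

        /* ... walk the links of n under the node lock ... */
        toy_node_put(n);
        printf("refcnt back to %d\n", node.refcnt); /* 1 */
        return 0;
}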
