aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorAllan Stephens <allan.stephens@windriver.com>2011-04-07 13:57:25 -0400
committerPaul Gortmaker <paul.gortmaker@windriver.com>2011-09-01 11:16:36 -0400
commit5d3c488dfe5f797d9f3cee2e8928aad8a2f6e44f (patch)
tree93e9f59a4ad2c155939b6f66ca30d1e9201fff87 /net
parent169073db442cb9e5aa2b70a2e4158d4f35a3b810 (diff)
tipc: Fix node lock problems during broadcast message reception
Modifies TIPC's incoming broadcast packet handler to ensure that the node lock associated with the sender of the packet is held whenever node-related data structure fields are accessed. The routine is also restructured with a single exit point, making it easier to ensure the node lock is properly released and the incoming packet is properly disposed of.

Signed-off-by: Allan Stephens <allan.stephens@windriver.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Diffstat (limited to 'net')
-rw-r--r--net/tipc/bcast.c36
1 file changed, 23 insertions(+), 13 deletions(-)
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 5200457eaeb4..bc01ca6891e4 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -426,20 +426,26 @@ int tipc_bclink_send_msg(struct sk_buff *buf)
426void tipc_bclink_recv_pkt(struct sk_buff *buf) 426void tipc_bclink_recv_pkt(struct sk_buff *buf)
427{ 427{
428 struct tipc_msg *msg = buf_msg(buf); 428 struct tipc_msg *msg = buf_msg(buf);
429 struct tipc_node *node = tipc_node_find(msg_prevnode(msg)); 429 struct tipc_node *node;
430 u32 next_in; 430 u32 next_in;
431 u32 seqno; 431 u32 seqno;
432 struct sk_buff *deferred; 432 struct sk_buff *deferred;
433 433
434 if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported || 434 /* Screen out unwanted broadcast messages */
435 (msg_mc_netid(msg) != tipc_net_id))) { 435
436 buf_discard(buf); 436 if (msg_mc_netid(msg) != tipc_net_id)
437 return; 437 goto exit;
438 } 438
439 node = tipc_node_find(msg_prevnode(msg));
440 if (unlikely(!node))
441 goto exit;
442
443 tipc_node_lock(node);
444 if (unlikely(!node->bclink.supported))
445 goto unlock;
439 446
440 if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) { 447 if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
441 if (msg_destnode(msg) == tipc_own_addr) { 448 if (msg_destnode(msg) == tipc_own_addr) {
442 tipc_node_lock(node);
443 tipc_bclink_acknowledge(node, msg_bcast_ack(msg)); 449 tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
444 tipc_node_unlock(node); 450 tipc_node_unlock(node);
445 spin_lock_bh(&bc_lock); 451 spin_lock_bh(&bc_lock);
@@ -449,16 +455,17 @@ void tipc_bclink_recv_pkt(struct sk_buff *buf)
449 msg_bcgap_to(msg)); 455 msg_bcgap_to(msg));
450 spin_unlock_bh(&bc_lock); 456 spin_unlock_bh(&bc_lock);
451 } else { 457 } else {
458 tipc_node_unlock(node);
452 tipc_bclink_peek_nack(msg_destnode(msg), 459 tipc_bclink_peek_nack(msg_destnode(msg),
453 msg_bcast_tag(msg), 460 msg_bcast_tag(msg),
454 msg_bcgap_after(msg), 461 msg_bcgap_after(msg),
455 msg_bcgap_to(msg)); 462 msg_bcgap_to(msg));
456 } 463 }
457 buf_discard(buf); 464 goto exit;
458 return;
459 } 465 }
460 466
461 tipc_node_lock(node); 467 /* Handle in-sequence broadcast message */
468
462receive: 469receive:
463 deferred = node->bclink.deferred_head; 470 deferred = node->bclink.deferred_head;
464 next_in = mod(node->bclink.last_in + 1); 471 next_in = mod(node->bclink.last_in + 1);
@@ -491,14 +498,14 @@ receive:
491 tipc_node_unlock(node); 498 tipc_node_unlock(node);
492 tipc_net_route_msg(buf); 499 tipc_net_route_msg(buf);
493 } 500 }
501 buf = NULL;
502 tipc_node_lock(node);
494 if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) { 503 if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
495 tipc_node_lock(node);
496 buf = deferred; 504 buf = deferred;
497 msg = buf_msg(buf); 505 msg = buf_msg(buf);
498 node->bclink.deferred_head = deferred->next; 506 node->bclink.deferred_head = deferred->next;
499 goto receive; 507 goto receive;
500 } 508 }
501 return;
502 } else if (less(next_in, seqno)) { 509 } else if (less(next_in, seqno)) {
503 u32 gap_after = node->bclink.gap_after; 510 u32 gap_after = node->bclink.gap_after;
504 u32 gap_to = node->bclink.gap_to; 511 u32 gap_to = node->bclink.gap_to;
@@ -513,6 +520,7 @@ receive:
513 else if (less(gap_after, seqno) && less(seqno, gap_to)) 520 else if (less(gap_after, seqno) && less(seqno, gap_to))
514 node->bclink.gap_to = seqno; 521 node->bclink.gap_to = seqno;
515 } 522 }
523 buf = NULL;
516 if (bclink_ack_allowed(node->bclink.nack_sync)) { 524 if (bclink_ack_allowed(node->bclink.nack_sync)) {
517 if (gap_to != gap_after) 525 if (gap_to != gap_after)
518 bclink_send_nack(node); 526 bclink_send_nack(node);
@@ -520,9 +528,11 @@ receive:
520 } 528 }
521 } else { 529 } else {
522 bcl->stats.duplicates++; 530 bcl->stats.duplicates++;
523 buf_discard(buf);
524 } 531 }
532unlock:
525 tipc_node_unlock(node); 533 tipc_node_unlock(node);
534exit:
535 buf_discard(buf);
526} 536}
527 537
528u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr) 538u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)