aboutsummaryrefslogtreecommitdiffstats
path: root/net/tipc/node.c
diff options
context:
space:
mode:
authorJon Paul Maloy <jon.maloy@ericsson.com>2016-02-10 16:14:57 -0500
committerDavid S. Miller <davem@davemloft.net>2016-02-16 15:57:11 -0500
commitd5c91fb72f1652ea3026925240a0998a42ddb16b (patch)
tree8e731cefe0bc105ddede8e43ff947fe9903a8ee3 /net/tipc/node.c
parent7facc5fbde878b2441962748823966bf2ec1d446 (diff)
tipc: fix premature addition of node to lookup table
In commit 5266698661401a ("tipc: let broadcast packet reception use new link receive function") we introduced a new per-node broadcast reception link instance. This link is created at the moment the node itself is created. Unfortunately, the allocation is done after the node instance has already been added to the node lookup hash table. This creates a potential race condition, where arriving broadcast packets are able to find and access the node before it has been fully initialized, and before the above mentioned link has been created. The result is occasional crashes in the function tipc_bcast_rcv(), which is trying to access the not-yet existing link. We fix this by deferring the addition of the node instance until after it has been fully initialized in the function tipc_node_create(). Acked-by: Ying Xue <ying.xue@windriver.com> Signed-off-by: Jon Maloy <jon.maloy@ericsson.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/tipc/node.c')
-rw-r--r--net/tipc/node.c12
1 file changed, 6 insertions, 6 deletions
diff --git a/net/tipc/node.c b/net/tipc/node.c
index fa97d9649a28..9d7a16fc5ca4 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -346,12 +346,6 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
346 skb_queue_head_init(&n->bc_entry.inputq2); 346 skb_queue_head_init(&n->bc_entry.inputq2);
347 for (i = 0; i < MAX_BEARERS; i++) 347 for (i = 0; i < MAX_BEARERS; i++)
348 spin_lock_init(&n->links[i].lock); 348 spin_lock_init(&n->links[i].lock);
349 hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
350 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
351 if (n->addr < temp_node->addr)
352 break;
353 }
354 list_add_tail_rcu(&n->list, &temp_node->list);
355 n->state = SELF_DOWN_PEER_LEAVING; 349 n->state = SELF_DOWN_PEER_LEAVING;
356 n->signature = INVALID_NODE_SIG; 350 n->signature = INVALID_NODE_SIG;
357 n->active_links[0] = INVALID_BEARER_ID; 351 n->active_links[0] = INVALID_BEARER_ID;
@@ -372,6 +366,12 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
372 tipc_node_get(n); 366 tipc_node_get(n);
373 setup_timer(&n->timer, tipc_node_timeout, (unsigned long)n); 367 setup_timer(&n->timer, tipc_node_timeout, (unsigned long)n);
374 n->keepalive_intv = U32_MAX; 368 n->keepalive_intv = U32_MAX;
369 hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
370 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
371 if (n->addr < temp_node->addr)
372 break;
373 }
374 list_add_tail_rcu(&n->list, &temp_node->list);
375exit: 375exit:
376 spin_unlock_bh(&tn->node_list_lock); 376 spin_unlock_bh(&tn->node_list_lock);
377 return n; 377 return n;