aboutsummaryrefslogtreecommitdiffstats
path: root/net/tipc/node.c
diff options
context:
space:
mode:
author: Ying Xue <ying.xue@windriver.com>  2014-03-27 00:54:39 -0400
committer: David S. Miller <davem@davemloft.net>  2014-03-27 13:08:38 -0400
commitdde2026608fbf24e1687a2b62c4752022f429252 (patch)
tree829cd112db96c7c1e7fcacb9544f36703c06ac4a /net/tipc/node.c
parent2220646a53aa588798653232e26172ec36ab06cd (diff)
tipc: use node list lock to protect tipc_num_links variable
Without a proper implicit or explicit read memory barrier, it is unsafe to read an atomic variable with atomic_read() from a thread different from the one changing the atomic variable with atomic_inc() or atomic_dec(). So a stale tipc_num_links value may be read with atomic_read() in tipc_node_get_links(). If the tipc_num_links variable type is converted from atomic to unsigned integer, and the node list lock is used to protect it, the issue is avoided. Signed-off-by: Ying Xue <ying.xue@windriver.com> Reviewed-by: Erik Hugne <erik.hugne@ericsson.com> Reviewed-by: Jon Maloy <jon.maloy@ericsson.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/tipc/node.c')
-rw-r--r--  net/tipc/node.c | 21
1 file changed, 11 insertions(+), 10 deletions(-)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 85405a6e3076..1d3a4999a70f 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -47,10 +47,9 @@ static void node_established_contact(struct tipc_node *n_ptr);
47static struct hlist_head node_htable[NODE_HTABLE_SIZE]; 47static struct hlist_head node_htable[NODE_HTABLE_SIZE];
48LIST_HEAD(tipc_node_list); 48LIST_HEAD(tipc_node_list);
49static u32 tipc_num_nodes; 49static u32 tipc_num_nodes;
50static u32 tipc_num_links;
50static DEFINE_SPINLOCK(node_list_lock); 51static DEFINE_SPINLOCK(node_list_lock);
51 52
52static atomic_t tipc_num_links = ATOMIC_INIT(0);
53
54/* 53/*
55 * A trivial power-of-two bitmask technique is used for speed, since this 54 * A trivial power-of-two bitmask technique is used for speed, since this
56 * operation is done for every incoming TIPC packet. The number of hash table 55 * operation is done for every incoming TIPC packet. The number of hash table
@@ -241,7 +240,9 @@ int tipc_node_is_up(struct tipc_node *n_ptr)
241void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr) 240void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
242{ 241{
243 n_ptr->links[l_ptr->b_ptr->identity] = l_ptr; 242 n_ptr->links[l_ptr->b_ptr->identity] = l_ptr;
244 atomic_inc(&tipc_num_links); 243 spin_lock_bh(&node_list_lock);
244 tipc_num_links++;
245 spin_unlock_bh(&node_list_lock);
245 n_ptr->link_cnt++; 246 n_ptr->link_cnt++;
246} 247}
247 248
@@ -253,7 +254,9 @@ void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
253 if (l_ptr != n_ptr->links[i]) 254 if (l_ptr != n_ptr->links[i])
254 continue; 255 continue;
255 n_ptr->links[i] = NULL; 256 n_ptr->links[i] = NULL;
256 atomic_dec(&tipc_num_links); 257 spin_lock_bh(&node_list_lock);
258 tipc_num_links--;
259 spin_unlock_bh(&node_list_lock);
257 n_ptr->link_cnt--; 260 n_ptr->link_cnt--;
258 } 261 }
259} 262}
@@ -393,18 +396,17 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
393 396
394 spin_lock_bh(&node_list_lock); 397 spin_lock_bh(&node_list_lock);
395 /* Get space for all unicast links + broadcast link */ 398 /* Get space for all unicast links + broadcast link */
396 payload_size = TLV_SPACE(sizeof(link_info)) * 399 payload_size = TLV_SPACE((sizeof(link_info)) * (tipc_num_links + 1));
397 (atomic_read(&tipc_num_links) + 1);
398 if (payload_size > 32768u) { 400 if (payload_size > 32768u) {
399 spin_unlock_bh(&node_list_lock); 401 spin_unlock_bh(&node_list_lock);
400 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 402 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
401 " (too many links)"); 403 " (too many links)");
402 } 404 }
405 spin_unlock_bh(&node_list_lock);
406
403 buf = tipc_cfg_reply_alloc(payload_size); 407 buf = tipc_cfg_reply_alloc(payload_size);
404 if (!buf) { 408 if (!buf)
405 spin_unlock_bh(&node_list_lock);
406 return NULL; 409 return NULL;
407 }
408 410
409 /* Add TLV for broadcast link */ 411 /* Add TLV for broadcast link */
410 link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr)); 412 link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr));
@@ -432,6 +434,5 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
432 tipc_node_unlock(n_ptr); 434 tipc_node_unlock(n_ptr);
433 } 435 }
434 rcu_read_unlock(); 436 rcu_read_unlock();
435 spin_unlock_bh(&node_list_lock);
436 return buf; 437 return buf;
437} 438}