aboutsummaryrefslogtreecommitdiffstats
path: root/net/tipc
diff options
context:
space:
mode:
authorYing Xue <ying.xue@windriver.com>2014-03-27 00:54:36 -0400
committerDavid S. Miller <davem@davemloft.net>2014-03-27 13:08:37 -0400
commit46651c59c483f14fd35cf7df2104feac0e54e258 (patch)
tree06945336cf5bab75a27df13f7a6408d5d1e88672 /net/tipc
parent987b58be376b8d087a9bb677f50592efc6ccb7c5 (diff)
tipc: rename node create lock to protect node list and hlist
When a node is created, the tipc_net_lock read lock is first held and then node_create_lock is grabbed in order to prevent the same node from being created and inserted into both the node list and hlist twice. But when we query a node from the two node lists, we only hold the tipc_net_lock read lock without grabbing node_create_lock. Obviously this locking policy cannot guarantee that the two node lists are always consistent, especially when they are modified and accessed from different contexts, as is currently the case. Therefore, rename node_create_lock to node_list_lock and use it to protect the two node lists; that is, whenever a node is inserted into them or queried from them, node_list_lock must be held. As a result, the tipc_net_lock read lock becomes redundant and can be removed from the node query functions. Signed-off-by: Ying Xue <ying.xue@windriver.com> Reviewed-by: Erik Hugne <erik.hugne@ericsson.com> Reviewed-by: Jon Maloy <jon.maloy@ericsson.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/tipc')
-rw-r--r--net/tipc/net.c7
-rw-r--r--net/tipc/node.c59
-rw-r--r--net/tipc/node.h4
3 files changed, 35 insertions, 35 deletions
diff --git a/net/tipc/net.c b/net/tipc/net.c
index bb171c3f90d3..0374a817631e 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -189,15 +189,14 @@ void tipc_net_start(u32 addr)
189 189
190void tipc_net_stop(void) 190void tipc_net_stop(void)
191{ 191{
192 struct tipc_node *node, *t_node;
193
194 if (!tipc_own_addr) 192 if (!tipc_own_addr)
195 return; 193 return;
194
196 write_lock_bh(&tipc_net_lock); 195 write_lock_bh(&tipc_net_lock);
197 tipc_bearer_stop(); 196 tipc_bearer_stop();
198 tipc_bclink_stop(); 197 tipc_bclink_stop();
199 list_for_each_entry_safe(node, t_node, &tipc_node_list, list) 198 tipc_node_stop();
200 tipc_node_delete(node);
201 write_unlock_bh(&tipc_net_lock); 199 write_unlock_bh(&tipc_net_lock);
200
202 pr_info("Left network mode\n"); 201 pr_info("Left network mode\n");
203} 202}
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 7c9b6673e2ab..ec8360736239 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -2,7 +2,7 @@
2 * net/tipc/node.c: TIPC node management routines 2 * net/tipc/node.c: TIPC node management routines
3 * 3 *
4 * Copyright (c) 2000-2006, 2012 Ericsson AB 4 * Copyright (c) 2000-2006, 2012 Ericsson AB
5 * Copyright (c) 2005-2006, 2010-2011, Wind River Systems 5 * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -44,11 +44,10 @@
44static void node_lost_contact(struct tipc_node *n_ptr); 44static void node_lost_contact(struct tipc_node *n_ptr);
45static void node_established_contact(struct tipc_node *n_ptr); 45static void node_established_contact(struct tipc_node *n_ptr);
46 46
47static DEFINE_SPINLOCK(node_create_lock);
48
49static struct hlist_head node_htable[NODE_HTABLE_SIZE]; 47static struct hlist_head node_htable[NODE_HTABLE_SIZE];
50LIST_HEAD(tipc_node_list); 48LIST_HEAD(tipc_node_list);
51static u32 tipc_num_nodes; 49static u32 tipc_num_nodes;
50static DEFINE_SPINLOCK(node_list_lock);
52 51
53static atomic_t tipc_num_links = ATOMIC_INIT(0); 52static atomic_t tipc_num_links = ATOMIC_INIT(0);
54 53
@@ -73,31 +72,26 @@ struct tipc_node *tipc_node_find(u32 addr)
73 if (unlikely(!in_own_cluster_exact(addr))) 72 if (unlikely(!in_own_cluster_exact(addr)))
74 return NULL; 73 return NULL;
75 74
75 spin_lock_bh(&node_list_lock);
76 hlist_for_each_entry(node, &node_htable[tipc_hashfn(addr)], hash) { 76 hlist_for_each_entry(node, &node_htable[tipc_hashfn(addr)], hash) {
77 if (node->addr == addr) 77 if (node->addr == addr) {
78 spin_unlock_bh(&node_list_lock);
78 return node; 79 return node;
80 }
79 } 81 }
82 spin_unlock_bh(&node_list_lock);
80 return NULL; 83 return NULL;
81} 84}
82 85
83/**
84 * tipc_node_create - create neighboring node
85 *
86 * Currently, this routine is called by neighbor discovery code, which holds
87 * net_lock for reading only. We must take node_create_lock to ensure a node
88 * isn't created twice if two different bearers discover the node at the same
89 * time. (It would be preferable to switch to holding net_lock in write mode,
90 * but this is a non-trivial change.)
91 */
92struct tipc_node *tipc_node_create(u32 addr) 86struct tipc_node *tipc_node_create(u32 addr)
93{ 87{
94 struct tipc_node *n_ptr, *temp_node; 88 struct tipc_node *n_ptr, *temp_node;
95 89
96 spin_lock_bh(&node_create_lock); 90 spin_lock_bh(&node_list_lock);
97 91
98 n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC); 92 n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
99 if (!n_ptr) { 93 if (!n_ptr) {
100 spin_unlock_bh(&node_create_lock); 94 spin_unlock_bh(&node_list_lock);
101 pr_warn("Node creation failed, no memory\n"); 95 pr_warn("Node creation failed, no memory\n");
102 return NULL; 96 return NULL;
103 } 97 }
@@ -120,11 +114,11 @@ struct tipc_node *tipc_node_create(u32 addr)
120 114
121 tipc_num_nodes++; 115 tipc_num_nodes++;
122 116
123 spin_unlock_bh(&node_create_lock); 117 spin_unlock_bh(&node_list_lock);
124 return n_ptr; 118 return n_ptr;
125} 119}
126 120
127void tipc_node_delete(struct tipc_node *n_ptr) 121static void tipc_node_delete(struct tipc_node *n_ptr)
128{ 122{
129 list_del(&n_ptr->list); 123 list_del(&n_ptr->list);
130 hlist_del(&n_ptr->hash); 124 hlist_del(&n_ptr->hash);
@@ -133,6 +127,16 @@ void tipc_node_delete(struct tipc_node *n_ptr)
133 tipc_num_nodes--; 127 tipc_num_nodes--;
134} 128}
135 129
130void tipc_node_stop(void)
131{
132 struct tipc_node *node, *t_node;
133
134 spin_lock_bh(&node_list_lock);
135 list_for_each_entry_safe(node, t_node, &tipc_node_list, list)
136 tipc_node_delete(node);
137 spin_unlock_bh(&node_list_lock);
138}
139
136/** 140/**
137 * tipc_node_link_up - handle addition of link 141 * tipc_node_link_up - handle addition of link
138 * 142 *
@@ -335,22 +339,22 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
335 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE 339 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
336 " (network address)"); 340 " (network address)");
337 341
338 read_lock_bh(&tipc_net_lock); 342 spin_lock_bh(&node_list_lock);
339 if (!tipc_num_nodes) { 343 if (!tipc_num_nodes) {
340 read_unlock_bh(&tipc_net_lock); 344 spin_unlock_bh(&node_list_lock);
341 return tipc_cfg_reply_none(); 345 return tipc_cfg_reply_none();
342 } 346 }
343 347
344 /* For now, get space for all other nodes */ 348 /* For now, get space for all other nodes */
345 payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes; 349 payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes;
346 if (payload_size > 32768u) { 350 if (payload_size > 32768u) {
347 read_unlock_bh(&tipc_net_lock); 351 spin_unlock_bh(&node_list_lock);
348 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 352 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
349 " (too many nodes)"); 353 " (too many nodes)");
350 } 354 }
351 buf = tipc_cfg_reply_alloc(payload_size); 355 buf = tipc_cfg_reply_alloc(payload_size);
352 if (!buf) { 356 if (!buf) {
353 read_unlock_bh(&tipc_net_lock); 357 spin_unlock_bh(&node_list_lock);
354 return NULL; 358 return NULL;
355 } 359 }
356 360
@@ -363,8 +367,7 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
363 tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO, 367 tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
364 &node_info, sizeof(node_info)); 368 &node_info, sizeof(node_info));
365 } 369 }
366 370 spin_unlock_bh(&node_list_lock);
367 read_unlock_bh(&tipc_net_lock);
368 return buf; 371 return buf;
369} 372}
370 373
@@ -387,19 +390,18 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
387 if (!tipc_own_addr) 390 if (!tipc_own_addr)
388 return tipc_cfg_reply_none(); 391 return tipc_cfg_reply_none();
389 392
390 read_lock_bh(&tipc_net_lock); 393 spin_lock_bh(&node_list_lock);
391
392 /* Get space for all unicast links + broadcast link */ 394 /* Get space for all unicast links + broadcast link */
393 payload_size = TLV_SPACE(sizeof(link_info)) * 395 payload_size = TLV_SPACE(sizeof(link_info)) *
394 (atomic_read(&tipc_num_links) + 1); 396 (atomic_read(&tipc_num_links) + 1);
395 if (payload_size > 32768u) { 397 if (payload_size > 32768u) {
396 read_unlock_bh(&tipc_net_lock); 398 spin_unlock_bh(&node_list_lock);
397 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED 399 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
398 " (too many links)"); 400 " (too many links)");
399 } 401 }
400 buf = tipc_cfg_reply_alloc(payload_size); 402 buf = tipc_cfg_reply_alloc(payload_size);
401 if (!buf) { 403 if (!buf) {
402 read_unlock_bh(&tipc_net_lock); 404 spin_unlock_bh(&node_list_lock);
403 return NULL; 405 return NULL;
404 } 406 }
405 407
@@ -427,7 +429,6 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
427 } 429 }
428 tipc_node_unlock(n_ptr); 430 tipc_node_unlock(n_ptr);
429 } 431 }
430 432 spin_unlock_bh(&node_list_lock);
431 read_unlock_bh(&tipc_net_lock);
432 return buf; 433 return buf;
433} 434}
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 63e2e8ead2fe..42038690c540 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -2,7 +2,7 @@
2 * net/tipc/node.h: Include file for TIPC node management routines 2 * net/tipc/node.h: Include file for TIPC node management routines
3 * 3 *
4 * Copyright (c) 2000-2006, Ericsson AB 4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, 2010-2011, Wind River Systems 5 * Copyright (c) 2005, 2010-2014, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Redistribution and use in source and binary forms, with or without 8 * Redistribution and use in source and binary forms, with or without
@@ -107,7 +107,7 @@ extern struct list_head tipc_node_list;
107 107
108struct tipc_node *tipc_node_find(u32 addr); 108struct tipc_node *tipc_node_find(u32 addr);
109struct tipc_node *tipc_node_create(u32 addr); 109struct tipc_node *tipc_node_create(u32 addr);
110void tipc_node_delete(struct tipc_node *n_ptr); 110void tipc_node_stop(void);
111void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr); 111void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
112void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr); 112void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
113void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr); 113void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr);