author	Ying Xue <ying.xue@windriver.com>	2014-04-20 22:55:48 -0400
committer	David S. Miller <davem@davemloft.net>	2014-04-22 21:17:53 -0400
commit	7216cd949c9bd56a4ccd952c624ab68f8c9aa0a4 (patch)
tree	9c4f86a2a04713294bbea8bbf9b19462f307f05b /net/tipc
parent	2231c5af451e4b7ae3cc56eaa4653af6ede51109 (diff)
tipc: purge tipc_net_lock lock
Now the tipc routing hierarchy comprises the structures 'node', 'link'
and 'bearer'. The whole hierarchy is protected by a big read/write lock,
tipc_net_lock, to ensure that nothing is added or removed while code is
accessing any of these structures. Obviously this locking policy binds
the node, link and bearer components closely together, so that their
relationship becomes unnecessarily complex. In the worst case, such a
locking policy not only hurts performance, but is also prone to
occasional deadlock.

In order to decouple the complex relationship between bearer and node
as well as link, the locking policy is adjusted as follows:

- Bearer level
  RTNL lock is used on the update side, and RCU is used on the read
  side. Meanwhile, all bearer instances, including the broadcast
  bearer, are saved into the bearer_list array.

- Node and link level
  All node instances are saved into two lists, tipc_node_list and
  node_htable. The two lists are protected by node_list_lock on the
  write side, and they are guarded with RCU lock on the read side. All
  members of the node structure, including its link instances, are
  protected by the node spin lock.

- The relationship between bearer and node
  When a link accesses a bearer, it first needs to find the bearer by
  its bearer identity in the bearer_list array. When a bearer accesses
  a node, it can iterate the node_htable hash list with the node
  address to find the corresponding node.

Under the new locking policy, every component has its own private
locking solution, and the relationship between bearer and node is very
simple: they can find each other by node address or bearer identity in
the node_htable hash list or the bearer_list array.

With all of the above changes in place, tipc_net_lock can now be
removed safely.

Signed-off-by: Ying Xue <ying.xue@windriver.com>
Reviewed-by: Jon Maloy <jon.maloy@ericsson.com>
Reviewed-by: Erik Hugne <erik.hugne@ericsson.com>
Tested-by: Erik Hugne <erik.hugne@ericsson.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
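As an illustration of the read side of the new bearer-level policy, a
lookup in bearer_list boils down to a plain RCU dereference. This is a
minimal sketch, not code from this patch: bearer_list, MAX_BEARERS and
the media send_msg() hook are real TIPC symbols, but the helper below
and its use are invented.

/* Sketch: resolve and use a bearer under RCU on a data path.
 * Writers publish/unpublish bearer_list[] entries under RTNL and free
 * them only after a grace period, so the pointer stays valid for the
 * whole read-side critical section.
 */
static void bearer_send_example(u32 bearer_id, struct sk_buff *skb)
{
	struct tipc_bearer *b;

	if (bearer_id >= MAX_BEARERS)
		return;
	rcu_read_lock();
	b = rcu_dereference(bearer_list[bearer_id]);
	if (b)
		b->media->send_msg(skb, b, &b->bcast_addr); /* use only inside RCU */
	rcu_read_unlock();
}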
Diffstat (limited to 'net/tipc')
-rw-r--r--	net/tipc/bcast.c      |   6
-rw-r--r--	net/tipc/bearer.c     |  31
-rw-r--r--	net/tipc/link.c       |  40
-rw-r--r--	net/tipc/name_distr.c |   2
-rw-r--r--	net/tipc/net.c        |  59
-rw-r--r--	net/tipc/net.h        |   2
-rw-r--r--	net/tipc/node.c       |   2
7 files changed, 42 insertions(+), 100 deletions(-)
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 51dab96ddd5f..0f32226db483 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -273,7 +273,7 @@ exit:
 /**
  * tipc_bclink_update_link_state - update broadcast link state
  *
- * tipc_net_lock and node lock set
+ * RCU and node lock set
  */
 void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
 {
@@ -335,8 +335,6 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
  *
  * Delay any upcoming NACK by this node if another node has already
  * requested the first message this node is going to ask for.
- *
- * Only tipc_net_lock set.
  */
 static void bclink_peek_nack(struct tipc_msg *msg)
 {
@@ -408,7 +406,7 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
 /**
  * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
  *
- * tipc_net_lock is read_locked, no other locks set
+ * RCU is locked, no other locks set
  */
 void tipc_bclink_rcv(struct sk_buff *buf)
 {
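The two comment changes above encode a calling convention rather than a
code change: after this patch the broadcast receive path is entered
under RCU (taken by the bearer-level receive code) instead of under
tipc_net_lock(read), with the node spin lock taken further down. A
minimal caller sketch under that assumption; the wrapper itself is
hypothetical:

/* Hypothetical wrapper showing the post-patch convention: RCU read
 * lock held instead of tipc_net_lock(read); tipc_bclink_rcv() handles
 * the node lock internally.
 */
static void bcast_rcv_example(struct sk_buff *skb)
{
	rcu_read_lock();
	tipc_bclink_rcv(skb);
	rcu_read_unlock();
}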
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index c24a35114fd7..1bd96eb465e1 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -198,7 +198,6 @@ struct sk_buff *tipc_bearer_get_names(void)
 	if (!buf)
 		return NULL;
 
-	read_lock_bh(&tipc_net_lock);
 	for (i = 0; media_info_array[i] != NULL; i++) {
 		for (j = 0; j < MAX_BEARERS; j++) {
 			b = rtnl_dereference(bearer_list[j]);
@@ -211,7 +210,6 @@ struct sk_buff *tipc_bearer_get_names(void)
 			}
 		}
 	}
-	read_unlock_bh(&tipc_net_lock);
 	return buf;
 }
 
@@ -285,13 +283,11 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
 		return -EINVAL;
 	}
 
-	write_lock_bh(&tipc_net_lock);
-
 	m_ptr = tipc_media_find(b_names.media_name);
 	if (!m_ptr) {
 		pr_warn("Bearer <%s> rejected, media <%s> not registered\n",
 			name, b_names.media_name);
-		goto exit;
+		return -EINVAL;
 	}
 
 	if (priority == TIPC_MEDIA_LINK_PRI)
@@ -309,14 +305,14 @@ restart:
 		if (!strcmp(name, b_ptr->name)) {
 			pr_warn("Bearer <%s> rejected, already enabled\n",
 				name);
-			goto exit;
+			return -EINVAL;
 		}
 		if ((b_ptr->priority == priority) &&
 		    (++with_this_prio > 2)) {
 			if (priority-- == 0) {
 				pr_warn("Bearer <%s> rejected, duplicate priority\n",
 					name);
-				goto exit;
+				return -EINVAL;
 			}
 			pr_warn("Bearer <%s> priority adjustment required %u->%u\n",
 				name, priority + 1, priority);
@@ -326,21 +322,20 @@ restart:
 	if (bearer_id >= MAX_BEARERS) {
 		pr_warn("Bearer <%s> rejected, bearer limit reached (%u)\n",
 			name, MAX_BEARERS);
-		goto exit;
+		return -EINVAL;
 	}
 
 	b_ptr = kzalloc(sizeof(*b_ptr), GFP_ATOMIC);
-	if (!b_ptr) {
-		res = -ENOMEM;
-		goto exit;
-	}
+	if (!b_ptr)
+		return -ENOMEM;
+
 	strcpy(b_ptr->name, name);
 	b_ptr->media = m_ptr;
 	res = m_ptr->enable_media(b_ptr);
 	if (res) {
 		pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
 			name, -res);
-		goto exit;
+		return -EINVAL;
 	}
 
 	b_ptr->identity = bearer_id;
@@ -355,7 +350,7 @@ restart:
 		bearer_disable(b_ptr, false);
 		pr_warn("Bearer <%s> rejected, discovery object creation failed\n",
 			name);
-		goto exit;
+		return -EINVAL;
 	}
 
 	rcu_assign_pointer(bearer_list[bearer_id], b_ptr);
@@ -363,8 +358,6 @@ restart:
 	pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
 		name,
 		tipc_addr_string_fill(addr_string, disc_domain), priority);
-exit:
-	write_unlock_bh(&tipc_net_lock);
 	return res;
 }
 
@@ -373,19 +366,17 @@ exit:
  */
 static int tipc_reset_bearer(struct tipc_bearer *b_ptr)
 {
-	read_lock_bh(&tipc_net_lock);
 	pr_info("Resetting bearer <%s>\n", b_ptr->name);
 	tipc_disc_delete(b_ptr->link_req);
 	tipc_link_reset_list(b_ptr->identity);
 	tipc_disc_create(b_ptr, &b_ptr->bcast_addr);
-	read_unlock_bh(&tipc_net_lock);
 	return 0;
 }
 
 /**
  * bearer_disable
  *
- * Note: This routine assumes caller holds tipc_net_lock.
+ * Note: This routine assumes caller holds RTNL lock.
  */
 static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down)
 {
@@ -412,7 +403,6 @@ int tipc_disable_bearer(const char *name)
 	struct tipc_bearer *b_ptr;
 	int res;
 
-	write_lock_bh(&tipc_net_lock);
 	b_ptr = tipc_bearer_find(name);
 	if (b_ptr == NULL) {
 		pr_warn("Attempt to disable unknown bearer <%s>\n", name);
@@ -421,7 +411,6 @@ int tipc_disable_bearer(const char *name)
 		bearer_disable(b_ptr, false);
 		res = 0;
 	}
-	write_unlock_bh(&tipc_net_lock);
 	return res;
 }
 
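The early returns above are only safe because bearer enable/disable now
run entirely under RTNL. On the update side, removal must unpublish the
bearer pointer before freeing it, so that concurrent RCU readers never
see a stale entry. A sketch of that pattern, assuming a simplified
teardown rather than the exact bearer_disable() body:

/* Sketch: RTNL-serialized removal. Unpublish the entry, wait out the
 * RCU grace period, then free. Caller is assumed to hold rtnl_lock().
 */
static void bearer_remove_example(u32 bearer_id)
{
	struct tipc_bearer *b;

	b = rtnl_dereference(bearer_list[bearer_id]);
	if (!b)
		return;
	RCU_INIT_POINTER(bearer_list[bearer_id], NULL);
	synchronize_net();	/* no reader can still hold a reference */
	kfree(b);
}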
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 229d478494b9..c723ee90219d 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -835,7 +835,6 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector)
 	struct tipc_node *n_ptr;
 	int res = -ELINKCONG;
 
-	read_lock_bh(&tipc_net_lock);
 	n_ptr = tipc_node_find(dest);
 	if (n_ptr) {
 		tipc_node_lock(n_ptr);
@@ -848,7 +847,6 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector)
 	} else {
 		kfree_skb(buf);
 	}
-	read_unlock_bh(&tipc_net_lock);
 	return res;
 }
 
@@ -912,7 +910,6 @@ void tipc_link_names_xmit(struct list_head *message_list, u32 dest)
 	if (list_empty(message_list))
 		return;
 
-	read_lock_bh(&tipc_net_lock);
 	n_ptr = tipc_node_find(dest);
 	if (n_ptr) {
 		tipc_node_lock(n_ptr);
@@ -927,7 +924,6 @@ void tipc_link_names_xmit(struct list_head *message_list, u32 dest)
 		}
 		tipc_node_unlock(n_ptr);
 	}
-	read_unlock_bh(&tipc_net_lock);
 
 	/* discard the messages if they couldn't be sent */
 	list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
@@ -989,7 +985,6 @@ again:
 	if (unlikely(res < 0))
 		return res;
 
-	read_lock_bh(&tipc_net_lock);
 	node = tipc_node_find(destaddr);
 	if (likely(node)) {
 		tipc_node_lock(node);
@@ -1000,7 +995,6 @@ again:
 							 &sender->max_pkt);
 exit:
 				tipc_node_unlock(node);
-				read_unlock_bh(&tipc_net_lock);
 				return res;
 			}
 
@@ -1017,7 +1011,6 @@ exit:
 		 */
 		sender->max_pkt = l_ptr->max_pkt;
 		tipc_node_unlock(node);
-		read_unlock_bh(&tipc_net_lock);
 
 
 		if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
@@ -1028,7 +1021,6 @@ exit:
 		}
 		tipc_node_unlock(node);
 	}
-	read_unlock_bh(&tipc_net_lock);
 
 	/* Couldn't find a link to the destination node */
 	kfree_skb(buf);
@@ -1273,12 +1265,9 @@ static void link_reset_all(unsigned long addr)
 	char addr_string[16];
 	u32 i;
 
-	read_lock_bh(&tipc_net_lock);
 	n_ptr = tipc_node_find((u32)addr);
-	if (!n_ptr) {
-		read_unlock_bh(&tipc_net_lock);
+	if (!n_ptr)
 		return;	/* node no longer exists */
-	}
 
 	tipc_node_lock(n_ptr);
 
@@ -1293,7 +1282,6 @@ static void link_reset_all(unsigned long addr)
 	}
 
 	tipc_node_unlock(n_ptr);
-	read_unlock_bh(&tipc_net_lock);
 }
 
 static void link_retransmit_failure(struct tipc_link *l_ptr,
@@ -1458,7 +1446,6 @@ static int link_recv_buf_validate(struct sk_buff *buf)
  */
 void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
 {
-	read_lock_bh(&tipc_net_lock);
 	while (head) {
 		struct tipc_node *n_ptr;
 		struct tipc_link *l_ptr;
@@ -1646,7 +1633,6 @@ unlock_discard:
 discard:
 		kfree_skb(buf);
 	}
-	read_unlock_bh(&tipc_net_lock);
 }
 
 /**
@@ -2408,8 +2394,6 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
 /* tipc_link_find_owner - locate owner node of link by link's name
  * @name: pointer to link name string
  * @bearer_id: pointer to index in 'node->links' array where the link was found.
- * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
- * this also prevents link deletion.
  *
  * Returns pointer to node owning the link, or 0 if no matching link is found.
  */
@@ -2471,7 +2455,7 @@ static int link_value_is_valid(u16 cmd, u32 new_value)
  * @new_value: new value of link, bearer, or media setting
  * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
  *
- * Caller must hold 'tipc_net_lock' to ensure link/bearer/media is not deleted.
+ * Caller must hold RTNL lock to ensure link/bearer/media is not deleted.
  *
  * Returns 0 if value updated and negative value on error.
  */
@@ -2577,9 +2561,7 @@ struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space
2577 " (cannot change setting on broadcast link)"); 2561 " (cannot change setting on broadcast link)");
2578 } 2562 }
2579 2563
2580 read_lock_bh(&tipc_net_lock);
2581 res = link_cmd_set_value(args->name, new_value, cmd); 2564 res = link_cmd_set_value(args->name, new_value, cmd);
2582 read_unlock_bh(&tipc_net_lock);
2583 if (res) 2565 if (res)
2584 return tipc_cfg_reply_error_string("cannot change link setting"); 2566 return tipc_cfg_reply_error_string("cannot change link setting");
2585 2567
@@ -2613,22 +2595,18 @@ struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_
 			return tipc_cfg_reply_error_string("link not found");
 		return tipc_cfg_reply_none();
 	}
-	read_lock_bh(&tipc_net_lock);
 	node = tipc_link_find_owner(link_name, &bearer_id);
-	if (!node) {
-		read_unlock_bh(&tipc_net_lock);
+	if (!node)
 		return tipc_cfg_reply_error_string("link not found");
-	}
+
 	tipc_node_lock(node);
 	l_ptr = node->links[bearer_id];
 	if (!l_ptr) {
 		tipc_node_unlock(node);
-		read_unlock_bh(&tipc_net_lock);
 		return tipc_cfg_reply_error_string("link not found");
 	}
 	link_reset_statistics(l_ptr);
 	tipc_node_unlock(node);
-	read_unlock_bh(&tipc_net_lock);
 	return tipc_cfg_reply_none();
 }
 
@@ -2661,18 +2639,15 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
 	if (!strcmp(name, tipc_bclink_name))
 		return tipc_bclink_stats(buf, buf_size);
 
-	read_lock_bh(&tipc_net_lock);
 	node = tipc_link_find_owner(name, &bearer_id);
-	if (!node) {
-		read_unlock_bh(&tipc_net_lock);
+	if (!node)
 		return 0;
-	}
+
 	tipc_node_lock(node);
 
 	l = node->links[bearer_id];
 	if (!l) {
 		tipc_node_unlock(node);
-		read_unlock_bh(&tipc_net_lock);
 		return 0;
 	}
 
@@ -2738,7 +2713,6 @@ static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
 			     (s->accu_queue_sz / s->queue_sz_counts) : 0);
 
 	tipc_node_unlock(node);
-	read_unlock_bh(&tipc_net_lock);
 	return ret;
 }
 
@@ -2789,7 +2763,6 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
 	if (dest == tipc_own_addr)
 		return MAX_MSG_SIZE;
 
-	read_lock_bh(&tipc_net_lock);
 	n_ptr = tipc_node_find(dest);
 	if (n_ptr) {
 		tipc_node_lock(n_ptr);
@@ -2798,7 +2771,6 @@ u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
 			res = l_ptr->max_pkt;
 		tipc_node_unlock(n_ptr);
 	}
-	read_unlock_bh(&tipc_net_lock);
 	return res;
 }
 
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index aff8041dc157..36a72822601c 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -248,7 +248,6 @@ void tipc_named_node_up(unsigned long nodearg)
 	u32 max_item_buf = 0;
 
 	/* compute maximum amount of publication data to send per message */
-	read_lock_bh(&tipc_net_lock);
 	n_ptr = tipc_node_find(node);
 	if (n_ptr) {
 		tipc_node_lock(n_ptr);
@@ -258,7 +257,6 @@ void tipc_named_node_up(unsigned long nodearg)
 				ITEM_SIZE) * ITEM_SIZE;
 		tipc_node_unlock(n_ptr);
 	}
-	read_unlock_bh(&tipc_net_lock);
 	if (!max_item_buf)
 		return;
 
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 24d2d21266a4..75bb39025d53 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -45,39 +45,34 @@
 /*
  * The TIPC locking policy is designed to ensure a very fine locking
  * granularity, permitting complete parallel access to individual
- * port and node/link instances. The code consists of three major
+ * port and node/link instances. The code consists of four major
  * locking domains, each protected with their own disjunct set of locks.
  *
- * 1: The routing hierarchy.
- *    Comprises the structures 'zone', 'cluster', 'node', 'link'
- *    and 'bearer'. The whole hierarchy is protected by a big
- *    read/write lock, tipc_net_lock, to enssure that nothing is added
- *    or removed while code is accessing any of these structures.
- *    This layer must not be called from the two others while they
- *    hold any of their own locks.
- *    Neither must it itself do any upcalls to the other two before
- *    it has released tipc_net_lock and other protective locks.
+ * 1: The bearer level.
+ *    RTNL lock is used to serialize the process of configuring bearer
+ *    on update side, and RCU lock is applied on read side to make
+ *    bearer instance valid on both paths of message transmission and
+ *    reception.
  *
- *    Within the tipc_net_lock domain there are two sub-domains;'node' and
- *    'bearer', where local write operations are permitted,
- *    provided that those are protected by individual spin_locks
- *    per instance. Code holding tipc_net_lock(read) and a node spin_lock
- *    is permitted to poke around in both the node itself and its
- *    subordinate links. I.e, it can update link counters and queues,
- *    change link state, send protocol messages, and alter the
- *    "active_links" array in the node; but it can _not_ remove a link
- *    or a node from the overall structure.
- *    Correspondingly, individual bearers may change status within a
- *    tipc_net_lock(read), protected by an individual spin_lock ber bearer
- *    instance, but it needs tipc_net_lock(write) to remove/add any bearers.
+ * 2: The node and link level.
+ *    All node instances are saved into two tipc_node_list and node_htable
+ *    lists. The two lists are protected by node_list_lock on write side,
+ *    and they are guarded with RCU lock on read side. Especially node
+ *    instance is destroyed only when TIPC module is removed, and we can
+ *    confirm that there has no any user who is accessing the node at the
+ *    moment. Therefore, Except for iterating the two lists within RCU
+ *    protection, it's no needed to hold RCU that we access node instance
+ *    in other places.
  *
+ *    In addition, all members in node structure including link instances
+ *    are protected by node spin lock.
  *
- * 2: The transport level of the protocol.
+ * 3: The transport level of the protocol.
  *    This consists of the structures port, (and its user level
  *    representations, such as user_port and tipc_sock), reference and
  *    tipc_user (port.c, reg.c, socket.c).
  *
  *    This layer has four different locks:
  *    - The tipc_port spin_lock. This is protecting each port instance
  *      from parallel data access and removal. Since we can not place
  *      this lock in the port itself, it has been placed in the
@@ -96,7 +91,7 @@
  * There are two such lists; 'port_list', which is used for management,
  * and 'wait_list', which is used to queue ports during congestion.
  *
- * 3: The name table (name_table.c, name_distr.c, subscription.c)
+ * 4: The name table (name_table.c, name_distr.c, subscription.c)
  *   - There is one big read/write-lock (tipc_nametbl_lock) protecting the
  *     overall name table structure. Nothing must be added/removed to
  *     this structure without holding write access to it.
@@ -108,8 +103,6 @@
  *   - A local spin_lock protecting the queue of subscriber events.
 */
 
-DEFINE_RWLOCK(tipc_net_lock);
-
 static void net_route_named_msg(struct sk_buff *buf)
 {
 	struct tipc_msg *msg = buf_msg(buf);
@@ -175,15 +168,13 @@ void tipc_net_start(u32 addr)
 {
 	char addr_string[16];
 
-	write_lock_bh(&tipc_net_lock);
 	tipc_own_addr = addr;
 	tipc_named_reinit();
 	tipc_port_reinit();
 	tipc_bclink_init();
-	write_unlock_bh(&tipc_net_lock);
-
 	tipc_nametbl_publish(TIPC_CFG_SRV, tipc_own_addr, tipc_own_addr,
 			     TIPC_ZONE_SCOPE, 0, tipc_own_addr);
+
 	pr_info("Started in network mode\n");
 	pr_info("Own node address %s, network identity %u\n",
 		tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
@@ -196,11 +187,9 @@ void tipc_net_stop(void)
 
 	tipc_nametbl_withdraw(TIPC_CFG_SRV, tipc_own_addr, 0, tipc_own_addr);
 	rtnl_lock();
-	write_lock_bh(&tipc_net_lock);
 	tipc_bearer_stop();
 	tipc_bclink_stop();
 	tipc_node_stop();
-	write_unlock_bh(&tipc_net_lock);
 	rtnl_unlock();
 
 	pr_info("Left network mode\n");
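The node-level rules in the rewritten comment above map onto a usage
pattern that recurs throughout this patch: find the node via the
RCU-guarded node_htable, then take the per-node spin lock before
touching its members. A minimal sketch using the real helpers; the
surrounding function is invented:

/* Sketch: per-node locking after tipc_net_lock is gone. The hash
 * lookup is RCU-safe inside tipc_node_find(); node members and link
 * instances are protected by the node spin lock alone.
 */
static void node_update_example(u32 addr)
{
	struct tipc_node *n_ptr;

	n_ptr = tipc_node_find(addr);
	if (!n_ptr)
		return;
	tipc_node_lock(n_ptr);
	/* ... update link counters, active_links[], state ... */
	tipc_node_unlock(n_ptr);
}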
diff --git a/net/tipc/net.h b/net/tipc/net.h
index 079daadb3f72..f781cae8df4b 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -37,8 +37,6 @@
 #ifndef _TIPC_NET_H
 #define _TIPC_NET_H
 
-extern rwlock_t tipc_net_lock;
-
 void tipc_net_route_msg(struct sk_buff *buf);
 
 void tipc_net_start(u32 addr);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index fa6823f6457a..be90115cda1a 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -273,14 +273,12 @@ static void node_name_purge_complete(unsigned long node_addr)
 {
 	struct tipc_node *n_ptr;
 
-	read_lock_bh(&tipc_net_lock);
 	n_ptr = tipc_node_find(node_addr);
 	if (n_ptr) {
 		tipc_node_lock(n_ptr);
 		n_ptr->block_setup &= ~WAIT_NAMES_GONE;
 		tipc_node_unlock(n_ptr);
 	}
-	read_unlock_bh(&tipc_net_lock);
 }
 
 static void node_lost_contact(struct tipc_node *n_ptr)