about summary refs log tree commit diff stats
path: root/net/tipc/node.c
diff options
context:
space:
mode:
authorYing Xue <ying.xue@windriver.com>2014-03-27 00:54:37 -0400
committerDavid S. Miller <davem@davemloft.net>2014-03-27 13:08:37 -0400
commit6c7a762e70637a256229f9dc9ca793908e8bd01b (patch)
treead8c61e1fa48fdc36e799be6f8f67d257ee080b1 /net/tipc/node.c
parent46651c59c483f14fd35cf7df2104feac0e54e258 (diff)
tipc: convert node list and node hlist to RCU lists
Convert tipc_node_list list and node_htable hash list to RCU lists. On read side, the two lists are protected with RCU read lock, and on update side, node_list_lock is applied to them. Signed-off-by: Ying Xue <ying.xue@windriver.com> Reviewed-by: Erik Hugne <erik.hugne@ericsson.com> Reviewed-by: Jon Maloy <jon.maloy@ericsson.com> Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/tipc/node.c')
-rw-r--r--net/tipc/node.c28
1 file changed, 16 insertions(+), 12 deletions(-)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index ec8360736239..4f517ff783d9 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -72,14 +72,14 @@ struct tipc_node *tipc_node_find(u32 addr)
72 if (unlikely(!in_own_cluster_exact(addr))) 72 if (unlikely(!in_own_cluster_exact(addr)))
73 return NULL; 73 return NULL;
74 74
75 spin_lock_bh(&node_list_lock); 75 rcu_read_lock();
76 hlist_for_each_entry(node, &node_htable[tipc_hashfn(addr)], hash) { 76 hlist_for_each_entry_rcu(node, &node_htable[tipc_hashfn(addr)], hash) {
77 if (node->addr == addr) { 77 if (node->addr == addr) {
78 spin_unlock_bh(&node_list_lock); 78 rcu_read_unlock();
79 return node; 79 return node;
80 } 80 }
81 } 81 }
82 spin_unlock_bh(&node_list_lock); 82 rcu_read_unlock();
83 return NULL; 83 return NULL;
84} 84}
85 85
@@ -102,13 +102,13 @@ struct tipc_node *tipc_node_create(u32 addr)
102 INIT_LIST_HEAD(&n_ptr->list); 102 INIT_LIST_HEAD(&n_ptr->list);
103 INIT_LIST_HEAD(&n_ptr->nsub); 103 INIT_LIST_HEAD(&n_ptr->nsub);
104 104
105 hlist_add_head(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]); 105 hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);
106 106
107 list_for_each_entry(temp_node, &tipc_node_list, list) { 107 list_for_each_entry_rcu(temp_node, &tipc_node_list, list) {
108 if (n_ptr->addr < temp_node->addr) 108 if (n_ptr->addr < temp_node->addr)
109 break; 109 break;
110 } 110 }
111 list_add_tail(&n_ptr->list, &temp_node->list); 111 list_add_tail_rcu(&n_ptr->list, &temp_node->list);
112 n_ptr->block_setup = WAIT_PEER_DOWN; 112 n_ptr->block_setup = WAIT_PEER_DOWN;
113 n_ptr->signature = INVALID_NODE_SIG; 113 n_ptr->signature = INVALID_NODE_SIG;
114 114
@@ -120,9 +120,9 @@ struct tipc_node *tipc_node_create(u32 addr)
120 120
121static void tipc_node_delete(struct tipc_node *n_ptr) 121static void tipc_node_delete(struct tipc_node *n_ptr)
122{ 122{
123 list_del(&n_ptr->list); 123 list_del_rcu(&n_ptr->list);
124 hlist_del(&n_ptr->hash); 124 hlist_del_rcu(&n_ptr->hash);
125 kfree(n_ptr); 125 kfree_rcu(n_ptr, rcu);
126 126
127 tipc_num_nodes--; 127 tipc_num_nodes--;
128} 128}
@@ -359,7 +359,8 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
359 } 359 }
360 360
361 /* Add TLVs for all nodes in scope */ 361 /* Add TLVs for all nodes in scope */
362 list_for_each_entry(n_ptr, &tipc_node_list, list) { 362 rcu_read_lock();
363 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
363 if (!tipc_in_scope(domain, n_ptr->addr)) 364 if (!tipc_in_scope(domain, n_ptr->addr))
364 continue; 365 continue;
365 node_info.addr = htonl(n_ptr->addr); 366 node_info.addr = htonl(n_ptr->addr);
@@ -367,6 +368,7 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
367 tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO, 368 tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
368 &node_info, sizeof(node_info)); 369 &node_info, sizeof(node_info));
369 } 370 }
371 rcu_read_unlock();
370 spin_unlock_bh(&node_list_lock); 372 spin_unlock_bh(&node_list_lock);
371 return buf; 373 return buf;
372} 374}
@@ -412,7 +414,8 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
412 tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info)); 414 tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
413 415
414 /* Add TLVs for any other links in scope */ 416 /* Add TLVs for any other links in scope */
415 list_for_each_entry(n_ptr, &tipc_node_list, list) { 417 rcu_read_lock();
418 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
416 u32 i; 419 u32 i;
417 420
418 if (!tipc_in_scope(domain, n_ptr->addr)) 421 if (!tipc_in_scope(domain, n_ptr->addr))
@@ -429,6 +432,7 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
429 } 432 }
430 tipc_node_unlock(n_ptr); 433 tipc_node_unlock(n_ptr);
431 } 434 }
435 rcu_read_unlock();
432 spin_unlock_bh(&node_list_lock); 436 spin_unlock_bh(&node_list_lock);
433 return buf; 437 return buf;
434} 438}