path: root/net/tipc/node.c
Diffstat (limited to 'net/tipc/node.c')
-rw-r--r--  net/tipc/node.c | 119
1 file changed, 62 insertions(+), 57 deletions(-)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index efe4d41bf11b..1d3a4999a70f 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -2,7 +2,7 @@
  * net/tipc/node.c: TIPC node management routines
  *
  * Copyright (c) 2000-2006, 2012 Ericsson AB
- * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
+ * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -44,13 +44,11 @@
 static void node_lost_contact(struct tipc_node *n_ptr);
 static void node_established_contact(struct tipc_node *n_ptr);
 
-static DEFINE_SPINLOCK(node_create_lock);
-
 static struct hlist_head node_htable[NODE_HTABLE_SIZE];
 LIST_HEAD(tipc_node_list);
 static u32 tipc_num_nodes;
-
-static atomic_t tipc_num_links = ATOMIC_INIT(0);
+static u32 tipc_num_links;
+static DEFINE_SPINLOCK(node_list_lock);
 
 /*
  * A trivial power-of-two bitmask technique is used for speed, since this
@@ -73,37 +71,26 @@ struct tipc_node *tipc_node_find(u32 addr)
 	if (unlikely(!in_own_cluster_exact(addr)))
 		return NULL;
 
-	hlist_for_each_entry(node, &node_htable[tipc_hashfn(addr)], hash) {
-		if (node->addr == addr)
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(node, &node_htable[tipc_hashfn(addr)], hash) {
+		if (node->addr == addr) {
+			rcu_read_unlock();
 			return node;
+		}
 	}
+	rcu_read_unlock();
 	return NULL;
 }
 
-/**
- * tipc_node_create - create neighboring node
- *
- * Currently, this routine is called by neighbor discovery code, which holds
- * net_lock for reading only. We must take node_create_lock to ensure a node
- * isn't created twice if two different bearers discover the node at the same
- * time. (It would be preferable to switch to holding net_lock in write mode,
- * but this is a non-trivial change.)
- */
 struct tipc_node *tipc_node_create(u32 addr)
 {
 	struct tipc_node *n_ptr, *temp_node;
 
-	spin_lock_bh(&node_create_lock);
-
-	n_ptr = tipc_node_find(addr);
-	if (n_ptr) {
-		spin_unlock_bh(&node_create_lock);
-		return n_ptr;
-	}
+	spin_lock_bh(&node_list_lock);
 
 	n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
 	if (!n_ptr) {
-		spin_unlock_bh(&node_create_lock);
+		spin_unlock_bh(&node_list_lock);
 		pr_warn("Node creation failed, no memory\n");
 		return NULL;
 	}
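The hunk above is the heart of the conversion: tipc_node_find() becomes a lockless RCU reader, while tipc_node_create() keeps serializing writers on node_list_lock. A minimal sketch of that reader/writer split, assuming a simplified node type (the demo_* names and sizes are illustrative, not TIPC's):

/* Hedged sketch of the RCU hash-lookup pattern; demo_* is not TIPC. */
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#define DEMO_HTABLE_SIZE 32	/* power of two, like NODE_HTABLE_SIZE */

struct demo_node {
	u32 addr;
	struct hlist_node hash;
	struct rcu_head rcu;
};

static struct hlist_head demo_htable[DEMO_HTABLE_SIZE];
static DEFINE_SPINLOCK(demo_list_lock);	/* serializes writers only */

/* Lock-free reader: hlist_for_each_entry_rcu() tolerates concurrent
 * RCU insertions and removals while we stay inside the read section.
 */
static struct demo_node *demo_find(u32 addr)
{
	struct demo_node *node;

	rcu_read_lock();
	hlist_for_each_entry_rcu(node,
				 &demo_htable[addr & (DEMO_HTABLE_SIZE - 1)],
				 hash) {
		if (node->addr == addr) {
			rcu_read_unlock();
			return node;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/* Writer: the spinlock orders concurrent creators; hlist_add_head_rcu()
 * publishes the node so readers only ever see a fully initialized entry.
 */
static struct demo_node *demo_create(u32 addr)
{
	struct demo_node *node = kzalloc(sizeof(*node), GFP_ATOMIC);

	if (!node)
		return NULL;
	node->addr = addr;
	spin_lock_bh(&demo_list_lock);
	hlist_add_head_rcu(&node->hash,
			   &demo_htable[addr & (DEMO_HTABLE_SIZE - 1)]);
	spin_unlock_bh(&demo_list_lock);
	return node;
}

As in the patched tipc_node_find(), the pointer is returned after rcu_read_unlock(); that only works because this patch makes tipc_node_delete() static and calls it solely from tipc_node_stop() at shutdown, so a found node outlives the lookup.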
@@ -114,31 +101,41 @@ struct tipc_node *tipc_node_create(u32 addr)
 	INIT_LIST_HEAD(&n_ptr->list);
 	INIT_LIST_HEAD(&n_ptr->nsub);
 
-	hlist_add_head(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);
+	hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);
 
-	list_for_each_entry(temp_node, &tipc_node_list, list) {
+	list_for_each_entry_rcu(temp_node, &tipc_node_list, list) {
 		if (n_ptr->addr < temp_node->addr)
 			break;
 	}
-	list_add_tail(&n_ptr->list, &temp_node->list);
+	list_add_tail_rcu(&n_ptr->list, &temp_node->list);
 	n_ptr->block_setup = WAIT_PEER_DOWN;
 	n_ptr->signature = INVALID_NODE_SIG;
 
 	tipc_num_nodes++;
 
-	spin_unlock_bh(&node_create_lock);
+	spin_unlock_bh(&node_list_lock);
 	return n_ptr;
 }
 
-void tipc_node_delete(struct tipc_node *n_ptr)
+static void tipc_node_delete(struct tipc_node *n_ptr)
 {
-	list_del(&n_ptr->list);
-	hlist_del(&n_ptr->hash);
-	kfree(n_ptr);
+	list_del_rcu(&n_ptr->list);
+	hlist_del_rcu(&n_ptr->hash);
+	kfree_rcu(n_ptr, rcu);
 
 	tipc_num_nodes--;
 }
 
+void tipc_node_stop(void)
+{
+	struct tipc_node *node, *t_node;
+
+	spin_lock_bh(&node_list_lock);
+	list_for_each_entry_safe(node, t_node, &tipc_node_list, list)
+		tipc_node_delete(node);
+	spin_unlock_bh(&node_list_lock);
+}
+
 /**
  * tipc_node_link_up - handle addition of link
  *
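kfree_rcu() above defers the actual kfree() until a grace period elapses, which requires a struct rcu_head embedded in struct tipc_node; its field name, rcu, is the macro's second argument. A self-contained sketch of the unlink-then-defer teardown, again with illustrative demo_* names:

/* Hedged sketch of RCU-deferred teardown; demo_* is not TIPC. */
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct demo_node {
	struct list_head list;
	struct hlist_node hash;
	struct rcu_head rcu;	/* the "rcu" field kfree_rcu() names */
};

static LIST_HEAD(demo_list);
static DEFINE_SPINLOCK(demo_list_lock);

/* Caller holds demo_list_lock. Unlink first, then let kfree_rcu()
 * delay the free until every reader currently inside rcu_read_lock()
 * has exited, so lockless lookups never touch freed memory.
 */
static void demo_delete(struct demo_node *node)
{
	list_del_rcu(&node->list);
	hlist_del_rcu(&node->hash);
	kfree_rcu(node, rcu);
}

static void demo_stop(void)
{
	struct demo_node *node, *tmp;

	spin_lock_bh(&demo_list_lock);
	/* _safe iteration: demo_delete() unlinks the current entry */
	list_for_each_entry_safe(node, tmp, &demo_list, list)
		demo_delete(node);
	spin_unlock_bh(&demo_list_lock);
}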
@@ -162,7 +159,7 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 		pr_info("New link <%s> becomes standby\n", l_ptr->name);
 		return;
 	}
-	tipc_link_dup_send_queue(active[0], l_ptr);
+	tipc_link_dup_queue_xmit(active[0], l_ptr);
 	if (l_ptr->priority == active[0]->priority) {
 		active[0] = l_ptr;
 		return;
@@ -243,15 +240,25 @@ int tipc_node_is_up(struct tipc_node *n_ptr)
 void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
 	n_ptr->links[l_ptr->b_ptr->identity] = l_ptr;
-	atomic_inc(&tipc_num_links);
+	spin_lock_bh(&node_list_lock);
+	tipc_num_links++;
+	spin_unlock_bh(&node_list_lock);
 	n_ptr->link_cnt++;
 }
 
 void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
-	n_ptr->links[l_ptr->b_ptr->identity] = NULL;
-	atomic_dec(&tipc_num_links);
-	n_ptr->link_cnt--;
+	int i;
+
+	for (i = 0; i < MAX_BEARERS; i++) {
+		if (l_ptr != n_ptr->links[i])
+			continue;
+		n_ptr->links[i] = NULL;
+		spin_lock_bh(&node_list_lock);
+		tipc_num_links--;
+		spin_unlock_bh(&node_list_lock);
+		n_ptr->link_cnt--;
+	}
 }
 
 static void node_established_contact(struct tipc_node *n_ptr)
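Two separate changes land in the hunk above: tipc_num_links becomes a plain u32 updated under node_list_lock instead of a standalone atomic_t, and tipc_node_detach_link() now scans every bearer slot for the link pointer rather than indexing by l_ptr->b_ptr->identity. A hedged sketch of that defensive scan; the types are simplified stand-ins and this MAX_BEARERS value is illustrative only (the real one lives in TIPC's bearer.h):

/* Hedged sketch of the defensive detach scan; not TIPC's real types. */
#define MAX_BEARERS 3	/* illustrative value */

struct demo_link { int id; };

struct demo_peer {
	struct demo_link *links[MAX_BEARERS];
	int link_cnt;
};

static void demo_detach_link(struct demo_peer *peer, struct demo_link *link)
{
	int i;

	/* Match on the pointer itself: even if the link's recorded
	 * bearer identity were stale, the wrong slot is never cleared.
	 */
	for (i = 0; i < MAX_BEARERS; i++) {
		if (peer->links[i] != link)
			continue;
		peer->links[i] = NULL;
		peer->link_cnt--;
	}
}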
@@ -335,27 +342,28 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
 		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
 						   " (network address)");
 
-	read_lock_bh(&tipc_net_lock);
+	spin_lock_bh(&node_list_lock);
 	if (!tipc_num_nodes) {
-		read_unlock_bh(&tipc_net_lock);
+		spin_unlock_bh(&node_list_lock);
 		return tipc_cfg_reply_none();
 	}
 
 	/* For now, get space for all other nodes */
 	payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes;
 	if (payload_size > 32768u) {
-		read_unlock_bh(&tipc_net_lock);
+		spin_unlock_bh(&node_list_lock);
 		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
 						   " (too many nodes)");
 	}
+	spin_unlock_bh(&node_list_lock);
+
 	buf = tipc_cfg_reply_alloc(payload_size);
-	if (!buf) {
-		read_unlock_bh(&tipc_net_lock);
+	if (!buf)
 		return NULL;
-	}
 
 	/* Add TLVs for all nodes in scope */
-	list_for_each_entry(n_ptr, &tipc_node_list, list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
 		if (!tipc_in_scope(domain, n_ptr->addr))
 			continue;
 		node_info.addr = htonl(n_ptr->addr);
@@ -363,8 +371,7 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
 		tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
 				    &node_info, sizeof(node_info));
 	}
-
-	read_unlock_bh(&tipc_net_lock);
+	rcu_read_unlock();
 	return buf;
 }
 
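Both dump routines, this one and tipc_node_get_links() below, now share one shape: size the reply under node_list_lock, drop the lock before allocating, then walk the node list under rcu_read_lock(). Note the list can change between the sizing pass and the walk, so the buffer is sized from a snapshot of the counter. A minimal sketch of the walk itself, with illustrative demo_* names:

/* Hedged sketch of the lockless dump walk; demo_* is not TIPC. */
#include <linux/rculist.h>
#include <linux/types.h>

struct demo_node {
	u32 addr;
	struct list_head list;
};

static LIST_HEAD(demo_list);

/* The walk must not block: sleeping inside an RCU read-side critical
 * section is illegal for classic RCU and would stall grace periods.
 */
static void demo_dump(void (*emit)(u32 addr))
{
	struct demo_node *node;

	rcu_read_lock();
	list_for_each_entry_rcu(node, &demo_list, list)
		emit(node->addr);
	rcu_read_unlock();
}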
@@ -387,21 +394,19 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
 	if (!tipc_own_addr)
 		return tipc_cfg_reply_none();
 
-	read_lock_bh(&tipc_net_lock);
-
+	spin_lock_bh(&node_list_lock);
 	/* Get space for all unicast links + broadcast link */
-	payload_size = TLV_SPACE(sizeof(link_info)) *
-		       (atomic_read(&tipc_num_links) + 1);
+	payload_size = TLV_SPACE((sizeof(link_info)) * (tipc_num_links + 1));
 	if (payload_size > 32768u) {
-		read_unlock_bh(&tipc_net_lock);
+		spin_unlock_bh(&node_list_lock);
 		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
 						   " (too many links)");
 	}
+	spin_unlock_bh(&node_list_lock);
+
 	buf = tipc_cfg_reply_alloc(payload_size);
-	if (!buf) {
-		read_unlock_bh(&tipc_net_lock);
+	if (!buf)
 		return NULL;
-	}
 
 	/* Add TLV for broadcast link */
 	link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr));
@@ -410,7 +415,8 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
 	tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
 
 	/* Add TLVs for any other links in scope */
-	list_for_each_entry(n_ptr, &tipc_node_list, list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
 		u32 i;
 
 		if (!tipc_in_scope(domain, n_ptr->addr))
@@ -427,7 +433,6 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
 		}
 		tipc_node_unlock(n_ptr);
 	}
-
-	read_unlock_bh(&tipc_net_lock);
+	rcu_read_unlock();
 	return buf;
 }