Diffstat (limited to 'net/tipc/node.c')
-rw-r--r--  net/tipc/node.c | 132
 1 file changed, 64 insertions(+), 68 deletions(-)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 25100c0a6fe8..1d3a4999a70f 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -2,7 +2,7 @@
  * net/tipc/node.c: TIPC node management routines
  *
  * Copyright (c) 2000-2006, 2012 Ericsson AB
- * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
+ * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -44,13 +44,11 @@
 static void node_lost_contact(struct tipc_node *n_ptr);
 static void node_established_contact(struct tipc_node *n_ptr);
 
-static DEFINE_SPINLOCK(node_create_lock);
-
 static struct hlist_head node_htable[NODE_HTABLE_SIZE];
 LIST_HEAD(tipc_node_list);
 static u32 tipc_num_nodes;
-
-static atomic_t tipc_num_links = ATOMIC_INIT(0);
+static u32 tipc_num_links;
+static DEFINE_SPINLOCK(node_list_lock);
 
 /*
  * A trivial power-of-two bitmask technique is used for speed, since this
@@ -73,37 +71,26 @@ struct tipc_node *tipc_node_find(u32 addr)
 	if (unlikely(!in_own_cluster_exact(addr)))
 		return NULL;
 
-	hlist_for_each_entry(node, &node_htable[tipc_hashfn(addr)], hash) {
-		if (node->addr == addr)
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(node, &node_htable[tipc_hashfn(addr)], hash) {
+		if (node->addr == addr) {
+			rcu_read_unlock();
 			return node;
+		}
 	}
+	rcu_read_unlock();
 	return NULL;
 }
 
-/**
- * tipc_node_create - create neighboring node
- *
- * Currently, this routine is called by neighbor discovery code, which holds
- * net_lock for reading only. We must take node_create_lock to ensure a node
- * isn't created twice if two different bearers discover the node at the same
- * time. (It would be preferable to switch to holding net_lock in write mode,
- * but this is a non-trivial change.)
- */
 struct tipc_node *tipc_node_create(u32 addr)
 {
 	struct tipc_node *n_ptr, *temp_node;
 
-	spin_lock_bh(&node_create_lock);
-
-	n_ptr = tipc_node_find(addr);
-	if (n_ptr) {
-		spin_unlock_bh(&node_create_lock);
-		return n_ptr;
-	}
+	spin_lock_bh(&node_list_lock);
 
 	n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
 	if (!n_ptr) {
-		spin_unlock_bh(&node_create_lock);
+		spin_unlock_bh(&node_list_lock);
 		pr_warn("Node creation failed, no memory\n");
 		return NULL;
 	}
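
The reworked tipc_node_find() is the textbook RCU reader shape: take rcu_read_lock(), walk the hash chain with hlist_for_each_entry_rcu(), and drop the read lock on both the hit and the miss path. Note also that the duplicate-creation guard (the tipc_node_find() call at the top of tipc_node_create()) disappears along with the comment that explained it; this hunk alone does not show where, or whether, the caller now performs that check. A minimal sketch of the lookup pattern, with hypothetical obj/htable names rather than TIPC's real types:

    /* Sketch only: obj, htable and obj_find are illustrative names, not
     * TIPC symbols.  Kernel context assumed (<linux/rculist.h>). */
    struct obj {
        u32 key;
        struct hlist_node hash;
    };

    #define HSIZE 32    /* power of two, like NODE_HTABLE_SIZE */
    static struct hlist_head htable[HSIZE];

    static struct obj *obj_find(u32 key)
    {
        struct obj *o;

        rcu_read_lock();
        hlist_for_each_entry_rcu(o, &htable[key & (HSIZE - 1)], hash) {
            if (o->key == key) {
                rcu_read_unlock();
                /* returning after the unlock is safe only because the
                 * caller pins the object by other means */
                return o;
            }
        }
        rcu_read_unlock();
        return NULL;
    }
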
@@ -114,31 +101,41 @@ struct tipc_node *tipc_node_create(u32 addr)
 	INIT_LIST_HEAD(&n_ptr->list);
 	INIT_LIST_HEAD(&n_ptr->nsub);
 
-	hlist_add_head(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);
+	hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]);
 
-	list_for_each_entry(temp_node, &tipc_node_list, list) {
+	list_for_each_entry_rcu(temp_node, &tipc_node_list, list) {
 		if (n_ptr->addr < temp_node->addr)
 			break;
 	}
-	list_add_tail(&n_ptr->list, &temp_node->list);
+	list_add_tail_rcu(&n_ptr->list, &temp_node->list);
 	n_ptr->block_setup = WAIT_PEER_DOWN;
 	n_ptr->signature = INVALID_NODE_SIG;
 
 	tipc_num_nodes++;
 
-	spin_unlock_bh(&node_create_lock);
+	spin_unlock_bh(&node_list_lock);
 	return n_ptr;
 }
 
-void tipc_node_delete(struct tipc_node *n_ptr)
+static void tipc_node_delete(struct tipc_node *n_ptr)
 {
-	list_del(&n_ptr->list);
-	hlist_del(&n_ptr->hash);
-	kfree(n_ptr);
+	list_del_rcu(&n_ptr->list);
+	hlist_del_rcu(&n_ptr->hash);
+	kfree_rcu(n_ptr, rcu);
 
 	tipc_num_nodes--;
 }
 
+void tipc_node_stop(void)
+{
+	struct tipc_node *node, *t_node;
+
+	spin_lock_bh(&node_list_lock);
+	list_for_each_entry_safe(node, t_node, &tipc_node_list, list)
+		tipc_node_delete(node);
+	spin_unlock_bh(&node_list_lock);
+}
+
 /**
  * tipc_node_link_up - handle addition of link
  *
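
tipc_node_delete() now unpublishes the node from both lists with the RCU variants and frees it with kfree_rcu(), so a reader that found the node just before deletion can still finish with it; the actual kfree() happens only after a grace period. kfree_rcu(n_ptr, rcu) requires a struct rcu_head member named rcu inside struct tipc_node, presumably added in node.h, which this diff does not show. The function also becomes static: external teardown now goes through the new tipc_node_stop(), which takes node_list_lock and uses the _safe iterator because each iteration unlinks the current entry. A minimal sketch of the delete pairing, assuming that rcu member:

    /* Sketch: assumes struct tipc_node gains "struct rcu_head rcu" in
     * net/tipc/node.h; the _sketch names are illustrative. */
    struct tipc_node_sketch {
        u32 addr;
        struct hlist_node hash;
        struct list_head list;
        struct rcu_head rcu;    /* second argument to kfree_rcu() */
    };

    static void node_delete_sketch(struct tipc_node_sketch *n)
    {
        list_del_rcu(&n->list);     /* unpublish first ... */
        hlist_del_rcu(&n->hash);
        kfree_rcu(n, rcu);          /* ... free after a grace period */
    }
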
@@ -162,7 +159,7 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 		pr_info("New link <%s> becomes standby\n", l_ptr->name);
 		return;
 	}
-	tipc_link_send_duplicate(active[0], l_ptr);
+	tipc_link_dup_queue_xmit(active[0], l_ptr);
 	if (l_ptr->priority == active[0]->priority) {
 		active[0] = l_ptr;
 		return;
@@ -225,7 +222,7 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 	if (active[0] == l_ptr)
 		node_select_active_links(n_ptr);
 	if (tipc_node_is_up(n_ptr))
-		tipc_link_changeover(l_ptr);
+		tipc_link_failover_send_queue(l_ptr);
 	else
 		node_lost_contact(n_ptr);
 }
@@ -235,11 +232,6 @@ int tipc_node_active_links(struct tipc_node *n_ptr)
 	return n_ptr->active_links[0] != NULL;
 }
 
-int tipc_node_redundant_links(struct tipc_node *n_ptr)
-{
-	return n_ptr->working_links > 1;
-}
-
 int tipc_node_is_up(struct tipc_node *n_ptr)
 {
 	return tipc_node_active_links(n_ptr);
@@ -248,15 +240,25 @@ int tipc_node_is_up(struct tipc_node *n_ptr)
 void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
 	n_ptr->links[l_ptr->b_ptr->identity] = l_ptr;
-	atomic_inc(&tipc_num_links);
+	spin_lock_bh(&node_list_lock);
+	tipc_num_links++;
+	spin_unlock_bh(&node_list_lock);
 	n_ptr->link_cnt++;
 }
 
 void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 {
-	n_ptr->links[l_ptr->b_ptr->identity] = NULL;
-	atomic_dec(&tipc_num_links);
-	n_ptr->link_cnt--;
+	int i;
+
+	for (i = 0; i < MAX_BEARERS; i++) {
+		if (l_ptr != n_ptr->links[i])
+			continue;
+		n_ptr->links[i] = NULL;
+		spin_lock_bh(&node_list_lock);
+		tipc_num_links--;
+		spin_unlock_bh(&node_list_lock);
+		n_ptr->link_cnt--;
+	}
 }
 
 static void node_established_contact(struct tipc_node *n_ptr)
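
With tipc_net_lock on its way out, tipc_num_links changes from an atomic_t to a plain u32 that is only touched under node_list_lock, trading an atomic op for a lock round-trip on every attach and detach. tipc_node_detach_link() also stops indexing links[] through l_ptr->b_ptr->identity and instead scans the array for the slot that actually holds l_ptr, which looks intended to tolerate a bearer that is already being torn down. The counter conversion in sketch form:

    /* Sketch; num_links_inc() is a hypothetical helper, the two statics
     * match the names used in the diff. */
    static u32 tipc_num_links;
    static DEFINE_SPINLOCK(node_list_lock);

    static void num_links_inc(void)
    {
        /* _bh variant: the counter is also reached from softirq paths */
        spin_lock_bh(&node_list_lock);
        tipc_num_links++;
        spin_unlock_bh(&node_list_lock);
    }
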
@@ -291,11 +293,7 @@ static void node_lost_contact(struct tipc_node *n_ptr)
 
 	/* Flush broadcast link info associated with lost node */
 	if (n_ptr->bclink.recv_permitted) {
-		while (n_ptr->bclink.deferred_head) {
-			struct sk_buff *buf = n_ptr->bclink.deferred_head;
-			n_ptr->bclink.deferred_head = buf->next;
-			kfree_skb(buf);
-		}
+		kfree_skb_list(n_ptr->bclink.deferred_head);
 		n_ptr->bclink.deferred_size = 0;
 
 		if (n_ptr->bclink.reasm_head) {
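
The open-coded drain of the deferred queue collapses into one call: kfree_skb_list() performs exactly this walk internally. Simplified from the generic skbuff code:

    /* Roughly what kfree_skb_list() does; the real helper lives in
     * net/core/skbuff.c. */
    static void kfree_skb_list_sketch(struct sk_buff *segs)
    {
        while (segs) {
            struct sk_buff *next = segs->next;

            kfree_skb(segs);
            segs = next;
        }
    }
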
@@ -344,27 +342,28 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
 		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
 						   " (network address)");
 
-	read_lock_bh(&tipc_net_lock);
+	spin_lock_bh(&node_list_lock);
 	if (!tipc_num_nodes) {
-		read_unlock_bh(&tipc_net_lock);
+		spin_unlock_bh(&node_list_lock);
 		return tipc_cfg_reply_none();
 	}
 
 	/* For now, get space for all other nodes */
 	payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes;
 	if (payload_size > 32768u) {
-		read_unlock_bh(&tipc_net_lock);
+		spin_unlock_bh(&node_list_lock);
 		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
 						   " (too many nodes)");
 	}
+	spin_unlock_bh(&node_list_lock);
+
 	buf = tipc_cfg_reply_alloc(payload_size);
-	if (!buf) {
-		read_unlock_bh(&tipc_net_lock);
+	if (!buf)
 		return NULL;
-	}
 
 	/* Add TLVs for all nodes in scope */
-	list_for_each_entry(n_ptr, &tipc_node_list, list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
 		if (!tipc_in_scope(domain, n_ptr->addr))
 			continue;
 		node_info.addr = htonl(n_ptr->addr);
@@ -372,8 +371,7 @@ struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
 		tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
 				    &node_info, sizeof(node_info));
 	}
-
-	read_unlock_bh(&tipc_net_lock);
+	rcu_read_unlock();
 	return buf;
 }
 
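The shape of the reworked tipc_node_get_nodes() is: sample tipc_num_nodes under node_list_lock to size the reply, drop the lock, allocate, then walk the node list under RCU. The count can drift between the unlock and the walk, so a node created in that window may not fit in the pre-sized buffer; that stays within the pre-existing best-effort contract ("For now, get space for all other nodes"), provided the append step checks remaining space per TLV. The pattern in sketch form, with hypothetical names:

    /* Sketch of "sample count, allocate, walk under RCU"; obj_count,
     * obj_list and build_report() are illustrative, not TIPC symbols. */
    static u32 obj_count;
    static DEFINE_SPINLOCK(count_lock);
    static LIST_HEAD(obj_list);

    struct obj_entry {
        struct list_head list;
    };

    static void *build_report(size_t entry_size)
    {
        struct obj_entry *o;
        void *buf;
        u32 n;

        spin_lock_bh(&count_lock);
        n = obj_count;              /* snapshot; may drift afterwards */
        spin_unlock_bh(&count_lock);

        buf = kzalloc(n * entry_size, GFP_KERNEL);
        if (!buf)
            return NULL;

        rcu_read_lock();
        list_for_each_entry_rcu(o, &obj_list, list) {
            /* append one entry here, checking remaining space the way
             * tipc_cfg_append_tlv() does for each TLV */
        }
        rcu_read_unlock();
        return buf;
    }
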
@@ -396,21 +394,19 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
 	if (!tipc_own_addr)
 		return tipc_cfg_reply_none();
 
-	read_lock_bh(&tipc_net_lock);
-
+	spin_lock_bh(&node_list_lock);
 	/* Get space for all unicast links + broadcast link */
-	payload_size = TLV_SPACE(sizeof(link_info)) *
-		       (atomic_read(&tipc_num_links) + 1);
+	payload_size = TLV_SPACE((sizeof(link_info)) * (tipc_num_links + 1));
 	if (payload_size > 32768u) {
-		read_unlock_bh(&tipc_net_lock);
+		spin_unlock_bh(&node_list_lock);
 		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
 						   " (too many links)");
 	}
+	spin_unlock_bh(&node_list_lock);
+
 	buf = tipc_cfg_reply_alloc(payload_size);
-	if (!buf) {
-		read_unlock_bh(&tipc_net_lock);
+	if (!buf)
 		return NULL;
-	}
 
 	/* Add TLV for broadcast link */
 	link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr));
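
One arithmetic detail in the sizing line: the old code computed TLV_SPACE(sizeof(link_info)) * (nlinks + 1), i.e. per-entry TLV header and alignment overhead, while the new line computes TLV_SPACE(sizeof(link_info) * (nlinks + 1)), which adds that overhead only once. Every appended TIPC_TLV_LINK_INFO still carries its own header, so the new expression sizes the reply a few bytes short per link; the per-TLV space check in the append path keeps that from overflowing, but the two expressions are not equivalent. With illustrative numbers (assuming sizeof(link_info) were 60 bytes and the TLV header 4 bytes, for 10 links):

    /* old: TLV_SPACE(60) * (10 + 1) = 64 * 11        = 704 bytes
     * new: TLV_SPACE(60 * (10 + 1)) = 660 + 4 (+pad) = 664 bytes
     * i.e. short by one TLV header (plus padding) per entry
     * beyond the first */
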
@@ -419,7 +415,8 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
 	tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
 
 	/* Add TLVs for any other links in scope */
-	list_for_each_entry(n_ptr, &tipc_node_list, list) {
+	rcu_read_lock();
+	list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
 		u32 i;
 
 		if (!tipc_in_scope(domain, n_ptr->addr))
@@ -436,7 +433,6 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
 		}
 		tipc_node_unlock(n_ptr);
 	}
-
-	read_unlock_bh(&tipc_net_lock);
+	rcu_read_unlock();
 	return buf;
 }