aboutsummaryrefslogtreecommitdiffstats
path: root/net/tipc/node.c
diff options
context:
space:
mode:
Diffstat (limited to 'net/tipc/node.c')
-rw-r--r--net/tipc/node.c875
1 files changed, 687 insertions, 188 deletions
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 20cddec0a43c..fa97d9649a28 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -42,6 +42,84 @@
42#include "bcast.h" 42#include "bcast.h"
43#include "discover.h" 43#include "discover.h"
44 44
45#define INVALID_NODE_SIG 0x10000
46
47/* Flags used to take different actions according to flag type
48 * TIPC_NOTIFY_NODE_DOWN: notify node is down
49 * TIPC_NOTIFY_NODE_UP: notify node is up
50 * TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type
51 */
52enum {
53 TIPC_NOTIFY_NODE_DOWN = (1 << 3),
54 TIPC_NOTIFY_NODE_UP = (1 << 4),
55 TIPC_NOTIFY_LINK_UP = (1 << 6),
56 TIPC_NOTIFY_LINK_DOWN = (1 << 7)
57};
58
59struct tipc_link_entry {
60 struct tipc_link *link;
61 spinlock_t lock; /* per link */
62 u32 mtu;
63 struct sk_buff_head inputq;
64 struct tipc_media_addr maddr;
65};
66
67struct tipc_bclink_entry {
68 struct tipc_link *link;
69 struct sk_buff_head inputq1;
70 struct sk_buff_head arrvq;
71 struct sk_buff_head inputq2;
72 struct sk_buff_head namedq;
73};
74
75/**
76 * struct tipc_node - TIPC node structure
77 * @addr: network address of node
78 * @ref: reference counter to node object
79 * @lock: rwlock governing access to structure
80 * @net: the applicable net namespace
81 * @hash: links to adjacent nodes in unsorted hash chain
82 * @inputq: pointer to input queue containing messages for msg event
83 * @namedq: pointer to name table input queue with name table messages
84 * @active_links: bearer ids of active links, used as index into links[] array
85 * @links: array containing references to all links to node
86 * @action_flags: bit mask of different types of node actions
87 * @state: connectivity state vs peer node
88 * @sync_point: sequence number where synch/failover is finished
89 * @list: links to adjacent nodes in sorted list of cluster's nodes
90 * @working_links: number of working links to node (both active and standby)
91 * @link_cnt: number of links to node
92 * @capabilities: bitmap, indicating peer node's functional capabilities
93 * @signature: node instance identifier
94 * @link_id: local and remote bearer ids of changing link, if any
95 * @publ_list: list of publications
96 * @rcu: rcu struct for tipc_node
97 */
98struct tipc_node {
99 u32 addr;
100 struct kref kref;
101 rwlock_t lock;
102 struct net *net;
103 struct hlist_node hash;
104 int active_links[2];
105 struct tipc_link_entry links[MAX_BEARERS];
106 struct tipc_bclink_entry bc_entry;
107 int action_flags;
108 struct list_head list;
109 int state;
110 u16 sync_point;
111 int link_cnt;
112 u16 working_links;
113 u16 capabilities;
114 u32 signature;
115 u32 link_id;
116 struct list_head publ_list;
117 struct list_head conn_sks;
118 unsigned long keepalive_intv;
119 struct timer_list timer;
120 struct rcu_head rcu;
121};
122
45/* Node FSM states and events: 123/* Node FSM states and events:
46 */ 124 */
47enum { 125enum {
@@ -75,6 +153,9 @@ static void node_lost_contact(struct tipc_node *n, struct sk_buff_head *inputq);
75static void tipc_node_delete(struct tipc_node *node); 153static void tipc_node_delete(struct tipc_node *node);
76static void tipc_node_timeout(unsigned long data); 154static void tipc_node_timeout(unsigned long data);
77static void tipc_node_fsm_evt(struct tipc_node *n, int evt); 155static void tipc_node_fsm_evt(struct tipc_node *n, int evt);
156static struct tipc_node *tipc_node_find(struct net *net, u32 addr);
157static void tipc_node_put(struct tipc_node *node);
158static bool tipc_node_is_up(struct tipc_node *n);
78 159
79struct tipc_sock_conn { 160struct tipc_sock_conn {
80 u32 port; 161 u32 port;
@@ -83,12 +164,54 @@ struct tipc_sock_conn {
83 struct list_head list; 164 struct list_head list;
84}; 165};
85 166
167static const struct nla_policy tipc_nl_link_policy[TIPC_NLA_LINK_MAX + 1] = {
168 [TIPC_NLA_LINK_UNSPEC] = { .type = NLA_UNSPEC },
169 [TIPC_NLA_LINK_NAME] = {
170 .type = NLA_STRING,
171 .len = TIPC_MAX_LINK_NAME
172 },
173 [TIPC_NLA_LINK_MTU] = { .type = NLA_U32 },
174 [TIPC_NLA_LINK_BROADCAST] = { .type = NLA_FLAG },
175 [TIPC_NLA_LINK_UP] = { .type = NLA_FLAG },
176 [TIPC_NLA_LINK_ACTIVE] = { .type = NLA_FLAG },
177 [TIPC_NLA_LINK_PROP] = { .type = NLA_NESTED },
178 [TIPC_NLA_LINK_STATS] = { .type = NLA_NESTED },
179 [TIPC_NLA_LINK_RX] = { .type = NLA_U32 },
180 [TIPC_NLA_LINK_TX] = { .type = NLA_U32 }
181};
182
86static const struct nla_policy tipc_nl_node_policy[TIPC_NLA_NODE_MAX + 1] = { 183static const struct nla_policy tipc_nl_node_policy[TIPC_NLA_NODE_MAX + 1] = {
87 [TIPC_NLA_NODE_UNSPEC] = { .type = NLA_UNSPEC }, 184 [TIPC_NLA_NODE_UNSPEC] = { .type = NLA_UNSPEC },
88 [TIPC_NLA_NODE_ADDR] = { .type = NLA_U32 }, 185 [TIPC_NLA_NODE_ADDR] = { .type = NLA_U32 },
89 [TIPC_NLA_NODE_UP] = { .type = NLA_FLAG } 186 [TIPC_NLA_NODE_UP] = { .type = NLA_FLAG }
90}; 187};
91 188
189static struct tipc_link *node_active_link(struct tipc_node *n, int sel)
190{
191 int bearer_id = n->active_links[sel & 1];
192
193 if (unlikely(bearer_id == INVALID_BEARER_ID))
194 return NULL;
195
196 return n->links[bearer_id].link;
197}
198
199int tipc_node_get_mtu(struct net *net, u32 addr, u32 sel)
200{
201 struct tipc_node *n;
202 int bearer_id;
203 unsigned int mtu = MAX_MSG_SIZE;
204
205 n = tipc_node_find(net, addr);
206 if (unlikely(!n))
207 return mtu;
208
209 bearer_id = n->active_links[sel & 1];
210 if (likely(bearer_id != INVALID_BEARER_ID))
211 mtu = n->links[bearer_id].mtu;
212 tipc_node_put(n);
213 return mtu;
214}
92/* 215/*
93 * A trivial power-of-two bitmask technique is used for speed, since this 216 * A trivial power-of-two bitmask technique is used for speed, since this
94 * operation is done for every incoming TIPC packet. The number of hash table 217 * operation is done for every incoming TIPC packet. The number of hash table
@@ -107,7 +230,7 @@ static void tipc_node_kref_release(struct kref *kref)
107 tipc_node_delete(node); 230 tipc_node_delete(node);
108} 231}
109 232
110void tipc_node_put(struct tipc_node *node) 233static void tipc_node_put(struct tipc_node *node)
111{ 234{
112 kref_put(&node->kref, tipc_node_kref_release); 235 kref_put(&node->kref, tipc_node_kref_release);
113} 236}
@@ -120,7 +243,7 @@ static void tipc_node_get(struct tipc_node *node)
120/* 243/*
121 * tipc_node_find - locate specified node object, if it exists 244 * tipc_node_find - locate specified node object, if it exists
122 */ 245 */
123struct tipc_node *tipc_node_find(struct net *net, u32 addr) 246static struct tipc_node *tipc_node_find(struct net *net, u32 addr)
124{ 247{
125 struct tipc_net *tn = net_generic(net, tipc_net_id); 248 struct tipc_net *tn = net_generic(net, tipc_net_id);
126 struct tipc_node *node; 249 struct tipc_node *node;
@@ -141,66 +264,122 @@ struct tipc_node *tipc_node_find(struct net *net, u32 addr)
141 return NULL; 264 return NULL;
142} 265}
143 266
267static void tipc_node_read_lock(struct tipc_node *n)
268{
269 read_lock_bh(&n->lock);
270}
271
272static void tipc_node_read_unlock(struct tipc_node *n)
273{
274 read_unlock_bh(&n->lock);
275}
276
277static void tipc_node_write_lock(struct tipc_node *n)
278{
279 write_lock_bh(&n->lock);
280}
281
282static void tipc_node_write_unlock(struct tipc_node *n)
283{
284 struct net *net = n->net;
285 u32 addr = 0;
286 u32 flags = n->action_flags;
287 u32 link_id = 0;
288 struct list_head *publ_list;
289
290 if (likely(!flags)) {
291 write_unlock_bh(&n->lock);
292 return;
293 }
294
295 addr = n->addr;
296 link_id = n->link_id;
297 publ_list = &n->publ_list;
298
299 n->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
300 TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);
301
302 write_unlock_bh(&n->lock);
303
304 if (flags & TIPC_NOTIFY_NODE_DOWN)
305 tipc_publ_notify(net, publ_list, addr);
306
307 if (flags & TIPC_NOTIFY_NODE_UP)
308 tipc_named_node_up(net, addr);
309
310 if (flags & TIPC_NOTIFY_LINK_UP)
311 tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
312 TIPC_NODE_SCOPE, link_id, addr);
313
314 if (flags & TIPC_NOTIFY_LINK_DOWN)
315 tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
316 link_id, addr);
317}
318
144struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities) 319struct tipc_node *tipc_node_create(struct net *net, u32 addr, u16 capabilities)
145{ 320{
146 struct tipc_net *tn = net_generic(net, tipc_net_id); 321 struct tipc_net *tn = net_generic(net, tipc_net_id);
147 struct tipc_node *n_ptr, *temp_node; 322 struct tipc_node *n, *temp_node;
323 int i;
148 324
149 spin_lock_bh(&tn->node_list_lock); 325 spin_lock_bh(&tn->node_list_lock);
150 n_ptr = tipc_node_find(net, addr); 326 n = tipc_node_find(net, addr);
151 if (n_ptr) 327 if (n)
152 goto exit; 328 goto exit;
153 n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC); 329 n = kzalloc(sizeof(*n), GFP_ATOMIC);
154 if (!n_ptr) { 330 if (!n) {
155 pr_warn("Node creation failed, no memory\n"); 331 pr_warn("Node creation failed, no memory\n");
156 goto exit; 332 goto exit;
157 } 333 }
158 n_ptr->addr = addr; 334 n->addr = addr;
159 n_ptr->net = net; 335 n->net = net;
160 n_ptr->capabilities = capabilities; 336 n->capabilities = capabilities;
161 kref_init(&n_ptr->kref); 337 kref_init(&n->kref);
162 spin_lock_init(&n_ptr->lock); 338 rwlock_init(&n->lock);
163 INIT_HLIST_NODE(&n_ptr->hash); 339 INIT_HLIST_NODE(&n->hash);
164 INIT_LIST_HEAD(&n_ptr->list); 340 INIT_LIST_HEAD(&n->list);
165 INIT_LIST_HEAD(&n_ptr->publ_list); 341 INIT_LIST_HEAD(&n->publ_list);
166 INIT_LIST_HEAD(&n_ptr->conn_sks); 342 INIT_LIST_HEAD(&n->conn_sks);
167 skb_queue_head_init(&n_ptr->bc_entry.namedq); 343 skb_queue_head_init(&n->bc_entry.namedq);
168 skb_queue_head_init(&n_ptr->bc_entry.inputq1); 344 skb_queue_head_init(&n->bc_entry.inputq1);
169 __skb_queue_head_init(&n_ptr->bc_entry.arrvq); 345 __skb_queue_head_init(&n->bc_entry.arrvq);
170 skb_queue_head_init(&n_ptr->bc_entry.inputq2); 346 skb_queue_head_init(&n->bc_entry.inputq2);
171 hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]); 347 for (i = 0; i < MAX_BEARERS; i++)
348 spin_lock_init(&n->links[i].lock);
349 hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
172 list_for_each_entry_rcu(temp_node, &tn->node_list, list) { 350 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
173 if (n_ptr->addr < temp_node->addr) 351 if (n->addr < temp_node->addr)
174 break; 352 break;
175 } 353 }
176 list_add_tail_rcu(&n_ptr->list, &temp_node->list); 354 list_add_tail_rcu(&n->list, &temp_node->list);
177 n_ptr->state = SELF_DOWN_PEER_LEAVING; 355 n->state = SELF_DOWN_PEER_LEAVING;
178 n_ptr->signature = INVALID_NODE_SIG; 356 n->signature = INVALID_NODE_SIG;
179 n_ptr->active_links[0] = INVALID_BEARER_ID; 357 n->active_links[0] = INVALID_BEARER_ID;
180 n_ptr->active_links[1] = INVALID_BEARER_ID; 358 n->active_links[1] = INVALID_BEARER_ID;
181 if (!tipc_link_bc_create(net, tipc_own_addr(net), n_ptr->addr, 359 if (!tipc_link_bc_create(net, tipc_own_addr(net), n->addr,
182 U16_MAX, tipc_bc_sndlink(net)->window, 360 U16_MAX,
183 n_ptr->capabilities, 361 tipc_link_window(tipc_bc_sndlink(net)),
184 &n_ptr->bc_entry.inputq1, 362 n->capabilities,
185 &n_ptr->bc_entry.namedq, 363 &n->bc_entry.inputq1,
364 &n->bc_entry.namedq,
186 tipc_bc_sndlink(net), 365 tipc_bc_sndlink(net),
187 &n_ptr->bc_entry.link)) { 366 &n->bc_entry.link)) {
188 pr_warn("Broadcast rcv link creation failed, no memory\n"); 367 pr_warn("Broadcast rcv link creation failed, no memory\n");
189 kfree(n_ptr); 368 kfree(n);
190 n_ptr = NULL; 369 n = NULL;
191 goto exit; 370 goto exit;
192 } 371 }
193 tipc_node_get(n_ptr); 372 tipc_node_get(n);
194 setup_timer(&n_ptr->timer, tipc_node_timeout, (unsigned long)n_ptr); 373 setup_timer(&n->timer, tipc_node_timeout, (unsigned long)n);
195 n_ptr->keepalive_intv = U32_MAX; 374 n->keepalive_intv = U32_MAX;
196exit: 375exit:
197 spin_unlock_bh(&tn->node_list_lock); 376 spin_unlock_bh(&tn->node_list_lock);
198 return n_ptr; 377 return n;
199} 378}
200 379
201static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l) 380static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
202{ 381{
203 unsigned long tol = l->tolerance; 382 unsigned long tol = tipc_link_tolerance(l);
204 unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4; 383 unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
205 unsigned long keepalive_intv = msecs_to_jiffies(intv); 384 unsigned long keepalive_intv = msecs_to_jiffies(intv);
206 385
@@ -209,7 +388,7 @@ static void tipc_node_calculate_timer(struct tipc_node *n, struct tipc_link *l)
209 n->keepalive_intv = keepalive_intv; 388 n->keepalive_intv = keepalive_intv;
210 389
211 /* Ensure link's abort limit corresponds to current interval */ 390 /* Ensure link's abort limit corresponds to current interval */
212 l->abort_limit = l->tolerance / jiffies_to_msecs(n->keepalive_intv); 391 tipc_link_set_abort_limit(l, tol / jiffies_to_msecs(n->keepalive_intv));
213} 392}
214 393
215static void tipc_node_delete(struct tipc_node *node) 394static void tipc_node_delete(struct tipc_node *node)
@@ -234,6 +413,42 @@ void tipc_node_stop(struct net *net)
234 spin_unlock_bh(&tn->node_list_lock); 413 spin_unlock_bh(&tn->node_list_lock);
235} 414}
236 415
416void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
417{
418 struct tipc_node *n;
419
420 if (in_own_node(net, addr))
421 return;
422
423 n = tipc_node_find(net, addr);
424 if (!n) {
425 pr_warn("Node subscribe rejected, unknown node 0x%x\n", addr);
426 return;
427 }
428 tipc_node_write_lock(n);
429 list_add_tail(subscr, &n->publ_list);
430 tipc_node_write_unlock(n);
431 tipc_node_put(n);
432}
433
434void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
435{
436 struct tipc_node *n;
437
438 if (in_own_node(net, addr))
439 return;
440
441 n = tipc_node_find(net, addr);
442 if (!n) {
443 pr_warn("Node unsubscribe rejected, unknown node 0x%x\n", addr);
444 return;
445 }
446 tipc_node_write_lock(n);
447 list_del_init(subscr);
448 tipc_node_write_unlock(n);
449 tipc_node_put(n);
450}
451
237int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port) 452int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
238{ 453{
239 struct tipc_node *node; 454 struct tipc_node *node;
@@ -257,9 +472,9 @@ int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
257 conn->port = port; 472 conn->port = port;
258 conn->peer_port = peer_port; 473 conn->peer_port = peer_port;
259 474
260 tipc_node_lock(node); 475 tipc_node_write_lock(node);
261 list_add_tail(&conn->list, &node->conn_sks); 476 list_add_tail(&conn->list, &node->conn_sks);
262 tipc_node_unlock(node); 477 tipc_node_write_unlock(node);
263exit: 478exit:
264 tipc_node_put(node); 479 tipc_node_put(node);
265 return err; 480 return err;
@@ -277,14 +492,14 @@ void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
277 if (!node) 492 if (!node)
278 return; 493 return;
279 494
280 tipc_node_lock(node); 495 tipc_node_write_lock(node);
281 list_for_each_entry_safe(conn, safe, &node->conn_sks, list) { 496 list_for_each_entry_safe(conn, safe, &node->conn_sks, list) {
282 if (port != conn->port) 497 if (port != conn->port)
283 continue; 498 continue;
284 list_del(&conn->list); 499 list_del(&conn->list);
285 kfree(conn); 500 kfree(conn);
286 } 501 }
287 tipc_node_unlock(node); 502 tipc_node_write_unlock(node);
288 tipc_node_put(node); 503 tipc_node_put(node);
289} 504}
290 505
@@ -301,14 +516,16 @@ static void tipc_node_timeout(unsigned long data)
301 __skb_queue_head_init(&xmitq); 516 __skb_queue_head_init(&xmitq);
302 517
303 for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) { 518 for (bearer_id = 0; bearer_id < MAX_BEARERS; bearer_id++) {
304 tipc_node_lock(n); 519 tipc_node_read_lock(n);
305 le = &n->links[bearer_id]; 520 le = &n->links[bearer_id];
521 spin_lock_bh(&le->lock);
306 if (le->link) { 522 if (le->link) {
307 /* Link tolerance may change asynchronously: */ 523 /* Link tolerance may change asynchronously: */
308 tipc_node_calculate_timer(n, le->link); 524 tipc_node_calculate_timer(n, le->link);
309 rc = tipc_link_timeout(le->link, &xmitq); 525 rc = tipc_link_timeout(le->link, &xmitq);
310 } 526 }
311 tipc_node_unlock(n); 527 spin_unlock_bh(&le->lock);
528 tipc_node_read_unlock(n);
312 tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr); 529 tipc_bearer_xmit(n->net, bearer_id, &xmitq, &le->maddr);
313 if (rc & TIPC_LINK_DOWN_EVT) 530 if (rc & TIPC_LINK_DOWN_EVT)
314 tipc_node_link_down(n, bearer_id, false); 531 tipc_node_link_down(n, bearer_id, false);
@@ -340,16 +557,16 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
340 557
341 n->working_links++; 558 n->working_links++;
342 n->action_flags |= TIPC_NOTIFY_LINK_UP; 559 n->action_flags |= TIPC_NOTIFY_LINK_UP;
343 n->link_id = nl->peer_bearer_id << 16 | bearer_id; 560 n->link_id = tipc_link_id(nl);
344 561
345 /* Leave room for tunnel header when returning 'mtu' to users: */ 562 /* Leave room for tunnel header when returning 'mtu' to users: */
346 n->links[bearer_id].mtu = nl->mtu - INT_H_SIZE; 563 n->links[bearer_id].mtu = tipc_link_mtu(nl) - INT_H_SIZE;
347 564
348 tipc_bearer_add_dest(n->net, bearer_id, n->addr); 565 tipc_bearer_add_dest(n->net, bearer_id, n->addr);
349 tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id); 566 tipc_bcast_inc_bearer_dst_cnt(n->net, bearer_id);
350 567
351 pr_debug("Established link <%s> on network plane %c\n", 568 pr_debug("Established link <%s> on network plane %c\n",
352 nl->name, nl->net_plane); 569 tipc_link_name(nl), tipc_link_plane(nl));
353 570
354 /* First link? => give it both slots */ 571 /* First link? => give it both slots */
355 if (!ol) { 572 if (!ol) {
@@ -362,17 +579,17 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
362 } 579 }
363 580
364 /* Second link => redistribute slots */ 581 /* Second link => redistribute slots */
365 if (nl->priority > ol->priority) { 582 if (tipc_link_prio(nl) > tipc_link_prio(ol)) {
366 pr_debug("Old link <%s> becomes standby\n", ol->name); 583 pr_debug("Old link <%s> becomes standby\n", tipc_link_name(ol));
367 *slot0 = bearer_id; 584 *slot0 = bearer_id;
368 *slot1 = bearer_id; 585 *slot1 = bearer_id;
369 tipc_link_set_active(nl, true); 586 tipc_link_set_active(nl, true);
370 tipc_link_set_active(ol, false); 587 tipc_link_set_active(ol, false);
371 } else if (nl->priority == ol->priority) { 588 } else if (tipc_link_prio(nl) == tipc_link_prio(ol)) {
372 tipc_link_set_active(nl, true); 589 tipc_link_set_active(nl, true);
373 *slot1 = bearer_id; 590 *slot1 = bearer_id;
374 } else { 591 } else {
375 pr_debug("New link <%s> is standby\n", nl->name); 592 pr_debug("New link <%s> is standby\n", tipc_link_name(nl));
376 } 593 }
377 594
378 /* Prepare synchronization with first link */ 595 /* Prepare synchronization with first link */
@@ -387,9 +604,9 @@ static void __tipc_node_link_up(struct tipc_node *n, int bearer_id,
387static void tipc_node_link_up(struct tipc_node *n, int bearer_id, 604static void tipc_node_link_up(struct tipc_node *n, int bearer_id,
388 struct sk_buff_head *xmitq) 605 struct sk_buff_head *xmitq)
389{ 606{
390 tipc_node_lock(n); 607 tipc_node_write_lock(n);
391 __tipc_node_link_up(n, bearer_id, xmitq); 608 __tipc_node_link_up(n, bearer_id, xmitq);
392 tipc_node_unlock(n); 609 tipc_node_write_unlock(n);
393} 610}
394 611
395/** 612/**
@@ -402,7 +619,7 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
402 struct tipc_link_entry *le = &n->links[*bearer_id]; 619 struct tipc_link_entry *le = &n->links[*bearer_id];
403 int *slot0 = &n->active_links[0]; 620 int *slot0 = &n->active_links[0];
404 int *slot1 = &n->active_links[1]; 621 int *slot1 = &n->active_links[1];
405 int i, highest = 0; 622 int i, highest = 0, prio;
406 struct tipc_link *l, *_l, *tnl; 623 struct tipc_link *l, *_l, *tnl;
407 624
408 l = n->links[*bearer_id].link; 625 l = n->links[*bearer_id].link;
@@ -411,12 +628,12 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
411 628
412 n->working_links--; 629 n->working_links--;
413 n->action_flags |= TIPC_NOTIFY_LINK_DOWN; 630 n->action_flags |= TIPC_NOTIFY_LINK_DOWN;
414 n->link_id = l->peer_bearer_id << 16 | *bearer_id; 631 n->link_id = tipc_link_id(l);
415 632
416 tipc_bearer_remove_dest(n->net, *bearer_id, n->addr); 633 tipc_bearer_remove_dest(n->net, *bearer_id, n->addr);
417 634
418 pr_debug("Lost link <%s> on network plane %c\n", 635 pr_debug("Lost link <%s> on network plane %c\n",
419 l->name, l->net_plane); 636 tipc_link_name(l), tipc_link_plane(l));
420 637
421 /* Select new active link if any available */ 638 /* Select new active link if any available */
422 *slot0 = INVALID_BEARER_ID; 639 *slot0 = INVALID_BEARER_ID;
@@ -427,10 +644,11 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
427 continue; 644 continue;
428 if (_l == l) 645 if (_l == l)
429 continue; 646 continue;
430 if (_l->priority < highest) 647 prio = tipc_link_prio(_l);
648 if (prio < highest)
431 continue; 649 continue;
432 if (_l->priority > highest) { 650 if (prio > highest) {
433 highest = _l->priority; 651 highest = prio;
434 *slot0 = i; 652 *slot0 = i;
435 *slot1 = i; 653 *slot1 = i;
436 continue; 654 continue;
@@ -453,17 +671,17 @@ static void __tipc_node_link_down(struct tipc_node *n, int *bearer_id,
453 tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id); 671 tipc_bcast_dec_bearer_dst_cnt(n->net, *bearer_id);
454 672
455 /* There is still a working link => initiate failover */ 673 /* There is still a working link => initiate failover */
456 tnl = node_active_link(n, 0); 674 *bearer_id = n->active_links[0];
675 tnl = n->links[*bearer_id].link;
457 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT); 676 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
458 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT); 677 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
459 n->sync_point = tnl->rcv_nxt + (U16_MAX / 2 - 1); 678 n->sync_point = tipc_link_rcv_nxt(tnl) + (U16_MAX / 2 - 1);
460 tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq); 679 tipc_link_tnl_prepare(l, tnl, FAILOVER_MSG, xmitq);
461 tipc_link_reset(l); 680 tipc_link_reset(l);
462 tipc_link_fsm_evt(l, LINK_RESET_EVT); 681 tipc_link_fsm_evt(l, LINK_RESET_EVT);
463 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT); 682 tipc_link_fsm_evt(l, LINK_FAILOVER_BEGIN_EVT);
464 tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT); 683 tipc_node_fsm_evt(n, NODE_FAILOVER_BEGIN_EVT);
465 *maddr = &n->links[tnl->bearer_id].maddr; 684 *maddr = &n->links[*bearer_id].maddr;
466 *bearer_id = tnl->bearer_id;
467} 685}
468 686
469static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete) 687static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
@@ -478,7 +696,7 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
478 696
479 __skb_queue_head_init(&xmitq); 697 __skb_queue_head_init(&xmitq);
480 698
481 tipc_node_lock(n); 699 tipc_node_write_lock(n);
482 if (!tipc_link_is_establishing(l)) { 700 if (!tipc_link_is_establishing(l)) {
483 __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr); 701 __tipc_node_link_down(n, &bearer_id, &xmitq, &maddr);
484 if (delete) { 702 if (delete) {
@@ -490,12 +708,12 @@ static void tipc_node_link_down(struct tipc_node *n, int bearer_id, bool delete)
490 /* Defuse pending tipc_node_link_up() */ 708 /* Defuse pending tipc_node_link_up() */
491 tipc_link_fsm_evt(l, LINK_RESET_EVT); 709 tipc_link_fsm_evt(l, LINK_RESET_EVT);
492 } 710 }
493 tipc_node_unlock(n); 711 tipc_node_write_unlock(n);
494 tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr); 712 tipc_bearer_xmit(n->net, bearer_id, &xmitq, maddr);
495 tipc_sk_rcv(n->net, &le->inputq); 713 tipc_sk_rcv(n->net, &le->inputq);
496} 714}
497 715
498bool tipc_node_is_up(struct tipc_node *n) 716static bool tipc_node_is_up(struct tipc_node *n)
499{ 717{
500 return n->active_links[0] != INVALID_BEARER_ID; 718 return n->active_links[0] != INVALID_BEARER_ID;
501} 719}
@@ -523,7 +741,7 @@ void tipc_node_check_dest(struct net *net, u32 onode,
523 if (!n) 741 if (!n)
524 return; 742 return;
525 743
526 tipc_node_lock(n); 744 tipc_node_write_lock(n);
527 745
528 le = &n->links[b->identity]; 746 le = &n->links[b->identity];
529 747
@@ -626,7 +844,7 @@ void tipc_node_check_dest(struct net *net, u32 onode,
626 } 844 }
627 memcpy(&le->maddr, maddr, sizeof(*maddr)); 845 memcpy(&le->maddr, maddr, sizeof(*maddr));
628exit: 846exit:
629 tipc_node_unlock(n); 847 tipc_node_write_unlock(n);
630 if (reset && !tipc_link_is_reset(l)) 848 if (reset && !tipc_link_is_reset(l))
631 tipc_node_link_down(n, b->identity, false); 849 tipc_node_link_down(n, b->identity, false);
632 tipc_node_put(n); 850 tipc_node_put(n);
@@ -834,24 +1052,6 @@ illegal_evt:
834 pr_err("Illegal node fsm evt %x in state %x\n", evt, state); 1052 pr_err("Illegal node fsm evt %x in state %x\n", evt, state);
835} 1053}
836 1054
837bool tipc_node_filter_pkt(struct tipc_node *n, struct tipc_msg *hdr)
838{
839 int state = n->state;
840
841 if (likely(state == SELF_UP_PEER_UP))
842 return true;
843
844 if (state == SELF_LEAVING_PEER_DOWN)
845 return false;
846
847 if (state == SELF_DOWN_PEER_LEAVING) {
848 if (msg_peer_node_is_up(hdr))
849 return false;
850 }
851
852 return true;
853}
854
855static void node_lost_contact(struct tipc_node *n, 1055static void node_lost_contact(struct tipc_node *n,
856 struct sk_buff_head *inputq) 1056 struct sk_buff_head *inputq)
857{ 1057{
@@ -913,56 +1113,18 @@ int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
913 if (bearer_id >= MAX_BEARERS) 1113 if (bearer_id >= MAX_BEARERS)
914 goto exit; 1114 goto exit;
915 1115
916 tipc_node_lock(node); 1116 tipc_node_read_lock(node);
917 link = node->links[bearer_id].link; 1117 link = node->links[bearer_id].link;
918 if (link) { 1118 if (link) {
919 strncpy(linkname, link->name, len); 1119 strncpy(linkname, tipc_link_name(link), len);
920 err = 0; 1120 err = 0;
921 } 1121 }
922exit: 1122exit:
923 tipc_node_unlock(node); 1123 tipc_node_read_unlock(node);
924 tipc_node_put(node); 1124 tipc_node_put(node);
925 return err; 1125 return err;
926} 1126}
927 1127
928void tipc_node_unlock(struct tipc_node *node)
929{
930 struct net *net = node->net;
931 u32 addr = 0;
932 u32 flags = node->action_flags;
933 u32 link_id = 0;
934 struct list_head *publ_list;
935
936 if (likely(!flags)) {
937 spin_unlock_bh(&node->lock);
938 return;
939 }
940
941 addr = node->addr;
942 link_id = node->link_id;
943 publ_list = &node->publ_list;
944
945 node->action_flags &= ~(TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
946 TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP);
947
948 spin_unlock_bh(&node->lock);
949
950 if (flags & TIPC_NOTIFY_NODE_DOWN)
951 tipc_publ_notify(net, publ_list, addr);
952
953 if (flags & TIPC_NOTIFY_NODE_UP)
954 tipc_named_node_up(net, addr);
955
956 if (flags & TIPC_NOTIFY_LINK_UP)
957 tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
958 TIPC_NODE_SCOPE, link_id, addr);
959
960 if (flags & TIPC_NOTIFY_LINK_DOWN)
961 tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
962 link_id, addr);
963
964}
965
966/* Caller should hold node lock for the passed node */ 1128/* Caller should hold node lock for the passed node */
967static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node) 1129static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
968{ 1130{
@@ -997,20 +1159,6 @@ msg_full:
997 return -EMSGSIZE; 1159 return -EMSGSIZE;
998} 1160}
999 1161
1000static struct tipc_link *tipc_node_select_link(struct tipc_node *n, int sel,
1001 int *bearer_id,
1002 struct tipc_media_addr **maddr)
1003{
1004 int id = n->active_links[sel & 1];
1005
1006 if (unlikely(id < 0))
1007 return NULL;
1008
1009 *bearer_id = id;
1010 *maddr = &n->links[id].maddr;
1011 return n->links[id].link;
1012}
1013
1014/** 1162/**
1015 * tipc_node_xmit() is the general link level function for message sending 1163 * tipc_node_xmit() is the general link level function for message sending
1016 * @net: the applicable net namespace 1164 * @net: the applicable net namespace
@@ -1023,29 +1171,32 @@ static struct tipc_link *tipc_node_select_link(struct tipc_node *n, int sel,
1023int tipc_node_xmit(struct net *net, struct sk_buff_head *list, 1171int tipc_node_xmit(struct net *net, struct sk_buff_head *list,
1024 u32 dnode, int selector) 1172 u32 dnode, int selector)
1025{ 1173{
1026 struct tipc_link *l = NULL; 1174 struct tipc_link_entry *le = NULL;
1027 struct tipc_node *n; 1175 struct tipc_node *n;
1028 struct sk_buff_head xmitq; 1176 struct sk_buff_head xmitq;
1029 struct tipc_media_addr *maddr; 1177 int bearer_id = -1;
1030 int bearer_id;
1031 int rc = -EHOSTUNREACH; 1178 int rc = -EHOSTUNREACH;
1032 1179
1033 __skb_queue_head_init(&xmitq); 1180 __skb_queue_head_init(&xmitq);
1034 n = tipc_node_find(net, dnode); 1181 n = tipc_node_find(net, dnode);
1035 if (likely(n)) { 1182 if (likely(n)) {
1036 tipc_node_lock(n); 1183 tipc_node_read_lock(n);
1037 l = tipc_node_select_link(n, selector, &bearer_id, &maddr); 1184 bearer_id = n->active_links[selector & 1];
1038 if (likely(l)) 1185 if (bearer_id >= 0) {
1039 rc = tipc_link_xmit(l, list, &xmitq); 1186 le = &n->links[bearer_id];
1040 tipc_node_unlock(n); 1187 spin_lock_bh(&le->lock);
1041 if (unlikely(rc == -ENOBUFS)) 1188 rc = tipc_link_xmit(le->link, list, &xmitq);
1189 spin_unlock_bh(&le->lock);
1190 }
1191 tipc_node_read_unlock(n);
1192 if (likely(!rc))
1193 tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
1194 else if (rc == -ENOBUFS)
1042 tipc_node_link_down(n, bearer_id, false); 1195 tipc_node_link_down(n, bearer_id, false);
1043 tipc_node_put(n); 1196 tipc_node_put(n);
1197 return rc;
1044 } 1198 }
1045 if (likely(!rc)) { 1199
1046 tipc_bearer_xmit(net, bearer_id, &xmitq, maddr);
1047 return 0;
1048 }
1049 if (likely(in_own_node(net, dnode))) { 1200 if (likely(in_own_node(net, dnode))) {
1050 tipc_sk_rcv(net, list); 1201 tipc_sk_rcv(net, list);
1051 return 0; 1202 return 0;
@@ -1075,6 +1226,30 @@ int tipc_node_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
1075 return 0; 1226 return 0;
1076} 1227}
1077 1228
1229void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
1230{
1231 struct sk_buff *txskb;
1232 struct tipc_node *n;
1233 u32 dst;
1234
1235 rcu_read_lock();
1236 list_for_each_entry_rcu(n, tipc_nodes(net), list) {
1237 dst = n->addr;
1238 if (in_own_node(net, dst))
1239 continue;
1240 if (!tipc_node_is_up(n))
1241 continue;
1242 txskb = pskb_copy(skb, GFP_ATOMIC);
1243 if (!txskb)
1244 break;
1245 msg_set_destnode(buf_msg(txskb), dst);
1246 tipc_node_xmit_skb(net, txskb, dst, 0);
1247 }
1248 rcu_read_unlock();
1249
1250 kfree_skb(skb);
1251}
1252
1078/** 1253/**
1079 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node 1254 * tipc_node_bc_rcv - process TIPC broadcast packet arriving from off-node
1080 * @net: the applicable net namespace 1255 * @net: the applicable net namespace
@@ -1116,9 +1291,9 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
1116 1291
1117 /* Broadcast ACKs are sent on a unicast link */ 1292 /* Broadcast ACKs are sent on a unicast link */
1118 if (rc & TIPC_LINK_SND_BC_ACK) { 1293 if (rc & TIPC_LINK_SND_BC_ACK) {
1119 tipc_node_lock(n); 1294 tipc_node_read_lock(n);
1120 tipc_link_build_ack_msg(le->link, &xmitq); 1295 tipc_link_build_ack_msg(le->link, &xmitq);
1121 tipc_node_unlock(n); 1296 tipc_node_read_unlock(n);
1122 } 1297 }
1123 1298
1124 if (!skb_queue_empty(&xmitq)) 1299 if (!skb_queue_empty(&xmitq))
@@ -1151,30 +1326,30 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
1151 u16 oseqno = msg_seqno(hdr); 1326 u16 oseqno = msg_seqno(hdr);
1152 u16 iseqno = msg_seqno(msg_get_wrapped(hdr)); 1327 u16 iseqno = msg_seqno(msg_get_wrapped(hdr));
1153 u16 exp_pkts = msg_msgcnt(hdr); 1328 u16 exp_pkts = msg_msgcnt(hdr);
1154 u16 rcv_nxt, syncpt, dlv_nxt; 1329 u16 rcv_nxt, syncpt, dlv_nxt, inputq_len;
1155 int state = n->state; 1330 int state = n->state;
1156 struct tipc_link *l, *tnl, *pl = NULL; 1331 struct tipc_link *l, *tnl, *pl = NULL;
1157 struct tipc_media_addr *maddr; 1332 struct tipc_media_addr *maddr;
1158 int i, pb_id; 1333 int pb_id;
1159 1334
1160 l = n->links[bearer_id].link; 1335 l = n->links[bearer_id].link;
1161 if (!l) 1336 if (!l)
1162 return false; 1337 return false;
1163 rcv_nxt = l->rcv_nxt; 1338 rcv_nxt = tipc_link_rcv_nxt(l);
1164 1339
1165 1340
1166 if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) 1341 if (likely((state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL)))
1167 return true; 1342 return true;
1168 1343
1169 /* Find parallel link, if any */ 1344 /* Find parallel link, if any */
1170 for (i = 0; i < MAX_BEARERS; i++) { 1345 for (pb_id = 0; pb_id < MAX_BEARERS; pb_id++) {
1171 if ((i != bearer_id) && n->links[i].link) { 1346 if ((pb_id != bearer_id) && n->links[pb_id].link) {
1172 pl = n->links[i].link; 1347 pl = n->links[pb_id].link;
1173 break; 1348 break;
1174 } 1349 }
1175 } 1350 }
1176 1351
1177 /* Update node accessibility if applicable */ 1352 /* Check and update node accessibility if applicable */
1178 if (state == SELF_UP_PEER_COMING) { 1353 if (state == SELF_UP_PEER_COMING) {
1179 if (!tipc_link_is_up(l)) 1354 if (!tipc_link_is_up(l))
1180 return true; 1355 return true;
@@ -1187,8 +1362,12 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
1187 if (msg_peer_node_is_up(hdr)) 1362 if (msg_peer_node_is_up(hdr))
1188 return false; 1363 return false;
1189 tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT); 1364 tipc_node_fsm_evt(n, PEER_LOST_CONTACT_EVT);
1365 return true;
1190 } 1366 }
1191 1367
1368 if (state == SELF_LEAVING_PEER_DOWN)
1369 return false;
1370
1192 /* Ignore duplicate packets */ 1371 /* Ignore duplicate packets */
1193 if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt)) 1372 if ((usr != LINK_PROTOCOL) && less(oseqno, rcv_nxt))
1194 return true; 1373 return true;
@@ -1197,9 +1376,9 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
1197 if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) { 1376 if ((usr == TUNNEL_PROTOCOL) && (mtyp == FAILOVER_MSG)) {
1198 syncpt = oseqno + exp_pkts - 1; 1377 syncpt = oseqno + exp_pkts - 1;
1199 if (pl && tipc_link_is_up(pl)) { 1378 if (pl && tipc_link_is_up(pl)) {
1200 pb_id = pl->bearer_id;
1201 __tipc_node_link_down(n, &pb_id, xmitq, &maddr); 1379 __tipc_node_link_down(n, &pb_id, xmitq, &maddr);
1202 tipc_skb_queue_splice_tail_init(pl->inputq, l->inputq); 1380 tipc_skb_queue_splice_tail_init(tipc_link_inputq(pl),
1381 tipc_link_inputq(l));
1203 } 1382 }
1204 /* If pkts arrive out of order, use lowest calculated syncpt */ 1383 /* If pkts arrive out of order, use lowest calculated syncpt */
1205 if (less(syncpt, n->sync_point)) 1384 if (less(syncpt, n->sync_point))
@@ -1232,19 +1411,18 @@ static bool tipc_node_check_state(struct tipc_node *n, struct sk_buff *skb,
1232 tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT); 1411 tipc_link_fsm_evt(l, LINK_SYNCH_BEGIN_EVT);
1233 tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT); 1412 tipc_node_fsm_evt(n, NODE_SYNCH_BEGIN_EVT);
1234 } 1413 }
1235 if (less(syncpt, n->sync_point))
1236 n->sync_point = syncpt;
1237 } 1414 }
1238 1415
1239 /* Open tunnel link when parallel link reaches synch point */ 1416 /* Open tunnel link when parallel link reaches synch point */
1240 if ((n->state == NODE_SYNCHING) && tipc_link_is_synching(l)) { 1417 if (n->state == NODE_SYNCHING) {
1241 if (tipc_link_is_synching(l)) { 1418 if (tipc_link_is_synching(l)) {
1242 tnl = l; 1419 tnl = l;
1243 } else { 1420 } else {
1244 tnl = pl; 1421 tnl = pl;
1245 pl = l; 1422 pl = l;
1246 } 1423 }
1247 dlv_nxt = pl->rcv_nxt - mod(skb_queue_len(pl->inputq)); 1424 inputq_len = skb_queue_len(tipc_link_inputq(pl));
1425 dlv_nxt = tipc_link_rcv_nxt(pl) - inputq_len;
1248 if (more(dlv_nxt, n->sync_point)) { 1426 if (more(dlv_nxt, n->sync_point)) {
1249 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT); 1427 tipc_link_fsm_evt(tnl, LINK_SYNCH_END_EVT);
1250 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT); 1428 tipc_node_fsm_evt(n, NODE_SYNCH_END_EVT);
@@ -1304,22 +1482,32 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
1304 /* Ensure broadcast reception is in synch with peer's send state */ 1482 /* Ensure broadcast reception is in synch with peer's send state */
1305 if (unlikely(usr == LINK_PROTOCOL)) 1483 if (unlikely(usr == LINK_PROTOCOL))
1306 tipc_bcast_sync_rcv(net, n->bc_entry.link, hdr); 1484 tipc_bcast_sync_rcv(net, n->bc_entry.link, hdr);
1307 else if (unlikely(n->bc_entry.link->acked != bc_ack)) 1485 else if (unlikely(tipc_link_acked(n->bc_entry.link) != bc_ack))
1308 tipc_bcast_ack_rcv(net, n->bc_entry.link, bc_ack); 1486 tipc_bcast_ack_rcv(net, n->bc_entry.link, bc_ack);
1309 1487
1310 tipc_node_lock(n); 1488 /* Receive packet directly if conditions permit */
1311 1489 tipc_node_read_lock(n);
1312 /* Is reception permitted at the moment ? */ 1490 if (likely((n->state == SELF_UP_PEER_UP) && (usr != TUNNEL_PROTOCOL))) {
1313 if (!tipc_node_filter_pkt(n, hdr)) 1491 spin_lock_bh(&le->lock);
1314 goto unlock; 1492 if (le->link) {
1315 1493 rc = tipc_link_rcv(le->link, skb, &xmitq);
1316 /* Check and if necessary update node state */ 1494 skb = NULL;
1317 if (likely(tipc_node_check_state(n, skb, bearer_id, &xmitq))) { 1495 }
1318 rc = tipc_link_rcv(le->link, skb, &xmitq); 1496 spin_unlock_bh(&le->lock);
1319 skb = NULL; 1497 }
1498 tipc_node_read_unlock(n);
1499
1500 /* Check/update node state before receiving */
1501 if (unlikely(skb)) {
1502 tipc_node_write_lock(n);
1503 if (tipc_node_check_state(n, skb, bearer_id, &xmitq)) {
1504 if (le->link) {
1505 rc = tipc_link_rcv(le->link, skb, &xmitq);
1506 skb = NULL;
1507 }
1508 }
1509 tipc_node_write_unlock(n);
1320 } 1510 }
1321unlock:
1322 tipc_node_unlock(n);
1323 1511
1324 if (unlikely(rc & TIPC_LINK_UP_EVT)) 1512 if (unlikely(rc & TIPC_LINK_UP_EVT))
1325 tipc_node_link_up(n, bearer_id, &xmitq); 1513 tipc_node_link_up(n, bearer_id, &xmitq);
@@ -1384,15 +1572,15 @@ int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
1384 continue; 1572 continue;
1385 } 1573 }
1386 1574
1387 tipc_node_lock(node); 1575 tipc_node_read_lock(node);
1388 err = __tipc_nl_add_node(&msg, node); 1576 err = __tipc_nl_add_node(&msg, node);
1389 if (err) { 1577 if (err) {
1390 last_addr = node->addr; 1578 last_addr = node->addr;
1391 tipc_node_unlock(node); 1579 tipc_node_read_unlock(node);
1392 goto out; 1580 goto out;
1393 } 1581 }
1394 1582
1395 tipc_node_unlock(node); 1583 tipc_node_read_unlock(node);
1396 } 1584 }
1397 done = 1; 1585 done = 1;
1398out: 1586out:
@@ -1402,3 +1590,314 @@ out:
1402 1590
1403 return skb->len; 1591 return skb->len;
1404} 1592}
1593
1594/* tipc_node_find_by_name - locate owner node of link by link's name
1595 * @net: the applicable net namespace
1596 * @name: pointer to link name string
1597 * @bearer_id: pointer to index in 'node->links' array where the link was found.
1598 *
1599 * Returns pointer to node owning the link, or NULL if no matching link is found.
1600 */
1601static struct tipc_node *tipc_node_find_by_name(struct net *net,
1602 const char *link_name,
1603 unsigned int *bearer_id)
1604{
1605 struct tipc_net *tn = net_generic(net, tipc_net_id);
1606 struct tipc_link *l;
1607 struct tipc_node *n;
1608 struct tipc_node *found_node = NULL;
1609 int i;
1610
1611 *bearer_id = 0;
1612 rcu_read_lock();
1613 list_for_each_entry_rcu(n, &tn->node_list, list) {
1614 tipc_node_read_lock(n);
1615 for (i = 0; i < MAX_BEARERS; i++) {
1616 l = n->links[i].link;
1617 if (l && !strcmp(tipc_link_name(l), link_name)) {
1618 *bearer_id = i;
1619 found_node = n;
1620 break;
1621 }
1622 }
1623 tipc_node_read_unlock(n);
1624 if (found_node)
1625 break;
1626 }
1627 rcu_read_unlock();
1628
1629 return found_node;
1630}
1631
1632int tipc_nl_node_set_link(struct sk_buff *skb, struct genl_info *info)
1633{
1634 int err;
1635 int res = 0;
1636 int bearer_id;
1637 char *name;
1638 struct tipc_link *link;
1639 struct tipc_node *node;
1640 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
1641 struct net *net = sock_net(skb->sk);
1642
1643 if (!info->attrs[TIPC_NLA_LINK])
1644 return -EINVAL;
1645
1646 err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
1647 info->attrs[TIPC_NLA_LINK],
1648 tipc_nl_link_policy);
1649 if (err)
1650 return err;
1651
1652 if (!attrs[TIPC_NLA_LINK_NAME])
1653 return -EINVAL;
1654
1655 name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
1656
1657 if (strcmp(name, tipc_bclink_name) == 0)
1658 return tipc_nl_bc_link_set(net, attrs);
1659
1660 node = tipc_node_find_by_name(net, name, &bearer_id);
1661 if (!node)
1662 return -EINVAL;
1663
1664 tipc_node_read_lock(node);
1665
1666 link = node->links[bearer_id].link;
1667 if (!link) {
1668 res = -EINVAL;
1669 goto out;
1670 }
1671
1672 if (attrs[TIPC_NLA_LINK_PROP]) {
1673 struct nlattr *props[TIPC_NLA_PROP_MAX + 1];
1674
1675 err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP],
1676 props);
1677 if (err) {
1678 res = err;
1679 goto out;
1680 }
1681
1682 if (props[TIPC_NLA_PROP_TOL]) {
1683 u32 tol;
1684
1685 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
1686 tipc_link_set_tolerance(link, tol);
1687 }
1688 if (props[TIPC_NLA_PROP_PRIO]) {
1689 u32 prio;
1690
1691 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
1692 tipc_link_set_prio(link, prio);
1693 }
1694 if (props[TIPC_NLA_PROP_WIN]) {
1695 u32 win;
1696
1697 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1698 tipc_link_set_queue_limits(link, win);
1699 }
1700 }
1701
1702out:
1703 tipc_node_read_unlock(node);
1704
1705 return res;
1706}
1707
1708int tipc_nl_node_get_link(struct sk_buff *skb, struct genl_info *info)
1709{
1710 struct net *net = genl_info_net(info);
1711 struct tipc_nl_msg msg;
1712 char *name;
1713 int err;
1714
1715 msg.portid = info->snd_portid;
1716 msg.seq = info->snd_seq;
1717
1718 if (!info->attrs[TIPC_NLA_LINK_NAME])
1719 return -EINVAL;
1720 name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
1721
1722 msg.skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
1723 if (!msg.skb)
1724 return -ENOMEM;
1725
1726 if (strcmp(name, tipc_bclink_name) == 0) {
1727 err = tipc_nl_add_bc_link(net, &msg);
1728 if (err) {
1729 nlmsg_free(msg.skb);
1730 return err;
1731 }
1732 } else {
1733 int bearer_id;
1734 struct tipc_node *node;
1735 struct tipc_link *link;
1736
1737 node = tipc_node_find_by_name(net, name, &bearer_id);
1738 if (!node)
1739 return -EINVAL;
1740
1741 tipc_node_read_lock(node);
1742 link = node->links[bearer_id].link;
1743 if (!link) {
1744 tipc_node_read_unlock(node);
1745 nlmsg_free(msg.skb);
1746 return -EINVAL;
1747 }
1748
1749 err = __tipc_nl_add_link(net, &msg, link, 0);
1750 tipc_node_read_unlock(node);
1751 if (err) {
1752 nlmsg_free(msg.skb);
1753 return err;
1754 }
1755 }
1756
1757 return genlmsg_reply(msg.skb, info);
1758}
1759
1760int tipc_nl_node_reset_link_stats(struct sk_buff *skb, struct genl_info *info)
1761{
1762 int err;
1763 char *link_name;
1764 unsigned int bearer_id;
1765 struct tipc_link *link;
1766 struct tipc_node *node;
1767 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
1768 struct net *net = sock_net(skb->sk);
1769 struct tipc_link_entry *le;
1770
1771 if (!info->attrs[TIPC_NLA_LINK])
1772 return -EINVAL;
1773
1774 err = nla_parse_nested(attrs, TIPC_NLA_LINK_MAX,
1775 info->attrs[TIPC_NLA_LINK],
1776 tipc_nl_link_policy);
1777 if (err)
1778 return err;
1779
1780 if (!attrs[TIPC_NLA_LINK_NAME])
1781 return -EINVAL;
1782
1783 link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
1784
1785 if (strcmp(link_name, tipc_bclink_name) == 0) {
1786 err = tipc_bclink_reset_stats(net);
1787 if (err)
1788 return err;
1789 return 0;
1790 }
1791
1792 node = tipc_node_find_by_name(net, link_name, &bearer_id);
1793 if (!node)
1794 return -EINVAL;
1795
1796 le = &node->links[bearer_id];
1797 tipc_node_read_lock(node);
1798 spin_lock_bh(&le->lock);
1799 link = node->links[bearer_id].link;
1800 if (!link) {
1801 spin_unlock_bh(&le->lock);
1802 tipc_node_read_unlock(node);
1803 return -EINVAL;
1804 }
1805 tipc_link_reset_stats(link);
1806 spin_unlock_bh(&le->lock);
1807 tipc_node_read_unlock(node);
1808 return 0;
1809}
1810
1811/* Caller should hold node lock */
1812static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
1813 struct tipc_node *node, u32 *prev_link)
1814{
1815 u32 i;
1816 int err;
1817
1818 for (i = *prev_link; i < MAX_BEARERS; i++) {
1819 *prev_link = i;
1820
1821 if (!node->links[i].link)
1822 continue;
1823
1824 err = __tipc_nl_add_link(net, msg,
1825 node->links[i].link, NLM_F_MULTI);
1826 if (err)
1827 return err;
1828 }
1829 *prev_link = 0;
1830
1831 return 0;
1832}
1833
1834int tipc_nl_node_dump_link(struct sk_buff *skb, struct netlink_callback *cb)
1835{
1836 struct net *net = sock_net(skb->sk);
1837 struct tipc_net *tn = net_generic(net, tipc_net_id);
1838 struct tipc_node *node;
1839 struct tipc_nl_msg msg;
1840 u32 prev_node = cb->args[0];
1841 u32 prev_link = cb->args[1];
1842 int done = cb->args[2];
1843 int err;
1844
1845 if (done)
1846 return 0;
1847
1848 msg.skb = skb;
1849 msg.portid = NETLINK_CB(cb->skb).portid;
1850 msg.seq = cb->nlh->nlmsg_seq;
1851
1852 rcu_read_lock();
1853 if (prev_node) {
1854 node = tipc_node_find(net, prev_node);
1855 if (!node) {
1856 /* We never set seq or call nl_dump_check_consistent()
1857 * this means that setting prev_seq here will cause the
1858 * consistency check to fail in the netlink callback
1859 * handler. Resulting in the last NLMSG_DONE message
1860 * having the NLM_F_DUMP_INTR flag set.
1861 */
1862 cb->prev_seq = 1;
1863 goto out;
1864 }
1865 tipc_node_put(node);
1866
1867 list_for_each_entry_continue_rcu(node, &tn->node_list,
1868 list) {
1869 tipc_node_read_lock(node);
1870 err = __tipc_nl_add_node_links(net, &msg, node,
1871 &prev_link);
1872 tipc_node_read_unlock(node);
1873 if (err)
1874 goto out;
1875
1876 prev_node = node->addr;
1877 }
1878 } else {
1879 err = tipc_nl_add_bc_link(net, &msg);
1880 if (err)
1881 goto out;
1882
1883 list_for_each_entry_rcu(node, &tn->node_list, list) {
1884 tipc_node_read_lock(node);
1885 err = __tipc_nl_add_node_links(net, &msg, node,
1886 &prev_link);
1887 tipc_node_read_unlock(node);
1888 if (err)
1889 goto out;
1890
1891 prev_node = node->addr;
1892 }
1893 }
1894 done = 1;
1895out:
1896 rcu_read_unlock();
1897
1898 cb->args[0] = prev_node;
1899 cb->args[1] = prev_link;
1900 cb->args[2] = done;
1901
1902 return skb->len;
1903}