Diffstat (limited to 'net/tipc/node.c')
-rw-r--r--  net/tipc/node.c | 116
1 file changed, 77 insertions(+), 39 deletions(-)
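The diff below moves struct tipc_node over to kref-based reference counting: tipc_node_create() initialises the counter with kref_init(), tipc_node_find() and tipc_node_get() take references, tipc_node_put() drops them, and the final put releases the node through tipc_node_kref_release() and tipc_node_delete(). As a rough illustration of that get/put/release lifecycle, here is a minimal sketch in plain C11; the atomic counter merely stands in for the kernel's struct kref, and all names in the sketch are illustrative, not taken from the patch.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for the kernel object: the last put frees it via a release path. */
struct node {
        atomic_int refcount;
        int addr;
};

static struct node *node_create(int addr)
{
        struct node *n = calloc(1, sizeof(*n));

        if (!n)
                return NULL;
        n->addr = addr;
        atomic_init(&n->refcount, 1);   /* like kref_init(): creator holds one ref */
        return n;
}

static void node_get(struct node *n)
{
        atomic_fetch_add(&n->refcount, 1);      /* like kref_get() */
}

static void node_put(struct node *n)
{
        /* like kref_put(): run the release callback when the last ref is dropped */
        if (atomic_fetch_sub(&n->refcount, 1) == 1) {
                printf("releasing node %d\n", n->addr);
                free(n);
        }
}

int main(void)
{
        struct node *n = node_create(0x1001);

        node_get(n);    /* a lookup such as tipc_node_find() takes a reference */
        node_put(n);    /* ...and the caller drops it when done */
        node_put(n);    /* final put from the owner frees the node */
        return 0;
}

The property the patch relies on is the same as in the sketch: whichever caller drops the last reference triggers the release callback, so a node can safely outlive its removal from the per-net node list and hash table (tipc_node_delete() unlinks it and frees it via kfree_rcu()).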
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 86152de8248d..22c059ad2999 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -42,6 +42,7 @@
 
 static void node_lost_contact(struct tipc_node *n_ptr);
 static void node_established_contact(struct tipc_node *n_ptr);
+static void tipc_node_delete(struct tipc_node *node);
 
 struct tipc_sock_conn {
         u32 port;
@@ -67,6 +68,23 @@ static unsigned int tipc_hashfn(u32 addr)
         return addr & (NODE_HTABLE_SIZE - 1);
 }
 
+static void tipc_node_kref_release(struct kref *kref)
+{
+        struct tipc_node *node = container_of(kref, struct tipc_node, kref);
+
+        tipc_node_delete(node);
+}
+
+void tipc_node_put(struct tipc_node *node)
+{
+        kref_put(&node->kref, tipc_node_kref_release);
+}
+
+static void tipc_node_get(struct tipc_node *node)
+{
+        kref_get(&node->kref);
+}
+
 /*
  * tipc_node_find - locate specified node object, if it exists
  */
@@ -82,6 +100,7 @@ struct tipc_node *tipc_node_find(struct net *net, u32 addr)
         hlist_for_each_entry_rcu(node, &tn->node_htable[tipc_hashfn(addr)],
                                  hash) {
                 if (node->addr == addr) {
+                        tipc_node_get(node);
                         rcu_read_unlock();
                         return node;
                 }
@@ -106,12 +125,13 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr)
         }
         n_ptr->addr = addr;
         n_ptr->net = net;
+        kref_init(&n_ptr->kref);
         spin_lock_init(&n_ptr->lock);
         INIT_HLIST_NODE(&n_ptr->hash);
         INIT_LIST_HEAD(&n_ptr->list);
         INIT_LIST_HEAD(&n_ptr->publ_list);
         INIT_LIST_HEAD(&n_ptr->conn_sks);
-        __skb_queue_head_init(&n_ptr->bclink.deferred_queue);
+        __skb_queue_head_init(&n_ptr->bclink.deferdq);
         hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
         list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
                 if (n_ptr->addr < temp_node->addr)
@@ -120,16 +140,17 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr)
         list_add_tail_rcu(&n_ptr->list, &temp_node->list);
         n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN;
         n_ptr->signature = INVALID_NODE_SIG;
+        tipc_node_get(n_ptr);
 exit:
         spin_unlock_bh(&tn->node_list_lock);
         return n_ptr;
 }
 
-static void tipc_node_delete(struct tipc_net *tn, struct tipc_node *n_ptr)
+static void tipc_node_delete(struct tipc_node *node)
 {
-        list_del_rcu(&n_ptr->list);
-        hlist_del_rcu(&n_ptr->hash);
-        kfree_rcu(n_ptr, rcu);
+        list_del_rcu(&node->list);
+        hlist_del_rcu(&node->hash);
+        kfree_rcu(node, rcu);
 }
 
 void tipc_node_stop(struct net *net)
@@ -139,7 +160,7 @@ void tipc_node_stop(struct net *net)
 
         spin_lock_bh(&tn->node_list_lock);
         list_for_each_entry_safe(node, t_node, &tn->node_list, list)
-                tipc_node_delete(tn, node);
+                tipc_node_put(node);
         spin_unlock_bh(&tn->node_list_lock);
 }
 
@@ -147,6 +168,7 @@ int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
 {
         struct tipc_node *node;
         struct tipc_sock_conn *conn;
+        int err = 0;
 
         if (in_own_node(net, dnode))
                 return 0;
@@ -157,8 +179,10 @@ int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
                 return -EHOSTUNREACH;
         }
         conn = kmalloc(sizeof(*conn), GFP_ATOMIC);
-        if (!conn)
-                return -EHOSTUNREACH;
+        if (!conn) {
+                err = -EHOSTUNREACH;
+                goto exit;
+        }
         conn->peer_node = dnode;
         conn->port = port;
         conn->peer_port = peer_port;
@@ -166,7 +190,9 @@ int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
         tipc_node_lock(node);
         list_add_tail(&conn->list, &node->conn_sks);
         tipc_node_unlock(node);
-        return 0;
+exit:
+        tipc_node_put(node);
+        return err;
 }
 
 void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
@@ -189,6 +215,7 @@ void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
                 kfree(conn);
         }
         tipc_node_unlock(node);
+        tipc_node_put(node);
 }
 
 /**
@@ -227,8 +254,8 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
         active[0] = active[1] = l_ptr;
 exit:
         /* Leave room for changeover header when returning 'mtu' to users: */
-        n_ptr->act_mtus[0] = active[0]->max_pkt - INT_H_SIZE;
-        n_ptr->act_mtus[1] = active[1]->max_pkt - INT_H_SIZE;
+        n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE;
+        n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE;
 }
 
 /**
@@ -292,11 +319,10 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
 
         /* Leave room for changeover header when returning 'mtu' to users: */
         if (active[0]) {
-                n_ptr->act_mtus[0] = active[0]->max_pkt - INT_H_SIZE;
-                n_ptr->act_mtus[1] = active[1]->max_pkt - INT_H_SIZE;
+                n_ptr->act_mtus[0] = active[0]->mtu - INT_H_SIZE;
+                n_ptr->act_mtus[1] = active[1]->mtu - INT_H_SIZE;
                 return;
         }
-
         /* Loopback link went down? No fragmentation needed from now on. */
         if (n_ptr->addr == tn->own_addr) {
                 n_ptr->act_mtus[0] = MAX_MSG_SIZE;
@@ -354,7 +380,7 @@ static void node_lost_contact(struct tipc_node *n_ptr)
 
         /* Flush broadcast link info associated with lost node */
         if (n_ptr->bclink.recv_permitted) {
-                __skb_queue_purge(&n_ptr->bclink.deferred_queue);
+                __skb_queue_purge(&n_ptr->bclink.deferdq);
 
                 if (n_ptr->bclink.reasm_buf) {
                         kfree_skb(n_ptr->bclink.reasm_buf);
@@ -367,18 +393,17 @@ static void node_lost_contact(struct tipc_node *n_ptr)
                 n_ptr->bclink.recv_permitted = false;
         }
 
-        /* Abort link changeover */
+        /* Abort any ongoing link failover */
         for (i = 0; i < MAX_BEARERS; i++) {
                 struct tipc_link *l_ptr = n_ptr->links[i];
                 if (!l_ptr)
                         continue;
-                l_ptr->reset_checkpoint = l_ptr->next_in_no;
-                l_ptr->exp_msg_count = 0;
+                l_ptr->flags &= ~LINK_FAILINGOVER;
+                l_ptr->failover_checkpt = 0;
+                l_ptr->failover_pkts = 0;
+                kfree_skb(l_ptr->failover_skb);
+                l_ptr->failover_skb = NULL;
                 tipc_link_reset_fragments(l_ptr);
-
-                /* Link marked for deletion after failover? => do it now */
-                if (l_ptr->flags & LINK_STOPPED)
-                        tipc_link_delete(l_ptr);
         }
 
         n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN;
@@ -417,19 +442,25 @@ int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
                            char *linkname, size_t len)
 {
         struct tipc_link *link;
+        int err = -EINVAL;
         struct tipc_node *node = tipc_node_find(net, addr);
 
-        if ((bearer_id >= MAX_BEARERS) || !node)
-                return -EINVAL;
+        if (!node)
+                return err;
+
+        if (bearer_id >= MAX_BEARERS)
+                goto exit;
+
         tipc_node_lock(node);
         link = node->links[bearer_id];
         if (link) {
                 strncpy(linkname, link->name, len);
-                tipc_node_unlock(node);
-                return 0;
+                err = 0;
         }
+exit:
         tipc_node_unlock(node);
-        return -EINVAL;
+        tipc_node_put(node);
+        return err;
 }
 
 void tipc_node_unlock(struct tipc_node *node)
@@ -459,7 +490,7 @@ void tipc_node_unlock(struct tipc_node *node)
                         TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
                         TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP |
                         TIPC_WAKEUP_BCAST_USERS | TIPC_BCAST_MSG_EVT |
-                        TIPC_NAMED_MSG_EVT);
+                        TIPC_NAMED_MSG_EVT | TIPC_BCAST_RESET);
 
         spin_unlock_bh(&node->lock);
 
@@ -488,6 +519,9 @@ void tipc_node_unlock(struct tipc_node *node)
 
         if (flags & TIPC_BCAST_MSG_EVT)
                 tipc_bclink_input(net);
+
+        if (flags & TIPC_BCAST_RESET)
+                tipc_link_reset_all(node);
 }
 
 /* Caller should hold node lock for the passed node */
@@ -542,17 +576,21 @@ int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
         msg.seq = cb->nlh->nlmsg_seq;
 
         rcu_read_lock();
-
-        if (last_addr && !tipc_node_find(net, last_addr)) {
-                rcu_read_unlock();
-                /* We never set seq or call nl_dump_check_consistent() this
-                 * means that setting prev_seq here will cause the consistence
-                 * check to fail in the netlink callback handler. Resulting in
-                 * the NLMSG_DONE message having the NLM_F_DUMP_INTR flag set if
-                 * the node state changed while we released the lock.
-                 */
-                cb->prev_seq = 1;
-                return -EPIPE;
+        if (last_addr) {
+                node = tipc_node_find(net, last_addr);
+                if (!node) {
+                        rcu_read_unlock();
+                        /* We never set seq or call nl_dump_check_consistent()
+                         * this means that setting prev_seq here will cause the
+                         * consistence check to fail in the netlink callback
+                         * handler. Resulting in the NLMSG_DONE message having
+                         * the NLM_F_DUMP_INTR flag set if the node state
+                         * changed while we released the lock.
+                         */
+                        cb->prev_seq = 1;
+                        return -EPIPE;
+                }
+                tipc_node_put(node);
         }
 
         list_for_each_entry_rcu(node, &tn->node_list, list) {