Diffstat (limited to 'net/tipc')
-rw-r--r--  net/tipc/Kconfig             12
-rw-r--r--  net/tipc/Makefile             6
-rw-r--r--  net/tipc/addr.c              45
-rw-r--r--  net/tipc/addr.h              45
-rw-r--r--  net/tipc/bcast.c            499
-rw-r--r--  net/tipc/bcast.h            115
-rw-r--r--  net/tipc/bearer.c           205
-rw-r--r--  net/tipc/bearer.h            43
-rw-r--r--  net/tipc/config.c           342
-rw-r--r--  net/tipc/config.h            67
-rw-r--r--  net/tipc/core.c             154
-rw-r--r--  net/tipc/core.h             171
-rw-r--r--  net/tipc/discover.c          90
-rw-r--r--  net/tipc/discover.h           8
-rw-r--r--  net/tipc/link.c             881
-rw-r--r--  net/tipc/link.h              47
-rw-r--r--  net/tipc/log.c               55
-rw-r--r--  net/tipc/msg.c              153
-rw-r--r--  net/tipc/msg.h              143
-rw-r--r--  net/tipc/name_distr.c       145
-rw-r--r--  net/tipc/name_distr.h        16
-rw-r--r--  net/tipc/name_table.c       398
-rw-r--r--  net/tipc/name_table.h        49
-rw-r--r--  net/tipc/net.c               56
-rw-r--r--  net/tipc/net.h                4
-rw-r--r--  net/tipc/netlink.c           64
-rw-r--r--  net/tipc/netlink.h            7
-rw-r--r--  net/tipc/netlink_compat.c  1084
-rw-r--r--  net/tipc/node.c             336
-rw-r--r--  net/tipc/node.h              53
-rw-r--r--  net/tipc/server.c             6
-rw-r--r--  net/tipc/server.h            17
-rw-r--r--  net/tipc/socket.c          1015
-rw-r--r--  net/tipc/socket.h            20
-rw-r--r--  net/tipc/subscr.c           131
-rw-r--r--  net/tipc/subscr.h            14
36 files changed, 3245 insertions, 3251 deletions
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
index c890848f9d56..91c8a8e031db 100644
--- a/net/tipc/Kconfig
+++ b/net/tipc/Kconfig
@@ -20,18 +20,6 @@ menuconfig TIPC
20 20
21 If in doubt, say N. 21 If in doubt, say N.
22 22
23config TIPC_PORTS
24 int "Maximum number of ports in a node"
25 depends on TIPC
26 range 127 65535
27 default "8191"
28 help
29 Specifies how many ports can be supported by a node.
30 Can range from 127 to 65535 ports; default is 8191.
31
32 Setting this to a smaller value saves some memory,
33 setting it to higher allows for more ports.
34
35config TIPC_MEDIA_IB 23config TIPC_MEDIA_IB
36 bool "InfiniBand media type support" 24 bool "InfiniBand media type support"
37 depends on TIPC && INFINIBAND_IPOIB 25 depends on TIPC && INFINIBAND_IPOIB
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
index 333e4592772c..599b1a540d2b 100644
--- a/net/tipc/Makefile
+++ b/net/tipc/Makefile
@@ -4,11 +4,11 @@
4 4
5obj-$(CONFIG_TIPC) := tipc.o 5obj-$(CONFIG_TIPC) := tipc.o
6 6
7tipc-y += addr.o bcast.o bearer.o config.o \ 7tipc-y += addr.o bcast.o bearer.o \
8 core.o link.o discover.o msg.o \ 8 core.o link.o discover.o msg.o \
9 name_distr.o subscr.o name_table.o net.o \ 9 name_distr.o subscr.o name_table.o net.o \
10 netlink.o node.o socket.o log.o eth_media.o \ 10 netlink.o netlink_compat.o node.o socket.o eth_media.o \
11 server.o 11 server.o socket.o
12 12
13tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o 13tipc-$(CONFIG_TIPC_MEDIA_IB) += ib_media.o
14tipc-$(CONFIG_SYSCTL) += sysctl.o 14tipc-$(CONFIG_SYSCTL) += sysctl.o
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
index 357b74b26f9e..48fd3b5a73fb 100644
--- a/net/tipc/addr.c
+++ b/net/tipc/addr.c
@@ -34,8 +34,51 @@
34 * POSSIBILITY OF SUCH DAMAGE. 34 * POSSIBILITY OF SUCH DAMAGE.
35 */ 35 */
36 36
37#include "core.h" 37#include <linux/kernel.h>
38#include "addr.h" 38#include "addr.h"
39#include "core.h"
40
41/**
42 * in_own_cluster - test for cluster inclusion; <0.0.0> always matches
43 */
44int in_own_cluster(struct net *net, u32 addr)
45{
46 return in_own_cluster_exact(net, addr) || !addr;
47}
48
49int in_own_cluster_exact(struct net *net, u32 addr)
50{
51 struct tipc_net *tn = net_generic(net, tipc_net_id);
52
53 return !((addr ^ tn->own_addr) >> 12);
54}
55
56/**
57 * in_own_node - test for node inclusion; <0.0.0> always matches
58 */
59int in_own_node(struct net *net, u32 addr)
60{
61 struct tipc_net *tn = net_generic(net, tipc_net_id);
62
63 return (addr == tn->own_addr) || !addr;
64}
65
66/**
67 * addr_domain - convert 2-bit scope value to equivalent message lookup domain
68 *
69 * Needed when address of a named message must be looked up a second time
70 * after a network hop.
71 */
72u32 addr_domain(struct net *net, u32 sc)
73{
74 struct tipc_net *tn = net_generic(net, tipc_net_id);
75
76 if (likely(sc == TIPC_NODE_SCOPE))
77 return tn->own_addr;
78 if (sc == TIPC_CLUSTER_SCOPE)
79 return tipc_cluster_mask(tn->own_addr);
80 return tipc_zone_mask(tn->own_addr);
81}
39 82
40/** 83/**
41 * tipc_addr_domain_valid - validates a network domain address 84 * tipc_addr_domain_valid - validates a network domain address
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
index a74acf9ee804..c700c2d28e09 100644
--- a/net/tipc/addr.h
+++ b/net/tipc/addr.h
@@ -37,7 +37,10 @@
37#ifndef _TIPC_ADDR_H 37#ifndef _TIPC_ADDR_H
38#define _TIPC_ADDR_H 38#define _TIPC_ADDR_H
39 39
40#include "core.h" 40#include <linux/types.h>
41#include <linux/tipc.h>
42#include <net/net_namespace.h>
43#include <net/netns/generic.h>
41 44
42#define TIPC_ZONE_MASK 0xff000000u 45#define TIPC_ZONE_MASK 0xff000000u
43#define TIPC_CLUSTER_MASK 0xfffff000u 46#define TIPC_CLUSTER_MASK 0xfffff000u
@@ -52,42 +55,10 @@ static inline u32 tipc_cluster_mask(u32 addr)
52 return addr & TIPC_CLUSTER_MASK; 55 return addr & TIPC_CLUSTER_MASK;
53} 56}
54 57
55static inline int in_own_cluster_exact(u32 addr) 58int in_own_cluster(struct net *net, u32 addr);
56{ 59int in_own_cluster_exact(struct net *net, u32 addr);
57 return !((addr ^ tipc_own_addr) >> 12); 60int in_own_node(struct net *net, u32 addr);
58} 61u32 addr_domain(struct net *net, u32 sc);
59
60/**
61 * in_own_node - test for node inclusion; <0.0.0> always matches
62 */
63static inline int in_own_node(u32 addr)
64{
65 return (addr == tipc_own_addr) || !addr;
66}
67
68/**
69 * in_own_cluster - test for cluster inclusion; <0.0.0> always matches
70 */
71static inline int in_own_cluster(u32 addr)
72{
73 return in_own_cluster_exact(addr) || !addr;
74}
75
76/**
77 * addr_domain - convert 2-bit scope value to equivalent message lookup domain
78 *
79 * Needed when address of a named message must be looked up a second time
80 * after a network hop.
81 */
82static inline u32 addr_domain(u32 sc)
83{
84 if (likely(sc == TIPC_NODE_SCOPE))
85 return tipc_own_addr;
86 if (sc == TIPC_CLUSTER_SCOPE)
87 return tipc_cluster_mask(tipc_own_addr);
88 return tipc_zone_mask(tipc_own_addr);
89}
90
91int tipc_addr_domain_valid(u32); 62int tipc_addr_domain_valid(u32);
92int tipc_addr_node_valid(u32 addr); 63int tipc_addr_node_valid(u32 addr);
93int tipc_in_scope(u32 domain, u32 addr); 64int tipc_in_scope(u32 domain, u32 addr);
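[Editor's note] The addr.c/addr.h hunks above replace the global tipc_own_addr with a per-namespace tipc_net looked up via net_generic(net, tipc_net_id). The registration that makes that lookup work lives in core.c and is not part of the hunks shown here; the following is only a minimal sketch of the pernet pattern, with an abbreviated struct tipc_net and illustrative init/exit bodies and default values.

#include <linux/types.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

int tipc_net_id __read_mostly;          /* index handed out by the pernet core */

struct tipc_net {                       /* abbreviated; the real layout is in core.h */
        u32 own_addr;
        int net_id;
        /* bcbearer, bclink, bcl, bearer_list[], node table, ... */
};

static int __net_init tipc_init_net(struct net *net)
{
        struct tipc_net *tn = net_generic(net, tipc_net_id);

        tn->own_addr = 0;               /* node address not yet configured */
        tn->net_id = 4711;              /* illustrative default network id */
        return 0;
}

static void __net_exit tipc_exit_net(struct net *net)
{
        /* per-namespace teardown (stop bclink, bearers, node table, ...) */
}

static struct pernet_operations tipc_net_ops = {
        .init = tipc_init_net,
        .exit = tipc_exit_net,
        .id   = &tipc_net_id,
        .size = sizeof(struct tipc_net),
};

/* module init would call: register_pernet_subsys(&tipc_net_ops); */

With .id and .size set, the pernet core allocates sizeof(struct tipc_net) per namespace, which is what lets in_own_cluster_exact() and addr_domain() above resolve tn->own_addr per netns instead of through a global.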
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index a9e174fc0f91..3e41704832de 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/bcast.c: TIPC broadcast code 2 * net/tipc/bcast.c: TIPC broadcast code
3 * 3 *
4 * Copyright (c) 2004-2006, 2014, Ericsson AB 4 * Copyright (c) 2004-2006, 2014-2015, Ericsson AB
5 * Copyright (c) 2004, Intel Corporation. 5 * Copyright (c) 2004, Intel Corporation.
6 * Copyright (c) 2005, 2010-2011, Wind River Systems 6 * Copyright (c) 2005, 2010-2011, Wind River Systems
7 * All rights reserved. 7 * All rights reserved.
@@ -35,77 +35,14 @@
35 * POSSIBILITY OF SUCH DAMAGE. 35 * POSSIBILITY OF SUCH DAMAGE.
36 */ 36 */
37 37
38#include "core.h"
39#include "link.h"
40#include "socket.h" 38#include "socket.h"
41#include "msg.h" 39#include "msg.h"
42#include "bcast.h" 40#include "bcast.h"
43#include "name_distr.h" 41#include "name_distr.h"
42#include "core.h"
44 43
45#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */ 44#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */
46#define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */ 45#define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */
47#define BCBEARER MAX_BEARERS
48
49/**
50 * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
51 * @primary: pointer to primary bearer
52 * @secondary: pointer to secondary bearer
53 *
54 * Bearers must have same priority and same set of reachable destinations
55 * to be paired.
56 */
57
58struct tipc_bcbearer_pair {
59 struct tipc_bearer *primary;
60 struct tipc_bearer *secondary;
61};
62
63/**
64 * struct tipc_bcbearer - bearer used by broadcast link
65 * @bearer: (non-standard) broadcast bearer structure
66 * @media: (non-standard) broadcast media structure
67 * @bpairs: array of bearer pairs
68 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
69 * @remains: temporary node map used by tipc_bcbearer_send()
70 * @remains_new: temporary node map used tipc_bcbearer_send()
71 *
72 * Note: The fields labelled "temporary" are incorporated into the bearer
73 * to avoid consuming potentially limited stack space through the use of
74 * large local variables within multicast routines. Concurrent access is
75 * prevented through use of the spinlock "bclink_lock".
76 */
77struct tipc_bcbearer {
78 struct tipc_bearer bearer;
79 struct tipc_media media;
80 struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
81 struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
82 struct tipc_node_map remains;
83 struct tipc_node_map remains_new;
84};
85
86/**
87 * struct tipc_bclink - link used for broadcast messages
88 * @lock: spinlock governing access to structure
89 * @link: (non-standard) broadcast link structure
90 * @node: (non-standard) node structure representing b'cast link's peer node
91 * @flags: represent bclink states
92 * @bcast_nodes: map of broadcast-capable nodes
93 * @retransmit_to: node that most recently requested a retransmit
94 *
95 * Handles sequence numbering, fragmentation, bundling, etc.
96 */
97struct tipc_bclink {
98 spinlock_t lock;
99 struct tipc_link link;
100 struct tipc_node node;
101 unsigned int flags;
102 struct tipc_node_map bcast_nodes;
103 struct tipc_node *retransmit_to;
104};
105
106static struct tipc_bcbearer *bcbearer;
107static struct tipc_bclink *bclink;
108static struct tipc_link *bcl;
109 46
110const char tipc_bclink_name[] = "broadcast-link"; 47const char tipc_bclink_name[] = "broadcast-link";
111 48
@@ -115,38 +52,50 @@ static void tipc_nmap_diff(struct tipc_node_map *nm_a,
115static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node); 52static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
116static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node); 53static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
117 54
118static void tipc_bclink_lock(void) 55static void tipc_bclink_lock(struct net *net)
119{ 56{
120 spin_lock_bh(&bclink->lock); 57 struct tipc_net *tn = net_generic(net, tipc_net_id);
58
59 spin_lock_bh(&tn->bclink->lock);
121} 60}
122 61
123static void tipc_bclink_unlock(void) 62static void tipc_bclink_unlock(struct net *net)
124{ 63{
64 struct tipc_net *tn = net_generic(net, tipc_net_id);
125 struct tipc_node *node = NULL; 65 struct tipc_node *node = NULL;
126 66
127 if (likely(!bclink->flags)) { 67 if (likely(!tn->bclink->flags)) {
128 spin_unlock_bh(&bclink->lock); 68 spin_unlock_bh(&tn->bclink->lock);
129 return; 69 return;
130 } 70 }
131 71
132 if (bclink->flags & TIPC_BCLINK_RESET) { 72 if (tn->bclink->flags & TIPC_BCLINK_RESET) {
133 bclink->flags &= ~TIPC_BCLINK_RESET; 73 tn->bclink->flags &= ~TIPC_BCLINK_RESET;
134 node = tipc_bclink_retransmit_to(); 74 node = tipc_bclink_retransmit_to(net);
135 } 75 }
136 spin_unlock_bh(&bclink->lock); 76 spin_unlock_bh(&tn->bclink->lock);
137 77
138 if (node) 78 if (node)
139 tipc_link_reset_all(node); 79 tipc_link_reset_all(node);
140} 80}
141 81
82void tipc_bclink_input(struct net *net)
83{
84 struct tipc_net *tn = net_generic(net, tipc_net_id);
85
86 tipc_sk_mcast_rcv(net, &tn->bclink->arrvq, &tn->bclink->inputq);
87}
88
142uint tipc_bclink_get_mtu(void) 89uint tipc_bclink_get_mtu(void)
143{ 90{
144 return MAX_PKT_DEFAULT_MCAST; 91 return MAX_PKT_DEFAULT_MCAST;
145} 92}
146 93
147void tipc_bclink_set_flags(unsigned int flags) 94void tipc_bclink_set_flags(struct net *net, unsigned int flags)
148{ 95{
149 bclink->flags |= flags; 96 struct tipc_net *tn = net_generic(net, tipc_net_id);
97
98 tn->bclink->flags |= flags;
150} 99}
151 100
152static u32 bcbuf_acks(struct sk_buff *buf) 101static u32 bcbuf_acks(struct sk_buff *buf)
@@ -164,31 +113,40 @@ static void bcbuf_decr_acks(struct sk_buff *buf)
164 bcbuf_set_acks(buf, bcbuf_acks(buf) - 1); 113 bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
165} 114}
166 115
167void tipc_bclink_add_node(u32 addr) 116void tipc_bclink_add_node(struct net *net, u32 addr)
168{ 117{
169 tipc_bclink_lock(); 118 struct tipc_net *tn = net_generic(net, tipc_net_id);
170 tipc_nmap_add(&bclink->bcast_nodes, addr); 119
171 tipc_bclink_unlock(); 120 tipc_bclink_lock(net);
121 tipc_nmap_add(&tn->bclink->bcast_nodes, addr);
122 tipc_bclink_unlock(net);
172} 123}
173 124
174void tipc_bclink_remove_node(u32 addr) 125void tipc_bclink_remove_node(struct net *net, u32 addr)
175{ 126{
176 tipc_bclink_lock(); 127 struct tipc_net *tn = net_generic(net, tipc_net_id);
177 tipc_nmap_remove(&bclink->bcast_nodes, addr); 128
178 tipc_bclink_unlock(); 129 tipc_bclink_lock(net);
130 tipc_nmap_remove(&tn->bclink->bcast_nodes, addr);
131 tipc_bclink_unlock(net);
179} 132}
180 133
181static void bclink_set_last_sent(void) 134static void bclink_set_last_sent(struct net *net)
182{ 135{
136 struct tipc_net *tn = net_generic(net, tipc_net_id);
137 struct tipc_link *bcl = tn->bcl;
138
183 if (bcl->next_out) 139 if (bcl->next_out)
184 bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1); 140 bcl->fsm_msg_cnt = mod(buf_seqno(bcl->next_out) - 1);
185 else 141 else
186 bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1); 142 bcl->fsm_msg_cnt = mod(bcl->next_out_no - 1);
187} 143}
188 144
189u32 tipc_bclink_get_last_sent(void) 145u32 tipc_bclink_get_last_sent(struct net *net)
190{ 146{
191 return bcl->fsm_msg_cnt; 147 struct tipc_net *tn = net_generic(net, tipc_net_id);
148
149 return tn->bcl->fsm_msg_cnt;
192} 150}
193 151
194static void bclink_update_last_sent(struct tipc_node *node, u32 seqno) 152static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
@@ -203,9 +161,11 @@ static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
203 * 161 *
204 * Called with bclink_lock locked 162 * Called with bclink_lock locked
205 */ 163 */
206struct tipc_node *tipc_bclink_retransmit_to(void) 164struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
207{ 165{
208 return bclink->retransmit_to; 166 struct tipc_net *tn = net_generic(net, tipc_net_id);
167
168 return tn->bclink->retransmit_to;
209} 169}
210 170
211/** 171/**
@@ -215,9 +175,10 @@ struct tipc_node *tipc_bclink_retransmit_to(void)
215 * 175 *
216 * Called with bclink_lock locked 176 * Called with bclink_lock locked
217 */ 177 */
218static void bclink_retransmit_pkt(u32 after, u32 to) 178static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
219{ 179{
220 struct sk_buff *skb; 180 struct sk_buff *skb;
181 struct tipc_link *bcl = tn->bcl;
221 182
222 skb_queue_walk(&bcl->outqueue, skb) { 183 skb_queue_walk(&bcl->outqueue, skb) {
223 if (more(buf_seqno(skb), after)) { 184 if (more(buf_seqno(skb), after)) {
@@ -232,13 +193,11 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
232 * 193 *
233 * Called with no locks taken 194 * Called with no locks taken
234 */ 195 */
235void tipc_bclink_wakeup_users(void) 196void tipc_bclink_wakeup_users(struct net *net)
236{ 197{
237 struct sk_buff *skb; 198 struct tipc_net *tn = net_generic(net, tipc_net_id);
238
239 while ((skb = skb_dequeue(&bclink->link.waiting_sks)))
240 tipc_sk_rcv(skb);
241 199
200 tipc_sk_rcv(net, &tn->bclink->link.wakeupq);
242} 201}
243 202
244/** 203/**
@@ -253,10 +212,12 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
253 struct sk_buff *skb, *tmp; 212 struct sk_buff *skb, *tmp;
254 struct sk_buff *next; 213 struct sk_buff *next;
255 unsigned int released = 0; 214 unsigned int released = 0;
215 struct net *net = n_ptr->net;
216 struct tipc_net *tn = net_generic(net, tipc_net_id);
256 217
257 tipc_bclink_lock(); 218 tipc_bclink_lock(net);
258 /* Bail out if tx queue is empty (no clean up is required) */ 219 /* Bail out if tx queue is empty (no clean up is required) */
259 skb = skb_peek(&bcl->outqueue); 220 skb = skb_peek(&tn->bcl->outqueue);
260 if (!skb) 221 if (!skb)
261 goto exit; 222 goto exit;
262 223
@@ -267,43 +228,43 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
267 * acknowledge sent messages only (if other nodes still exist) 228 * acknowledge sent messages only (if other nodes still exist)
268 * or both sent and unsent messages (otherwise) 229 * or both sent and unsent messages (otherwise)
269 */ 230 */
270 if (bclink->bcast_nodes.count) 231 if (tn->bclink->bcast_nodes.count)
271 acked = bcl->fsm_msg_cnt; 232 acked = tn->bcl->fsm_msg_cnt;
272 else 233 else
273 acked = bcl->next_out_no; 234 acked = tn->bcl->next_out_no;
274 } else { 235 } else {
275 /* 236 /*
276 * Bail out if specified sequence number does not correspond 237 * Bail out if specified sequence number does not correspond
277 * to a message that has been sent and not yet acknowledged 238 * to a message that has been sent and not yet acknowledged
278 */ 239 */
279 if (less(acked, buf_seqno(skb)) || 240 if (less(acked, buf_seqno(skb)) ||
280 less(bcl->fsm_msg_cnt, acked) || 241 less(tn->bcl->fsm_msg_cnt, acked) ||
281 less_eq(acked, n_ptr->bclink.acked)) 242 less_eq(acked, n_ptr->bclink.acked))
282 goto exit; 243 goto exit;
283 } 244 }
284 245
285 /* Skip over packets that node has previously acknowledged */ 246 /* Skip over packets that node has previously acknowledged */
286 skb_queue_walk(&bcl->outqueue, skb) { 247 skb_queue_walk(&tn->bcl->outqueue, skb) {
287 if (more(buf_seqno(skb), n_ptr->bclink.acked)) 248 if (more(buf_seqno(skb), n_ptr->bclink.acked))
288 break; 249 break;
289 } 250 }
290 251
291 /* Update packets that node is now acknowledging */ 252 /* Update packets that node is now acknowledging */
292 skb_queue_walk_from_safe(&bcl->outqueue, skb, tmp) { 253 skb_queue_walk_from_safe(&tn->bcl->outqueue, skb, tmp) {
293 if (more(buf_seqno(skb), acked)) 254 if (more(buf_seqno(skb), acked))
294 break; 255 break;
295 256
296 next = tipc_skb_queue_next(&bcl->outqueue, skb); 257 next = tipc_skb_queue_next(&tn->bcl->outqueue, skb);
297 if (skb != bcl->next_out) { 258 if (skb != tn->bcl->next_out) {
298 bcbuf_decr_acks(skb); 259 bcbuf_decr_acks(skb);
299 } else { 260 } else {
300 bcbuf_set_acks(skb, 0); 261 bcbuf_set_acks(skb, 0);
301 bcl->next_out = next; 262 tn->bcl->next_out = next;
302 bclink_set_last_sent(); 263 bclink_set_last_sent(net);
303 } 264 }
304 265
305 if (bcbuf_acks(skb) == 0) { 266 if (bcbuf_acks(skb) == 0) {
306 __skb_unlink(skb, &bcl->outqueue); 267 __skb_unlink(skb, &tn->bcl->outqueue);
307 kfree_skb(skb); 268 kfree_skb(skb);
308 released = 1; 269 released = 1;
309 } 270 }
@@ -311,15 +272,14 @@ void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
311 n_ptr->bclink.acked = acked; 272 n_ptr->bclink.acked = acked;
312 273
313 /* Try resolving broadcast link congestion, if necessary */ 274 /* Try resolving broadcast link congestion, if necessary */
314 if (unlikely(bcl->next_out)) { 275 if (unlikely(tn->bcl->next_out)) {
315 tipc_link_push_packets(bcl); 276 tipc_link_push_packets(tn->bcl);
316 bclink_set_last_sent(); 277 bclink_set_last_sent(net);
317 } 278 }
318 if (unlikely(released && !skb_queue_empty(&bcl->waiting_sks))) 279 if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
319 n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS; 280 n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
320
321exit: 281exit:
322 tipc_bclink_unlock(); 282 tipc_bclink_unlock(net);
323} 283}
324 284
325/** 285/**
@@ -327,9 +287,12 @@ exit:
327 * 287 *
328 * RCU and node lock set 288 * RCU and node lock set
329 */ 289 */
330void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent) 290void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
291 u32 last_sent)
331{ 292{
332 struct sk_buff *buf; 293 struct sk_buff *buf;
294 struct net *net = n_ptr->net;
295 struct tipc_net *tn = net_generic(net, tipc_net_id);
333 296
334 /* Ignore "stale" link state info */ 297 /* Ignore "stale" link state info */
335 if (less_eq(last_sent, n_ptr->bclink.last_in)) 298 if (less_eq(last_sent, n_ptr->bclink.last_in))
@@ -359,18 +322,18 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
359 struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferred_queue); 322 struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferred_queue);
360 u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent; 323 u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;
361 324
362 tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, 325 tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
363 INT_H_SIZE, n_ptr->addr); 326 INT_H_SIZE, n_ptr->addr);
364 msg_set_non_seq(msg, 1); 327 msg_set_non_seq(msg, 1);
365 msg_set_mc_netid(msg, tipc_net_id); 328 msg_set_mc_netid(msg, tn->net_id);
366 msg_set_bcast_ack(msg, n_ptr->bclink.last_in); 329 msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
367 msg_set_bcgap_after(msg, n_ptr->bclink.last_in); 330 msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
368 msg_set_bcgap_to(msg, to); 331 msg_set_bcgap_to(msg, to);
369 332
370 tipc_bclink_lock(); 333 tipc_bclink_lock(net);
371 tipc_bearer_send(MAX_BEARERS, buf, NULL); 334 tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
372 bcl->stats.sent_nacks++; 335 tn->bcl->stats.sent_nacks++;
373 tipc_bclink_unlock(); 336 tipc_bclink_unlock(net);
374 kfree_skb(buf); 337 kfree_skb(buf);
375 338
376 n_ptr->bclink.oos_state++; 339 n_ptr->bclink.oos_state++;
@@ -383,9 +346,9 @@ void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent)
383 * Delay any upcoming NACK by this node if another node has already 346 * Delay any upcoming NACK by this node if another node has already
384 * requested the first message this node is going to ask for. 347 * requested the first message this node is going to ask for.
385 */ 348 */
386static void bclink_peek_nack(struct tipc_msg *msg) 349static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
387{ 350{
388 struct tipc_node *n_ptr = tipc_node_find(msg_destnode(msg)); 351 struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));
389 352
390 if (unlikely(!n_ptr)) 353 if (unlikely(!n_ptr))
391 return; 354 return;
@@ -400,17 +363,23 @@ static void bclink_peek_nack(struct tipc_msg *msg)
400 tipc_node_unlock(n_ptr); 363 tipc_node_unlock(n_ptr);
401} 364}
402 365
403/* tipc_bclink_xmit - broadcast buffer chain to all nodes in cluster 366/* tipc_bclink_xmit - deliver buffer chain to all nodes in cluster
404 * and to identified node local sockets 367 * and to identified node local sockets
368 * @net: the applicable net namespace
405 * @list: chain of buffers containing message 369 * @list: chain of buffers containing message
406 * Consumes the buffer chain, except when returning -ELINKCONG 370 * Consumes the buffer chain, except when returning -ELINKCONG
407 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE 371 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
408 */ 372 */
409int tipc_bclink_xmit(struct sk_buff_head *list) 373int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
410{ 374{
375 struct tipc_net *tn = net_generic(net, tipc_net_id);
376 struct tipc_link *bcl = tn->bcl;
377 struct tipc_bclink *bclink = tn->bclink;
411 int rc = 0; 378 int rc = 0;
412 int bc = 0; 379 int bc = 0;
413 struct sk_buff *skb; 380 struct sk_buff *skb;
381 struct sk_buff_head arrvq;
382 struct sk_buff_head inputq;
414 383
415 /* Prepare clone of message for local node */ 384 /* Prepare clone of message for local node */
416 skb = tipc_msg_reassemble(list); 385 skb = tipc_msg_reassemble(list);
@@ -419,32 +388,35 @@ int tipc_bclink_xmit(struct sk_buff_head *list)
419 return -EHOSTUNREACH; 388 return -EHOSTUNREACH;
420 } 389 }
421 390
422 /* Broadcast to all other nodes */ 391 /* Broadcast to all nodes */
423 if (likely(bclink)) { 392 if (likely(bclink)) {
424 tipc_bclink_lock(); 393 tipc_bclink_lock(net);
425 if (likely(bclink->bcast_nodes.count)) { 394 if (likely(bclink->bcast_nodes.count)) {
426 rc = __tipc_link_xmit(bcl, list); 395 rc = __tipc_link_xmit(net, bcl, list);
427 if (likely(!rc)) { 396 if (likely(!rc)) {
428 u32 len = skb_queue_len(&bcl->outqueue); 397 u32 len = skb_queue_len(&bcl->outqueue);
429 398
430 bclink_set_last_sent(); 399 bclink_set_last_sent(net);
431 bcl->stats.queue_sz_counts++; 400 bcl->stats.queue_sz_counts++;
432 bcl->stats.accu_queue_sz += len; 401 bcl->stats.accu_queue_sz += len;
433 } 402 }
434 bc = 1; 403 bc = 1;
435 } 404 }
436 tipc_bclink_unlock(); 405 tipc_bclink_unlock(net);
437 } 406 }
438 407
439 if (unlikely(!bc)) 408 if (unlikely(!bc))
440 __skb_queue_purge(list); 409 __skb_queue_purge(list);
441 410
442 /* Deliver message clone */ 411 if (unlikely(rc)) {
443 if (likely(!rc))
444 tipc_sk_mcast_rcv(skb);
445 else
446 kfree_skb(skb); 412 kfree_skb(skb);
447 413 return rc;
414 }
415 /* Deliver message clone */
416 __skb_queue_head_init(&arrvq);
417 skb_queue_head_init(&inputq);
418 __skb_queue_tail(&arrvq, skb);
419 tipc_sk_mcast_rcv(net, &arrvq, &inputq);
448 return rc; 420 return rc;
449} 421}
450 422
@@ -455,19 +427,21 @@ int tipc_bclink_xmit(struct sk_buff_head *list)
455 */ 427 */
456static void bclink_accept_pkt(struct tipc_node *node, u32 seqno) 428static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
457{ 429{
430 struct tipc_net *tn = net_generic(node->net, tipc_net_id);
431
458 bclink_update_last_sent(node, seqno); 432 bclink_update_last_sent(node, seqno);
459 node->bclink.last_in = seqno; 433 node->bclink.last_in = seqno;
460 node->bclink.oos_state = 0; 434 node->bclink.oos_state = 0;
461 bcl->stats.recv_info++; 435 tn->bcl->stats.recv_info++;
462 436
463 /* 437 /*
464 * Unicast an ACK periodically, ensuring that 438 * Unicast an ACK periodically, ensuring that
465 * all nodes in the cluster don't ACK at the same time 439 * all nodes in the cluster don't ACK at the same time
466 */ 440 */
467 if (((seqno - tipc_own_addr) % TIPC_MIN_LINK_WIN) == 0) { 441 if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
468 tipc_link_proto_xmit(node->active_links[node->addr & 1], 442 tipc_link_proto_xmit(node->active_links[node->addr & 1],
469 STATE_MSG, 0, 0, 0, 0, 0); 443 STATE_MSG, 0, 0, 0, 0, 0);
470 bcl->stats.sent_acks++; 444 tn->bcl->stats.sent_acks++;
471 } 445 }
472} 446}
473 447
@@ -476,19 +450,24 @@ static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
476 * 450 *
477 * RCU is locked, no other locks set 451 * RCU is locked, no other locks set
478 */ 452 */
479void tipc_bclink_rcv(struct sk_buff *buf) 453void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
480{ 454{
455 struct tipc_net *tn = net_generic(net, tipc_net_id);
456 struct tipc_link *bcl = tn->bcl;
481 struct tipc_msg *msg = buf_msg(buf); 457 struct tipc_msg *msg = buf_msg(buf);
482 struct tipc_node *node; 458 struct tipc_node *node;
483 u32 next_in; 459 u32 next_in;
484 u32 seqno; 460 u32 seqno;
485 int deferred = 0; 461 int deferred = 0;
462 int pos = 0;
463 struct sk_buff *iskb;
464 struct sk_buff_head *arrvq, *inputq;
486 465
487 /* Screen out unwanted broadcast messages */ 466 /* Screen out unwanted broadcast messages */
488 if (msg_mc_netid(msg) != tipc_net_id) 467 if (msg_mc_netid(msg) != tn->net_id)
489 goto exit; 468 goto exit;
490 469
491 node = tipc_node_find(msg_prevnode(msg)); 470 node = tipc_node_find(net, msg_prevnode(msg));
492 if (unlikely(!node)) 471 if (unlikely(!node))
493 goto exit; 472 goto exit;
494 473
@@ -500,18 +479,18 @@ void tipc_bclink_rcv(struct sk_buff *buf)
500 if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) { 479 if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
501 if (msg_type(msg) != STATE_MSG) 480 if (msg_type(msg) != STATE_MSG)
502 goto unlock; 481 goto unlock;
503 if (msg_destnode(msg) == tipc_own_addr) { 482 if (msg_destnode(msg) == tn->own_addr) {
504 tipc_bclink_acknowledge(node, msg_bcast_ack(msg)); 483 tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
505 tipc_node_unlock(node); 484 tipc_node_unlock(node);
506 tipc_bclink_lock(); 485 tipc_bclink_lock(net);
507 bcl->stats.recv_nacks++; 486 bcl->stats.recv_nacks++;
508 bclink->retransmit_to = node; 487 tn->bclink->retransmit_to = node;
509 bclink_retransmit_pkt(msg_bcgap_after(msg), 488 bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
510 msg_bcgap_to(msg)); 489 msg_bcgap_to(msg));
511 tipc_bclink_unlock(); 490 tipc_bclink_unlock(net);
512 } else { 491 } else {
513 tipc_node_unlock(node); 492 tipc_node_unlock(node);
514 bclink_peek_nack(msg); 493 bclink_peek_nack(net, msg);
515 } 494 }
516 goto exit; 495 goto exit;
517 } 496 }
@@ -519,52 +498,54 @@ void tipc_bclink_rcv(struct sk_buff *buf)
519 /* Handle in-sequence broadcast message */ 498 /* Handle in-sequence broadcast message */
520 seqno = msg_seqno(msg); 499 seqno = msg_seqno(msg);
521 next_in = mod(node->bclink.last_in + 1); 500 next_in = mod(node->bclink.last_in + 1);
501 arrvq = &tn->bclink->arrvq;
502 inputq = &tn->bclink->inputq;
522 503
523 if (likely(seqno == next_in)) { 504 if (likely(seqno == next_in)) {
524receive: 505receive:
525 /* Deliver message to destination */ 506 /* Deliver message to destination */
526 if (likely(msg_isdata(msg))) { 507 if (likely(msg_isdata(msg))) {
527 tipc_bclink_lock(); 508 tipc_bclink_lock(net);
528 bclink_accept_pkt(node, seqno); 509 bclink_accept_pkt(node, seqno);
529 tipc_bclink_unlock(); 510 spin_lock_bh(&inputq->lock);
511 __skb_queue_tail(arrvq, buf);
512 spin_unlock_bh(&inputq->lock);
513 node->action_flags |= TIPC_BCAST_MSG_EVT;
514 tipc_bclink_unlock(net);
530 tipc_node_unlock(node); 515 tipc_node_unlock(node);
531 if (likely(msg_mcast(msg)))
532 tipc_sk_mcast_rcv(buf);
533 else
534 kfree_skb(buf);
535 } else if (msg_user(msg) == MSG_BUNDLER) { 516 } else if (msg_user(msg) == MSG_BUNDLER) {
536 tipc_bclink_lock(); 517 tipc_bclink_lock(net);
537 bclink_accept_pkt(node, seqno); 518 bclink_accept_pkt(node, seqno);
538 bcl->stats.recv_bundles++; 519 bcl->stats.recv_bundles++;
539 bcl->stats.recv_bundled += msg_msgcnt(msg); 520 bcl->stats.recv_bundled += msg_msgcnt(msg);
540 tipc_bclink_unlock(); 521 pos = 0;
522 while (tipc_msg_extract(buf, &iskb, &pos)) {
523 spin_lock_bh(&inputq->lock);
524 __skb_queue_tail(arrvq, iskb);
525 spin_unlock_bh(&inputq->lock);
526 }
527 node->action_flags |= TIPC_BCAST_MSG_EVT;
528 tipc_bclink_unlock(net);
541 tipc_node_unlock(node); 529 tipc_node_unlock(node);
542 tipc_link_bundle_rcv(buf);
543 } else if (msg_user(msg) == MSG_FRAGMENTER) { 530 } else if (msg_user(msg) == MSG_FRAGMENTER) {
544 tipc_buf_append(&node->bclink.reasm_buf, &buf); 531 tipc_buf_append(&node->bclink.reasm_buf, &buf);
545 if (unlikely(!buf && !node->bclink.reasm_buf)) 532 if (unlikely(!buf && !node->bclink.reasm_buf))
546 goto unlock; 533 goto unlock;
547 tipc_bclink_lock(); 534 tipc_bclink_lock(net);
548 bclink_accept_pkt(node, seqno); 535 bclink_accept_pkt(node, seqno);
549 bcl->stats.recv_fragments++; 536 bcl->stats.recv_fragments++;
550 if (buf) { 537 if (buf) {
551 bcl->stats.recv_fragmented++; 538 bcl->stats.recv_fragmented++;
552 msg = buf_msg(buf); 539 msg = buf_msg(buf);
553 tipc_bclink_unlock(); 540 tipc_bclink_unlock(net);
554 goto receive; 541 goto receive;
555 } 542 }
556 tipc_bclink_unlock(); 543 tipc_bclink_unlock(net);
557 tipc_node_unlock(node);
558 } else if (msg_user(msg) == NAME_DISTRIBUTOR) {
559 tipc_bclink_lock();
560 bclink_accept_pkt(node, seqno);
561 tipc_bclink_unlock();
562 tipc_node_unlock(node); 544 tipc_node_unlock(node);
563 tipc_named_rcv(buf);
564 } else { 545 } else {
565 tipc_bclink_lock(); 546 tipc_bclink_lock(net);
566 bclink_accept_pkt(node, seqno); 547 bclink_accept_pkt(node, seqno);
567 tipc_bclink_unlock(); 548 tipc_bclink_unlock(net);
568 tipc_node_unlock(node); 549 tipc_node_unlock(node);
569 kfree_skb(buf); 550 kfree_skb(buf);
570 } 551 }
@@ -602,14 +583,14 @@ receive:
602 buf = NULL; 583 buf = NULL;
603 } 584 }
604 585
605 tipc_bclink_lock(); 586 tipc_bclink_lock(net);
606 587
607 if (deferred) 588 if (deferred)
608 bcl->stats.deferred_recv++; 589 bcl->stats.deferred_recv++;
609 else 590 else
610 bcl->stats.duplicates++; 591 bcl->stats.duplicates++;
611 592
612 tipc_bclink_unlock(); 593 tipc_bclink_unlock(net);
613 594
614unlock: 595unlock:
615 tipc_node_unlock(node); 596 tipc_node_unlock(node);
@@ -620,7 +601,7 @@ exit:
620u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr) 601u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
621{ 602{
622 return (n_ptr->bclink.recv_permitted && 603 return (n_ptr->bclink.recv_permitted &&
623 (tipc_bclink_get_last_sent() != n_ptr->bclink.acked)); 604 (tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
624} 605}
625 606
626 607
@@ -633,11 +614,15 @@ u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
633 * Returns 0 (packet sent successfully) under all circumstances, 614 * Returns 0 (packet sent successfully) under all circumstances,
634 * since the broadcast link's pseudo-bearer never blocks 615 * since the broadcast link's pseudo-bearer never blocks
635 */ 616 */
636static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1, 617static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
618 struct tipc_bearer *unused1,
637 struct tipc_media_addr *unused2) 619 struct tipc_media_addr *unused2)
638{ 620{
639 int bp_index; 621 int bp_index;
640 struct tipc_msg *msg = buf_msg(buf); 622 struct tipc_msg *msg = buf_msg(buf);
623 struct tipc_net *tn = net_generic(net, tipc_net_id);
624 struct tipc_bcbearer *bcbearer = tn->bcbearer;
625 struct tipc_bclink *bclink = tn->bclink;
641 626
642 /* Prepare broadcast link message for reliable transmission, 627 /* Prepare broadcast link message for reliable transmission,
643 * if first time trying to send it; 628 * if first time trying to send it;
@@ -647,8 +632,8 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
647 if (likely(!msg_non_seq(buf_msg(buf)))) { 632 if (likely(!msg_non_seq(buf_msg(buf)))) {
648 bcbuf_set_acks(buf, bclink->bcast_nodes.count); 633 bcbuf_set_acks(buf, bclink->bcast_nodes.count);
649 msg_set_non_seq(msg, 1); 634 msg_set_non_seq(msg, 1);
650 msg_set_mc_netid(msg, tipc_net_id); 635 msg_set_mc_netid(msg, tn->net_id);
651 bcl->stats.sent_info++; 636 tn->bcl->stats.sent_info++;
652 637
653 if (WARN_ON(!bclink->bcast_nodes.count)) { 638 if (WARN_ON(!bclink->bcast_nodes.count)) {
654 dump_stack(); 639 dump_stack();
@@ -677,13 +662,14 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
677 662
678 if (bp_index == 0) { 663 if (bp_index == 0) {
679 /* Use original buffer for first bearer */ 664 /* Use original buffer for first bearer */
680 tipc_bearer_send(b->identity, buf, &b->bcast_addr); 665 tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
681 } else { 666 } else {
682 /* Avoid concurrent buffer access */ 667 /* Avoid concurrent buffer access */
683 tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC); 668 tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
684 if (!tbuf) 669 if (!tbuf)
685 break; 670 break;
686 tipc_bearer_send(b->identity, tbuf, &b->bcast_addr); 671 tipc_bearer_send(net, b->identity, tbuf,
672 &b->bcast_addr);
687 kfree_skb(tbuf); /* Bearer keeps a clone */ 673 kfree_skb(tbuf); /* Bearer keeps a clone */
688 } 674 }
689 if (bcbearer->remains_new.count == 0) 675 if (bcbearer->remains_new.count == 0)
@@ -698,15 +684,18 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
698/** 684/**
699 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer 685 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
700 */ 686 */
701void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action) 687void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
688 u32 node, bool action)
702{ 689{
690 struct tipc_net *tn = net_generic(net, tipc_net_id);
691 struct tipc_bcbearer *bcbearer = tn->bcbearer;
703 struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp; 692 struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
704 struct tipc_bcbearer_pair *bp_curr; 693 struct tipc_bcbearer_pair *bp_curr;
705 struct tipc_bearer *b; 694 struct tipc_bearer *b;
706 int b_index; 695 int b_index;
707 int pri; 696 int pri;
708 697
709 tipc_bclink_lock(); 698 tipc_bclink_lock(net);
710 699
711 if (action) 700 if (action)
712 tipc_nmap_add(nm_ptr, node); 701 tipc_nmap_add(nm_ptr, node);
@@ -718,7 +707,7 @@ void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
718 707
719 rcu_read_lock(); 708 rcu_read_lock();
720 for (b_index = 0; b_index < MAX_BEARERS; b_index++) { 709 for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
721 b = rcu_dereference_rtnl(bearer_list[b_index]); 710 b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
722 if (!b || !b->nodes.count) 711 if (!b || !b->nodes.count)
723 continue; 712 continue;
724 713
@@ -753,7 +742,7 @@ void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action)
753 bp_curr++; 742 bp_curr++;
754 } 743 }
755 744
756 tipc_bclink_unlock(); 745 tipc_bclink_unlock(net);
757} 746}
758 747
759static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb, 748static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
@@ -807,19 +796,21 @@ msg_full:
807 return -EMSGSIZE; 796 return -EMSGSIZE;
808} 797}
809 798
810int tipc_nl_add_bc_link(struct tipc_nl_msg *msg) 799int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
811{ 800{
812 int err; 801 int err;
813 void *hdr; 802 void *hdr;
814 struct nlattr *attrs; 803 struct nlattr *attrs;
815 struct nlattr *prop; 804 struct nlattr *prop;
805 struct tipc_net *tn = net_generic(net, tipc_net_id);
806 struct tipc_link *bcl = tn->bcl;
816 807
817 if (!bcl) 808 if (!bcl)
818 return 0; 809 return 0;
819 810
820 tipc_bclink_lock(); 811 tipc_bclink_lock(net);
821 812
822 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family, 813 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
823 NLM_F_MULTI, TIPC_NL_LINK_GET); 814 NLM_F_MULTI, TIPC_NL_LINK_GET);
824 if (!hdr) 815 if (!hdr)
825 return -EMSGSIZE; 816 return -EMSGSIZE;
@@ -852,7 +843,7 @@ int tipc_nl_add_bc_link(struct tipc_nl_msg *msg)
852 if (err) 843 if (err)
853 goto attr_msg_full; 844 goto attr_msg_full;
854 845
855 tipc_bclink_unlock(); 846 tipc_bclink_unlock(net);
856 nla_nest_end(msg->skb, attrs); 847 nla_nest_end(msg->skb, attrs);
857 genlmsg_end(msg->skb, hdr); 848 genlmsg_end(msg->skb, hdr);
858 849
@@ -863,79 +854,49 @@ prop_msg_full:
863attr_msg_full: 854attr_msg_full:
864 nla_nest_cancel(msg->skb, attrs); 855 nla_nest_cancel(msg->skb, attrs);
865msg_full: 856msg_full:
866 tipc_bclink_unlock(); 857 tipc_bclink_unlock(net);
867 genlmsg_cancel(msg->skb, hdr); 858 genlmsg_cancel(msg->skb, hdr);
868 859
869 return -EMSGSIZE; 860 return -EMSGSIZE;
870} 861}
871 862
872int tipc_bclink_stats(char *buf, const u32 buf_size) 863int tipc_bclink_reset_stats(struct net *net)
873{ 864{
874 int ret; 865 struct tipc_net *tn = net_generic(net, tipc_net_id);
875 struct tipc_stats *s; 866 struct tipc_link *bcl = tn->bcl;
876 867
877 if (!bcl) 868 if (!bcl)
878 return 0;
879
880 tipc_bclink_lock();
881
882 s = &bcl->stats;
883
884 ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
885 " Window:%u packets\n",
886 bcl->name, bcl->queue_limit[0]);
887 ret += tipc_snprintf(buf + ret, buf_size - ret,
888 " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
889 s->recv_info, s->recv_fragments,
890 s->recv_fragmented, s->recv_bundles,
891 s->recv_bundled);
892 ret += tipc_snprintf(buf + ret, buf_size - ret,
893 " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
894 s->sent_info, s->sent_fragments,
895 s->sent_fragmented, s->sent_bundles,
896 s->sent_bundled);
897 ret += tipc_snprintf(buf + ret, buf_size - ret,
898 " RX naks:%u defs:%u dups:%u\n",
899 s->recv_nacks, s->deferred_recv, s->duplicates);
900 ret += tipc_snprintf(buf + ret, buf_size - ret,
901 " TX naks:%u acks:%u dups:%u\n",
902 s->sent_nacks, s->sent_acks, s->retransmitted);
903 ret += tipc_snprintf(buf + ret, buf_size - ret,
904 " Congestion link:%u Send queue max:%u avg:%u\n",
905 s->link_congs, s->max_queue_sz,
906 s->queue_sz_counts ?
907 (s->accu_queue_sz / s->queue_sz_counts) : 0);
908
909 tipc_bclink_unlock();
910 return ret;
911}
912
913int tipc_bclink_reset_stats(void)
914{
915 if (!bcl)
916 return -ENOPROTOOPT; 869 return -ENOPROTOOPT;
917 870
918 tipc_bclink_lock(); 871 tipc_bclink_lock(net);
919 memset(&bcl->stats, 0, sizeof(bcl->stats)); 872 memset(&bcl->stats, 0, sizeof(bcl->stats));
920 tipc_bclink_unlock(); 873 tipc_bclink_unlock(net);
921 return 0; 874 return 0;
922} 875}
923 876
924int tipc_bclink_set_queue_limits(u32 limit) 877int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
925{ 878{
879 struct tipc_net *tn = net_generic(net, tipc_net_id);
880 struct tipc_link *bcl = tn->bcl;
881
926 if (!bcl) 882 if (!bcl)
927 return -ENOPROTOOPT; 883 return -ENOPROTOOPT;
928 if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN)) 884 if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
929 return -EINVAL; 885 return -EINVAL;
930 886
931 tipc_bclink_lock(); 887 tipc_bclink_lock(net);
932 tipc_link_set_queue_limits(bcl, limit); 888 tipc_link_set_queue_limits(bcl, limit);
933 tipc_bclink_unlock(); 889 tipc_bclink_unlock(net);
934 return 0; 890 return 0;
935} 891}
936 892
937int tipc_bclink_init(void) 893int tipc_bclink_init(struct net *net)
938{ 894{
895 struct tipc_net *tn = net_generic(net, tipc_net_id);
896 struct tipc_bcbearer *bcbearer;
897 struct tipc_bclink *bclink;
898 struct tipc_link *bcl;
899
939 bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC); 900 bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
940 if (!bcbearer) 901 if (!bcbearer)
941 return -ENOMEM; 902 return -ENOMEM;
@@ -954,30 +915,39 @@ int tipc_bclink_init(void)
954 spin_lock_init(&bclink->lock); 915 spin_lock_init(&bclink->lock);
955 __skb_queue_head_init(&bcl->outqueue); 916 __skb_queue_head_init(&bcl->outqueue);
956 __skb_queue_head_init(&bcl->deferred_queue); 917 __skb_queue_head_init(&bcl->deferred_queue);
957 skb_queue_head_init(&bcl->waiting_sks); 918 skb_queue_head_init(&bcl->wakeupq);
958 bcl->next_out_no = 1; 919 bcl->next_out_no = 1;
959 spin_lock_init(&bclink->node.lock); 920 spin_lock_init(&bclink->node.lock);
960 __skb_queue_head_init(&bclink->node.waiting_sks); 921 __skb_queue_head_init(&bclink->arrvq);
922 skb_queue_head_init(&bclink->inputq);
961 bcl->owner = &bclink->node; 923 bcl->owner = &bclink->node;
924 bcl->owner->net = net;
962 bcl->max_pkt = MAX_PKT_DEFAULT_MCAST; 925 bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
963 tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT); 926 tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
964 bcl->bearer_id = MAX_BEARERS; 927 bcl->bearer_id = MAX_BEARERS;
965 rcu_assign_pointer(bearer_list[MAX_BEARERS], &bcbearer->bearer); 928 rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
966 bcl->state = WORKING_WORKING; 929 bcl->state = WORKING_WORKING;
930 bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
931 msg_set_prevnode(bcl->pmsg, tn->own_addr);
967 strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME); 932 strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
933 tn->bcbearer = bcbearer;
934 tn->bclink = bclink;
935 tn->bcl = bcl;
968 return 0; 936 return 0;
969} 937}
970 938
971void tipc_bclink_stop(void) 939void tipc_bclink_stop(struct net *net)
972{ 940{
973 tipc_bclink_lock(); 941 struct tipc_net *tn = net_generic(net, tipc_net_id);
974 tipc_link_purge_queues(bcl);
975 tipc_bclink_unlock();
976 942
977 RCU_INIT_POINTER(bearer_list[BCBEARER], NULL); 943 tipc_bclink_lock(net);
944 tipc_link_purge_queues(tn->bcl);
945 tipc_bclink_unlock(net);
946
947 RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
978 synchronize_net(); 948 synchronize_net();
979 kfree(bcbearer); 949 kfree(tn->bcbearer);
980 kfree(bclink); 950 kfree(tn->bclink);
981} 951}
982 952
983/** 953/**
@@ -1037,50 +1007,3 @@ static void tipc_nmap_diff(struct tipc_node_map *nm_a,
1037 } 1007 }
1038 } 1008 }
1039} 1009}
1040
1041/**
1042 * tipc_port_list_add - add a port to a port list, ensuring no duplicates
1043 */
1044void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port)
1045{
1046 struct tipc_port_list *item = pl_ptr;
1047 int i;
1048 int item_sz = PLSIZE;
1049 int cnt = pl_ptr->count;
1050
1051 for (; ; cnt -= item_sz, item = item->next) {
1052 if (cnt < PLSIZE)
1053 item_sz = cnt;
1054 for (i = 0; i < item_sz; i++)
1055 if (item->ports[i] == port)
1056 return;
1057 if (i < PLSIZE) {
1058 item->ports[i] = port;
1059 pl_ptr->count++;
1060 return;
1061 }
1062 if (!item->next) {
1063 item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
1064 if (!item->next) {
1065 pr_warn("Incomplete multicast delivery, no memory\n");
1066 return;
1067 }
1068 item->next->next = NULL;
1069 }
1070 }
1071}
1072
1073/**
1074 * tipc_port_list_free - free dynamically created entries in port_list chain
1075 *
1076 */
1077void tipc_port_list_free(struct tipc_port_list *pl_ptr)
1078{
1079 struct tipc_port_list *item;
1080 struct tipc_port_list *next;
1081
1082 for (item = pl_ptr->next; item; item = next) {
1083 next = item->next;
1084 kfree(item);
1085 }
1086}
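[Editor's note] The receive-side rework in bcast.c above stops delivering buffers straight out of tipc_bclink_rcv() and instead parks them on the new arrvq/inputq pair, which tipc_bclink_input() later drains through tipc_sk_mcast_rcv(). A condensed sketch of that handoff follows; the helper name bclink_enqueue_arrival is hypothetical, the locking mirrors the hunks above.

/* Producer side: called from the receive path while the bclink lock is
 * held.  The arrival queue is protected by the input queue's lock, as in
 * the hunks above.
 */
static void bclink_enqueue_arrival(struct tipc_bclink *bclink,
                                   struct sk_buff *skb)
{
        spin_lock_bh(&bclink->inputq.lock);
        __skb_queue_tail(&bclink->arrvq, skb);
        spin_unlock_bh(&bclink->inputq.lock);
}

/* Consumer side: tipc_bclink_input() (see above) later runs
 * tipc_sk_mcast_rcv(net, &bclink->arrvq, &bclink->inputq), filtering the
 * arrivals into per-socket delivery without holding the bclink lock.
 */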
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 644d79129fba..43f397fbac55 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/bcast.h: Include file for TIPC broadcast code 2 * net/tipc/bcast.h: Include file for TIPC broadcast code
3 * 3 *
4 * Copyright (c) 2003-2006, 2014, Ericsson AB 4 * Copyright (c) 2003-2006, 2014-2015, Ericsson AB
5 * Copyright (c) 2005, 2010-2011, Wind River Systems 5 * Copyright (c) 2005, 2010-2011, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -37,39 +37,73 @@
37#ifndef _TIPC_BCAST_H 37#ifndef _TIPC_BCAST_H
38#define _TIPC_BCAST_H 38#define _TIPC_BCAST_H
39 39
40#include "netlink.h" 40#include <linux/tipc_config.h>
41 41#include "link.h"
42#define MAX_NODES 4096 42#include "node.h"
43#define WSIZE 32
44#define TIPC_BCLINK_RESET 1
45 43
46/** 44/**
47 * struct tipc_node_map - set of node identifiers 45 * struct tipc_bcbearer_pair - a pair of bearers used by broadcast link
48 * @count: # of nodes in set 46 * @primary: pointer to primary bearer
49 * @map: bitmap of node identifiers that are in the set 47 * @secondary: pointer to secondary bearer
48 *
49 * Bearers must have same priority and same set of reachable destinations
50 * to be paired.
50 */ 51 */
51struct tipc_node_map { 52
52 u32 count; 53struct tipc_bcbearer_pair {
53 u32 map[MAX_NODES / WSIZE]; 54 struct tipc_bearer *primary;
55 struct tipc_bearer *secondary;
54}; 56};
55 57
56#define PLSIZE 32 58#define TIPC_BCLINK_RESET 1
59#define BCBEARER MAX_BEARERS
57 60
58/** 61/**
59 * struct tipc_port_list - set of node local destination ports 62 * struct tipc_bcbearer - bearer used by broadcast link
60 * @count: # of ports in set (only valid for first entry in list) 63 * @bearer: (non-standard) broadcast bearer structure
61 * @next: pointer to next entry in list 64 * @media: (non-standard) broadcast media structure
62 * @ports: array of port references 65 * @bpairs: array of bearer pairs
66 * @bpairs_temp: temporary array of bearer pairs used by tipc_bcbearer_sort()
67 * @remains: temporary node map used by tipc_bcbearer_send()
68 * @remains_new: temporary node map used tipc_bcbearer_send()
69 *
70 * Note: The fields labelled "temporary" are incorporated into the bearer
71 * to avoid consuming potentially limited stack space through the use of
72 * large local variables within multicast routines. Concurrent access is
73 * prevented through use of the spinlock "bclink_lock".
63 */ 74 */
64struct tipc_port_list { 75struct tipc_bcbearer {
65 int count; 76 struct tipc_bearer bearer;
66 struct tipc_port_list *next; 77 struct tipc_media media;
67 u32 ports[PLSIZE]; 78 struct tipc_bcbearer_pair bpairs[MAX_BEARERS];
79 struct tipc_bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
80 struct tipc_node_map remains;
81 struct tipc_node_map remains_new;
68}; 82};
69 83
84/**
85 * struct tipc_bclink - link used for broadcast messages
86 * @lock: spinlock governing access to structure
87 * @link: (non-standard) broadcast link structure
88 * @node: (non-standard) node structure representing b'cast link's peer node
89 * @flags: represent bclink states
90 * @bcast_nodes: map of broadcast-capable nodes
91 * @retransmit_to: node that most recently requested a retransmit
92 *
93 * Handles sequence numbering, fragmentation, bundling, etc.
94 */
95struct tipc_bclink {
96 spinlock_t lock;
97 struct tipc_link link;
98 struct tipc_node node;
99 unsigned int flags;
100 struct sk_buff_head arrvq;
101 struct sk_buff_head inputq;
102 struct tipc_node_map bcast_nodes;
103 struct tipc_node *retransmit_to;
104};
70 105
71struct tipc_node; 106struct tipc_node;
72
73extern const char tipc_bclink_name[]; 107extern const char tipc_bclink_name[];
74 108
75/** 109/**
@@ -81,27 +115,26 @@ static inline int tipc_nmap_equal(struct tipc_node_map *nm_a,
81 return !memcmp(nm_a, nm_b, sizeof(*nm_a)); 115 return !memcmp(nm_a, nm_b, sizeof(*nm_a));
82} 116}
83 117
84void tipc_port_list_add(struct tipc_port_list *pl_ptr, u32 port); 118int tipc_bclink_init(struct net *net);
85void tipc_port_list_free(struct tipc_port_list *pl_ptr); 119void tipc_bclink_stop(struct net *net);
86 120void tipc_bclink_set_flags(struct net *tn, unsigned int flags);
87int tipc_bclink_init(void); 121void tipc_bclink_add_node(struct net *net, u32 addr);
88void tipc_bclink_stop(void); 122void tipc_bclink_remove_node(struct net *net, u32 addr);
89void tipc_bclink_set_flags(unsigned int flags); 123struct tipc_node *tipc_bclink_retransmit_to(struct net *tn);
90void tipc_bclink_add_node(u32 addr);
91void tipc_bclink_remove_node(u32 addr);
92struct tipc_node *tipc_bclink_retransmit_to(void);
93void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked); 124void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked);
94void tipc_bclink_rcv(struct sk_buff *buf); 125void tipc_bclink_rcv(struct net *net, struct sk_buff *buf);
95u32 tipc_bclink_get_last_sent(void); 126u32 tipc_bclink_get_last_sent(struct net *net);
96u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr); 127u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr);
97void tipc_bclink_update_link_state(struct tipc_node *n_ptr, u32 last_sent); 128void tipc_bclink_update_link_state(struct tipc_node *node,
98int tipc_bclink_stats(char *stats_buf, const u32 buf_size); 129 u32 last_sent);
99int tipc_bclink_reset_stats(void); 130int tipc_bclink_reset_stats(struct net *net);
100int tipc_bclink_set_queue_limits(u32 limit); 131int tipc_bclink_set_queue_limits(struct net *net, u32 limit);
101void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action); 132void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
133 u32 node, bool action);
102uint tipc_bclink_get_mtu(void); 134uint tipc_bclink_get_mtu(void);
103int tipc_bclink_xmit(struct sk_buff_head *list); 135int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list);
104void tipc_bclink_wakeup_users(void); 136void tipc_bclink_wakeup_users(struct net *net);
105int tipc_nl_add_bc_link(struct tipc_nl_msg *msg); 137int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg);
138void tipc_bclink_input(struct net *net);
106 139
107#endif 140#endif
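[Editor's note] tipc_bcbearer_sort() and the node maps declared above operate on struct tipc_node_map, a fixed-size bitmap indexed by the node field of a TIPC address. The real add/remove helpers are static in bcast.c and outside the hunks shown; the sketch below only reproduces the idea, with MAX_NODES, WSIZE and the struct taken from the old bcast.h text above and the function body written for illustration.

#include <linux/types.h>

#define MAX_NODES 4096
#define WSIZE     32

struct tipc_node_map {
        u32 count;
        u32 map[MAX_NODES / WSIZE];
};

static inline u32 tipc_node(u32 addr)   /* node field = low 12 address bits */
{
        return addr & 0x00000fffu;
}

/* Illustrative sketch of what the static add helper in bcast.c does:
 * set the node's bit and keep the population count in step.
 */
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
        int n = tipc_node(node);
        int w = n / WSIZE;
        u32 mask = (1 << (n % WSIZE));

        if (!(nm_ptr->map[w] & mask)) {
                nm_ptr->count++;
                nm_ptr->map[w] |= mask;
        }
}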
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 463db5b15b8b..48852c2dcc03 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -34,11 +34,12 @@
34 * POSSIBILITY OF SUCH DAMAGE. 34 * POSSIBILITY OF SUCH DAMAGE.
35 */ 35 */
36 36
37#include <net/sock.h>
37#include "core.h" 38#include "core.h"
38#include "config.h"
39#include "bearer.h" 39#include "bearer.h"
40#include "link.h" 40#include "link.h"
41#include "discover.h" 41#include "discover.h"
42#include "bcast.h"
42 43
43#define MAX_ADDR_STR 60 44#define MAX_ADDR_STR 60
44 45
@@ -67,9 +68,8 @@ static const struct nla_policy tipc_nl_media_policy[TIPC_NLA_MEDIA_MAX + 1] = {
67 [TIPC_NLA_MEDIA_PROP] = { .type = NLA_NESTED } 68 [TIPC_NLA_MEDIA_PROP] = { .type = NLA_NESTED }
68}; 69};
69 70
70struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1]; 71static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr,
71 72 bool shutting_down);
72static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down);
73 73
74/** 74/**
75 * tipc_media_find - locates specified media object by name 75 * tipc_media_find - locates specified media object by name
@@ -111,38 +111,18 @@ void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a)
111 m_ptr = media_find_id(a->media_id); 111 m_ptr = media_find_id(a->media_id);
112 112
113 if (m_ptr && !m_ptr->addr2str(a, addr_str, sizeof(addr_str))) 113 if (m_ptr && !m_ptr->addr2str(a, addr_str, sizeof(addr_str)))
114 ret = tipc_snprintf(buf, len, "%s(%s)", m_ptr->name, addr_str); 114 ret = scnprintf(buf, len, "%s(%s)", m_ptr->name, addr_str);
115 else { 115 else {
116 u32 i; 116 u32 i;
117 117
118 ret = tipc_snprintf(buf, len, "UNKNOWN(%u)", a->media_id); 118 ret = scnprintf(buf, len, "UNKNOWN(%u)", a->media_id);
119 for (i = 0; i < sizeof(a->value); i++) 119 for (i = 0; i < sizeof(a->value); i++)
120 ret += tipc_snprintf(buf - ret, len + ret, 120 ret += scnprintf(buf - ret, len + ret,
121 "-%02x", a->value[i]); 121 "-%02x", a->value[i]);
122 } 122 }
123} 123}
124 124
125/** 125/**
126 * tipc_media_get_names - record names of registered media in buffer
127 */
128struct sk_buff *tipc_media_get_names(void)
129{
130 struct sk_buff *buf;
131 int i;
132
133 buf = tipc_cfg_reply_alloc(MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME));
134 if (!buf)
135 return NULL;
136
137 for (i = 0; media_info_array[i] != NULL; i++) {
138 tipc_cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME,
139 media_info_array[i]->name,
140 strlen(media_info_array[i]->name) + 1);
141 }
142 return buf;
143}
144
145/**
146 * bearer_name_validate - validate & (optionally) deconstruct bearer name 126 * bearer_name_validate - validate & (optionally) deconstruct bearer name
147 * @name: ptr to bearer name string 127 * @name: ptr to bearer name string
148 * @name_parts: ptr to area for bearer name components (or NULL if not needed) 128 * @name_parts: ptr to area for bearer name components (or NULL if not needed)
@@ -190,68 +170,43 @@ static int bearer_name_validate(const char *name,
190/** 170/**
191 * tipc_bearer_find - locates bearer object with matching bearer name 171 * tipc_bearer_find - locates bearer object with matching bearer name
192 */ 172 */
193struct tipc_bearer *tipc_bearer_find(const char *name) 173struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name)
194{ 174{
175 struct tipc_net *tn = net_generic(net, tipc_net_id);
195 struct tipc_bearer *b_ptr; 176 struct tipc_bearer *b_ptr;
196 u32 i; 177 u32 i;
197 178
198 for (i = 0; i < MAX_BEARERS; i++) { 179 for (i = 0; i < MAX_BEARERS; i++) {
199 b_ptr = rtnl_dereference(bearer_list[i]); 180 b_ptr = rtnl_dereference(tn->bearer_list[i]);
200 if (b_ptr && (!strcmp(b_ptr->name, name))) 181 if (b_ptr && (!strcmp(b_ptr->name, name)))
201 return b_ptr; 182 return b_ptr;
202 } 183 }
203 return NULL; 184 return NULL;
204} 185}
205 186
206/** 187void tipc_bearer_add_dest(struct net *net, u32 bearer_id, u32 dest)
207 * tipc_bearer_get_names - record names of bearers in buffer
208 */
209struct sk_buff *tipc_bearer_get_names(void)
210{
211 struct sk_buff *buf;
212 struct tipc_bearer *b;
213 int i, j;
214
215 buf = tipc_cfg_reply_alloc(MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME));
216 if (!buf)
217 return NULL;
218
219 for (i = 0; media_info_array[i] != NULL; i++) {
220 for (j = 0; j < MAX_BEARERS; j++) {
221 b = rtnl_dereference(bearer_list[j]);
222 if (!b)
223 continue;
224 if (b->media == media_info_array[i]) {
225 tipc_cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME,
226 b->name,
227 strlen(b->name) + 1);
228 }
229 }
230 }
231 return buf;
232}
233
234void tipc_bearer_add_dest(u32 bearer_id, u32 dest)
235{ 188{
189 struct tipc_net *tn = net_generic(net, tipc_net_id);
236 struct tipc_bearer *b_ptr; 190 struct tipc_bearer *b_ptr;
237 191
238 rcu_read_lock(); 192 rcu_read_lock();
239 b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]); 193 b_ptr = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
240 if (b_ptr) { 194 if (b_ptr) {
241 tipc_bcbearer_sort(&b_ptr->nodes, dest, true); 195 tipc_bcbearer_sort(net, &b_ptr->nodes, dest, true);
242 tipc_disc_add_dest(b_ptr->link_req); 196 tipc_disc_add_dest(b_ptr->link_req);
243 } 197 }
244 rcu_read_unlock(); 198 rcu_read_unlock();
245} 199}
246 200
247void tipc_bearer_remove_dest(u32 bearer_id, u32 dest) 201void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest)
248{ 202{
203 struct tipc_net *tn = net_generic(net, tipc_net_id);
249 struct tipc_bearer *b_ptr; 204 struct tipc_bearer *b_ptr;
250 205
251 rcu_read_lock(); 206 rcu_read_lock();
252 b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]); 207 b_ptr = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
253 if (b_ptr) { 208 if (b_ptr) {
254 tipc_bcbearer_sort(&b_ptr->nodes, dest, false); 209 tipc_bcbearer_sort(net, &b_ptr->nodes, dest, false);
255 tipc_disc_remove_dest(b_ptr->link_req); 210 tipc_disc_remove_dest(b_ptr->link_req);
256 } 211 }
257 rcu_read_unlock(); 212 rcu_read_unlock();
@@ -260,8 +215,10 @@ void tipc_bearer_remove_dest(u32 bearer_id, u32 dest)
260/** 215/**
261 * tipc_enable_bearer - enable bearer with the given name 216 * tipc_enable_bearer - enable bearer with the given name
262 */ 217 */
263int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority) 218static int tipc_enable_bearer(struct net *net, const char *name,
219 u32 disc_domain, u32 priority)
264{ 220{
221 struct tipc_net *tn = net_generic(net, tipc_net_id);
265 struct tipc_bearer *b_ptr; 222 struct tipc_bearer *b_ptr;
266 struct tipc_media *m_ptr; 223 struct tipc_media *m_ptr;
267 struct tipc_bearer_names b_names; 224 struct tipc_bearer_names b_names;
@@ -271,7 +228,7 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
271 u32 i; 228 u32 i;
272 int res = -EINVAL; 229 int res = -EINVAL;
273 230
274 if (!tipc_own_addr) { 231 if (!tn->own_addr) {
275 pr_warn("Bearer <%s> rejected, not supported in standalone mode\n", 232 pr_warn("Bearer <%s> rejected, not supported in standalone mode\n",
276 name); 233 name);
277 return -ENOPROTOOPT; 234 return -ENOPROTOOPT;
@@ -281,11 +238,11 @@ int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
281 return -EINVAL; 238 return -EINVAL;
282 } 239 }
283 if (tipc_addr_domain_valid(disc_domain) && 240 if (tipc_addr_domain_valid(disc_domain) &&
284 (disc_domain != tipc_own_addr)) { 241 (disc_domain != tn->own_addr)) {
285 if (tipc_in_scope(disc_domain, tipc_own_addr)) { 242 if (tipc_in_scope(disc_domain, tn->own_addr)) {
286 disc_domain = tipc_own_addr & TIPC_CLUSTER_MASK; 243 disc_domain = tn->own_addr & TIPC_CLUSTER_MASK;
287 res = 0; /* accept any node in own cluster */ 244 res = 0; /* accept any node in own cluster */
288 } else if (in_own_cluster_exact(disc_domain)) 245 } else if (in_own_cluster_exact(net, disc_domain))
289 res = 0; /* accept specified node in own cluster */ 246 res = 0; /* accept specified node in own cluster */
290 } 247 }
291 if (res) { 248 if (res) {
@@ -313,7 +270,7 @@ restart:
313 bearer_id = MAX_BEARERS; 270 bearer_id = MAX_BEARERS;
314 with_this_prio = 1; 271 with_this_prio = 1;
315 for (i = MAX_BEARERS; i-- != 0; ) { 272 for (i = MAX_BEARERS; i-- != 0; ) {
316 b_ptr = rtnl_dereference(bearer_list[i]); 273 b_ptr = rtnl_dereference(tn->bearer_list[i]);
317 if (!b_ptr) { 274 if (!b_ptr) {
318 bearer_id = i; 275 bearer_id = i;
319 continue; 276 continue;
@@ -347,7 +304,7 @@ restart:
347 304
348 strcpy(b_ptr->name, name); 305 strcpy(b_ptr->name, name);
349 b_ptr->media = m_ptr; 306 b_ptr->media = m_ptr;
350 res = m_ptr->enable_media(b_ptr); 307 res = m_ptr->enable_media(net, b_ptr);
351 if (res) { 308 if (res) {
352 pr_warn("Bearer <%s> rejected, enable failure (%d)\n", 309 pr_warn("Bearer <%s> rejected, enable failure (%d)\n",
353 name, -res); 310 name, -res);
@@ -361,15 +318,15 @@ restart:
361 b_ptr->net_plane = bearer_id + 'A'; 318 b_ptr->net_plane = bearer_id + 'A';
362 b_ptr->priority = priority; 319 b_ptr->priority = priority;
363 320
364 res = tipc_disc_create(b_ptr, &b_ptr->bcast_addr); 321 res = tipc_disc_create(net, b_ptr, &b_ptr->bcast_addr);
365 if (res) { 322 if (res) {
366 bearer_disable(b_ptr, false); 323 bearer_disable(net, b_ptr, false);
367 pr_warn("Bearer <%s> rejected, discovery object creation failed\n", 324 pr_warn("Bearer <%s> rejected, discovery object creation failed\n",
368 name); 325 name);
369 return -EINVAL; 326 return -EINVAL;
370 } 327 }
371 328
372 rcu_assign_pointer(bearer_list[bearer_id], b_ptr); 329 rcu_assign_pointer(tn->bearer_list[bearer_id], b_ptr);
373 330
374 pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n", 331 pr_info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
375 name, 332 name,
@@ -380,11 +337,11 @@ restart:
380/** 337/**
381 * tipc_reset_bearer - Reset all links established over this bearer 338 * tipc_reset_bearer - Reset all links established over this bearer
382 */ 339 */
383static int tipc_reset_bearer(struct tipc_bearer *b_ptr) 340static int tipc_reset_bearer(struct net *net, struct tipc_bearer *b_ptr)
384{ 341{
385 pr_info("Resetting bearer <%s>\n", b_ptr->name); 342 pr_info("Resetting bearer <%s>\n", b_ptr->name);
386 tipc_link_reset_list(b_ptr->identity); 343 tipc_link_reset_list(net, b_ptr->identity);
387 tipc_disc_reset(b_ptr); 344 tipc_disc_reset(net, b_ptr);
388 return 0; 345 return 0;
389} 346}
390 347
@@ -393,49 +350,35 @@ static int tipc_reset_bearer(struct tipc_bearer *b_ptr)
393 * 350 *
394 * Note: This routine assumes caller holds RTNL lock. 351 * Note: This routine assumes caller holds RTNL lock.
395 */ 352 */
396static void bearer_disable(struct tipc_bearer *b_ptr, bool shutting_down) 353static void bearer_disable(struct net *net, struct tipc_bearer *b_ptr,
354 bool shutting_down)
397{ 355{
356 struct tipc_net *tn = net_generic(net, tipc_net_id);
398 u32 i; 357 u32 i;
399 358
400 pr_info("Disabling bearer <%s>\n", b_ptr->name); 359 pr_info("Disabling bearer <%s>\n", b_ptr->name);
401 b_ptr->media->disable_media(b_ptr); 360 b_ptr->media->disable_media(b_ptr);
402 361
403 tipc_link_delete_list(b_ptr->identity, shutting_down); 362 tipc_link_delete_list(net, b_ptr->identity, shutting_down);
404 if (b_ptr->link_req) 363 if (b_ptr->link_req)
405 tipc_disc_delete(b_ptr->link_req); 364 tipc_disc_delete(b_ptr->link_req);
406 365
407 for (i = 0; i < MAX_BEARERS; i++) { 366 for (i = 0; i < MAX_BEARERS; i++) {
408 if (b_ptr == rtnl_dereference(bearer_list[i])) { 367 if (b_ptr == rtnl_dereference(tn->bearer_list[i])) {
409 RCU_INIT_POINTER(bearer_list[i], NULL); 368 RCU_INIT_POINTER(tn->bearer_list[i], NULL);
410 break; 369 break;
411 } 370 }
412 } 371 }
413 kfree_rcu(b_ptr, rcu); 372 kfree_rcu(b_ptr, rcu);
414} 373}
415 374
416int tipc_disable_bearer(const char *name) 375int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b)
417{
418 struct tipc_bearer *b_ptr;
419 int res;
420
421 b_ptr = tipc_bearer_find(name);
422 if (b_ptr == NULL) {
423 pr_warn("Attempt to disable unknown bearer <%s>\n", name);
424 res = -EINVAL;
425 } else {
426 bearer_disable(b_ptr, false);
427 res = 0;
428 }
429 return res;
430}
431
432int tipc_enable_l2_media(struct tipc_bearer *b)
433{ 376{
434 struct net_device *dev; 377 struct net_device *dev;
435 char *driver_name = strchr((const char *)b->name, ':') + 1; 378 char *driver_name = strchr((const char *)b->name, ':') + 1;
436 379
437 /* Find device with specified name */ 380 /* Find device with specified name */
438 dev = dev_get_by_name(&init_net, driver_name); 381 dev = dev_get_by_name(net, driver_name);
439 if (!dev) 382 if (!dev)
440 return -ENODEV; 383 return -ENODEV;
441 384
@@ -474,8 +417,8 @@ void tipc_disable_l2_media(struct tipc_bearer *b)
474 * @b_ptr: the bearer through which the packet is to be sent 417 * @b_ptr: the bearer through which the packet is to be sent
475 * @dest: peer destination address 418 * @dest: peer destination address
476 */ 419 */
477int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b, 420int tipc_l2_send_msg(struct net *net, struct sk_buff *buf,
478 struct tipc_media_addr *dest) 421 struct tipc_bearer *b, struct tipc_media_addr *dest)
479{ 422{
480 struct sk_buff *clone; 423 struct sk_buff *clone;
481 struct net_device *dev; 424 struct net_device *dev;
@@ -511,15 +454,16 @@ int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b,
511 * The media send routine must not alter the buffer being passed in 454 * The media send routine must not alter the buffer being passed in
512 * as it may be needed for later retransmission! 455 * as it may be needed for later retransmission!
513 */ 456 */
514void tipc_bearer_send(u32 bearer_id, struct sk_buff *buf, 457void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf,
515 struct tipc_media_addr *dest) 458 struct tipc_media_addr *dest)
516{ 459{
460 struct tipc_net *tn = net_generic(net, tipc_net_id);
517 struct tipc_bearer *b_ptr; 461 struct tipc_bearer *b_ptr;
518 462
519 rcu_read_lock(); 463 rcu_read_lock();
520 b_ptr = rcu_dereference_rtnl(bearer_list[bearer_id]); 464 b_ptr = rcu_dereference_rtnl(tn->bearer_list[bearer_id]);
521 if (likely(b_ptr)) 465 if (likely(b_ptr))
522 b_ptr->media->send_msg(buf, b_ptr, dest); 466 b_ptr->media->send_msg(net, buf, b_ptr, dest);
523 rcu_read_unlock(); 467 rcu_read_unlock();
524} 468}
525 469
@@ -539,17 +483,12 @@ static int tipc_l2_rcv_msg(struct sk_buff *buf, struct net_device *dev,
539{ 483{
540 struct tipc_bearer *b_ptr; 484 struct tipc_bearer *b_ptr;
541 485
542 if (!net_eq(dev_net(dev), &init_net)) {
543 kfree_skb(buf);
544 return NET_RX_DROP;
545 }
546
547 rcu_read_lock(); 486 rcu_read_lock();
548 b_ptr = rcu_dereference_rtnl(dev->tipc_ptr); 487 b_ptr = rcu_dereference_rtnl(dev->tipc_ptr);
549 if (likely(b_ptr)) { 488 if (likely(b_ptr)) {
550 if (likely(buf->pkt_type <= PACKET_BROADCAST)) { 489 if (likely(buf->pkt_type <= PACKET_BROADCAST)) {
551 buf->next = NULL; 490 buf->next = NULL;
552 tipc_rcv(buf, b_ptr); 491 tipc_rcv(dev_net(dev), buf, b_ptr);
553 rcu_read_unlock(); 492 rcu_read_unlock();
554 return NET_RX_SUCCESS; 493 return NET_RX_SUCCESS;
555 } 494 }
@@ -572,11 +511,9 @@ static int tipc_l2_rcv_msg(struct sk_buff *buf, struct net_device *dev,
572static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt, 511static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
573 void *ptr) 512 void *ptr)
574{ 513{
575 struct tipc_bearer *b_ptr;
576 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 514 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
577 515 struct net *net = dev_net(dev);
578 if (!net_eq(dev_net(dev), &init_net)) 516 struct tipc_bearer *b_ptr;
579 return NOTIFY_DONE;
580 517
581 b_ptr = rtnl_dereference(dev->tipc_ptr); 518 b_ptr = rtnl_dereference(dev->tipc_ptr);
582 if (!b_ptr) 519 if (!b_ptr)
@@ -590,16 +527,16 @@ static int tipc_l2_device_event(struct notifier_block *nb, unsigned long evt,
590 break; 527 break;
591 case NETDEV_DOWN: 528 case NETDEV_DOWN:
592 case NETDEV_CHANGEMTU: 529 case NETDEV_CHANGEMTU:
593 tipc_reset_bearer(b_ptr); 530 tipc_reset_bearer(net, b_ptr);
594 break; 531 break;
595 case NETDEV_CHANGEADDR: 532 case NETDEV_CHANGEADDR:
596 b_ptr->media->raw2addr(b_ptr, &b_ptr->addr, 533 b_ptr->media->raw2addr(b_ptr, &b_ptr->addr,
597 (char *)dev->dev_addr); 534 (char *)dev->dev_addr);
598 tipc_reset_bearer(b_ptr); 535 tipc_reset_bearer(net, b_ptr);
599 break; 536 break;
600 case NETDEV_UNREGISTER: 537 case NETDEV_UNREGISTER:
601 case NETDEV_CHANGENAME: 538 case NETDEV_CHANGENAME:
602 bearer_disable(b_ptr, false); 539 bearer_disable(dev_net(dev), b_ptr, false);
603 break; 540 break;
604 } 541 }
605 return NOTIFY_OK; 542 return NOTIFY_OK;
@@ -632,16 +569,17 @@ void tipc_bearer_cleanup(void)
632 dev_remove_pack(&tipc_packet_type); 569 dev_remove_pack(&tipc_packet_type);
633} 570}
634 571
635void tipc_bearer_stop(void) 572void tipc_bearer_stop(struct net *net)
636{ 573{
574 struct tipc_net *tn = net_generic(net, tipc_net_id);
637 struct tipc_bearer *b_ptr; 575 struct tipc_bearer *b_ptr;
638 u32 i; 576 u32 i;
639 577
640 for (i = 0; i < MAX_BEARERS; i++) { 578 for (i = 0; i < MAX_BEARERS; i++) {
641 b_ptr = rtnl_dereference(bearer_list[i]); 579 b_ptr = rtnl_dereference(tn->bearer_list[i]);
642 if (b_ptr) { 580 if (b_ptr) {
643 bearer_disable(b_ptr, true); 581 bearer_disable(net, b_ptr, true);
644 bearer_list[i] = NULL; 582 tn->bearer_list[i] = NULL;
645 } 583 }
646 } 584 }
647} 585}
@@ -654,7 +592,7 @@ static int __tipc_nl_add_bearer(struct tipc_nl_msg *msg,
654 struct nlattr *attrs; 592 struct nlattr *attrs;
655 struct nlattr *prop; 593 struct nlattr *prop;
656 594
657 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family, 595 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
658 NLM_F_MULTI, TIPC_NL_BEARER_GET); 596 NLM_F_MULTI, TIPC_NL_BEARER_GET);
659 if (!hdr) 597 if (!hdr)
660 return -EMSGSIZE; 598 return -EMSGSIZE;
@@ -698,6 +636,8 @@ int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb)
698 int i = cb->args[0]; 636 int i = cb->args[0];
699 struct tipc_bearer *bearer; 637 struct tipc_bearer *bearer;
700 struct tipc_nl_msg msg; 638 struct tipc_nl_msg msg;
639 struct net *net = sock_net(skb->sk);
640 struct tipc_net *tn = net_generic(net, tipc_net_id);
701 641
702 if (i == MAX_BEARERS) 642 if (i == MAX_BEARERS)
703 return 0; 643 return 0;
@@ -708,7 +648,7 @@ int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb)
708 648
709 rtnl_lock(); 649 rtnl_lock();
710 for (i = 0; i < MAX_BEARERS; i++) { 650 for (i = 0; i < MAX_BEARERS; i++) {
711 bearer = rtnl_dereference(bearer_list[i]); 651 bearer = rtnl_dereference(tn->bearer_list[i]);
712 if (!bearer) 652 if (!bearer)
713 continue; 653 continue;
714 654
@@ -730,6 +670,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
730 struct tipc_bearer *bearer; 670 struct tipc_bearer *bearer;
731 struct tipc_nl_msg msg; 671 struct tipc_nl_msg msg;
732 struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1]; 672 struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
673 struct net *net = genl_info_net(info);
733 674
734 if (!info->attrs[TIPC_NLA_BEARER]) 675 if (!info->attrs[TIPC_NLA_BEARER])
735 return -EINVAL; 676 return -EINVAL;
@@ -753,7 +694,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
753 msg.seq = info->snd_seq; 694 msg.seq = info->snd_seq;
754 695
755 rtnl_lock(); 696 rtnl_lock();
756 bearer = tipc_bearer_find(name); 697 bearer = tipc_bearer_find(net, name);
757 if (!bearer) { 698 if (!bearer) {
758 err = -EINVAL; 699 err = -EINVAL;
759 goto err_out; 700 goto err_out;
@@ -778,6 +719,7 @@ int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
778 char *name; 719 char *name;
779 struct tipc_bearer *bearer; 720 struct tipc_bearer *bearer;
780 struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1]; 721 struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
722 struct net *net = sock_net(skb->sk);
781 723
782 if (!info->attrs[TIPC_NLA_BEARER]) 724 if (!info->attrs[TIPC_NLA_BEARER])
783 return -EINVAL; 725 return -EINVAL;
@@ -794,13 +736,13 @@ int tipc_nl_bearer_disable(struct sk_buff *skb, struct genl_info *info)
794 name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); 736 name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
795 737
796 rtnl_lock(); 738 rtnl_lock();
797 bearer = tipc_bearer_find(name); 739 bearer = tipc_bearer_find(net, name);
798 if (!bearer) { 740 if (!bearer) {
799 rtnl_unlock(); 741 rtnl_unlock();
800 return -EINVAL; 742 return -EINVAL;
801 } 743 }
802 744
803 bearer_disable(bearer, false); 745 bearer_disable(net, bearer, false);
804 rtnl_unlock(); 746 rtnl_unlock();
805 747
806 return 0; 748 return 0;
@@ -811,11 +753,13 @@ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
811 int err; 753 int err;
812 char *bearer; 754 char *bearer;
813 struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1]; 755 struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
756 struct net *net = sock_net(skb->sk);
757 struct tipc_net *tn = net_generic(net, tipc_net_id);
814 u32 domain; 758 u32 domain;
815 u32 prio; 759 u32 prio;
816 760
817 prio = TIPC_MEDIA_LINK_PRI; 761 prio = TIPC_MEDIA_LINK_PRI;
818 domain = tipc_own_addr & TIPC_CLUSTER_MASK; 762 domain = tn->own_addr & TIPC_CLUSTER_MASK;
819 763
820 if (!info->attrs[TIPC_NLA_BEARER]) 764 if (!info->attrs[TIPC_NLA_BEARER])
821 return -EINVAL; 765 return -EINVAL;
@@ -847,7 +791,7 @@ int tipc_nl_bearer_enable(struct sk_buff *skb, struct genl_info *info)
847 } 791 }
848 792
849 rtnl_lock(); 793 rtnl_lock();
850 err = tipc_enable_bearer(bearer, domain, prio); 794 err = tipc_enable_bearer(net, bearer, domain, prio);
851 if (err) { 795 if (err) {
852 rtnl_unlock(); 796 rtnl_unlock();
853 return err; 797 return err;
@@ -863,6 +807,7 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
863 char *name; 807 char *name;
864 struct tipc_bearer *b; 808 struct tipc_bearer *b;
865 struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1]; 809 struct nlattr *attrs[TIPC_NLA_BEARER_MAX + 1];
810 struct net *net = genl_info_net(info);
866 811
867 if (!info->attrs[TIPC_NLA_BEARER]) 812 if (!info->attrs[TIPC_NLA_BEARER])
868 return -EINVAL; 813 return -EINVAL;
@@ -878,7 +823,7 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
878 name = nla_data(attrs[TIPC_NLA_BEARER_NAME]); 823 name = nla_data(attrs[TIPC_NLA_BEARER_NAME]);
879 824
880 rtnl_lock(); 825 rtnl_lock();
881 b = tipc_bearer_find(name); 826 b = tipc_bearer_find(net, name);
882 if (!b) { 827 if (!b) {
883 rtnl_unlock(); 828 rtnl_unlock();
884 return -EINVAL; 829 return -EINVAL;
@@ -913,7 +858,7 @@ static int __tipc_nl_add_media(struct tipc_nl_msg *msg,
913 struct nlattr *attrs; 858 struct nlattr *attrs;
914 struct nlattr *prop; 859 struct nlattr *prop;
915 860
916 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family, 861 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
917 NLM_F_MULTI, TIPC_NL_MEDIA_GET); 862 NLM_F_MULTI, TIPC_NL_MEDIA_GET);
918 if (!hdr) 863 if (!hdr)
919 return -EMSGSIZE; 864 return -EMSGSIZE;
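
Every hunk in bearer.c above follows the same conversion: the handler first resolves the struct net it is operating in, then passes it down to the now net-aware helpers (tipc_bearer_find(), bearer_disable(), tipc_bearer_send(), ...). A small illustrative sketch, not part of the patch, of where that struct net comes from in the three call paths touched here:

#include <linux/netdevice.h>
#include <net/genetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>

/* Illustrative only: the namespace sources used by the converted handlers
 * before they call the new net-aware API such as tipc_bearer_find(net, name).
 */
static void example_net_sources(struct sk_buff *skb, struct genl_info *info,
				struct net_device *dev)
{
	struct net *net;

	net = genl_info_net(info);	/* genl doit handlers (bearer get/set) */
	net = sock_net(skb->sk);	/* genl dump and enable/disable handlers */
	net = dev_net(dev);		/* packet receive and netdevice notifier paths */
	(void)net;
}
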
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 2c1230ac5dfe..6b17795ff8bc 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -37,12 +37,13 @@
37#ifndef _TIPC_BEARER_H 37#ifndef _TIPC_BEARER_H
38#define _TIPC_BEARER_H 38#define _TIPC_BEARER_H
39 39
40#include "bcast.h"
41#include "netlink.h" 40#include "netlink.h"
42#include <net/genetlink.h> 41#include <net/genetlink.h>
43 42
44#define MAX_BEARERS 2 43#define MAX_BEARERS 2
45#define MAX_MEDIA 2 44#define MAX_MEDIA 2
45#define MAX_NODES 4096
46#define WSIZE 32
46 47
47/* Identifiers associated with TIPC message header media address info 48/* Identifiers associated with TIPC message header media address info
48 * - address info field is 32 bytes long 49 * - address info field is 32 bytes long
@@ -59,6 +60,16 @@
59#define TIPC_MEDIA_TYPE_IB 2 60#define TIPC_MEDIA_TYPE_IB 2
60 61
61/** 62/**
63 * struct tipc_node_map - set of node identifiers
64 * @count: # of nodes in set
65 * @map: bitmap of node identifiers that are in the set
66 */
67struct tipc_node_map {
68 u32 count;
69 u32 map[MAX_NODES / WSIZE];
70};
71
72/**
62 * struct tipc_media_addr - destination address used by TIPC bearers 73 * struct tipc_media_addr - destination address used by TIPC bearers
63 * @value: address info (format defined by media) 74 * @value: address info (format defined by media)
64 * @media_id: TIPC media type identifier 75 * @media_id: TIPC media type identifier
@@ -89,10 +100,10 @@ struct tipc_bearer;
89 * @name: media name 100 * @name: media name
90 */ 101 */
91struct tipc_media { 102struct tipc_media {
92 int (*send_msg)(struct sk_buff *buf, 103 int (*send_msg)(struct net *net, struct sk_buff *buf,
93 struct tipc_bearer *b_ptr, 104 struct tipc_bearer *b_ptr,
94 struct tipc_media_addr *dest); 105 struct tipc_media_addr *dest);
95 int (*enable_media)(struct tipc_bearer *b_ptr); 106 int (*enable_media)(struct net *net, struct tipc_bearer *b_ptr);
96 void (*disable_media)(struct tipc_bearer *b_ptr); 107 void (*disable_media)(struct tipc_bearer *b_ptr);
97 int (*addr2str)(struct tipc_media_addr *addr, 108 int (*addr2str)(struct tipc_media_addr *addr,
98 char *strbuf, 109 char *strbuf,
@@ -157,17 +168,11 @@ struct tipc_bearer_names {
157 char if_name[TIPC_MAX_IF_NAME]; 168 char if_name[TIPC_MAX_IF_NAME];
158}; 169};
159 170
160struct tipc_link;
161
162extern struct tipc_bearer __rcu *bearer_list[];
163
164/* 171/*
165 * TIPC routines available to supported media types 172 * TIPC routines available to supported media types
166 */ 173 */
167 174
168void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *tb_ptr); 175void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr);
169int tipc_enable_bearer(const char *bearer_name, u32 disc_domain, u32 priority);
170int tipc_disable_bearer(const char *name);
171 176
172/* 177/*
173 * Routines made available to TIPC by supported media types 178 * Routines made available to TIPC by supported media types
@@ -191,21 +196,19 @@ int tipc_nl_media_set(struct sk_buff *skb, struct genl_info *info);
191int tipc_media_set_priority(const char *name, u32 new_value); 196int tipc_media_set_priority(const char *name, u32 new_value);
192int tipc_media_set_window(const char *name, u32 new_value); 197int tipc_media_set_window(const char *name, u32 new_value);
193void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a); 198void tipc_media_addr_printf(char *buf, int len, struct tipc_media_addr *a);
194struct sk_buff *tipc_media_get_names(void); 199int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b);
195int tipc_enable_l2_media(struct tipc_bearer *b);
196void tipc_disable_l2_media(struct tipc_bearer *b); 200void tipc_disable_l2_media(struct tipc_bearer *b);
197int tipc_l2_send_msg(struct sk_buff *buf, struct tipc_bearer *b, 201int tipc_l2_send_msg(struct net *net, struct sk_buff *buf,
198 struct tipc_media_addr *dest); 202 struct tipc_bearer *b, struct tipc_media_addr *dest);
199 203
200struct sk_buff *tipc_bearer_get_names(void); 204void tipc_bearer_add_dest(struct net *net, u32 bearer_id, u32 dest);
201void tipc_bearer_add_dest(u32 bearer_id, u32 dest); 205void tipc_bearer_remove_dest(struct net *net, u32 bearer_id, u32 dest);
202void tipc_bearer_remove_dest(u32 bearer_id, u32 dest); 206struct tipc_bearer *tipc_bearer_find(struct net *net, const char *name);
203struct tipc_bearer *tipc_bearer_find(const char *name);
204struct tipc_media *tipc_media_find(const char *name); 207struct tipc_media *tipc_media_find(const char *name);
205int tipc_bearer_setup(void); 208int tipc_bearer_setup(void);
206void tipc_bearer_cleanup(void); 209void tipc_bearer_cleanup(void);
207void tipc_bearer_stop(void); 210void tipc_bearer_stop(struct net *net);
208void tipc_bearer_send(u32 bearer_id, struct sk_buff *buf, 211void tipc_bearer_send(struct net *net, u32 bearer_id, struct sk_buff *buf,
209 struct tipc_media_addr *dest); 212 struct tipc_media_addr *dest);
210 213
211#endif /* _TIPC_BEARER_H */ 214#endif /* _TIPC_BEARER_H */
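
The struct tipc_node_map added to bearer.h above is a fixed-size bitmap indexed by the node part of a TIPC address (MAX_NODES bits split into WSIZE-bit words), plus a population count. The helpers that manipulate it live in bcast.c and are not part of this hunk; a sketch of the intended add operation, assuming the tipc_node() accessor from addr.h:

/* Illustrative only: how a node lands in the MAX_NODES/WSIZE bitmap declared
 * above. The in-tree helpers (tipc_nmap_add()/tipc_nmap_remove()) live in
 * bcast.c; tipc_node() extracts the 12-bit node part of a TIPC address.
 */
static void example_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;		/* which 32-bit word */
	u32 mask = 1u << (n % WSIZE);	/* which bit inside that word */

	if (!(nm_ptr->map[w] & mask)) {
		nm_ptr->map[w] |= mask;
		nm_ptr->count++;
	}
}
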
diff --git a/net/tipc/config.c b/net/tipc/config.c
deleted file mode 100644
index 876f4c6a2631..000000000000
--- a/net/tipc/config.c
+++ /dev/null
@@ -1,342 +0,0 @@
1/*
2 * net/tipc/config.c: TIPC configuration management code
3 *
4 * Copyright (c) 2002-2006, Ericsson AB
5 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "socket.h"
39#include "name_table.h"
40#include "config.h"
41#include "server.h"
42
43#define REPLY_TRUNCATED "<truncated>\n"
44
45static const void *req_tlv_area; /* request message TLV area */
46static int req_tlv_space; /* request message TLV area size */
47static int rep_headroom; /* reply message headroom to use */
48
49struct sk_buff *tipc_cfg_reply_alloc(int payload_size)
50{
51 struct sk_buff *buf;
52
53 buf = alloc_skb(rep_headroom + payload_size, GFP_ATOMIC);
54 if (buf)
55 skb_reserve(buf, rep_headroom);
56 return buf;
57}
58
59int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type,
60 void *tlv_data, int tlv_data_size)
61{
62 struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(buf);
63 int new_tlv_space = TLV_SPACE(tlv_data_size);
64
65 if (skb_tailroom(buf) < new_tlv_space)
66 return 0;
67 skb_put(buf, new_tlv_space);
68 tlv->tlv_type = htons(tlv_type);
69 tlv->tlv_len = htons(TLV_LENGTH(tlv_data_size));
70 if (tlv_data_size && tlv_data)
71 memcpy(TLV_DATA(tlv), tlv_data, tlv_data_size);
72 return 1;
73}
74
75static struct sk_buff *tipc_cfg_reply_unsigned_type(u16 tlv_type, u32 value)
76{
77 struct sk_buff *buf;
78 __be32 value_net;
79
80 buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(value)));
81 if (buf) {
82 value_net = htonl(value);
83 tipc_cfg_append_tlv(buf, tlv_type, &value_net,
84 sizeof(value_net));
85 }
86 return buf;
87}
88
89static struct sk_buff *tipc_cfg_reply_unsigned(u32 value)
90{
91 return tipc_cfg_reply_unsigned_type(TIPC_TLV_UNSIGNED, value);
92}
93
94struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string)
95{
96 struct sk_buff *buf;
97 int string_len = strlen(string) + 1;
98
99 buf = tipc_cfg_reply_alloc(TLV_SPACE(string_len));
100 if (buf)
101 tipc_cfg_append_tlv(buf, tlv_type, string, string_len);
102 return buf;
103}
104
105static struct sk_buff *tipc_show_stats(void)
106{
107 struct sk_buff *buf;
108 struct tlv_desc *rep_tlv;
109 char *pb;
110 int pb_len;
111 int str_len;
112 u32 value;
113
114 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
115 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
116
117 value = ntohl(*(u32 *)TLV_DATA(req_tlv_area));
118 if (value != 0)
119 return tipc_cfg_reply_error_string("unsupported argument");
120
121 buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
122 if (buf == NULL)
123 return NULL;
124
125 rep_tlv = (struct tlv_desc *)buf->data;
126 pb = TLV_DATA(rep_tlv);
127 pb_len = ULTRA_STRING_MAX_LEN;
128
129 str_len = tipc_snprintf(pb, pb_len, "TIPC version " TIPC_MOD_VER "\n");
130 str_len += 1; /* for "\0" */
131 skb_put(buf, TLV_SPACE(str_len));
132 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
133
134 return buf;
135}
136
137static struct sk_buff *cfg_enable_bearer(void)
138{
139 struct tipc_bearer_config *args;
140
141 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_CONFIG))
142 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
143
144 args = (struct tipc_bearer_config *)TLV_DATA(req_tlv_area);
145 if (tipc_enable_bearer(args->name,
146 ntohl(args->disc_domain),
147 ntohl(args->priority)))
148 return tipc_cfg_reply_error_string("unable to enable bearer");
149
150 return tipc_cfg_reply_none();
151}
152
153static struct sk_buff *cfg_disable_bearer(void)
154{
155 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_NAME))
156 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
157
158 if (tipc_disable_bearer((char *)TLV_DATA(req_tlv_area)))
159 return tipc_cfg_reply_error_string("unable to disable bearer");
160
161 return tipc_cfg_reply_none();
162}
163
164static struct sk_buff *cfg_set_own_addr(void)
165{
166 u32 addr;
167
168 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
169 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
170
171 addr = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
172 if (addr == tipc_own_addr)
173 return tipc_cfg_reply_none();
174 if (!tipc_addr_node_valid(addr))
175 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
176 " (node address)");
177 if (tipc_own_addr)
178 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
179 " (cannot change node address once assigned)");
180 if (!tipc_net_start(addr))
181 return tipc_cfg_reply_none();
182
183 return tipc_cfg_reply_error_string("cannot change to network mode");
184}
185
186static struct sk_buff *cfg_set_max_ports(void)
187{
188 u32 value;
189
190 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
191 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
192 value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
193 if (value == tipc_max_ports)
194 return tipc_cfg_reply_none();
195 if (value < 127 || value > 65535)
196 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
197 " (max ports must be 127-65535)");
198 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
199 " (cannot change max ports while TIPC is active)");
200}
201
202static struct sk_buff *cfg_set_netid(void)
203{
204 u32 value;
205
206 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
207 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
208 value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
209 if (value == tipc_net_id)
210 return tipc_cfg_reply_none();
211 if (value < 1 || value > 9999)
212 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
213 " (network id must be 1-9999)");
214 if (tipc_own_addr)
215 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
216 " (cannot change network id once TIPC has joined a network)");
217 tipc_net_id = value;
218 return tipc_cfg_reply_none();
219}
220
221struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
222 int request_space, int reply_headroom)
223{
224 struct sk_buff *rep_tlv_buf;
225
226 rtnl_lock();
227
228 /* Save request and reply details in a well-known location */
229 req_tlv_area = request_area;
230 req_tlv_space = request_space;
231 rep_headroom = reply_headroom;
232
233 /* Check command authorization */
234 if (likely(in_own_node(orig_node))) {
235 /* command is permitted */
236 } else {
237 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
238 " (cannot be done remotely)");
239 goto exit;
240 }
241
242 /* Call appropriate processing routine */
243 switch (cmd) {
244 case TIPC_CMD_NOOP:
245 rep_tlv_buf = tipc_cfg_reply_none();
246 break;
247 case TIPC_CMD_GET_NODES:
248 rep_tlv_buf = tipc_node_get_nodes(req_tlv_area, req_tlv_space);
249 break;
250 case TIPC_CMD_GET_LINKS:
251 rep_tlv_buf = tipc_node_get_links(req_tlv_area, req_tlv_space);
252 break;
253 case TIPC_CMD_SHOW_LINK_STATS:
254 rep_tlv_buf = tipc_link_cmd_show_stats(req_tlv_area, req_tlv_space);
255 break;
256 case TIPC_CMD_RESET_LINK_STATS:
257 rep_tlv_buf = tipc_link_cmd_reset_stats(req_tlv_area, req_tlv_space);
258 break;
259 case TIPC_CMD_SHOW_NAME_TABLE:
260 rep_tlv_buf = tipc_nametbl_get(req_tlv_area, req_tlv_space);
261 break;
262 case TIPC_CMD_GET_BEARER_NAMES:
263 rep_tlv_buf = tipc_bearer_get_names();
264 break;
265 case TIPC_CMD_GET_MEDIA_NAMES:
266 rep_tlv_buf = tipc_media_get_names();
267 break;
268 case TIPC_CMD_SHOW_PORTS:
269 rep_tlv_buf = tipc_sk_socks_show();
270 break;
271 case TIPC_CMD_SHOW_STATS:
272 rep_tlv_buf = tipc_show_stats();
273 break;
274 case TIPC_CMD_SET_LINK_TOL:
275 case TIPC_CMD_SET_LINK_PRI:
276 case TIPC_CMD_SET_LINK_WINDOW:
277 rep_tlv_buf = tipc_link_cmd_config(req_tlv_area, req_tlv_space, cmd);
278 break;
279 case TIPC_CMD_ENABLE_BEARER:
280 rep_tlv_buf = cfg_enable_bearer();
281 break;
282 case TIPC_CMD_DISABLE_BEARER:
283 rep_tlv_buf = cfg_disable_bearer();
284 break;
285 case TIPC_CMD_SET_NODE_ADDR:
286 rep_tlv_buf = cfg_set_own_addr();
287 break;
288 case TIPC_CMD_SET_MAX_PORTS:
289 rep_tlv_buf = cfg_set_max_ports();
290 break;
291 case TIPC_CMD_SET_NETID:
292 rep_tlv_buf = cfg_set_netid();
293 break;
294 case TIPC_CMD_GET_MAX_PORTS:
295 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_ports);
296 break;
297 case TIPC_CMD_GET_NETID:
298 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id);
299 break;
300 case TIPC_CMD_NOT_NET_ADMIN:
301 rep_tlv_buf =
302 tipc_cfg_reply_error_string(TIPC_CFG_NOT_NET_ADMIN);
303 break;
304 case TIPC_CMD_SET_MAX_ZONES:
305 case TIPC_CMD_GET_MAX_ZONES:
306 case TIPC_CMD_SET_MAX_SLAVES:
307 case TIPC_CMD_GET_MAX_SLAVES:
308 case TIPC_CMD_SET_MAX_CLUSTERS:
309 case TIPC_CMD_GET_MAX_CLUSTERS:
310 case TIPC_CMD_SET_MAX_NODES:
311 case TIPC_CMD_GET_MAX_NODES:
312 case TIPC_CMD_SET_MAX_SUBSCR:
313 case TIPC_CMD_GET_MAX_SUBSCR:
314 case TIPC_CMD_SET_MAX_PUBL:
315 case TIPC_CMD_GET_MAX_PUBL:
316 case TIPC_CMD_SET_LOG_SIZE:
317 case TIPC_CMD_SET_REMOTE_MNG:
318 case TIPC_CMD_GET_REMOTE_MNG:
319 case TIPC_CMD_DUMP_LOG:
320 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
321 " (obsolete command)");
322 break;
323 default:
324 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
325 " (unknown command)");
326 break;
327 }
328
329 WARN_ON(rep_tlv_buf->len > TLV_SPACE(ULTRA_STRING_MAX_LEN));
330
331 /* Append an error message if we cannot return all requested data */
332 if (rep_tlv_buf->len == TLV_SPACE(ULTRA_STRING_MAX_LEN)) {
333 if (*(rep_tlv_buf->data + ULTRA_STRING_MAX_LEN) != '\0')
334 sprintf(rep_tlv_buf->data + rep_tlv_buf->len -
335 sizeof(REPLY_TRUNCATED) - 1, REPLY_TRUNCATED);
336 }
337
338 /* Return reply buffer */
339exit:
340 rtnl_unlock();
341 return rep_tlv_buf;
342}
diff --git a/net/tipc/config.h b/net/tipc/config.h
deleted file mode 100644
index 47b1bf181612..000000000000
--- a/net/tipc/config.h
+++ /dev/null
@@ -1,67 +0,0 @@
1/*
2 * net/tipc/config.h: Include file for TIPC configuration service code
3 *
4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_CONFIG_H
38#define _TIPC_CONFIG_H
39
40/* ---------------------------------------------------------------------- */
41
42#include "link.h"
43
44struct sk_buff *tipc_cfg_reply_alloc(int payload_size);
45int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type,
46 void *tlv_data, int tlv_data_size);
47struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string);
48
49static inline struct sk_buff *tipc_cfg_reply_none(void)
50{
51 return tipc_cfg_reply_alloc(0);
52}
53
54static inline struct sk_buff *tipc_cfg_reply_error_string(char *string)
55{
56 return tipc_cfg_reply_string_type(TIPC_TLV_ERROR_STRING, string);
57}
58
59static inline struct sk_buff *tipc_cfg_reply_ultra_string(char *string)
60{
61 return tipc_cfg_reply_string_type(TIPC_TLV_ULTRA_STRING, string);
62}
63
64struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd,
65 const void *req_tlv_area, int req_tlv_space,
66 int headroom);
67#endif
diff --git a/net/tipc/core.c b/net/tipc/core.c
index a5737b8407dd..935205e6bcfe 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -34,82 +34,88 @@
34 * POSSIBILITY OF SUCH DAMAGE. 34 * POSSIBILITY OF SUCH DAMAGE.
35 */ 35 */
36 36
37#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
38
37#include "core.h" 39#include "core.h"
38#include "name_table.h" 40#include "name_table.h"
39#include "subscr.h" 41#include "subscr.h"
40#include "config.h" 42#include "bearer.h"
43#include "net.h"
41#include "socket.h" 44#include "socket.h"
42 45
43#include <linux/module.h> 46#include <linux/module.h>
44 47
45/* global variables used by multiple sub-systems within TIPC */
46int tipc_random __read_mostly;
47
48/* configurable TIPC parameters */ 48/* configurable TIPC parameters */
49u32 tipc_own_addr __read_mostly;
50int tipc_max_ports __read_mostly;
51int tipc_net_id __read_mostly; 49int tipc_net_id __read_mostly;
52int sysctl_tipc_rmem[3] __read_mostly; /* min/default/max */ 50int sysctl_tipc_rmem[3] __read_mostly; /* min/default/max */
53 51
54/** 52static int __net_init tipc_init_net(struct net *net)
55 * tipc_buf_acquire - creates a TIPC message buffer
56 * @size: message size (including TIPC header)
57 *
58 * Returns a new buffer with data pointers set to the specified size.
59 *
60 * NOTE: Headroom is reserved to allow prepending of a data link header.
61 * There may also be unrequested tailroom present at the buffer's end.
62 */
63struct sk_buff *tipc_buf_acquire(u32 size)
64{ 53{
65 struct sk_buff *skb; 54 struct tipc_net *tn = net_generic(net, tipc_net_id);
66 unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u; 55 int err;
67 56
68 skb = alloc_skb_fclone(buf_size, GFP_ATOMIC); 57 tn->net_id = 4711;
69 if (skb) { 58 tn->own_addr = 0;
70 skb_reserve(skb, BUF_HEADROOM); 59 get_random_bytes(&tn->random, sizeof(int));
71 skb_put(skb, size); 60 INIT_LIST_HEAD(&tn->node_list);
72 skb->next = NULL; 61 spin_lock_init(&tn->node_list_lock);
73 } 62
74 return skb; 63 err = tipc_sk_rht_init(net);
64 if (err)
65 goto out_sk_rht;
66
67 err = tipc_nametbl_init(net);
68 if (err)
69 goto out_nametbl;
70
71 err = tipc_subscr_start(net);
72 if (err)
73 goto out_subscr;
74 return 0;
75
76out_subscr:
77 tipc_nametbl_stop(net);
78out_nametbl:
79 tipc_sk_rht_destroy(net);
80out_sk_rht:
81 return err;
75} 82}
76 83
77/** 84static void __net_exit tipc_exit_net(struct net *net)
78 * tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode
79 */
80static void tipc_core_stop(void)
81{ 85{
82 tipc_net_stop(); 86 tipc_subscr_stop(net);
83 tipc_bearer_cleanup(); 87 tipc_net_stop(net);
84 tipc_netlink_stop(); 88 tipc_nametbl_stop(net);
85 tipc_subscr_stop(); 89 tipc_sk_rht_destroy(net);
86 tipc_nametbl_stop();
87 tipc_sk_ref_table_stop();
88 tipc_socket_stop();
89 tipc_unregister_sysctl();
90} 90}
91 91
92/** 92static struct pernet_operations tipc_net_ops = {
93 * tipc_core_start - switch TIPC from NOT RUNNING to SINGLE NODE mode 93 .init = tipc_init_net,
94 */ 94 .exit = tipc_exit_net,
95static int tipc_core_start(void) 95 .id = &tipc_net_id,
96 .size = sizeof(struct tipc_net),
97};
98
99static int __init tipc_init(void)
96{ 100{
97 int err; 101 int err;
98 102
99 get_random_bytes(&tipc_random, sizeof(tipc_random)); 103 pr_info("Activated (version " TIPC_MOD_VER ")\n");
100
101 err = tipc_sk_ref_table_init(tipc_max_ports, tipc_random);
102 if (err)
103 goto out_reftbl;
104 104
105 err = tipc_nametbl_init(); 105 sysctl_tipc_rmem[0] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
106 if (err) 106 TIPC_LOW_IMPORTANCE;
107 goto out_nametbl; 107 sysctl_tipc_rmem[1] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
108 TIPC_CRITICAL_IMPORTANCE;
109 sysctl_tipc_rmem[2] = TIPC_CONN_OVERLOAD_LIMIT;
108 110
109 err = tipc_netlink_start(); 111 err = tipc_netlink_start();
110 if (err) 112 if (err)
111 goto out_netlink; 113 goto out_netlink;
112 114
115 err = tipc_netlink_compat_start();
116 if (err)
117 goto out_netlink_compat;
118
113 err = tipc_socket_init(); 119 err = tipc_socket_init();
114 if (err) 120 if (err)
115 goto out_socket; 121 goto out_socket;
@@ -118,58 +124,40 @@ static int tipc_core_start(void)
118 if (err) 124 if (err)
119 goto out_sysctl; 125 goto out_sysctl;
120 126
121 err = tipc_subscr_start(); 127 err = register_pernet_subsys(&tipc_net_ops);
122 if (err) 128 if (err)
123 goto out_subscr; 129 goto out_pernet;
124 130
125 err = tipc_bearer_setup(); 131 err = tipc_bearer_setup();
126 if (err) 132 if (err)
127 goto out_bearer; 133 goto out_bearer;
128 134
135 pr_info("Started in single node mode\n");
129 return 0; 136 return 0;
130out_bearer: 137out_bearer:
131 tipc_subscr_stop(); 138 unregister_pernet_subsys(&tipc_net_ops);
132out_subscr: 139out_pernet:
133 tipc_unregister_sysctl(); 140 tipc_unregister_sysctl();
134out_sysctl: 141out_sysctl:
135 tipc_socket_stop(); 142 tipc_socket_stop();
136out_socket: 143out_socket:
144 tipc_netlink_compat_stop();
145out_netlink_compat:
137 tipc_netlink_stop(); 146 tipc_netlink_stop();
138out_netlink: 147out_netlink:
139 tipc_nametbl_stop(); 148 pr_err("Unable to start in single node mode\n");
140out_nametbl:
141 tipc_sk_ref_table_stop();
142out_reftbl:
143 return err; 149 return err;
144} 150}
145 151
146static int __init tipc_init(void)
147{
148 int res;
149
150 pr_info("Activated (version " TIPC_MOD_VER ")\n");
151
152 tipc_own_addr = 0;
153 tipc_max_ports = CONFIG_TIPC_PORTS;
154 tipc_net_id = 4711;
155
156 sysctl_tipc_rmem[0] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
157 TIPC_LOW_IMPORTANCE;
158 sysctl_tipc_rmem[1] = TIPC_CONN_OVERLOAD_LIMIT >> 4 <<
159 TIPC_CRITICAL_IMPORTANCE;
160 sysctl_tipc_rmem[2] = TIPC_CONN_OVERLOAD_LIMIT;
161
162 res = tipc_core_start();
163 if (res)
164 pr_err("Unable to start in single node mode\n");
165 else
166 pr_info("Started in single node mode\n");
167 return res;
168}
169
170static void __exit tipc_exit(void) 152static void __exit tipc_exit(void)
171{ 153{
172 tipc_core_stop(); 154 tipc_bearer_cleanup();
155 tipc_netlink_stop();
156 tipc_netlink_compat_stop();
157 tipc_socket_stop();
158 tipc_unregister_sysctl();
159 unregister_pernet_subsys(&tipc_net_ops);
160
173 pr_info("Deactivated\n"); 161 pr_info("Deactivated\n");
174} 162}
175 163
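
The core.c rewrite above moves all module-global state into a per-namespace struct tipc_net. Because tipc_net_ops fills in both .id and .size, register_pernet_subsys() makes the core allocate a zeroed block of that size for every struct net and hands back the id used with net_generic(); tipc_init_net()/tipc_exit_net() then only initialize and tear down the per-namespace contents. A minimal self-contained sketch of the same pattern, with purely illustrative names:

#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

/* Names here are illustrative; the TIPC equivalents are struct tipc_net,
 * tipc_net_id and tipc_net_ops in the hunk above.
 */
struct demo_net {
	int counter;			/* one copy of this exists per struct net */
};

static int demo_net_id __read_mostly;

static int __net_init demo_init_net(struct net *net)
{
	struct demo_net *dn = net_generic(net, demo_net_id);

	dn->counter = 0;		/* block is preallocated and zeroed by the core */
	return 0;
}

static void __net_exit demo_exit_net(struct net *net)
{
	/* the per-net block itself is freed by the core after .exit returns */
}

static struct pernet_operations demo_net_ops = {
	.init = demo_init_net,
	.exit = demo_exit_net,
	.id   = &demo_net_id,
	.size = sizeof(struct demo_net),
};

static int __init demo_module_init(void)
{
	return register_pernet_subsys(&demo_net_ops);
}

static void __exit demo_module_exit(void)
{
	unregister_pernet_subsys(&demo_net_ops);
}

module_init(demo_module_init);
module_exit(demo_module_exit);
MODULE_LICENSE("GPL");
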
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 84602137ce20..3dc68c7a966d 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -37,8 +37,6 @@
37#ifndef _TIPC_CORE_H 37#ifndef _TIPC_CORE_H
38#define _TIPC_CORE_H 38#define _TIPC_CORE_H
39 39
40#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
41
42#include <linux/tipc.h> 40#include <linux/tipc.h>
43#include <linux/tipc_config.h> 41#include <linux/tipc_config.h>
44#include <linux/tipc_netlink.h> 42#include <linux/tipc_netlink.h>
@@ -59,47 +57,54 @@
59#include <linux/vmalloc.h> 57#include <linux/vmalloc.h>
60#include <linux/rtnetlink.h> 58#include <linux/rtnetlink.h>
61#include <linux/etherdevice.h> 59#include <linux/etherdevice.h>
60#include <net/netns/generic.h>
61#include <linux/rhashtable.h>
62
63#include "node.h"
64#include "bearer.h"
65#include "bcast.h"
66#include "netlink.h"
67#include "link.h"
68#include "node.h"
69#include "msg.h"
62 70
63#define TIPC_MOD_VER "2.0.0" 71#define TIPC_MOD_VER "2.0.0"
64 72
65#define ULTRA_STRING_MAX_LEN 32768 73extern int tipc_net_id __read_mostly;
66#define TIPC_MAX_SUBSCRIPTIONS 65535 74extern int sysctl_tipc_rmem[3] __read_mostly;
67#define TIPC_MAX_PUBLICATIONS 65535 75extern int sysctl_tipc_named_timeout __read_mostly;
68 76
69struct tipc_msg; /* msg.h */ 77struct tipc_net {
78 u32 own_addr;
79 int net_id;
80 int random;
70 81
71int tipc_snprintf(char *buf, int len, const char *fmt, ...); 82 /* Node table and node list */
83 spinlock_t node_list_lock;
84 struct hlist_head node_htable[NODE_HTABLE_SIZE];
85 struct list_head node_list;
86 u32 num_nodes;
87 u32 num_links;
72 88
73/* 89 /* Bearer list */
74 * TIPC-specific error codes 90 struct tipc_bearer __rcu *bearer_list[MAX_BEARERS + 1];
75 */
76#define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */
77 91
78/* 92 /* Broadcast link */
79 * Global configuration variables 93 struct tipc_bcbearer *bcbearer;
80 */ 94 struct tipc_bclink *bclink;
81extern u32 tipc_own_addr __read_mostly; 95 struct tipc_link *bcl;
82extern int tipc_max_ports __read_mostly;
83extern int tipc_net_id __read_mostly;
84extern int sysctl_tipc_rmem[3] __read_mostly;
85extern int sysctl_tipc_named_timeout __read_mostly;
86 96
87/* 97 /* Socket hash table */
88 * Other global variables 98 struct rhashtable sk_rht;
89 */
90extern int tipc_random __read_mostly;
91 99
92/* 100 /* Name table */
93 * Routines available to privileged subsystems 101 spinlock_t nametbl_lock;
94 */ 102 struct name_table *nametbl;
95int tipc_netlink_start(void); 103
96void tipc_netlink_stop(void); 104 /* Topology subscription server */
97int tipc_socket_init(void); 105 struct tipc_server *topsrv;
98void tipc_socket_stop(void); 106 atomic_t subscription_count;
99int tipc_sock_create_local(int type, struct socket **res); 107};
100void tipc_sock_release_local(struct socket *sock);
101int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
102 int flags);
103 108
104#ifdef CONFIG_SYSCTL 109#ifdef CONFIG_SYSCTL
105int tipc_register_sysctl(void); 110int tipc_register_sysctl(void);
@@ -108,102 +113,4 @@ void tipc_unregister_sysctl(void);
108#define tipc_register_sysctl() 0 113#define tipc_register_sysctl() 0
109#define tipc_unregister_sysctl() 114#define tipc_unregister_sysctl()
110#endif 115#endif
111
112/*
113 * TIPC timer code
114 */
115typedef void (*Handler) (unsigned long);
116
117/**
118 * k_init_timer - initialize a timer
119 * @timer: pointer to timer structure
120 * @routine: pointer to routine to invoke when timer expires
121 * @argument: value to pass to routine when timer expires
122 *
123 * Timer must be initialized before use (and terminated when no longer needed).
124 */
125static inline void k_init_timer(struct timer_list *timer, Handler routine,
126 unsigned long argument)
127{
128 setup_timer(timer, routine, argument);
129}
130
131/**
132 * k_start_timer - start a timer
133 * @timer: pointer to timer structure
134 * @msec: time to delay (in ms)
135 *
136 * Schedules a previously initialized timer for later execution.
137 * If timer is already running, the new timeout overrides the previous request.
138 *
139 * To ensure the timer doesn't expire before the specified delay elapses,
140 * the amount of delay is rounded up when converting to the jiffies
141 * then an additional jiffy is added to account for the fact that
142 * the starting time may be in the middle of the current jiffy.
143 */
144static inline void k_start_timer(struct timer_list *timer, unsigned long msec)
145{
146 mod_timer(timer, jiffies + msecs_to_jiffies(msec) + 1);
147}
148
149/**
150 * k_cancel_timer - cancel a timer
151 * @timer: pointer to timer structure
152 *
153 * Cancels a previously initialized timer.
154 * Can be called safely even if the timer is already inactive.
155 *
156 * WARNING: Must not be called when holding locks required by the timer's
157 * timeout routine, otherwise deadlock can occur on SMP systems!
158 */
159static inline void k_cancel_timer(struct timer_list *timer)
160{
161 del_timer_sync(timer);
162}
163
164/**
165 * k_term_timer - terminate a timer
166 * @timer: pointer to timer structure
167 *
168 * Prevents further use of a previously initialized timer.
169 *
170 * WARNING: Caller must ensure timer isn't currently running.
171 *
172 * (Do not "enhance" this routine to automatically cancel an active timer,
173 * otherwise deadlock can arise when a timeout routine calls k_term_timer.)
174 */
175static inline void k_term_timer(struct timer_list *timer)
176{
177}
178
179/*
180 * TIPC message buffer code
181 *
182 * TIPC message buffer headroom reserves space for the worst-case
183 * link-level device header (in case the message is sent off-node).
184 *
185 * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields
186 * are word aligned for quicker access
187 */
188#define BUF_HEADROOM LL_MAX_HEADER
189
190struct tipc_skb_cb {
191 void *handle;
192 struct sk_buff *tail;
193 bool deferred;
194 bool wakeup_pending;
195 bool bundling;
196 u16 chain_sz;
197 u16 chain_imp;
198};
199
200#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
201
202static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
203{
204 return (struct tipc_msg *)skb->data;
205}
206
207struct sk_buff *tipc_buf_acquire(u32 size);
208
209#endif 116#endif
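
With tipc_own_addr, tipc_max_ports and tipc_random gone from core.h, every former reader of those globals must first map its struct net to the struct tipc_net defined above. Thin accessors of the following shape keep call sites readable; they are a sketch of the pattern, not something this patch adds:

/* Hypothetical convenience wrappers around the per-namespace state declared
 * above; code that used to read the tipc_own_addr global now does this.
 */
static inline struct tipc_net *tipc_net(struct net *net)
{
	return net_generic(net, tipc_net_id);
}

static inline u32 tipc_own_addr(struct net *net)
{
	return tipc_net(net)->own_addr;
}

static inline int tipc_netid(struct net *net)
{
	return tipc_net(net)->net_id;
}
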
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index aa722a42ef8b..feef3753615d 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/discover.c 2 * net/tipc/discover.c
3 * 3 *
4 * Copyright (c) 2003-2006, 2014, Ericsson AB 4 * Copyright (c) 2003-2006, 2014-2015, Ericsson AB
5 * Copyright (c) 2005-2006, 2010-2011, Wind River Systems 5 * Copyright (c) 2005-2006, 2010-2011, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -38,15 +38,19 @@
38#include "link.h" 38#include "link.h"
39#include "discover.h" 39#include "discover.h"
40 40
41#define TIPC_LINK_REQ_INIT 125 /* min delay during bearer start up */ 41/* min delay during bearer start up */
42#define TIPC_LINK_REQ_FAST 1000 /* max delay if bearer has no links */ 42#define TIPC_LINK_REQ_INIT msecs_to_jiffies(125)
43#define TIPC_LINK_REQ_SLOW 60000 /* max delay if bearer has links */ 43/* max delay if bearer has no links */
44#define TIPC_LINK_REQ_INACTIVE 0xffffffff /* indicates no timer in use */ 44#define TIPC_LINK_REQ_FAST msecs_to_jiffies(1000)
45 45/* max delay if bearer has links */
46#define TIPC_LINK_REQ_SLOW msecs_to_jiffies(60000)
47/* indicates no timer in use */
48#define TIPC_LINK_REQ_INACTIVE 0xffffffff
46 49
47/** 50/**
48 * struct tipc_link_req - information about an ongoing link setup request 51 * struct tipc_link_req - information about an ongoing link setup request
49 * @bearer_id: identity of bearer issuing requests 52 * @bearer_id: identity of bearer issuing requests
53 * @net: network namespace instance
50 * @dest: destination address for request messages 54 * @dest: destination address for request messages
51 * @domain: network domain to which links can be established 55 * @domain: network domain to which links can be established
52 * @num_nodes: number of nodes currently discovered (i.e. with an active link) 56 * @num_nodes: number of nodes currently discovered (i.e. with an active link)
@@ -58,31 +62,35 @@
58struct tipc_link_req { 62struct tipc_link_req {
59 u32 bearer_id; 63 u32 bearer_id;
60 struct tipc_media_addr dest; 64 struct tipc_media_addr dest;
65 struct net *net;
61 u32 domain; 66 u32 domain;
62 int num_nodes; 67 int num_nodes;
63 spinlock_t lock; 68 spinlock_t lock;
64 struct sk_buff *buf; 69 struct sk_buff *buf;
65 struct timer_list timer; 70 struct timer_list timer;
66 unsigned int timer_intv; 71 unsigned long timer_intv;
67}; 72};
68 73
69/** 74/**
70 * tipc_disc_init_msg - initialize a link setup message 75 * tipc_disc_init_msg - initialize a link setup message
76 * @net: the applicable net namespace
71 * @type: message type (request or response) 77 * @type: message type (request or response)
72 * @b_ptr: ptr to bearer issuing message 78 * @b_ptr: ptr to bearer issuing message
73 */ 79 */
74static void tipc_disc_init_msg(struct sk_buff *buf, u32 type, 80static void tipc_disc_init_msg(struct net *net, struct sk_buff *buf, u32 type,
75 struct tipc_bearer *b_ptr) 81 struct tipc_bearer *b_ptr)
76{ 82{
83 struct tipc_net *tn = net_generic(net, tipc_net_id);
77 struct tipc_msg *msg; 84 struct tipc_msg *msg;
78 u32 dest_domain = b_ptr->domain; 85 u32 dest_domain = b_ptr->domain;
79 86
80 msg = buf_msg(buf); 87 msg = buf_msg(buf);
81 tipc_msg_init(msg, LINK_CONFIG, type, INT_H_SIZE, dest_domain); 88 tipc_msg_init(tn->own_addr, msg, LINK_CONFIG, type,
89 INT_H_SIZE, dest_domain);
82 msg_set_non_seq(msg, 1); 90 msg_set_non_seq(msg, 1);
83 msg_set_node_sig(msg, tipc_random); 91 msg_set_node_sig(msg, tn->random);
84 msg_set_dest_domain(msg, dest_domain); 92 msg_set_dest_domain(msg, dest_domain);
85 msg_set_bc_netid(msg, tipc_net_id); 93 msg_set_bc_netid(msg, tn->net_id);
86 b_ptr->media->addr2msg(msg_media_addr(msg), &b_ptr->addr); 94 b_ptr->media->addr2msg(msg_media_addr(msg), &b_ptr->addr);
87} 95}
88 96
@@ -107,11 +115,14 @@ static void disc_dupl_alert(struct tipc_bearer *b_ptr, u32 node_addr,
107 115
108/** 116/**
109 * tipc_disc_rcv - handle incoming discovery message (request or response) 117 * tipc_disc_rcv - handle incoming discovery message (request or response)
118 * @net: the applicable net namespace
110 * @buf: buffer containing message 119 * @buf: buffer containing message
111 * @bearer: bearer that message arrived on 120 * @bearer: bearer that message arrived on
112 */ 121 */
113void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer) 122void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
123 struct tipc_bearer *bearer)
114{ 124{
125 struct tipc_net *tn = net_generic(net, tipc_net_id);
115 struct tipc_node *node; 126 struct tipc_node *node;
116 struct tipc_link *link; 127 struct tipc_link *link;
117 struct tipc_media_addr maddr; 128 struct tipc_media_addr maddr;
@@ -133,7 +144,7 @@ void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer)
133 kfree_skb(buf); 144 kfree_skb(buf);
134 145
135 /* Ensure message from node is valid and communication is permitted */ 146 /* Ensure message from node is valid and communication is permitted */
136 if (net_id != tipc_net_id) 147 if (net_id != tn->net_id)
137 return; 148 return;
138 if (maddr.broadcast) 149 if (maddr.broadcast)
139 return; 150 return;
@@ -142,23 +153,19 @@ void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer)
142 if (!tipc_addr_node_valid(onode)) 153 if (!tipc_addr_node_valid(onode))
143 return; 154 return;
144 155
145 if (in_own_node(onode)) { 156 if (in_own_node(net, onode)) {
146 if (memcmp(&maddr, &bearer->addr, sizeof(maddr))) 157 if (memcmp(&maddr, &bearer->addr, sizeof(maddr)))
147 disc_dupl_alert(bearer, tipc_own_addr, &maddr); 158 disc_dupl_alert(bearer, tn->own_addr, &maddr);
148 return; 159 return;
149 } 160 }
150 if (!tipc_in_scope(ddom, tipc_own_addr)) 161 if (!tipc_in_scope(ddom, tn->own_addr))
151 return; 162 return;
152 if (!tipc_in_scope(bearer->domain, onode)) 163 if (!tipc_in_scope(bearer->domain, onode))
153 return; 164 return;
154 165
155 /* Locate, or if necessary, create, node: */ 166 node = tipc_node_create(net, onode);
156 node = tipc_node_find(onode);
157 if (!node)
158 node = tipc_node_create(onode);
159 if (!node) 167 if (!node)
160 return; 168 return;
161
162 tipc_node_lock(node); 169 tipc_node_lock(node);
163 link = node->links[bearer->identity]; 170 link = node->links[bearer->identity];
164 171
@@ -244,8 +251,8 @@ void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *bearer)
244 if (respond && (mtyp == DSC_REQ_MSG)) { 251 if (respond && (mtyp == DSC_REQ_MSG)) {
245 rbuf = tipc_buf_acquire(INT_H_SIZE); 252 rbuf = tipc_buf_acquire(INT_H_SIZE);
246 if (rbuf) { 253 if (rbuf) {
247 tipc_disc_init_msg(rbuf, DSC_RESP_MSG, bearer); 254 tipc_disc_init_msg(net, rbuf, DSC_RESP_MSG, bearer);
248 tipc_bearer_send(bearer->identity, rbuf, &maddr); 255 tipc_bearer_send(net, bearer->identity, rbuf, &maddr);
249 kfree_skb(rbuf); 256 kfree_skb(rbuf);
250 } 257 }
251 } 258 }
@@ -265,7 +272,7 @@ static void disc_update(struct tipc_link_req *req)
265 if ((req->timer_intv == TIPC_LINK_REQ_INACTIVE) || 272 if ((req->timer_intv == TIPC_LINK_REQ_INACTIVE) ||
266 (req->timer_intv > TIPC_LINK_REQ_FAST)) { 273 (req->timer_intv > TIPC_LINK_REQ_FAST)) {
267 req->timer_intv = TIPC_LINK_REQ_INIT; 274 req->timer_intv = TIPC_LINK_REQ_INIT;
268 k_start_timer(&req->timer, req->timer_intv); 275 mod_timer(&req->timer, jiffies + req->timer_intv);
269 } 276 }
270 } 277 }
271} 278}
@@ -295,12 +302,13 @@ void tipc_disc_remove_dest(struct tipc_link_req *req)
295 302
296/** 303/**
297 * disc_timeout - send a periodic link setup request 304 * disc_timeout - send a periodic link setup request
298 * @req: ptr to link request structure 305 * @data: ptr to link request structure
299 * 306 *
300 * Called whenever a link setup request timer associated with a bearer expires. 307 * Called whenever a link setup request timer associated with a bearer expires.
301 */ 308 */
302static void disc_timeout(struct tipc_link_req *req) 309static void disc_timeout(unsigned long data)
303{ 310{
311 struct tipc_link_req *req = (struct tipc_link_req *)data;
304 int max_delay; 312 int max_delay;
305 313
306 spin_lock_bh(&req->lock); 314 spin_lock_bh(&req->lock);
@@ -318,7 +326,7 @@ static void disc_timeout(struct tipc_link_req *req)
318 * hold at fast polling rate if we don't have any associated nodes, 326 * hold at fast polling rate if we don't have any associated nodes,
319 * otherwise hold at slow polling rate 327 * otherwise hold at slow polling rate
320 */ 328 */
321 tipc_bearer_send(req->bearer_id, req->buf, &req->dest); 329 tipc_bearer_send(req->net, req->bearer_id, req->buf, &req->dest);
322 330
323 331
324 req->timer_intv *= 2; 332 req->timer_intv *= 2;
@@ -329,20 +337,22 @@ static void disc_timeout(struct tipc_link_req *req)
329 if (req->timer_intv > max_delay) 337 if (req->timer_intv > max_delay)
330 req->timer_intv = max_delay; 338 req->timer_intv = max_delay;
331 339
332 k_start_timer(&req->timer, req->timer_intv); 340 mod_timer(&req->timer, jiffies + req->timer_intv);
333exit: 341exit:
334 spin_unlock_bh(&req->lock); 342 spin_unlock_bh(&req->lock);
335} 343}
336 344
337/** 345/**
338 * tipc_disc_create - create object to send periodic link setup requests 346 * tipc_disc_create - create object to send periodic link setup requests
347 * @net: the applicable net namespace
339 * @b_ptr: ptr to bearer issuing requests 348 * @b_ptr: ptr to bearer issuing requests
340 * @dest: destination address for request messages 349 * @dest: destination address for request messages
341 * @dest_domain: network domain to which links can be established 350 * @dest_domain: network domain to which links can be established
342 * 351 *
343 * Returns 0 if successful, otherwise -errno. 352 * Returns 0 if successful, otherwise -errno.
344 */ 353 */
345int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest) 354int tipc_disc_create(struct net *net, struct tipc_bearer *b_ptr,
355 struct tipc_media_addr *dest)
346{ 356{
347 struct tipc_link_req *req; 357 struct tipc_link_req *req;
348 358
@@ -356,17 +366,18 @@ int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest)
356 return -ENOMEM; 366 return -ENOMEM;
357 } 367 }
358 368
359 tipc_disc_init_msg(req->buf, DSC_REQ_MSG, b_ptr); 369 tipc_disc_init_msg(net, req->buf, DSC_REQ_MSG, b_ptr);
360 memcpy(&req->dest, dest, sizeof(*dest)); 370 memcpy(&req->dest, dest, sizeof(*dest));
371 req->net = net;
361 req->bearer_id = b_ptr->identity; 372 req->bearer_id = b_ptr->identity;
362 req->domain = b_ptr->domain; 373 req->domain = b_ptr->domain;
363 req->num_nodes = 0; 374 req->num_nodes = 0;
364 req->timer_intv = TIPC_LINK_REQ_INIT; 375 req->timer_intv = TIPC_LINK_REQ_INIT;
365 spin_lock_init(&req->lock); 376 spin_lock_init(&req->lock);
366 k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req); 377 setup_timer(&req->timer, disc_timeout, (unsigned long)req);
367 k_start_timer(&req->timer, req->timer_intv); 378 mod_timer(&req->timer, jiffies + req->timer_intv);
368 b_ptr->link_req = req; 379 b_ptr->link_req = req;
369 tipc_bearer_send(req->bearer_id, req->buf, &req->dest); 380 tipc_bearer_send(net, req->bearer_id, req->buf, &req->dest);
370 return 0; 381 return 0;
371} 382}
372 383
@@ -376,28 +387,29 @@ int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest)
376 */ 387 */
377void tipc_disc_delete(struct tipc_link_req *req) 388void tipc_disc_delete(struct tipc_link_req *req)
378{ 389{
379 k_cancel_timer(&req->timer); 390 del_timer_sync(&req->timer);
380 k_term_timer(&req->timer);
381 kfree_skb(req->buf); 391 kfree_skb(req->buf);
382 kfree(req); 392 kfree(req);
383} 393}
384 394
385/** 395/**
386 * tipc_disc_reset - reset object to send periodic link setup requests 396 * tipc_disc_reset - reset object to send periodic link setup requests
397 * @net: the applicable net namespace
387 * @b_ptr: ptr to bearer issuing requests 398 * @b_ptr: ptr to bearer issuing requests
388 * @dest_domain: network domain to which links can be established 399 * @dest_domain: network domain to which links can be established
389 */ 400 */
390void tipc_disc_reset(struct tipc_bearer *b_ptr) 401void tipc_disc_reset(struct net *net, struct tipc_bearer *b_ptr)
391{ 402{
392 struct tipc_link_req *req = b_ptr->link_req; 403 struct tipc_link_req *req = b_ptr->link_req;
393 404
394 spin_lock_bh(&req->lock); 405 spin_lock_bh(&req->lock);
395 tipc_disc_init_msg(req->buf, DSC_REQ_MSG, b_ptr); 406 tipc_disc_init_msg(net, req->buf, DSC_REQ_MSG, b_ptr);
407 req->net = net;
396 req->bearer_id = b_ptr->identity; 408 req->bearer_id = b_ptr->identity;
397 req->domain = b_ptr->domain; 409 req->domain = b_ptr->domain;
398 req->num_nodes = 0; 410 req->num_nodes = 0;
399 req->timer_intv = TIPC_LINK_REQ_INIT; 411 req->timer_intv = TIPC_LINK_REQ_INIT;
400 k_start_timer(&req->timer, req->timer_intv); 412 mod_timer(&req->timer, jiffies + req->timer_intv);
401 tipc_bearer_send(req->bearer_id, req->buf, &req->dest); 413 tipc_bearer_send(net, req->bearer_id, req->buf, &req->dest);
402 spin_unlock_bh(&req->lock); 414 spin_unlock_bh(&req->lock);
403} 415}
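
The discover.c hunks above replace TIPC's private k_init_timer()/k_start_timer()/k_cancel_timer()/k_term_timer() wrappers with the stock kernel timer API. A minimal sketch of that pattern, using hypothetical names (disc_req, disc_expire) rather than the real tipc_link_req code:

#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/slab.h>

struct disc_req {				/* hypothetical stand-in for tipc_link_req */
	struct timer_list timer;
	unsigned long timer_intv;		/* interval, already in jiffies */
};

static void disc_expire(unsigned long data)	/* pre-4.15 callback signature */
{
	struct disc_req *req = (struct disc_req *)data;

	/* ... send the next discovery request ..., then re-arm */
	mod_timer(&req->timer, jiffies + req->timer_intv);
}

static struct disc_req *disc_req_create(unsigned long intv)
{
	struct disc_req *req = kzalloc(sizeof(*req), GFP_ATOMIC);

	if (!req)
		return NULL;
	req->timer_intv = intv;
	setup_timer(&req->timer, disc_expire, (unsigned long)req);
	mod_timer(&req->timer, jiffies + req->timer_intv);
	return req;
}

static void disc_req_delete(struct disc_req *req)
{
	del_timer_sync(&req->timer);	/* cancel and wait for a running handler */
	kfree(req);
}

del_timer_sync() both cancels the timer and waits out a running handler, which is why the old two-step teardown in tipc_disc_delete() collapses into a single call in the hunk above.
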
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
index 515b57392f4d..c9b12770c5ed 100644
--- a/net/tipc/discover.h
+++ b/net/tipc/discover.h
@@ -39,11 +39,13 @@
39 39
40struct tipc_link_req; 40struct tipc_link_req;
41 41
42int tipc_disc_create(struct tipc_bearer *b_ptr, struct tipc_media_addr *dest); 42int tipc_disc_create(struct net *net, struct tipc_bearer *b_ptr,
43 struct tipc_media_addr *dest);
43void tipc_disc_delete(struct tipc_link_req *req); 44void tipc_disc_delete(struct tipc_link_req *req);
44void tipc_disc_reset(struct tipc_bearer *b_ptr); 45void tipc_disc_reset(struct net *net, struct tipc_bearer *b_ptr);
45void tipc_disc_add_dest(struct tipc_link_req *req); 46void tipc_disc_add_dest(struct tipc_link_req *req);
46void tipc_disc_remove_dest(struct tipc_link_req *req); 47void tipc_disc_remove_dest(struct tipc_link_req *req);
47void tipc_disc_rcv(struct sk_buff *buf, struct tipc_bearer *b_ptr); 48void tipc_disc_rcv(struct net *net, struct sk_buff *buf,
49 struct tipc_bearer *b_ptr);
48 50
49#endif 51#endif
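
From here on, nearly every converted function takes a struct net * and resolves its per-namespace TIPC state with net_generic(net, tipc_net_id) instead of reading globals such as tipc_own_addr. A trimmed, hypothetical sketch of that lookup; demo_net and demo_net_id stand in for the much larger struct tipc_net and its registered pernet id:

#include <linux/types.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

static int demo_net_id;		/* hypothetical; the real id is tipc_net_id,
				 * registered through the pernet subsystem
				 */

struct demo_net {		/* trimmed stand-in for struct tipc_net */
	u32 own_addr;
	int net_id;
};

static u32 demo_own_addr(struct net *net)
{
	struct demo_net *dn = net_generic(net, demo_net_id);

	return dn->own_addr;	/* replaces the old global tipc_own_addr */
}
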
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 23bcc1132365..a4cf364316de 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -40,7 +40,6 @@
40#include "socket.h" 40#include "socket.h"
41#include "name_distr.h" 41#include "name_distr.h"
42#include "discover.h" 42#include "discover.h"
43#include "config.h"
44#include "netlink.h" 43#include "netlink.h"
45 44
46#include <linux/pkt_sched.h> 45#include <linux/pkt_sched.h>
@@ -101,19 +100,20 @@ static const struct nla_policy tipc_nl_prop_policy[TIPC_NLA_PROP_MAX + 1] = {
101 */ 100 */
102#define START_CHANGEOVER 100000u 101#define START_CHANGEOVER 100000u
103 102
104static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr, 103static void link_handle_out_of_seq_msg(struct tipc_link *link,
105 struct sk_buff *buf); 104 struct sk_buff *skb);
106static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf); 105static void tipc_link_proto_rcv(struct tipc_link *link,
107static int tipc_link_tunnel_rcv(struct tipc_node *n_ptr, 106 struct sk_buff *skb);
108 struct sk_buff **buf); 107static int tipc_link_tunnel_rcv(struct tipc_node *node,
109static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance); 108 struct sk_buff **skb);
109static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol);
110static void link_state_event(struct tipc_link *l_ptr, u32 event); 110static void link_state_event(struct tipc_link *l_ptr, u32 event);
111static void link_reset_statistics(struct tipc_link *l_ptr); 111static void link_reset_statistics(struct tipc_link *l_ptr);
112static void link_print(struct tipc_link *l_ptr, const char *str); 112static void link_print(struct tipc_link *l_ptr, const char *str);
113static void tipc_link_sync_xmit(struct tipc_link *l); 113static void tipc_link_sync_xmit(struct tipc_link *l);
114static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf); 114static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
115static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf); 115static void tipc_link_input(struct tipc_link *l, struct sk_buff *skb);
116static int tipc_link_prepare_input(struct tipc_link *l, struct sk_buff **buf); 116static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb);
117 117
118/* 118/*
119 * Simple link routines 119 * Simple link routines
@@ -123,13 +123,30 @@ static unsigned int align(unsigned int i)
123 return (i + 3) & ~3u; 123 return (i + 3) & ~3u;
124} 124}
125 125
126static void tipc_link_release(struct kref *kref)
127{
128 kfree(container_of(kref, struct tipc_link, ref));
129}
130
131static void tipc_link_get(struct tipc_link *l_ptr)
132{
133 kref_get(&l_ptr->ref);
134}
135
136static void tipc_link_put(struct tipc_link *l_ptr)
137{
138 kref_put(&l_ptr->ref, tipc_link_release);
139}
140
126static void link_init_max_pkt(struct tipc_link *l_ptr) 141static void link_init_max_pkt(struct tipc_link *l_ptr)
127{ 142{
143 struct tipc_node *node = l_ptr->owner;
144 struct tipc_net *tn = net_generic(node->net, tipc_net_id);
128 struct tipc_bearer *b_ptr; 145 struct tipc_bearer *b_ptr;
129 u32 max_pkt; 146 u32 max_pkt;
130 147
131 rcu_read_lock(); 148 rcu_read_lock();
132 b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]); 149 b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
133 if (!b_ptr) { 150 if (!b_ptr) {
134 rcu_read_unlock(); 151 rcu_read_unlock();
135 return; 152 return;
@@ -169,8 +186,9 @@ int tipc_link_is_active(struct tipc_link *l_ptr)
169 * link_timeout - handle expiration of link timer 186 * link_timeout - handle expiration of link timer
170 * @l_ptr: pointer to link 187 * @l_ptr: pointer to link
171 */ 188 */
172static void link_timeout(struct tipc_link *l_ptr) 189static void link_timeout(unsigned long data)
173{ 190{
191 struct tipc_link *l_ptr = (struct tipc_link *)data;
174 struct sk_buff *skb; 192 struct sk_buff *skb;
175 193
176 tipc_node_lock(l_ptr->owner); 194 tipc_node_lock(l_ptr->owner);
@@ -215,11 +233,13 @@ static void link_timeout(struct tipc_link *l_ptr)
215 tipc_link_push_packets(l_ptr); 233 tipc_link_push_packets(l_ptr);
216 234
217 tipc_node_unlock(l_ptr->owner); 235 tipc_node_unlock(l_ptr->owner);
236 tipc_link_put(l_ptr);
218} 237}
219 238
220static void link_set_timer(struct tipc_link *l_ptr, u32 time) 239static void link_set_timer(struct tipc_link *link, unsigned long time)
221{ 240{
222 k_start_timer(&l_ptr->timer, time); 241 if (!mod_timer(&link->timer, jiffies + time))
242 tipc_link_get(link);
223} 243}
224 244
225/** 245/**
@@ -234,6 +254,7 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
234 struct tipc_bearer *b_ptr, 254 struct tipc_bearer *b_ptr,
235 const struct tipc_media_addr *media_addr) 255 const struct tipc_media_addr *media_addr)
236{ 256{
257 struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
237 struct tipc_link *l_ptr; 258 struct tipc_link *l_ptr;
238 struct tipc_msg *msg; 259 struct tipc_msg *msg;
239 char *if_name; 260 char *if_name;
@@ -259,12 +280,12 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
259 pr_warn("Link creation failed, no memory\n"); 280 pr_warn("Link creation failed, no memory\n");
260 return NULL; 281 return NULL;
261 } 282 }
262 283 kref_init(&l_ptr->ref);
263 l_ptr->addr = peer; 284 l_ptr->addr = peer;
264 if_name = strchr(b_ptr->name, ':') + 1; 285 if_name = strchr(b_ptr->name, ':') + 1;
265 sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown", 286 sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
266 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr), 287 tipc_zone(tn->own_addr), tipc_cluster(tn->own_addr),
267 tipc_node(tipc_own_addr), 288 tipc_node(tn->own_addr),
268 if_name, 289 if_name,
269 tipc_zone(peer), tipc_cluster(peer), tipc_node(peer)); 290 tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
270 /* note: peer i/f name is updated by reset/activate message */ 291 /* note: peer i/f name is updated by reset/activate message */
@@ -278,9 +299,10 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
278 299
279 l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg; 300 l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
280 msg = l_ptr->pmsg; 301 msg = l_ptr->pmsg;
281 tipc_msg_init(msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE, l_ptr->addr); 302 tipc_msg_init(tn->own_addr, msg, LINK_PROTOCOL, RESET_MSG, INT_H_SIZE,
303 l_ptr->addr);
282 msg_set_size(msg, sizeof(l_ptr->proto_msg)); 304 msg_set_size(msg, sizeof(l_ptr->proto_msg));
283 msg_set_session(msg, (tipc_random & 0xffff)); 305 msg_set_session(msg, (tn->random & 0xffff));
284 msg_set_bearer_id(msg, b_ptr->identity); 306 msg_set_bearer_id(msg, b_ptr->identity);
285 strcpy((char *)msg_data(msg), if_name); 307 strcpy((char *)msg_data(msg), if_name);
286 308
@@ -293,48 +315,52 @@ struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
293 l_ptr->next_out_no = 1; 315 l_ptr->next_out_no = 1;
294 __skb_queue_head_init(&l_ptr->outqueue); 316 __skb_queue_head_init(&l_ptr->outqueue);
295 __skb_queue_head_init(&l_ptr->deferred_queue); 317 __skb_queue_head_init(&l_ptr->deferred_queue);
296 skb_queue_head_init(&l_ptr->waiting_sks); 318 skb_queue_head_init(&l_ptr->wakeupq);
297 319 skb_queue_head_init(&l_ptr->inputq);
320 skb_queue_head_init(&l_ptr->namedq);
298 link_reset_statistics(l_ptr); 321 link_reset_statistics(l_ptr);
299
300 tipc_node_attach_link(n_ptr, l_ptr); 322 tipc_node_attach_link(n_ptr, l_ptr);
301 323 setup_timer(&l_ptr->timer, link_timeout, (unsigned long)l_ptr);
302 k_init_timer(&l_ptr->timer, (Handler)link_timeout,
303 (unsigned long)l_ptr);
304
305 link_state_event(l_ptr, STARTING_EVT); 324 link_state_event(l_ptr, STARTING_EVT);
306 325
307 return l_ptr; 326 return l_ptr;
308} 327}
309 328
310void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down) 329/**
330 * link_delete - Conditional deletion of link.
331 * If timer still running, real delete is done when it expires
332 * @link: link to be deleted
333 */
334void tipc_link_delete(struct tipc_link *link)
335{
336 tipc_link_reset_fragments(link);
337 tipc_node_detach_link(link->owner, link);
338 tipc_link_put(link);
339}
340
341void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
342 bool shutting_down)
311{ 343{
312 struct tipc_link *l_ptr; 344 struct tipc_net *tn = net_generic(net, tipc_net_id);
313 struct tipc_node *n_ptr; 345 struct tipc_link *link;
346 struct tipc_node *node;
314 347
315 rcu_read_lock(); 348 rcu_read_lock();
316 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { 349 list_for_each_entry_rcu(node, &tn->node_list, list) {
317 tipc_node_lock(n_ptr); 350 tipc_node_lock(node);
318 l_ptr = n_ptr->links[bearer_id]; 351 link = node->links[bearer_id];
319 if (l_ptr) { 352 if (!link) {
320 tipc_link_reset(l_ptr); 353 tipc_node_unlock(node);
321 if (shutting_down || !tipc_node_is_up(n_ptr)) {
322 tipc_node_detach_link(l_ptr->owner, l_ptr);
323 tipc_link_reset_fragments(l_ptr);
324 tipc_node_unlock(n_ptr);
325
326 /* Nobody else can access this link now: */
327 del_timer_sync(&l_ptr->timer);
328 kfree(l_ptr);
329 } else {
330 /* Detach/delete when failover is finished: */
331 l_ptr->flags |= LINK_STOPPED;
332 tipc_node_unlock(n_ptr);
333 del_timer_sync(&l_ptr->timer);
334 }
335 continue; 354 continue;
336 } 355 }
337 tipc_node_unlock(n_ptr); 356 tipc_link_reset(link);
357 if (del_timer(&link->timer))
358 tipc_link_put(link);
359 link->flags |= LINK_STOPPED;
360 /* Delete link now, or when failover is finished: */
361 if (shutting_down || !tipc_node_is_up(node))
362 tipc_link_delete(link);
363 tipc_node_unlock(node);
338 } 364 }
339 rcu_read_unlock(); 365 rcu_read_unlock();
340} 366}
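
The hunk above adds reference counting to struct tipc_link so that both the owning node and a possibly still-pending timer can keep the link alive; tipc_link_delete() then just drops a reference. A minimal sketch of the same kref pattern with hypothetical names:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct demo_link {			/* hypothetical stand-in for tipc_link */
	struct kref ref;
	/* timer, queues, ... */
};

static void demo_link_release(struct kref *kref)
{
	kfree(container_of(kref, struct demo_link, ref));
}

static void demo_link_get(struct demo_link *l)
{
	kref_get(&l->ref);		/* e.g. taken when the timer is armed */
}

static void demo_link_put(struct demo_link *l)
{
	kref_put(&l->ref, demo_link_release);	/* frees on the last reference */
}

static struct demo_link *demo_link_create(void)
{
	struct demo_link *l = kzalloc(sizeof(*l), GFP_ATOMIC);

	if (l)
		kref_init(&l->ref);	/* the owner holds the initial reference */
	return l;
}

With this in place, tearing down a link whose timer is still pending is safe in the code above: del_timer() returning true drops the timer's reference, and the final tipc_link_put() performs the actual kfree().
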
@@ -352,13 +378,14 @@ static bool link_schedule_user(struct tipc_link *link, u32 oport,
352{ 378{
353 struct sk_buff *buf; 379 struct sk_buff *buf;
354 380
355 buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0, tipc_own_addr, 381 buf = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
356 tipc_own_addr, oport, 0, 0); 382 link_own_addr(link), link_own_addr(link),
383 oport, 0, 0);
357 if (!buf) 384 if (!buf)
358 return false; 385 return false;
359 TIPC_SKB_CB(buf)->chain_sz = chain_sz; 386 TIPC_SKB_CB(buf)->chain_sz = chain_sz;
360 TIPC_SKB_CB(buf)->chain_imp = imp; 387 TIPC_SKB_CB(buf)->chain_imp = imp;
361 skb_queue_tail(&link->waiting_sks, buf); 388 skb_queue_tail(&link->wakeupq, buf);
362 link->stats.link_congs++; 389 link->stats.link_congs++;
363 return true; 390 return true;
364} 391}
@@ -369,17 +396,19 @@ static bool link_schedule_user(struct tipc_link *link, u32 oport,
369 * Move a number of waiting users, as permitted by available space in 396 * Move a number of waiting users, as permitted by available space in
370 * the send queue, from link wait queue to node wait queue for wakeup 397 * the send queue, from link wait queue to node wait queue for wakeup
371 */ 398 */
372static void link_prepare_wakeup(struct tipc_link *link) 399void link_prepare_wakeup(struct tipc_link *link)
373{ 400{
374 uint pend_qsz = skb_queue_len(&link->outqueue); 401 uint pend_qsz = skb_queue_len(&link->outqueue);
375 struct sk_buff *skb, *tmp; 402 struct sk_buff *skb, *tmp;
376 403
377 skb_queue_walk_safe(&link->waiting_sks, skb, tmp) { 404 skb_queue_walk_safe(&link->wakeupq, skb, tmp) {
378 if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(skb)->chain_imp]) 405 if (pend_qsz >= link->queue_limit[TIPC_SKB_CB(skb)->chain_imp])
379 break; 406 break;
380 pend_qsz += TIPC_SKB_CB(skb)->chain_sz; 407 pend_qsz += TIPC_SKB_CB(skb)->chain_sz;
381 skb_unlink(skb, &link->waiting_sks); 408 skb_unlink(skb, &link->wakeupq);
382 skb_queue_tail(&link->owner->waiting_sks, skb); 409 skb_queue_tail(&link->inputq, skb);
410 link->owner->inputq = &link->inputq;
411 link->owner->action_flags |= TIPC_MSG_EVT;
383 } 412 }
384} 413}
385 414
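
link_prepare_wakeup() above now moves pending wakeup buffers from the link's wakeupq straight into its inputq. The underlying sk_buff_head idiom, sketched with hypothetical names and an arbitrary budget parameter:

#include <linux/skbuff.h>

static void demo_move_ready(struct sk_buff_head *from, struct sk_buff_head *to,
			    unsigned int budget)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(from, skb, tmp) {	/* safe against unlinking skb */
		if (!budget--)
			break;
		skb_unlink(skb, from);
		skb_queue_tail(to, skb);
	}
}
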
@@ -425,20 +454,20 @@ void tipc_link_reset(struct tipc_link *l_ptr)
425 return; 454 return;
426 455
427 tipc_node_link_down(l_ptr->owner, l_ptr); 456 tipc_node_link_down(l_ptr->owner, l_ptr);
428 tipc_bearer_remove_dest(l_ptr->bearer_id, l_ptr->addr); 457 tipc_bearer_remove_dest(owner->net, l_ptr->bearer_id, l_ptr->addr);
429 458
430 if (was_active_link && tipc_node_active_links(l_ptr->owner)) { 459 if (was_active_link && tipc_node_active_links(l_ptr->owner)) {
431 l_ptr->reset_checkpoint = checkpoint; 460 l_ptr->reset_checkpoint = checkpoint;
432 l_ptr->exp_msg_count = START_CHANGEOVER; 461 l_ptr->exp_msg_count = START_CHANGEOVER;
433 } 462 }
434 463
435 /* Clean up all queues: */ 464 /* Clean up all queues, except inputq: */
436 __skb_queue_purge(&l_ptr->outqueue); 465 __skb_queue_purge(&l_ptr->outqueue);
437 __skb_queue_purge(&l_ptr->deferred_queue); 466 __skb_queue_purge(&l_ptr->deferred_queue);
438 if (!skb_queue_empty(&l_ptr->waiting_sks)) { 467 skb_queue_splice_init(&l_ptr->wakeupq, &l_ptr->inputq);
439 skb_queue_splice_init(&l_ptr->waiting_sks, &owner->waiting_sks); 468 if (!skb_queue_empty(&l_ptr->inputq))
440 owner->action_flags |= TIPC_WAKEUP_USERS; 469 owner->action_flags |= TIPC_MSG_EVT;
441 } 470 owner->inputq = &l_ptr->inputq;
442 l_ptr->next_out = NULL; 471 l_ptr->next_out = NULL;
443 l_ptr->unacked_window = 0; 472 l_ptr->unacked_window = 0;
444 l_ptr->checkpoint = 1; 473 l_ptr->checkpoint = 1;
@@ -448,13 +477,14 @@ void tipc_link_reset(struct tipc_link *l_ptr)
448 link_reset_statistics(l_ptr); 477 link_reset_statistics(l_ptr);
449} 478}
450 479
451void tipc_link_reset_list(unsigned int bearer_id) 480void tipc_link_reset_list(struct net *net, unsigned int bearer_id)
452{ 481{
482 struct tipc_net *tn = net_generic(net, tipc_net_id);
453 struct tipc_link *l_ptr; 483 struct tipc_link *l_ptr;
454 struct tipc_node *n_ptr; 484 struct tipc_node *n_ptr;
455 485
456 rcu_read_lock(); 486 rcu_read_lock();
457 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { 487 list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
458 tipc_node_lock(n_ptr); 488 tipc_node_lock(n_ptr);
459 l_ptr = n_ptr->links[bearer_id]; 489 l_ptr = n_ptr->links[bearer_id];
460 if (l_ptr) 490 if (l_ptr)
@@ -464,11 +494,14 @@ void tipc_link_reset_list(unsigned int bearer_id)
464 rcu_read_unlock(); 494 rcu_read_unlock();
465} 495}
466 496
467static void link_activate(struct tipc_link *l_ptr) 497static void link_activate(struct tipc_link *link)
468{ 498{
469 l_ptr->next_in_no = l_ptr->stats.recv_info = 1; 499 struct tipc_node *node = link->owner;
470 tipc_node_link_up(l_ptr->owner, l_ptr); 500
471 tipc_bearer_add_dest(l_ptr->bearer_id, l_ptr->addr); 501 link->next_in_no = 1;
502 link->stats.recv_info = 1;
503 tipc_node_link_up(node, link);
504 tipc_bearer_add_dest(node->net, link->bearer_id, link->addr);
472} 505}
473 506
474/** 507/**
@@ -479,7 +512,7 @@ static void link_activate(struct tipc_link *l_ptr)
479static void link_state_event(struct tipc_link *l_ptr, unsigned int event) 512static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
480{ 513{
481 struct tipc_link *other; 514 struct tipc_link *other;
482 u32 cont_intv = l_ptr->continuity_interval; 515 unsigned long cont_intv = l_ptr->cont_intv;
483 516
484 if (l_ptr->flags & LINK_STOPPED) 517 if (l_ptr->flags & LINK_STOPPED)
485 return; 518 return;
@@ -522,8 +555,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
522 link_set_timer(l_ptr, cont_intv / 4); 555 link_set_timer(l_ptr, cont_intv / 4);
523 break; 556 break;
524 case RESET_MSG: 557 case RESET_MSG:
525 pr_info("%s<%s>, requested by peer\n", link_rst_msg, 558 pr_debug("%s<%s>, requested by peer\n",
526 l_ptr->name); 559 link_rst_msg, l_ptr->name);
527 tipc_link_reset(l_ptr); 560 tipc_link_reset(l_ptr);
528 l_ptr->state = RESET_RESET; 561 l_ptr->state = RESET_RESET;
529 l_ptr->fsm_msg_cnt = 0; 562 l_ptr->fsm_msg_cnt = 0;
@@ -533,7 +566,7 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
533 link_set_timer(l_ptr, cont_intv); 566 link_set_timer(l_ptr, cont_intv);
534 break; 567 break;
535 default: 568 default:
536 pr_err("%s%u in WW state\n", link_unk_evt, event); 569 pr_debug("%s%u in WW state\n", link_unk_evt, event);
537 } 570 }
538 break; 571 break;
539 case WORKING_UNKNOWN: 572 case WORKING_UNKNOWN:
@@ -545,8 +578,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
545 link_set_timer(l_ptr, cont_intv); 578 link_set_timer(l_ptr, cont_intv);
546 break; 579 break;
547 case RESET_MSG: 580 case RESET_MSG:
548 pr_info("%s<%s>, requested by peer while probing\n", 581 pr_debug("%s<%s>, requested by peer while probing\n",
549 link_rst_msg, l_ptr->name); 582 link_rst_msg, l_ptr->name);
550 tipc_link_reset(l_ptr); 583 tipc_link_reset(l_ptr);
551 l_ptr->state = RESET_RESET; 584 l_ptr->state = RESET_RESET;
552 l_ptr->fsm_msg_cnt = 0; 585 l_ptr->fsm_msg_cnt = 0;
@@ -572,8 +605,8 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
572 l_ptr->fsm_msg_cnt++; 605 l_ptr->fsm_msg_cnt++;
573 link_set_timer(l_ptr, cont_intv / 4); 606 link_set_timer(l_ptr, cont_intv / 4);
574 } else { /* Link has failed */ 607 } else { /* Link has failed */
575 pr_warn("%s<%s>, peer not responding\n", 608 pr_debug("%s<%s>, peer not responding\n",
576 link_rst_msg, l_ptr->name); 609 link_rst_msg, l_ptr->name);
577 tipc_link_reset(l_ptr); 610 tipc_link_reset(l_ptr);
578 l_ptr->state = RESET_UNKNOWN; 611 l_ptr->state = RESET_UNKNOWN;
579 l_ptr->fsm_msg_cnt = 0; 612 l_ptr->fsm_msg_cnt = 0;
@@ -614,7 +647,9 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
614 break; 647 break;
615 case STARTING_EVT: 648 case STARTING_EVT:
616 l_ptr->flags |= LINK_STARTED; 649 l_ptr->flags |= LINK_STARTED;
617 /* fall through */ 650 l_ptr->fsm_msg_cnt++;
651 link_set_timer(l_ptr, cont_intv);
652 break;
618 case TIMEOUT_EVT: 653 case TIMEOUT_EVT:
619 tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0); 654 tipc_link_proto_xmit(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
620 l_ptr->fsm_msg_cnt++; 655 l_ptr->fsm_msg_cnt++;
@@ -700,7 +735,8 @@ drop:
700 * Only the socket functions tipc_send_stream() and tipc_send_packet() need 735 * Only the socket functions tipc_send_stream() and tipc_send_packet() need
701 * to act on the return value, since they may need to do more send attempts. 736 * to act on the return value, since they may need to do more send attempts.
702 */ 737 */
703int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list) 738int __tipc_link_xmit(struct net *net, struct tipc_link *link,
739 struct sk_buff_head *list)
704{ 740{
705 struct tipc_msg *msg = buf_msg(skb_peek(list)); 741 struct tipc_msg *msg = buf_msg(skb_peek(list));
706 uint psz = msg_size(msg); 742 uint psz = msg_size(msg);
@@ -733,7 +769,8 @@ int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list)
733 769
734 if (skb_queue_len(outqueue) < sndlim) { 770 if (skb_queue_len(outqueue) < sndlim) {
735 __skb_queue_tail(outqueue, skb); 771 __skb_queue_tail(outqueue, skb);
736 tipc_bearer_send(link->bearer_id, skb, addr); 772 tipc_bearer_send(net, link->bearer_id,
773 skb, addr);
737 link->next_out = NULL; 774 link->next_out = NULL;
738 link->unacked_window = 0; 775 link->unacked_window = 0;
739 } else if (tipc_msg_bundle(outqueue, skb, mtu)) { 776 } else if (tipc_msg_bundle(outqueue, skb, mtu)) {
@@ -758,7 +795,7 @@ int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list)
758 795
759static void skb2list(struct sk_buff *skb, struct sk_buff_head *list) 796static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
760{ 797{
761 __skb_queue_head_init(list); 798 skb_queue_head_init(list);
762 __skb_queue_tail(list, skb); 799 __skb_queue_tail(list, skb);
763} 800}
764 801
@@ -767,19 +804,21 @@ static int __tipc_link_xmit_skb(struct tipc_link *link, struct sk_buff *skb)
767 struct sk_buff_head head; 804 struct sk_buff_head head;
768 805
769 skb2list(skb, &head); 806 skb2list(skb, &head);
770 return __tipc_link_xmit(link, &head); 807 return __tipc_link_xmit(link->owner->net, link, &head);
771} 808}
772 809
773int tipc_link_xmit_skb(struct sk_buff *skb, u32 dnode, u32 selector) 810int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dnode,
811 u32 selector)
774{ 812{
775 struct sk_buff_head head; 813 struct sk_buff_head head;
776 814
777 skb2list(skb, &head); 815 skb2list(skb, &head);
778 return tipc_link_xmit(&head, dnode, selector); 816 return tipc_link_xmit(net, &head, dnode, selector);
779} 817}
780 818
781/** 819/**
782 * tipc_link_xmit() is the general link level function for message sending 820 * tipc_link_xmit() is the general link level function for message sending
821 * @net: the applicable net namespace
783 * @list: chain of buffers containing message 822 * @list: chain of buffers containing message
784 * @dsz: amount of user data to be sent 823 * @dsz: amount of user data to be sent
785 * @dnode: address of destination node 824 * @dnode: address of destination node
@@ -787,33 +826,28 @@ int tipc_link_xmit_skb(struct sk_buff *skb, u32 dnode, u32 selector)
787 * Consumes the buffer chain, except when returning -ELINKCONG 826 * Consumes the buffer chain, except when returning -ELINKCONG
788 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE 827 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
789 */ 828 */
790int tipc_link_xmit(struct sk_buff_head *list, u32 dnode, u32 selector) 829int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dnode,
830 u32 selector)
791{ 831{
792 struct tipc_link *link = NULL; 832 struct tipc_link *link = NULL;
793 struct tipc_node *node; 833 struct tipc_node *node;
794 int rc = -EHOSTUNREACH; 834 int rc = -EHOSTUNREACH;
795 835
796 node = tipc_node_find(dnode); 836 node = tipc_node_find(net, dnode);
797 if (node) { 837 if (node) {
798 tipc_node_lock(node); 838 tipc_node_lock(node);
799 link = node->active_links[selector & 1]; 839 link = node->active_links[selector & 1];
800 if (link) 840 if (link)
801 rc = __tipc_link_xmit(link, list); 841 rc = __tipc_link_xmit(net, link, list);
802 tipc_node_unlock(node); 842 tipc_node_unlock(node);
803 } 843 }
804
805 if (link) 844 if (link)
806 return rc; 845 return rc;
807 846
808 if (likely(in_own_node(dnode))) { 847 if (likely(in_own_node(net, dnode)))
809 /* As a node local message chain never contains more than one 848 return tipc_sk_rcv(net, list);
810 * buffer, we just need to dequeue one SKB buffer from the
811 * head list.
812 */
813 return tipc_sk_rcv(__skb_dequeue(list));
814 }
815 __skb_queue_purge(list);
816 849
850 __skb_queue_purge(list);
817 return rc; 851 return rc;
818} 852}
819 853
@@ -835,7 +869,8 @@ static void tipc_link_sync_xmit(struct tipc_link *link)
835 return; 869 return;
836 870
837 msg = buf_msg(skb); 871 msg = buf_msg(skb);
838 tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, link->addr); 872 tipc_msg_init(link_own_addr(link), msg, BCAST_PROTOCOL, STATE_MSG,
873 INT_H_SIZE, link->addr);
839 msg_set_last_bcast(msg, link->owner->bclink.acked); 874 msg_set_last_bcast(msg, link->owner->bclink.acked);
840 __tipc_link_xmit_skb(link, skb); 875 __tipc_link_xmit_skb(link, skb);
841} 876}
@@ -890,7 +925,8 @@ void tipc_link_push_packets(struct tipc_link *l_ptr)
890 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 925 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
891 if (msg_user(msg) == MSG_BUNDLER) 926 if (msg_user(msg) == MSG_BUNDLER)
892 TIPC_SKB_CB(skb)->bundling = false; 927 TIPC_SKB_CB(skb)->bundling = false;
893 tipc_bearer_send(l_ptr->bearer_id, skb, 928 tipc_bearer_send(l_ptr->owner->net,
929 l_ptr->bearer_id, skb,
894 &l_ptr->media_addr); 930 &l_ptr->media_addr);
895 l_ptr->next_out = tipc_skb_queue_next(outqueue, skb); 931 l_ptr->next_out = tipc_skb_queue_next(outqueue, skb);
896 } else { 932 } else {
@@ -923,6 +959,7 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
923 struct sk_buff *buf) 959 struct sk_buff *buf)
924{ 960{
925 struct tipc_msg *msg = buf_msg(buf); 961 struct tipc_msg *msg = buf_msg(buf);
962 struct net *net = l_ptr->owner->net;
926 963
927 pr_warn("Retransmission failure on link <%s>\n", l_ptr->name); 964 pr_warn("Retransmission failure on link <%s>\n", l_ptr->name);
928 965
@@ -940,7 +977,7 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
940 pr_cont("Outstanding acks: %lu\n", 977 pr_cont("Outstanding acks: %lu\n",
941 (unsigned long) TIPC_SKB_CB(buf)->handle); 978 (unsigned long) TIPC_SKB_CB(buf)->handle);
942 979
943 n_ptr = tipc_bclink_retransmit_to(); 980 n_ptr = tipc_bclink_retransmit_to(net);
944 tipc_node_lock(n_ptr); 981 tipc_node_lock(n_ptr);
945 982
946 tipc_addr_string_fill(addr_string, n_ptr->addr); 983 tipc_addr_string_fill(addr_string, n_ptr->addr);
@@ -955,7 +992,7 @@ static void link_retransmit_failure(struct tipc_link *l_ptr,
955 992
956 tipc_node_unlock(n_ptr); 993 tipc_node_unlock(n_ptr);
957 994
958 tipc_bclink_set_flags(TIPC_BCLINK_RESET); 995 tipc_bclink_set_flags(net, TIPC_BCLINK_RESET);
959 l_ptr->stale_count = 0; 996 l_ptr->stale_count = 0;
960 } 997 }
961} 998}
@@ -987,7 +1024,8 @@ void tipc_link_retransmit(struct tipc_link *l_ptr, struct sk_buff *skb,
987 msg = buf_msg(skb); 1024 msg = buf_msg(skb);
988 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); 1025 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
989 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1026 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
990 tipc_bearer_send(l_ptr->bearer_id, skb, &l_ptr->media_addr); 1027 tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, skb,
1028 &l_ptr->media_addr);
991 retransmits--; 1029 retransmits--;
992 l_ptr->stats.retransmitted++; 1030 l_ptr->stats.retransmitted++;
993 } 1031 }
@@ -1063,14 +1101,16 @@ static int link_recv_buf_validate(struct sk_buff *buf)
1063 1101
1064/** 1102/**
1065 * tipc_rcv - process TIPC packets/messages arriving from off-node 1103 * tipc_rcv - process TIPC packets/messages arriving from off-node
1104 * @net: the applicable net namespace
1066 * @skb: TIPC packet 1105 * @skb: TIPC packet
1067 * @b_ptr: pointer to bearer message arrived on 1106 * @b_ptr: pointer to bearer message arrived on
1068 * 1107 *
1069 * Invoked with no locks held. Bearer pointer must point to a valid bearer 1108 * Invoked with no locks held. Bearer pointer must point to a valid bearer
1070 * structure (i.e. cannot be NULL), but bearer can be inactive. 1109 * structure (i.e. cannot be NULL), but bearer can be inactive.
1071 */ 1110 */
1072void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr) 1111void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
1073{ 1112{
1113 struct tipc_net *tn = net_generic(net, tipc_net_id);
1074 struct sk_buff_head head; 1114 struct sk_buff_head head;
1075 struct tipc_node *n_ptr; 1115 struct tipc_node *n_ptr;
1076 struct tipc_link *l_ptr; 1116 struct tipc_link *l_ptr;
@@ -1096,19 +1136,19 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
1096 1136
1097 if (unlikely(msg_non_seq(msg))) { 1137 if (unlikely(msg_non_seq(msg))) {
1098 if (msg_user(msg) == LINK_CONFIG) 1138 if (msg_user(msg) == LINK_CONFIG)
1099 tipc_disc_rcv(skb, b_ptr); 1139 tipc_disc_rcv(net, skb, b_ptr);
1100 else 1140 else
1101 tipc_bclink_rcv(skb); 1141 tipc_bclink_rcv(net, skb);
1102 continue; 1142 continue;
1103 } 1143 }
1104 1144
1105 /* Discard unicast link messages destined for another node */ 1145 /* Discard unicast link messages destined for another node */
1106 if (unlikely(!msg_short(msg) && 1146 if (unlikely(!msg_short(msg) &&
1107 (msg_destnode(msg) != tipc_own_addr))) 1147 (msg_destnode(msg) != tn->own_addr)))
1108 goto discard; 1148 goto discard;
1109 1149
1110 /* Locate neighboring node that sent message */ 1150 /* Locate neighboring node that sent message */
1111 n_ptr = tipc_node_find(msg_prevnode(msg)); 1151 n_ptr = tipc_node_find(net, msg_prevnode(msg));
1112 if (unlikely(!n_ptr)) 1152 if (unlikely(!n_ptr))
1113 goto discard; 1153 goto discard;
1114 tipc_node_lock(n_ptr); 1154 tipc_node_lock(n_ptr);
@@ -1116,7 +1156,7 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
1116 /* Locate unicast link endpoint that should handle message */ 1156 /* Locate unicast link endpoint that should handle message */
1117 l_ptr = n_ptr->links[b_ptr->identity]; 1157 l_ptr = n_ptr->links[b_ptr->identity];
1118 if (unlikely(!l_ptr)) 1158 if (unlikely(!l_ptr))
1119 goto unlock_discard; 1159 goto unlock;
1120 1160
1121 /* Verify that communication with node is currently allowed */ 1161 /* Verify that communication with node is currently allowed */
1122 if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) && 1162 if ((n_ptr->action_flags & TIPC_WAIT_PEER_LINKS_DOWN) &&
@@ -1127,7 +1167,7 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
1127 n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN; 1167 n_ptr->action_flags &= ~TIPC_WAIT_PEER_LINKS_DOWN;
1128 1168
1129 if (tipc_node_blocked(n_ptr)) 1169 if (tipc_node_blocked(n_ptr))
1130 goto unlock_discard; 1170 goto unlock;
1131 1171
1132 /* Validate message sequence number info */ 1172 /* Validate message sequence number info */
1133 seq_no = msg_seqno(msg); 1173 seq_no = msg_seqno(msg);
@@ -1151,18 +1191,16 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
1151 if (unlikely(l_ptr->next_out)) 1191 if (unlikely(l_ptr->next_out))
1152 tipc_link_push_packets(l_ptr); 1192 tipc_link_push_packets(l_ptr);
1153 1193
1154 if (released && !skb_queue_empty(&l_ptr->waiting_sks)) { 1194 if (released && !skb_queue_empty(&l_ptr->wakeupq))
1155 link_prepare_wakeup(l_ptr); 1195 link_prepare_wakeup(l_ptr);
1156 l_ptr->owner->action_flags |= TIPC_WAKEUP_USERS;
1157 }
1158 1196
1159 /* Process the incoming packet */ 1197 /* Process the incoming packet */
1160 if (unlikely(!link_working_working(l_ptr))) { 1198 if (unlikely(!link_working_working(l_ptr))) {
1161 if (msg_user(msg) == LINK_PROTOCOL) { 1199 if (msg_user(msg) == LINK_PROTOCOL) {
1162 tipc_link_proto_rcv(l_ptr, skb); 1200 tipc_link_proto_rcv(l_ptr, skb);
1163 link_retrieve_defq(l_ptr, &head); 1201 link_retrieve_defq(l_ptr, &head);
1164 tipc_node_unlock(n_ptr); 1202 skb = NULL;
1165 continue; 1203 goto unlock;
1166 } 1204 }
1167 1205
1168 /* Traffic message. Conditionally activate link */ 1206 /* Traffic message. Conditionally activate link */
@@ -1171,18 +1209,18 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
1171 if (link_working_working(l_ptr)) { 1209 if (link_working_working(l_ptr)) {
1172 /* Re-insert buffer in front of queue */ 1210 /* Re-insert buffer in front of queue */
1173 __skb_queue_head(&head, skb); 1211 __skb_queue_head(&head, skb);
1174 tipc_node_unlock(n_ptr); 1212 skb = NULL;
1175 continue; 1213 goto unlock;
1176 } 1214 }
1177 goto unlock_discard; 1215 goto unlock;
1178 } 1216 }
1179 1217
1180 /* Link is now in state WORKING_WORKING */ 1218 /* Link is now in state WORKING_WORKING */
1181 if (unlikely(seq_no != mod(l_ptr->next_in_no))) { 1219 if (unlikely(seq_no != mod(l_ptr->next_in_no))) {
1182 link_handle_out_of_seq_msg(l_ptr, skb); 1220 link_handle_out_of_seq_msg(l_ptr, skb);
1183 link_retrieve_defq(l_ptr, &head); 1221 link_retrieve_defq(l_ptr, &head);
1184 tipc_node_unlock(n_ptr); 1222 skb = NULL;
1185 continue; 1223 goto unlock;
1186 } 1224 }
1187 l_ptr->next_in_no++; 1225 l_ptr->next_in_no++;
1188 if (unlikely(!skb_queue_empty(&l_ptr->deferred_queue))) 1226 if (unlikely(!skb_queue_empty(&l_ptr->deferred_queue)))
@@ -1192,95 +1230,102 @@ void tipc_rcv(struct sk_buff *skb, struct tipc_bearer *b_ptr)
1192 l_ptr->stats.sent_acks++; 1230 l_ptr->stats.sent_acks++;
1193 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0); 1231 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1194 } 1232 }
1195 1233 tipc_link_input(l_ptr, skb);
1196 if (tipc_link_prepare_input(l_ptr, &skb)) { 1234 skb = NULL;
1197 tipc_node_unlock(n_ptr); 1235unlock:
1198 continue;
1199 }
1200 tipc_node_unlock(n_ptr);
1201
1202 if (tipc_link_input(l_ptr, skb) != 0)
1203 goto discard;
1204 continue;
1205unlock_discard:
1206 tipc_node_unlock(n_ptr); 1236 tipc_node_unlock(n_ptr);
1207discard: 1237discard:
1208 kfree_skb(skb); 1238 if (unlikely(skb))
1239 kfree_skb(skb);
1209 } 1240 }
1210} 1241}
1211 1242
1212/** 1243/* tipc_data_input - deliver data and name distr msgs to upper layer
1213 * tipc_link_prepare_input - process TIPC link messages
1214 *
1215 * returns nonzero if the message was consumed
1216 * 1244 *
1245 * Consumes buffer if message is of right type
1217 * Node lock must be held 1246 * Node lock must be held
1218 */ 1247 */
1219static int tipc_link_prepare_input(struct tipc_link *l, struct sk_buff **buf) 1248static bool tipc_data_input(struct tipc_link *link, struct sk_buff *skb)
1220{ 1249{
1221 struct tipc_node *n; 1250 struct tipc_node *node = link->owner;
1222 struct tipc_msg *msg; 1251 struct tipc_msg *msg = buf_msg(skb);
1223 int res = -EINVAL; 1252 u32 dport = msg_destport(msg);
1224 1253
1225 n = l->owner;
1226 msg = buf_msg(*buf);
1227 switch (msg_user(msg)) { 1254 switch (msg_user(msg)) {
1228 case CHANGEOVER_PROTOCOL: 1255 case TIPC_LOW_IMPORTANCE:
1229 if (tipc_link_tunnel_rcv(n, buf)) 1256 case TIPC_MEDIUM_IMPORTANCE:
1230 res = 0; 1257 case TIPC_HIGH_IMPORTANCE:
1231 break; 1258 case TIPC_CRITICAL_IMPORTANCE:
1232 case MSG_FRAGMENTER: 1259 case CONN_MANAGER:
1233 l->stats.recv_fragments++; 1260 if (tipc_skb_queue_tail(&link->inputq, skb, dport)) {
1234 if (tipc_buf_append(&l->reasm_buf, buf)) { 1261 node->inputq = &link->inputq;
1235 l->stats.recv_fragmented++; 1262 node->action_flags |= TIPC_MSG_EVT;
1236 res = 0;
1237 } else if (!l->reasm_buf) {
1238 tipc_link_reset(l);
1239 } 1263 }
1240 break; 1264 return true;
1241 case MSG_BUNDLER:
1242 l->stats.recv_bundles++;
1243 l->stats.recv_bundled += msg_msgcnt(msg);
1244 res = 0;
1245 break;
1246 case NAME_DISTRIBUTOR: 1265 case NAME_DISTRIBUTOR:
1247 n->bclink.recv_permitted = true; 1266 node->bclink.recv_permitted = true;
1248 res = 0; 1267 node->namedq = &link->namedq;
1249 break; 1268 skb_queue_tail(&link->namedq, skb);
1269 if (skb_queue_len(&link->namedq) == 1)
1270 node->action_flags |= TIPC_NAMED_MSG_EVT;
1271 return true;
1272 case MSG_BUNDLER:
1273 case CHANGEOVER_PROTOCOL:
1274 case MSG_FRAGMENTER:
1250 case BCAST_PROTOCOL: 1275 case BCAST_PROTOCOL:
1251 tipc_link_sync_rcv(n, *buf); 1276 return false;
1252 break;
1253 default: 1277 default:
1254 res = 0; 1278 pr_warn("Dropping received illegal msg type\n");
1255 } 1279 kfree_skb(skb);
1256 return res; 1280 return false;
1281 };
1257} 1282}
1258/** 1283
 1259 * tipc_link_input - Deliver message to higher layers 1284 /* tipc_link_input - process packet that has passed link protocol check
1285 *
1286 * Consumes buffer
1287 * Node lock must be held
1260 */ 1288 */
1261static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf) 1289static void tipc_link_input(struct tipc_link *link, struct sk_buff *skb)
1262{ 1290{
1263 struct tipc_msg *msg = buf_msg(buf); 1291 struct tipc_node *node = link->owner;
1264 int res = 0; 1292 struct tipc_msg *msg = buf_msg(skb);
1293 struct sk_buff *iskb;
1294 int pos = 0;
1295
1296 if (likely(tipc_data_input(link, skb)))
1297 return;
1265 1298
1266 switch (msg_user(msg)) { 1299 switch (msg_user(msg)) {
1267 case TIPC_LOW_IMPORTANCE: 1300 case CHANGEOVER_PROTOCOL:
1268 case TIPC_MEDIUM_IMPORTANCE: 1301 if (!tipc_link_tunnel_rcv(node, &skb))
1269 case TIPC_HIGH_IMPORTANCE: 1302 break;
1270 case TIPC_CRITICAL_IMPORTANCE: 1303 if (msg_user(buf_msg(skb)) != MSG_BUNDLER) {
1271 case CONN_MANAGER: 1304 tipc_data_input(link, skb);
1272 tipc_sk_rcv(buf); 1305 break;
1306 }
1307 case MSG_BUNDLER:
1308 link->stats.recv_bundles++;
1309 link->stats.recv_bundled += msg_msgcnt(msg);
1310
1311 while (tipc_msg_extract(skb, &iskb, &pos))
1312 tipc_data_input(link, iskb);
1273 break; 1313 break;
1274 case NAME_DISTRIBUTOR: 1314 case MSG_FRAGMENTER:
1275 tipc_named_rcv(buf); 1315 link->stats.recv_fragments++;
1316 if (tipc_buf_append(&link->reasm_buf, &skb)) {
1317 link->stats.recv_fragmented++;
1318 tipc_data_input(link, skb);
1319 } else if (!link->reasm_buf) {
1320 tipc_link_reset(link);
1321 }
1276 break; 1322 break;
1277 case MSG_BUNDLER: 1323 case BCAST_PROTOCOL:
1278 tipc_link_bundle_rcv(buf); 1324 tipc_link_sync_rcv(node, skb);
1279 break; 1325 break;
1280 default: 1326 default:
1281 res = -EINVAL; 1327 break;
1282 } 1328 };
1283 return res;
1284} 1329}
1285 1330
1286/** 1331/**
@@ -1381,7 +1426,7 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
1381 msg_set_type(msg, msg_typ); 1426 msg_set_type(msg, msg_typ);
1382 msg_set_net_plane(msg, l_ptr->net_plane); 1427 msg_set_net_plane(msg, l_ptr->net_plane);
1383 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in); 1428 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1384 msg_set_last_bcast(msg, tipc_bclink_get_last_sent()); 1429 msg_set_last_bcast(msg, tipc_bclink_get_last_sent(l_ptr->owner->net));
1385 1430
1386 if (msg_typ == STATE_MSG) { 1431 if (msg_typ == STATE_MSG) {
1387 u32 next_sent = mod(l_ptr->next_out_no); 1432 u32 next_sent = mod(l_ptr->next_out_no);
@@ -1445,7 +1490,8 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
1445 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg)); 1490 skb_copy_to_linear_data(buf, msg, sizeof(l_ptr->proto_msg));
1446 buf->priority = TC_PRIO_CONTROL; 1491 buf->priority = TC_PRIO_CONTROL;
1447 1492
1448 tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr); 1493 tipc_bearer_send(l_ptr->owner->net, l_ptr->bearer_id, buf,
1494 &l_ptr->media_addr);
1449 l_ptr->unacked_window = 0; 1495 l_ptr->unacked_window = 0;
1450 kfree_skb(buf); 1496 kfree_skb(buf);
1451} 1497}
@@ -1455,7 +1501,8 @@ void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int probe_msg,
1455 * Note that network plane id propagates through the network, and may 1501 * Note that network plane id propagates through the network, and may
1456 * change at any time. The node with lowest address rules 1502 * change at any time. The node with lowest address rules
1457 */ 1503 */
1458static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf) 1504static void tipc_link_proto_rcv(struct tipc_link *l_ptr,
1505 struct sk_buff *buf)
1459{ 1506{
1460 u32 rec_gap = 0; 1507 u32 rec_gap = 0;
1461 u32 max_pkt_info; 1508 u32 max_pkt_info;
@@ -1468,7 +1515,7 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
1468 goto exit; 1515 goto exit;
1469 1516
1470 if (l_ptr->net_plane != msg_net_plane(msg)) 1517 if (l_ptr->net_plane != msg_net_plane(msg))
1471 if (tipc_own_addr > msg_prevnode(msg)) 1518 if (link_own_addr(l_ptr) > msg_prevnode(msg))
1472 l_ptr->net_plane = msg_net_plane(msg); 1519 l_ptr->net_plane = msg_net_plane(msg);
1473 1520
1474 switch (msg_type(msg)) { 1521 switch (msg_type(msg)) {
@@ -1535,9 +1582,9 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf)
1535 1582
1536 if (msg_linkprio(msg) && 1583 if (msg_linkprio(msg) &&
1537 (msg_linkprio(msg) != l_ptr->priority)) { 1584 (msg_linkprio(msg) != l_ptr->priority)) {
1538 pr_warn("%s<%s>, priority change %u->%u\n", 1585 pr_debug("%s<%s>, priority change %u->%u\n",
1539 link_rst_msg, l_ptr->name, l_ptr->priority, 1586 link_rst_msg, l_ptr->name,
1540 msg_linkprio(msg)); 1587 l_ptr->priority, msg_linkprio(msg));
1541 l_ptr->priority = msg_linkprio(msg); 1588 l_ptr->priority = msg_linkprio(msg);
1542 tipc_link_reset(l_ptr); /* Enforce change to take effect */ 1589 tipc_link_reset(l_ptr); /* Enforce change to take effect */
1543 break; 1590 break;
@@ -1636,8 +1683,8 @@ void tipc_link_failover_send_queue(struct tipc_link *l_ptr)
1636 if (!tunnel) 1683 if (!tunnel)
1637 return; 1684 return;
1638 1685
1639 tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, 1686 tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL,
1640 ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr); 1687 ORIGINAL_MSG, INT_H_SIZE, l_ptr->addr);
1641 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); 1688 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
1642 msg_set_msgcnt(&tunnel_hdr, msgcount); 1689 msg_set_msgcnt(&tunnel_hdr, msgcount);
1643 1690
@@ -1694,8 +1741,8 @@ void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
1694 struct sk_buff *skb; 1741 struct sk_buff *skb;
1695 struct tipc_msg tunnel_hdr; 1742 struct tipc_msg tunnel_hdr;
1696 1743
1697 tipc_msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL, 1744 tipc_msg_init(link_own_addr(l_ptr), &tunnel_hdr, CHANGEOVER_PROTOCOL,
1698 DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr); 1745 DUPLICATE_MSG, INT_H_SIZE, l_ptr->addr);
1699 msg_set_msgcnt(&tunnel_hdr, skb_queue_len(&l_ptr->outqueue)); 1746 msg_set_msgcnt(&tunnel_hdr, skb_queue_len(&l_ptr->outqueue));
1700 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id); 1747 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
1701 skb_queue_walk(&l_ptr->outqueue, skb) { 1748 skb_queue_walk(&l_ptr->outqueue, skb) {
@@ -1729,7 +1776,7 @@ void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr,
1729 * @from_pos: offset to extract from 1776 * @from_pos: offset to extract from
1730 * 1777 *
1731 * Returns a new message buffer containing an embedded message. The 1778 * Returns a new message buffer containing an embedded message. The
1732 * encapsulating message itself is left unchanged. 1779 * encapsulating buffer is left unchanged.
1733 */ 1780 */
1734static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos) 1781static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
1735{ 1782{
@@ -1743,8 +1790,6 @@ static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
1743 return eb; 1790 return eb;
1744} 1791}
1745 1792
1746
1747
1748/* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet. 1793/* tipc_link_dup_rcv(): Receive a tunnelled DUPLICATE_MSG packet.
1749 * Owner node is locked. 1794 * Owner node is locked.
1750 */ 1795 */
@@ -1804,10 +1849,8 @@ static struct sk_buff *tipc_link_failover_rcv(struct tipc_link *l_ptr,
1804 } 1849 }
1805 } 1850 }
1806exit: 1851exit:
1807 if ((l_ptr->exp_msg_count == 0) && (l_ptr->flags & LINK_STOPPED)) { 1852 if ((!l_ptr->exp_msg_count) && (l_ptr->flags & LINK_STOPPED))
1808 tipc_node_detach_link(l_ptr->owner, l_ptr); 1853 tipc_link_delete(l_ptr);
1809 kfree(l_ptr);
1810 }
1811 return buf; 1854 return buf;
1812} 1855}
1813 1856
@@ -1845,50 +1888,16 @@ exit:
1845 return *buf != NULL; 1888 return *buf != NULL;
1846} 1889}
1847 1890
1848/* 1891static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tol)
1849 * Bundler functionality:
1850 */
1851void tipc_link_bundle_rcv(struct sk_buff *buf)
1852{ 1892{
1853 u32 msgcount = msg_msgcnt(buf_msg(buf)); 1893 unsigned long intv = ((tol / 4) > 500) ? 500 : tol / 4;
1854 u32 pos = INT_H_SIZE;
1855 struct sk_buff *obuf;
1856 struct tipc_msg *omsg;
1857
1858 while (msgcount--) {
1859 obuf = buf_extract(buf, pos);
1860 if (obuf == NULL) {
1861 pr_warn("Link unable to unbundle message(s)\n");
1862 break;
1863 }
1864 omsg = buf_msg(obuf);
1865 pos += align(msg_size(omsg));
1866 if (msg_isdata(omsg)) {
1867 if (unlikely(msg_type(omsg) == TIPC_MCAST_MSG))
1868 tipc_sk_mcast_rcv(obuf);
1869 else
1870 tipc_sk_rcv(obuf);
1871 } else if (msg_user(omsg) == CONN_MANAGER) {
1872 tipc_sk_rcv(obuf);
1873 } else if (msg_user(omsg) == NAME_DISTRIBUTOR) {
1874 tipc_named_rcv(obuf);
1875 } else {
1876 pr_warn("Illegal bundled msg: %u\n", msg_user(omsg));
1877 kfree_skb(obuf);
1878 }
1879 }
1880 kfree_skb(buf);
1881}
1882 1894
1883static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance) 1895 if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
1884{
1885 if ((tolerance < TIPC_MIN_LINK_TOL) || (tolerance > TIPC_MAX_LINK_TOL))
1886 return; 1896 return;
1887 1897
1888 l_ptr->tolerance = tolerance; 1898 l_ptr->tolerance = tol;
1889 l_ptr->continuity_interval = 1899 l_ptr->cont_intv = msecs_to_jiffies(intv);
1890 ((tolerance / 4) > 500) ? 500 : tolerance / 4; 1900 l_ptr->abort_limit = tol / (jiffies_to_msecs(l_ptr->cont_intv) / 4);
1891 l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
1892} 1901}
1893 1902
1894void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window) 1903void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
@@ -1911,22 +1920,25 @@ void tipc_link_set_queue_limits(struct tipc_link *l_ptr, u32 window)
1911} 1920}
1912 1921
1913/* tipc_link_find_owner - locate owner node of link by link's name 1922/* tipc_link_find_owner - locate owner node of link by link's name
1923 * @net: the applicable net namespace
1914 * @name: pointer to link name string 1924 * @name: pointer to link name string
1915 * @bearer_id: pointer to index in 'node->links' array where the link was found. 1925 * @bearer_id: pointer to index in 'node->links' array where the link was found.
1916 * 1926 *
1917 * Returns pointer to node owning the link, or 0 if no matching link is found. 1927 * Returns pointer to node owning the link, or 0 if no matching link is found.
1918 */ 1928 */
1919static struct tipc_node *tipc_link_find_owner(const char *link_name, 1929static struct tipc_node *tipc_link_find_owner(struct net *net,
1930 const char *link_name,
1920 unsigned int *bearer_id) 1931 unsigned int *bearer_id)
1921{ 1932{
1933 struct tipc_net *tn = net_generic(net, tipc_net_id);
1922 struct tipc_link *l_ptr; 1934 struct tipc_link *l_ptr;
1923 struct tipc_node *n_ptr; 1935 struct tipc_node *n_ptr;
1924 struct tipc_node *found_node = 0; 1936 struct tipc_node *found_node = NULL;
1925 int i; 1937 int i;
1926 1938
1927 *bearer_id = 0; 1939 *bearer_id = 0;
1928 rcu_read_lock(); 1940 rcu_read_lock();
1929 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) { 1941 list_for_each_entry_rcu(n_ptr, &tn->node_list, list) {
1930 tipc_node_lock(n_ptr); 1942 tipc_node_lock(n_ptr);
1931 for (i = 0; i < MAX_BEARERS; i++) { 1943 for (i = 0; i < MAX_BEARERS; i++) {
1932 l_ptr = n_ptr->links[i]; 1944 l_ptr = n_ptr->links[i];
@@ -1946,148 +1958,6 @@ static struct tipc_node *tipc_link_find_owner(const char *link_name,
1946} 1958}
1947 1959
1948/** 1960/**
1949 * link_value_is_valid -- validate proposed link tolerance/priority/window
1950 *
1951 * @cmd: value type (TIPC_CMD_SET_LINK_*)
1952 * @new_value: the new value
1953 *
1954 * Returns 1 if value is within range, 0 if not.
1955 */
1956static int link_value_is_valid(u16 cmd, u32 new_value)
1957{
1958 switch (cmd) {
1959 case TIPC_CMD_SET_LINK_TOL:
1960 return (new_value >= TIPC_MIN_LINK_TOL) &&
1961 (new_value <= TIPC_MAX_LINK_TOL);
1962 case TIPC_CMD_SET_LINK_PRI:
1963 return (new_value <= TIPC_MAX_LINK_PRI);
1964 case TIPC_CMD_SET_LINK_WINDOW:
1965 return (new_value >= TIPC_MIN_LINK_WIN) &&
1966 (new_value <= TIPC_MAX_LINK_WIN);
1967 }
1968 return 0;
1969}
1970
1971/**
1972 * link_cmd_set_value - change priority/tolerance/window for link/bearer/media
1973 * @name: ptr to link, bearer, or media name
1974 * @new_value: new value of link, bearer, or media setting
1975 * @cmd: which link, bearer, or media attribute to set (TIPC_CMD_SET_LINK_*)
1976 *
1977 * Caller must hold RTNL lock to ensure link/bearer/media is not deleted.
1978 *
1979 * Returns 0 if value updated and negative value on error.
1980 */
1981static int link_cmd_set_value(const char *name, u32 new_value, u16 cmd)
1982{
1983 struct tipc_node *node;
1984 struct tipc_link *l_ptr;
1985 struct tipc_bearer *b_ptr;
1986 struct tipc_media *m_ptr;
1987 int bearer_id;
1988 int res = 0;
1989
1990 node = tipc_link_find_owner(name, &bearer_id);
1991 if (node) {
1992 tipc_node_lock(node);
1993 l_ptr = node->links[bearer_id];
1994
1995 if (l_ptr) {
1996 switch (cmd) {
1997 case TIPC_CMD_SET_LINK_TOL:
1998 link_set_supervision_props(l_ptr, new_value);
1999 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0,
2000 new_value, 0, 0);
2001 break;
2002 case TIPC_CMD_SET_LINK_PRI:
2003 l_ptr->priority = new_value;
2004 tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0,
2005 0, new_value, 0);
2006 break;
2007 case TIPC_CMD_SET_LINK_WINDOW:
2008 tipc_link_set_queue_limits(l_ptr, new_value);
2009 break;
2010 default:
2011 res = -EINVAL;
2012 break;
2013 }
2014 }
2015 tipc_node_unlock(node);
2016 return res;
2017 }
2018
2019 b_ptr = tipc_bearer_find(name);
2020 if (b_ptr) {
2021 switch (cmd) {
2022 case TIPC_CMD_SET_LINK_TOL:
2023 b_ptr->tolerance = new_value;
2024 break;
2025 case TIPC_CMD_SET_LINK_PRI:
2026 b_ptr->priority = new_value;
2027 break;
2028 case TIPC_CMD_SET_LINK_WINDOW:
2029 b_ptr->window = new_value;
2030 break;
2031 default:
2032 res = -EINVAL;
2033 break;
2034 }
2035 return res;
2036 }
2037
2038 m_ptr = tipc_media_find(name);
2039 if (!m_ptr)
2040 return -ENODEV;
2041 switch (cmd) {
2042 case TIPC_CMD_SET_LINK_TOL:
2043 m_ptr->tolerance = new_value;
2044 break;
2045 case TIPC_CMD_SET_LINK_PRI:
2046 m_ptr->priority = new_value;
2047 break;
2048 case TIPC_CMD_SET_LINK_WINDOW:
2049 m_ptr->window = new_value;
2050 break;
2051 default:
2052 res = -EINVAL;
2053 break;
2054 }
2055 return res;
2056}
2057
2058struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
2059 u16 cmd)
2060{
2061 struct tipc_link_config *args;
2062 u32 new_value;
2063 int res;
2064
2065 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
2066 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2067
2068 args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
2069 new_value = ntohl(args->value);
2070
2071 if (!link_value_is_valid(cmd, new_value))
2072 return tipc_cfg_reply_error_string(
2073 "cannot change, value invalid");
2074
2075 if (!strcmp(args->name, tipc_bclink_name)) {
2076 if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
2077 (tipc_bclink_set_queue_limits(new_value) == 0))
2078 return tipc_cfg_reply_none();
2079 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
2080 " (cannot change setting on broadcast link)");
2081 }
2082
2083 res = link_cmd_set_value(args->name, new_value, cmd);
2084 if (res)
2085 return tipc_cfg_reply_error_string("cannot change link setting");
2086
2087 return tipc_cfg_reply_none();
2088}
2089
2090/**
2091 * link_reset_statistics - reset link statistics 1961 * link_reset_statistics - reset link statistics
2092 * @l_ptr: pointer to link 1962 * @l_ptr: pointer to link
2093 */ 1963 */
@@ -2098,207 +1968,13 @@ static void link_reset_statistics(struct tipc_link *l_ptr)
2098 l_ptr->stats.recv_info = l_ptr->next_in_no; 1968 l_ptr->stats.recv_info = l_ptr->next_in_no;
2099} 1969}
2100 1970
2101struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
2102{
2103 char *link_name;
2104 struct tipc_link *l_ptr;
2105 struct tipc_node *node;
2106 unsigned int bearer_id;
2107
2108 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2109 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2110
2111 link_name = (char *)TLV_DATA(req_tlv_area);
2112 if (!strcmp(link_name, tipc_bclink_name)) {
2113 if (tipc_bclink_reset_stats())
2114 return tipc_cfg_reply_error_string("link not found");
2115 return tipc_cfg_reply_none();
2116 }
2117 node = tipc_link_find_owner(link_name, &bearer_id);
2118 if (!node)
2119 return tipc_cfg_reply_error_string("link not found");
2120
2121 tipc_node_lock(node);
2122 l_ptr = node->links[bearer_id];
2123 if (!l_ptr) {
2124 tipc_node_unlock(node);
2125 return tipc_cfg_reply_error_string("link not found");
2126 }
2127 link_reset_statistics(l_ptr);
2128 tipc_node_unlock(node);
2129 return tipc_cfg_reply_none();
2130}
2131
2132/**
 2133 * percent - convert count to a percentage of total (rounded to the nearest percent)
2134 */
2135static u32 percent(u32 count, u32 total)
2136{
2137 return (count * 100 + (total / 2)) / total;
2138}
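
Adding half of the total before dividing makes percent() round to the nearest whole percent instead of truncating. A quick standalone check of that arithmetic:

#include <assert.h>
#include <stdio.h>

static unsigned int percent(unsigned int count, unsigned int total)
{
        return (count * 100 + (total / 2)) / total;   /* same formula as above */
}

int main(void)
{
        assert(percent(1, 3) == 33);    /* 33.33... rounds down */
        assert(percent(2, 3) == 67);    /* 66.66... rounds up   */
        assert(percent(1, 2) == 50);
        printf("rounds to nearest as expected\n");
        return 0;
}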
2139
2140/**
2141 * tipc_link_stats - print link statistics
2142 * @name: link name
2143 * @buf: print buffer area
2144 * @buf_size: size of print buffer area
2145 *
2146 * Returns length of print buffer data string (or 0 if error)
2147 */
2148static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
2149{
2150 struct tipc_link *l;
2151 struct tipc_stats *s;
2152 struct tipc_node *node;
2153 char *status;
2154 u32 profile_total = 0;
2155 unsigned int bearer_id;
2156 int ret;
2157
2158 if (!strcmp(name, tipc_bclink_name))
2159 return tipc_bclink_stats(buf, buf_size);
2160
2161 node = tipc_link_find_owner(name, &bearer_id);
2162 if (!node)
2163 return 0;
2164
2165 tipc_node_lock(node);
2166
2167 l = node->links[bearer_id];
2168 if (!l) {
2169 tipc_node_unlock(node);
2170 return 0;
2171 }
2172
2173 s = &l->stats;
2174
2175 if (tipc_link_is_active(l))
2176 status = "ACTIVE";
2177 else if (tipc_link_is_up(l))
2178 status = "STANDBY";
2179 else
2180 status = "DEFUNCT";
2181
2182 ret = tipc_snprintf(buf, buf_size, "Link <%s>\n"
2183 " %s MTU:%u Priority:%u Tolerance:%u ms"
2184 " Window:%u packets\n",
2185 l->name, status, l->max_pkt, l->priority,
2186 l->tolerance, l->queue_limit[0]);
2187
2188 ret += tipc_snprintf(buf + ret, buf_size - ret,
2189 " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
2190 l->next_in_no - s->recv_info, s->recv_fragments,
2191 s->recv_fragmented, s->recv_bundles,
2192 s->recv_bundled);
2193
2194 ret += tipc_snprintf(buf + ret, buf_size - ret,
2195 " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
2196 l->next_out_no - s->sent_info, s->sent_fragments,
2197 s->sent_fragmented, s->sent_bundles,
2198 s->sent_bundled);
2199
2200 profile_total = s->msg_length_counts;
2201 if (!profile_total)
2202 profile_total = 1;
2203
2204 ret += tipc_snprintf(buf + ret, buf_size - ret,
2205 " TX profile sample:%u packets average:%u octets\n"
2206 " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
2207 "-16384:%u%% -32768:%u%% -66000:%u%%\n",
2208 s->msg_length_counts,
2209 s->msg_lengths_total / profile_total,
2210 percent(s->msg_length_profile[0], profile_total),
2211 percent(s->msg_length_profile[1], profile_total),
2212 percent(s->msg_length_profile[2], profile_total),
2213 percent(s->msg_length_profile[3], profile_total),
2214 percent(s->msg_length_profile[4], profile_total),
2215 percent(s->msg_length_profile[5], profile_total),
2216 percent(s->msg_length_profile[6], profile_total));
2217
2218 ret += tipc_snprintf(buf + ret, buf_size - ret,
2219 " RX states:%u probes:%u naks:%u defs:%u"
2220 " dups:%u\n", s->recv_states, s->recv_probes,
2221 s->recv_nacks, s->deferred_recv, s->duplicates);
2222
2223 ret += tipc_snprintf(buf + ret, buf_size - ret,
2224 " TX states:%u probes:%u naks:%u acks:%u"
2225 " dups:%u\n", s->sent_states, s->sent_probes,
2226 s->sent_nacks, s->sent_acks, s->retransmitted);
2227
2228 ret += tipc_snprintf(buf + ret, buf_size - ret,
2229 " Congestion link:%u Send queue"
2230 " max:%u avg:%u\n", s->link_congs,
2231 s->max_queue_sz, s->queue_sz_counts ?
2232 (s->accu_queue_sz / s->queue_sz_counts) : 0);
2233
2234 tipc_node_unlock(node);
2235 return ret;
2236}
2237
2238struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
2239{
2240 struct sk_buff *buf;
2241 struct tlv_desc *rep_tlv;
2242 int str_len;
2243 int pb_len;
2244 char *pb;
2245
2246 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2247 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2248
2249 buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
2250 if (!buf)
2251 return NULL;
2252
2253 rep_tlv = (struct tlv_desc *)buf->data;
2254 pb = TLV_DATA(rep_tlv);
2255 pb_len = ULTRA_STRING_MAX_LEN;
2256 str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
2257 pb, pb_len);
2258 if (!str_len) {
2259 kfree_skb(buf);
2260 return tipc_cfg_reply_error_string("link not found");
2261 }
2262 str_len += 1; /* for "\0" */
2263 skb_put(buf, TLV_SPACE(str_len));
2264 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
2265
2266 return buf;
2267}
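
The reply above appends one byte for the terminating NUL and then reserves TLV_SPACE(str_len): the 4-byte TLV descriptor plus the payload, padded to a 4-byte boundary. A small model of that sizing; the macro definitions are restated here as assumptions about the TIPC config TLV conventions rather than copied from the headers:

#include <stdio.h>
#include <string.h>

/* Assumed TLV layout: 4-byte descriptor + payload, padded to 4 bytes. */
#define DEMO_TLV_DESC_SZ   4u
#define DEMO_TLV_ALIGN(x)  (((x) + 3u) & ~3u)
#define DEMO_TLV_LENGTH(d) (DEMO_TLV_DESC_SZ + (d))
#define DEMO_TLV_SPACE(d)  DEMO_TLV_ALIGN(DEMO_TLV_LENGTH(d))

int main(void)
{
        const char *stats = "Link <1.1.1:eth0-1.1.2:eth0>\n  ACTIVE ...";
        unsigned int str_len = (unsigned int)strlen(stats) + 1;  /* keep '\0' */

        printf("payload %u bytes -> TLV space %u bytes\n",
               str_len, DEMO_TLV_SPACE(str_len));
        return 0;
}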
2268
2269/**
2270 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
2271 * @dest: network address of destination node
2272 * @selector: used to select from set of active links
2273 *
2274 * If no active link can be found, uses default maximum packet size.
2275 */
2276u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
2277{
2278 struct tipc_node *n_ptr;
2279 struct tipc_link *l_ptr;
2280 u32 res = MAX_PKT_DEFAULT;
2281
2282 if (dest == tipc_own_addr)
2283 return MAX_MSG_SIZE;
2284
2285 n_ptr = tipc_node_find(dest);
2286 if (n_ptr) {
2287 tipc_node_lock(n_ptr);
2288 l_ptr = n_ptr->active_links[selector & 1];
2289 if (l_ptr)
2290 res = l_ptr->max_pkt;
2291 tipc_node_unlock(n_ptr);
2292 }
2293 return res;
2294}
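
tipc_link_get_max_pkt() short-circuits to the large local limit for the node's own address, keys the choice between the two active links off the low bit of the selector, and falls back to a default when no link is up. The decision tree in isolation; both size constants below are placeholders for MAX_MSG_SIZE and MAX_PKT_DEFAULT:

#include <stdio.h>

#define DEMO_PKT_DEFAULT 1500u          /* placeholder for MAX_PKT_DEFAULT */
#define DEMO_MSG_MAX     66000u         /* placeholder for MAX_MSG_SIZE    */

struct demo_link { unsigned int max_pkt; };
struct demo_node { struct demo_link *active_links[2]; };

static unsigned int demo_get_max_pkt(const struct demo_node *n, int local,
                                     unsigned int selector)
{
        const struct demo_link *l;

        if (local)
                return DEMO_MSG_MAX;       /* own node: no link MTU applies */
        if (!n)
                return DEMO_PKT_DEFAULT;   /* unknown destination           */
        l = n->active_links[selector & 1]; /* spread load over both links   */
        return l ? l->max_pkt : DEMO_PKT_DEFAULT;
}

int main(void)
{
        struct demo_link a = { 1500 }, b = { 9000 };
        struct demo_node n = { { &a, &b } };

        printf("%u %u\n", demo_get_max_pkt(&n, 0, 4), demo_get_max_pkt(&n, 0, 5));
        return 0;
}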
2295
2296static void link_print(struct tipc_link *l_ptr, const char *str) 1971static void link_print(struct tipc_link *l_ptr, const char *str)
2297{ 1972{
1973 struct tipc_net *tn = net_generic(l_ptr->owner->net, tipc_net_id);
2298 struct tipc_bearer *b_ptr; 1974 struct tipc_bearer *b_ptr;
2299 1975
2300 rcu_read_lock(); 1976 rcu_read_lock();
2301 b_ptr = rcu_dereference_rtnl(bearer_list[l_ptr->bearer_id]); 1977 b_ptr = rcu_dereference_rtnl(tn->bearer_list[l_ptr->bearer_id]);
2302 if (b_ptr) 1978 if (b_ptr)
2303 pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name); 1979 pr_info("%s Link %x<%s>:", str, l_ptr->addr, b_ptr->name);
2304 rcu_read_unlock(); 1980 rcu_read_unlock();
@@ -2362,6 +2038,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
2362 struct tipc_link *link; 2038 struct tipc_link *link;
2363 struct tipc_node *node; 2039 struct tipc_node *node;
2364 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1]; 2040 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2041 struct net *net = sock_net(skb->sk);
2365 2042
2366 if (!info->attrs[TIPC_NLA_LINK]) 2043 if (!info->attrs[TIPC_NLA_LINK])
2367 return -EINVAL; 2044 return -EINVAL;
@@ -2377,7 +2054,7 @@ int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info)
2377 2054
2378 name = nla_data(attrs[TIPC_NLA_LINK_NAME]); 2055 name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2379 2056
2380 node = tipc_link_find_owner(name, &bearer_id); 2057 node = tipc_link_find_owner(net, name, &bearer_id);
2381 if (!node) 2058 if (!node)
2382 return -EINVAL; 2059 return -EINVAL;
2383 2060
@@ -2493,14 +2170,16 @@ msg_full:
2493} 2170}
2494 2171
2495/* Caller should hold appropriate locks to protect the link */ 2172/* Caller should hold appropriate locks to protect the link */
2496static int __tipc_nl_add_link(struct tipc_nl_msg *msg, struct tipc_link *link) 2173static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
2174 struct tipc_link *link)
2497{ 2175{
2498 int err; 2176 int err;
2499 void *hdr; 2177 void *hdr;
2500 struct nlattr *attrs; 2178 struct nlattr *attrs;
2501 struct nlattr *prop; 2179 struct nlattr *prop;
2180 struct tipc_net *tn = net_generic(net, tipc_net_id);
2502 2181
2503 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family, 2182 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2504 NLM_F_MULTI, TIPC_NL_LINK_GET); 2183 NLM_F_MULTI, TIPC_NL_LINK_GET);
2505 if (!hdr) 2184 if (!hdr)
2506 return -EMSGSIZE; 2185 return -EMSGSIZE;
@@ -2512,7 +2191,7 @@ static int __tipc_nl_add_link(struct tipc_nl_msg *msg, struct tipc_link *link)
2512 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name)) 2191 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
2513 goto attr_msg_full; 2192 goto attr_msg_full;
2514 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST, 2193 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
2515 tipc_cluster_mask(tipc_own_addr))) 2194 tipc_cluster_mask(tn->own_addr)))
2516 goto attr_msg_full; 2195 goto attr_msg_full;
2517 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt)) 2196 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->max_pkt))
2518 goto attr_msg_full; 2197 goto attr_msg_full;
@@ -2562,9 +2241,8 @@ msg_full:
2562} 2241}
2563 2242
2564/* Caller should hold node lock */ 2243/* Caller should hold node lock */
2565static int __tipc_nl_add_node_links(struct tipc_nl_msg *msg, 2244static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
2566 struct tipc_node *node, 2245 struct tipc_node *node, u32 *prev_link)
2567 u32 *prev_link)
2568{ 2246{
2569 u32 i; 2247 u32 i;
2570 int err; 2248 int err;
@@ -2575,7 +2253,7 @@ static int __tipc_nl_add_node_links(struct tipc_nl_msg *msg,
2575 if (!node->links[i]) 2253 if (!node->links[i])
2576 continue; 2254 continue;
2577 2255
2578 err = __tipc_nl_add_link(msg, node->links[i]); 2256 err = __tipc_nl_add_link(net, msg, node->links[i]);
2579 if (err) 2257 if (err)
2580 return err; 2258 return err;
2581 } 2259 }
@@ -2586,6 +2264,8 @@ static int __tipc_nl_add_node_links(struct tipc_nl_msg *msg,
2586 2264
2587int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb) 2265int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
2588{ 2266{
2267 struct net *net = sock_net(skb->sk);
2268 struct tipc_net *tn = net_generic(net, tipc_net_id);
2589 struct tipc_node *node; 2269 struct tipc_node *node;
2590 struct tipc_nl_msg msg; 2270 struct tipc_nl_msg msg;
2591 u32 prev_node = cb->args[0]; 2271 u32 prev_node = cb->args[0];
@@ -2603,7 +2283,7 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
2603 rcu_read_lock(); 2283 rcu_read_lock();
2604 2284
2605 if (prev_node) { 2285 if (prev_node) {
2606 node = tipc_node_find(prev_node); 2286 node = tipc_node_find(net, prev_node);
2607 if (!node) { 2287 if (!node) {
2608 /* We never set seq or call nl_dump_check_consistent() 2288 /* We never set seq or call nl_dump_check_consistent()
2609 * this means that setting prev_seq here will cause the 2289 * this means that setting prev_seq here will cause the
@@ -2615,9 +2295,11 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
2615 goto out; 2295 goto out;
2616 } 2296 }
2617 2297
2618 list_for_each_entry_continue_rcu(node, &tipc_node_list, list) { 2298 list_for_each_entry_continue_rcu(node, &tn->node_list,
2299 list) {
2619 tipc_node_lock(node); 2300 tipc_node_lock(node);
2620 err = __tipc_nl_add_node_links(&msg, node, &prev_link); 2301 err = __tipc_nl_add_node_links(net, &msg, node,
2302 &prev_link);
2621 tipc_node_unlock(node); 2303 tipc_node_unlock(node);
2622 if (err) 2304 if (err)
2623 goto out; 2305 goto out;
@@ -2625,13 +2307,14 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
2625 prev_node = node->addr; 2307 prev_node = node->addr;
2626 } 2308 }
2627 } else { 2309 } else {
2628 err = tipc_nl_add_bc_link(&msg); 2310 err = tipc_nl_add_bc_link(net, &msg);
2629 if (err) 2311 if (err)
2630 goto out; 2312 goto out;
2631 2313
2632 list_for_each_entry_rcu(node, &tipc_node_list, list) { 2314 list_for_each_entry_rcu(node, &tn->node_list, list) {
2633 tipc_node_lock(node); 2315 tipc_node_lock(node);
2634 err = __tipc_nl_add_node_links(&msg, node, &prev_link); 2316 err = __tipc_nl_add_node_links(net, &msg, node,
2317 &prev_link);
2635 tipc_node_unlock(node); 2318 tipc_node_unlock(node);
2636 if (err) 2319 if (err)
2637 goto out; 2320 goto out;
@@ -2652,6 +2335,7 @@ out:
2652 2335
2653int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info) 2336int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
2654{ 2337{
2338 struct net *net = genl_info_net(info);
2655 struct sk_buff *ans_skb; 2339 struct sk_buff *ans_skb;
2656 struct tipc_nl_msg msg; 2340 struct tipc_nl_msg msg;
2657 struct tipc_link *link; 2341 struct tipc_link *link;
@@ -2664,7 +2348,7 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
2664 return -EINVAL; 2348 return -EINVAL;
2665 2349
2666 name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]); 2350 name = nla_data(info->attrs[TIPC_NLA_LINK_NAME]);
2667 node = tipc_link_find_owner(name, &bearer_id); 2351 node = tipc_link_find_owner(net, name, &bearer_id);
2668 if (!node) 2352 if (!node)
2669 return -EINVAL; 2353 return -EINVAL;
2670 2354
@@ -2683,7 +2367,7 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
2683 goto err_out; 2367 goto err_out;
2684 } 2368 }
2685 2369
2686 err = __tipc_nl_add_link(&msg, link); 2370 err = __tipc_nl_add_link(net, &msg, link);
2687 if (err) 2371 if (err)
2688 goto err_out; 2372 goto err_out;
2689 2373
@@ -2706,6 +2390,7 @@ int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
2706 struct tipc_link *link; 2390 struct tipc_link *link;
2707 struct tipc_node *node; 2391 struct tipc_node *node;
2708 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1]; 2392 struct nlattr *attrs[TIPC_NLA_LINK_MAX + 1];
2393 struct net *net = sock_net(skb->sk);
2709 2394
2710 if (!info->attrs[TIPC_NLA_LINK]) 2395 if (!info->attrs[TIPC_NLA_LINK])
2711 return -EINVAL; 2396 return -EINVAL;
@@ -2722,13 +2407,13 @@ int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info)
2722 link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]); 2407 link_name = nla_data(attrs[TIPC_NLA_LINK_NAME]);
2723 2408
2724 if (strcmp(link_name, tipc_bclink_name) == 0) { 2409 if (strcmp(link_name, tipc_bclink_name) == 0) {
2725 err = tipc_bclink_reset_stats(); 2410 err = tipc_bclink_reset_stats(net);
2726 if (err) 2411 if (err)
2727 return err; 2412 return err;
2728 return 0; 2413 return 0;
2729 } 2414 }
2730 2415
2731 node = tipc_link_find_owner(link_name, &bearer_id); 2416 node = tipc_link_find_owner(net, link_name, &bearer_id);
2732 if (!node) 2417 if (!node)
2733 return -EINVAL; 2418 return -EINVAL;
2734 2419
diff --git a/net/tipc/link.h b/net/tipc/link.h
index 55812e87ca1e..7aeb52092bf3 100644
--- a/net/tipc/link.h
+++ b/net/tipc/link.h
@@ -41,6 +41,10 @@
41#include "msg.h" 41#include "msg.h"
42#include "node.h" 42#include "node.h"
43 43
44/* TIPC-specific error codes
45*/
46#define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */
47
44/* Out-of-range value for link sequence numbers 48/* Out-of-range value for link sequence numbers
45 */ 49 */
46#define INVALID_LINK_SEQ 0x10000 50#define INVALID_LINK_SEQ 0x10000
@@ -99,13 +103,14 @@ struct tipc_stats {
99 * @media_addr: media address to use when sending messages over link 103 * @media_addr: media address to use when sending messages over link
100 * @timer: link timer 104 * @timer: link timer
101 * @owner: pointer to peer node 105 * @owner: pointer to peer node
106 * @refcnt: reference counter for permanent references (owner node & timer)
102 * @flags: execution state flags for link endpoint instance 107 * @flags: execution state flags for link endpoint instance
103 * @checkpoint: reference point for triggering link continuity checking 108 * @checkpoint: reference point for triggering link continuity checking
104 * @peer_session: link session # being used by peer end of link 109 * @peer_session: link session # being used by peer end of link
105 * @peer_bearer_id: bearer id used by link's peer endpoint 110 * @peer_bearer_id: bearer id used by link's peer endpoint
106 * @bearer_id: local bearer id used by link 111 * @bearer_id: local bearer id used by link
107 * @tolerance: minimum link continuity loss needed to reset link [in ms] 112 * @tolerance: minimum link continuity loss needed to reset link [in ms]
108 * @continuity_interval: link continuity testing interval [in ms] 113 * @cont_intv: link continuity testing interval
109 * @abort_limit: # of unacknowledged continuity probes needed to reset link 114 * @abort_limit: # of unacknowledged continuity probes needed to reset link
110 * @state: current state of link FSM 115 * @state: current state of link FSM
111 * @fsm_msg_cnt: # of protocol messages link FSM has sent in current state 116 * @fsm_msg_cnt: # of protocol messages link FSM has sent in current state
@@ -126,8 +131,10 @@ struct tipc_stats {
126 * @next_in_no: next sequence number to expect for inbound messages 131 * @next_in_no: next sequence number to expect for inbound messages
127 * @deferred_queue: deferred queue saved OOS b'cast message received from node 132 * @deferred_queue: deferred queue saved OOS b'cast message received from node
128 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer 133 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
134 * @inputq: buffer queue for messages to be delivered upwards
135 * @namedq: buffer queue for name table messages to be delivered upwards
129 * @next_out: ptr to first unsent outbound message in queue 136 * @next_out: ptr to first unsent outbound message in queue
130 * @waiting_sks: linked list of sockets waiting for link congestion to abate 137 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
131 * @long_msg_seq_no: next identifier to use for outbound fragmented messages 138 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
132 * @reasm_buf: head of partially reassembled inbound message fragments 139 * @reasm_buf: head of partially reassembled inbound message fragments
133 * @stats: collects statistics regarding link activity 140 * @stats: collects statistics regarding link activity
@@ -138,6 +145,7 @@ struct tipc_link {
138 struct tipc_media_addr media_addr; 145 struct tipc_media_addr media_addr;
139 struct timer_list timer; 146 struct timer_list timer;
140 struct tipc_node *owner; 147 struct tipc_node *owner;
148 struct kref ref;
141 149
142 /* Management and link supervision data */ 150 /* Management and link supervision data */
143 unsigned int flags; 151 unsigned int flags;
@@ -146,7 +154,7 @@ struct tipc_link {
146 u32 peer_bearer_id; 154 u32 peer_bearer_id;
147 u32 bearer_id; 155 u32 bearer_id;
148 u32 tolerance; 156 u32 tolerance;
149 u32 continuity_interval; 157 unsigned long cont_intv;
150 u32 abort_limit; 158 u32 abort_limit;
151 int state; 159 int state;
152 u32 fsm_msg_cnt; 160 u32 fsm_msg_cnt;
@@ -178,10 +186,12 @@ struct tipc_link {
178 u32 next_in_no; 186 u32 next_in_no;
179 struct sk_buff_head deferred_queue; 187 struct sk_buff_head deferred_queue;
180 u32 unacked_window; 188 u32 unacked_window;
189 struct sk_buff_head inputq;
190 struct sk_buff_head namedq;
181 191
182 /* Congestion handling */ 192 /* Congestion handling */
183 struct sk_buff *next_out; 193 struct sk_buff *next_out;
184 struct sk_buff_head waiting_sks; 194 struct sk_buff_head wakeupq;
185 195
186 /* Fragmentation/reassembly */ 196 /* Fragmentation/reassembly */
187 u32 long_msg_seq_no; 197 u32 long_msg_seq_no;
@@ -196,28 +206,24 @@ struct tipc_port;
196struct tipc_link *tipc_link_create(struct tipc_node *n_ptr, 206struct tipc_link *tipc_link_create(struct tipc_node *n_ptr,
197 struct tipc_bearer *b_ptr, 207 struct tipc_bearer *b_ptr,
198 const struct tipc_media_addr *media_addr); 208 const struct tipc_media_addr *media_addr);
199void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down); 209void tipc_link_delete(struct tipc_link *link);
210void tipc_link_delete_list(struct net *net, unsigned int bearer_id,
211 bool shutting_down);
200void tipc_link_failover_send_queue(struct tipc_link *l_ptr); 212void tipc_link_failover_send_queue(struct tipc_link *l_ptr);
201void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, struct tipc_link *dest); 213void tipc_link_dup_queue_xmit(struct tipc_link *l_ptr, struct tipc_link *dest);
202void tipc_link_reset_fragments(struct tipc_link *l_ptr); 214void tipc_link_reset_fragments(struct tipc_link *l_ptr);
203int tipc_link_is_up(struct tipc_link *l_ptr); 215int tipc_link_is_up(struct tipc_link *l_ptr);
204int tipc_link_is_active(struct tipc_link *l_ptr); 216int tipc_link_is_active(struct tipc_link *l_ptr);
205void tipc_link_purge_queues(struct tipc_link *l_ptr); 217void tipc_link_purge_queues(struct tipc_link *l_ptr);
206struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area,
207 int req_tlv_space,
208 u16 cmd);
209struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area,
210 int req_tlv_space);
211struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area,
212 int req_tlv_space);
213void tipc_link_reset_all(struct tipc_node *node); 218void tipc_link_reset_all(struct tipc_node *node);
214void tipc_link_reset(struct tipc_link *l_ptr); 219void tipc_link_reset(struct tipc_link *l_ptr);
215void tipc_link_reset_list(unsigned int bearer_id); 220void tipc_link_reset_list(struct net *net, unsigned int bearer_id);
216int tipc_link_xmit_skb(struct sk_buff *skb, u32 dest, u32 selector); 221int tipc_link_xmit_skb(struct net *net, struct sk_buff *skb, u32 dest,
217int tipc_link_xmit(struct sk_buff_head *list, u32 dest, u32 selector); 222 u32 selector);
218int __tipc_link_xmit(struct tipc_link *link, struct sk_buff_head *list); 223int tipc_link_xmit(struct net *net, struct sk_buff_head *list, u32 dest,
219u32 tipc_link_get_max_pkt(u32 dest, u32 selector); 224 u32 selector);
220void tipc_link_bundle_rcv(struct sk_buff *buf); 225int __tipc_link_xmit(struct net *net, struct tipc_link *link,
226 struct sk_buff_head *list);
221void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob, 227void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
222 u32 gap, u32 tolerance, u32 priority, u32 acked_mtu); 228 u32 gap, u32 tolerance, u32 priority, u32 acked_mtu);
223void tipc_link_push_packets(struct tipc_link *l_ptr); 229void tipc_link_push_packets(struct tipc_link *l_ptr);
@@ -233,6 +239,7 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info);
233int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info); 239int tipc_nl_link_set(struct sk_buff *skb, struct genl_info *info);
234int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info); 240int tipc_nl_link_reset_stats(struct sk_buff *skb, struct genl_info *info);
235int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]); 241int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[]);
242void link_prepare_wakeup(struct tipc_link *l);
236 243
237/* 244/*
238 * Link sequence number manipulation routines (uses modulo 2**16 arithmetic) 245 * Link sequence number manipulation routines (uses modulo 2**16 arithmetic)
@@ -267,6 +274,10 @@ static inline u32 lesser(u32 left, u32 right)
267 return less_eq(left, right) ? left : right; 274 return less_eq(left, right) ? left : right;
268} 275}
269 276
277static inline u32 link_own_addr(struct tipc_link *l)
278{
279 return msg_prevnode(l->pmsg);
280}
270 281
271/* 282/*
272 * Link status checking routines 283 * Link status checking routines
diff --git a/net/tipc/log.c b/net/tipc/log.c
deleted file mode 100644
index abef644f27d8..000000000000
--- a/net/tipc/log.c
+++ /dev/null
@@ -1,55 +0,0 @@
1/*
2 * net/tipc/log.c: TIPC print buffer routines for debugging
3 *
4 * Copyright (c) 1996-2006, Ericsson AB
5 * Copyright (c) 2005-2007, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "config.h"
39
40/**
41 * tipc_snprintf - append formatted output to print buffer
42 * @buf: pointer to print buffer
43 * @len: buffer length
44 * @fmt: formatted info to be printed
45 */
46int tipc_snprintf(char *buf, int len, const char *fmt, ...)
47{
48 int i;
49 va_list args;
50
51 va_start(args, fmt);
52 i = vscnprintf(buf, len, fmt, args);
53 va_end(args);
54 return i;
55}
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index a687b30a699c..b6eb90cd3ef7 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -34,6 +34,7 @@
34 * POSSIBILITY OF SUCH DAMAGE. 34 * POSSIBILITY OF SUCH DAMAGE.
35 */ 35 */
36 36
37#include <net/sock.h>
37#include "core.h" 38#include "core.h"
38#include "msg.h" 39#include "msg.h"
39#include "addr.h" 40#include "addr.h"
@@ -46,25 +47,48 @@ static unsigned int align(unsigned int i)
46 return (i + 3) & ~3u; 47 return (i + 3) & ~3u;
47} 48}
48 49
49void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize, 50/**
50 u32 destnode) 51 * tipc_buf_acquire - creates a TIPC message buffer
52 * @size: message size (including TIPC header)
53 *
54 * Returns a new buffer with data pointers set to the specified size.
55 *
56 * NOTE: Headroom is reserved to allow prepending of a data link header.
57 * There may also be unrequested tailroom present at the buffer's end.
58 */
59struct sk_buff *tipc_buf_acquire(u32 size)
60{
61 struct sk_buff *skb;
62 unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
63
64 skb = alloc_skb_fclone(buf_size, GFP_ATOMIC);
65 if (skb) {
66 skb_reserve(skb, BUF_HEADROOM);
67 skb_put(skb, size);
68 skb->next = NULL;
69 }
70 return skb;
71}
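
tipc_buf_acquire() allocates headroom plus the requested size rounded up to a 4-byte multiple, then reserves the headroom so exactly 'size' bytes of message data are exposed; whatever the rounding added is left as tailroom. The arithmetic without the skb machinery, with a stand-in headroom value instead of LL_MAX_HEADER:

#include <stdio.h>

#define DEMO_HEADROOM 128u              /* stand-in for BUF_HEADROOM */

int main(void)
{
        unsigned int size = 45;                                 /* message size */
        unsigned int alloc = (DEMO_HEADROOM + size + 3u) & ~3u; /* word aligned */

        printf("allocate %u bytes\n", alloc);
        printf("data at offset %u, length %u\n", DEMO_HEADROOM, size);
        printf("unrequested tailroom: %u bytes\n", alloc - DEMO_HEADROOM - size);
        return 0;
}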
72
73void tipc_msg_init(u32 own_node, struct tipc_msg *m, u32 user, u32 type,
74 u32 hsize, u32 dnode)
51{ 75{
52 memset(m, 0, hsize); 76 memset(m, 0, hsize);
53 msg_set_version(m); 77 msg_set_version(m);
54 msg_set_user(m, user); 78 msg_set_user(m, user);
55 msg_set_hdr_sz(m, hsize); 79 msg_set_hdr_sz(m, hsize);
56 msg_set_size(m, hsize); 80 msg_set_size(m, hsize);
57 msg_set_prevnode(m, tipc_own_addr); 81 msg_set_prevnode(m, own_node);
58 msg_set_type(m, type); 82 msg_set_type(m, type);
59 if (hsize > SHORT_H_SIZE) { 83 if (hsize > SHORT_H_SIZE) {
60 msg_set_orignode(m, tipc_own_addr); 84 msg_set_orignode(m, own_node);
61 msg_set_destnode(m, destnode); 85 msg_set_destnode(m, dnode);
62 } 86 }
63} 87}
64 88
65struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz, 89struct sk_buff *tipc_msg_create(uint user, uint type,
66 uint data_sz, u32 dnode, u32 onode, 90 uint hdr_sz, uint data_sz, u32 dnode,
67 u32 dport, u32 oport, int errcode) 91 u32 onode, u32 dport, u32 oport, int errcode)
68{ 92{
69 struct tipc_msg *msg; 93 struct tipc_msg *msg;
70 struct sk_buff *buf; 94 struct sk_buff *buf;
@@ -74,9 +98,8 @@ struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
74 return NULL; 98 return NULL;
75 99
76 msg = buf_msg(buf); 100 msg = buf_msg(buf);
77 tipc_msg_init(msg, user, type, hdr_sz, dnode); 101 tipc_msg_init(onode, msg, user, type, hdr_sz, dnode);
78 msg_set_size(msg, hdr_sz + data_sz); 102 msg_set_size(msg, hdr_sz + data_sz);
79 msg_set_prevnode(msg, onode);
80 msg_set_origport(msg, oport); 103 msg_set_origport(msg, oport);
81 msg_set_destport(msg, dport); 104 msg_set_destport(msg, dport);
82 msg_set_errcode(msg, errcode); 105 msg_set_errcode(msg, errcode);
@@ -163,15 +186,14 @@ err:
163 * tipc_msg_build - create buffer chain containing specified header and data 186 * tipc_msg_build - create buffer chain containing specified header and data
164 * @mhdr: Message header, to be prepended to data 187 * @mhdr: Message header, to be prepended to data
165 * @m: User message 188 * @m: User message
 166 * @offset: Position in iov to start copying from
167 * @dsz: Total length of user data 189 * @dsz: Total length of user data
168 * @pktmax: Max packet size that can be used 190 * @pktmax: Max packet size that can be used
169 * @list: Buffer or chain of buffers to be returned to caller 191 * @list: Buffer or chain of buffers to be returned to caller
170 * 192 *
171 * Returns message data size or errno: -ENOMEM, -EFAULT 193 * Returns message data size or errno: -ENOMEM, -EFAULT
172 */ 194 */
173int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset, 195int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
174 int dsz, int pktmax, struct sk_buff_head *list) 196 int offset, int dsz, int pktmax, struct sk_buff_head *list)
175{ 197{
176 int mhsz = msg_hdr_sz(mhdr); 198 int mhsz = msg_hdr_sz(mhdr);
177 int msz = mhsz + dsz; 199 int msz = mhsz + dsz;
@@ -191,19 +213,19 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
191 skb = tipc_buf_acquire(msz); 213 skb = tipc_buf_acquire(msz);
192 if (unlikely(!skb)) 214 if (unlikely(!skb))
193 return -ENOMEM; 215 return -ENOMEM;
216 skb_orphan(skb);
194 __skb_queue_tail(list, skb); 217 __skb_queue_tail(list, skb);
195 skb_copy_to_linear_data(skb, mhdr, mhsz); 218 skb_copy_to_linear_data(skb, mhdr, mhsz);
196 pktpos = skb->data + mhsz; 219 pktpos = skb->data + mhsz;
197 if (!dsz || !memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset, 220 if (copy_from_iter(pktpos, dsz, &m->msg_iter) == dsz)
198 dsz))
199 return dsz; 221 return dsz;
200 rc = -EFAULT; 222 rc = -EFAULT;
201 goto error; 223 goto error;
202 } 224 }
203 225
204 /* Prepare reusable fragment header */ 226 /* Prepare reusable fragment header */
205 tipc_msg_init(&pkthdr, MSG_FRAGMENTER, FIRST_FRAGMENT, 227 tipc_msg_init(msg_prevnode(mhdr), &pkthdr, MSG_FRAGMENTER,
206 INT_H_SIZE, msg_destnode(mhdr)); 228 FIRST_FRAGMENT, INT_H_SIZE, msg_destnode(mhdr));
207 msg_set_size(&pkthdr, pktmax); 229 msg_set_size(&pkthdr, pktmax);
208 msg_set_fragm_no(&pkthdr, pktno); 230 msg_set_fragm_no(&pkthdr, pktno);
209 231
@@ -211,6 +233,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
211 skb = tipc_buf_acquire(pktmax); 233 skb = tipc_buf_acquire(pktmax);
212 if (!skb) 234 if (!skb)
213 return -ENOMEM; 235 return -ENOMEM;
236 skb_orphan(skb);
214 __skb_queue_tail(list, skb); 237 __skb_queue_tail(list, skb);
215 pktpos = skb->data; 238 pktpos = skb->data;
216 skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE); 239 skb_copy_to_linear_data(skb, &pkthdr, INT_H_SIZE);
@@ -224,12 +247,11 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
224 if (drem < pktrem) 247 if (drem < pktrem)
225 pktrem = drem; 248 pktrem = drem;
226 249
227 if (memcpy_fromiovecend(pktpos, m->msg_iter.iov, offset, pktrem)) { 250 if (copy_from_iter(pktpos, pktrem, &m->msg_iter) != pktrem) {
228 rc = -EFAULT; 251 rc = -EFAULT;
229 goto error; 252 goto error;
230 } 253 }
231 drem -= pktrem; 254 drem -= pktrem;
232 offset += pktrem;
233 255
234 if (!drem) 256 if (!drem)
235 break; 257 break;
@@ -244,6 +266,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset,
244 rc = -ENOMEM; 266 rc = -ENOMEM;
245 goto error; 267 goto error;
246 } 268 }
269 skb_orphan(skb);
247 __skb_queue_tail(list, skb); 270 __skb_queue_tail(list, skb);
248 msg_set_type(&pkthdr, FRAGMENT); 271 msg_set_type(&pkthdr, FRAGMENT);
249 msg_set_size(&pkthdr, pktsz); 272 msg_set_size(&pkthdr, pktsz);
@@ -304,6 +327,40 @@ bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu)
304} 327}
305 328
306/** 329/**
330 * tipc_msg_extract(): extract bundled inner packet from buffer
331 * @skb: linear outer buffer, to be extracted from.
332 * @iskb: extracted inner buffer, to be returned
 333 * @pos: position of msg to be extracted; updated to position of next msg on return
334 * Consumes outer buffer when last packet extracted
 335 * Returns true when there is an extracted buffer, otherwise false
336 */
337bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos)
338{
339 struct tipc_msg *msg = buf_msg(skb);
340 int imsz;
341 struct tipc_msg *imsg = (struct tipc_msg *)(msg_data(msg) + *pos);
342
343 /* Is there space left for shortest possible message? */
344 if (*pos > (msg_data_sz(msg) - SHORT_H_SIZE))
345 goto none;
346 imsz = msg_size(imsg);
347
 348 /* Is there space left for the current message? */
349 if ((*pos + imsz) > msg_data_sz(msg))
350 goto none;
351 *iskb = tipc_buf_acquire(imsz);
352 if (!*iskb)
353 goto none;
354 skb_copy_to_linear_data(*iskb, imsg, imsz);
355 *pos += align(imsz);
356 return true;
357none:
358 kfree_skb(skb);
359 *iskb = NULL;
360 return false;
361}
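
tipc_msg_extract() walks the bundle payload with a caller-held cursor: each inner message states its own size in its header, a copy of it is handed back, and the cursor advances by that size rounded up to 4 bytes. A self-contained model of the cursor walk over a flat byte array; the 2-byte length prefix is a toy encoding standing in for the real TIPC header:

#include <stdint.h>
#include <stdio.h>

/* Toy bundle: each inner "message" is a 2-byte little-endian length + payload. */
static int demo_extract(const uint8_t *data, size_t len, size_t *pos,
                        const uint8_t **msg, size_t *msg_len)
{
        size_t imsz;

        if (*pos + 2 > len)                     /* room for shortest message? */
                return 0;
        imsz = data[*pos] | (data[*pos + 1] << 8);
        if (*pos + imsz > len)                  /* room for this message?     */
                return 0;
        *msg = data + *pos;
        *msg_len = imsz;
        *pos += (imsz + 3u) & ~3u;              /* step to next aligned msg   */
        return 1;
}

int main(void)
{
        /* two inner messages of 6 and 5 bytes, each padded to 4-byte alignment */
        uint8_t bundle[16] = { 6, 0, 'a', 'b', 'c', 'd', 0, 0,
                               5, 0, 'x', 'y', 'z', 0, 0, 0 };
        size_t pos = 0, mlen;
        const uint8_t *m;

        while (demo_extract(bundle, sizeof(bundle), &pos, &m, &mlen))
                printf("inner message of %zu bytes at offset %zu\n",
                       mlen, (size_t)(m - bundle));
        return 0;
}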
362
363/**
307 * tipc_msg_make_bundle(): Create bundle buf and append message to its tail 364 * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
308 * @list: the buffer chain 365 * @list: the buffer chain
309 * @skb: buffer to be appended and replaced 366 * @skb: buffer to be appended and replaced
@@ -312,8 +369,8 @@ bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu)
312 * Replaces buffer if successful 369 * Replaces buffer if successful
313 * Returns true if success, otherwise false 370 * Returns true if success, otherwise false
314 */ 371 */
315bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb, 372bool tipc_msg_make_bundle(struct sk_buff_head *list,
316 u32 mtu, u32 dnode) 373 struct sk_buff *skb, u32 mtu, u32 dnode)
317{ 374{
318 struct sk_buff *bskb; 375 struct sk_buff *bskb;
319 struct tipc_msg *bmsg; 376 struct tipc_msg *bmsg;
@@ -336,7 +393,8 @@ bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb,
336 393
337 skb_trim(bskb, INT_H_SIZE); 394 skb_trim(bskb, INT_H_SIZE);
338 bmsg = buf_msg(bskb); 395 bmsg = buf_msg(bskb);
339 tipc_msg_init(bmsg, MSG_BUNDLER, 0, INT_H_SIZE, dnode); 396 tipc_msg_init(msg_prevnode(msg), bmsg, MSG_BUNDLER, 0,
397 INT_H_SIZE, dnode);
340 msg_set_seqno(bmsg, msg_seqno(msg)); 398 msg_set_seqno(bmsg, msg_seqno(msg));
341 msg_set_ack(bmsg, msg_ack(msg)); 399 msg_set_ack(bmsg, msg_ack(msg));
342 msg_set_bcast_ack(bmsg, msg_bcast_ack(msg)); 400 msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
@@ -353,7 +411,8 @@ bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb,
353 * Consumes buffer if failure 411 * Consumes buffer if failure
354 * Returns true if success, otherwise false 412 * Returns true if success, otherwise false
355 */ 413 */
356bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err) 414bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode,
415 int err)
357{ 416{
358 struct tipc_msg *msg = buf_msg(buf); 417 struct tipc_msg *msg = buf_msg(buf);
359 uint imp = msg_importance(msg); 418 uint imp = msg_importance(msg);
@@ -374,7 +433,7 @@ bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err)
374 msg_set_errcode(msg, err); 433 msg_set_errcode(msg, err);
375 msg_set_origport(msg, msg_destport(&ohdr)); 434 msg_set_origport(msg, msg_destport(&ohdr));
376 msg_set_destport(msg, msg_origport(&ohdr)); 435 msg_set_destport(msg, msg_origport(&ohdr));
377 msg_set_prevnode(msg, tipc_own_addr); 436 msg_set_prevnode(msg, own_addr);
378 if (!msg_short(msg)) { 437 if (!msg_short(msg)) {
379 msg_set_orignode(msg, msg_destnode(&ohdr)); 438 msg_set_orignode(msg, msg_destnode(&ohdr));
380 msg_set_destnode(msg, msg_orignode(&ohdr)); 439 msg_set_destnode(msg, msg_orignode(&ohdr));
@@ -386,43 +445,43 @@ bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err)
386 return true; 445 return true;
387exit: 446exit:
388 kfree_skb(buf); 447 kfree_skb(buf);
448 *dnode = 0;
389 return false; 449 return false;
390} 450}
391 451
392/** 452/**
393 * tipc_msg_eval: determine fate of message that found no destination 453 * tipc_msg_lookup_dest(): try to find new destination for named message
394 * @buf: the buffer containing the message. 454 * @skb: the buffer containing the message.
395 * @dnode: return value: next-hop node, if message to be forwarded 455 * @dnode: return value: next-hop node, if destination found
396 * @err: error code to use, if message to be rejected 456 * @err: return value: error code to use, if message to be rejected
397 *
398 * Does not consume buffer 457 * Does not consume buffer
399 * Returns 0 (TIPC_OK) if message ok and we can try again, -TIPC error 458 * Returns true if a destination is found, false otherwise
400 * code if message to be rejected
401 */ 459 */
402int tipc_msg_eval(struct sk_buff *buf, u32 *dnode) 460bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb,
461 u32 *dnode, int *err)
403{ 462{
404 struct tipc_msg *msg = buf_msg(buf); 463 struct tipc_msg *msg = buf_msg(skb);
405 u32 dport; 464 u32 dport;
406 465
407 if (msg_type(msg) != TIPC_NAMED_MSG) 466 if (!msg_isdata(msg))
408 return -TIPC_ERR_NO_PORT; 467 return false;
409 if (skb_linearize(buf)) 468 if (!msg_named(msg))
410 return -TIPC_ERR_NO_NAME; 469 return false;
411 if (msg_data_sz(msg) > MAX_FORWARD_SIZE) 470 *err = -TIPC_ERR_NO_NAME;
412 return -TIPC_ERR_NO_NAME; 471 if (skb_linearize(skb))
472 return false;
413 if (msg_reroute_cnt(msg) > 0) 473 if (msg_reroute_cnt(msg) > 0)
414 return -TIPC_ERR_NO_NAME; 474 return false;
415 475 *dnode = addr_domain(net, msg_lookup_scope(msg));
416 *dnode = addr_domain(msg_lookup_scope(msg)); 476 dport = tipc_nametbl_translate(net, msg_nametype(msg),
417 dport = tipc_nametbl_translate(msg_nametype(msg), 477 msg_nameinst(msg), dnode);
418 msg_nameinst(msg),
419 dnode);
420 if (!dport) 478 if (!dport)
421 return -TIPC_ERR_NO_NAME; 479 return false;
422 msg_incr_reroute_cnt(msg); 480 msg_incr_reroute_cnt(msg);
423 msg_set_destnode(msg, *dnode); 481 msg_set_destnode(msg, *dnode);
424 msg_set_destport(msg, dport); 482 msg_set_destport(msg, dport);
425 return TIPC_OK; 483 *err = TIPC_OK;
484 return true;
426} 485}
427 486
428/* tipc_msg_reassemble() - clone a buffer chain of fragments and 487/* tipc_msg_reassemble() - clone a buffer chain of fragments and
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index d5c83d7ecb47..9ace47f44a69 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -37,7 +37,7 @@
37#ifndef _TIPC_MSG_H 37#ifndef _TIPC_MSG_H
38#define _TIPC_MSG_H 38#define _TIPC_MSG_H
39 39
40#include "bearer.h" 40#include <linux/tipc.h>
41 41
42/* 42/*
43 * Constants and routines used to read and write TIPC payload message headers 43 * Constants and routines used to read and write TIPC payload message headers
@@ -45,6 +45,7 @@
45 * Note: Some items are also used with TIPC internal message headers 45 * Note: Some items are also used with TIPC internal message headers
46 */ 46 */
47#define TIPC_VERSION 2 47#define TIPC_VERSION 2
48struct plist;
48 49
49/* 50/*
50 * Payload message users are defined in TIPC's public API: 51 * Payload message users are defined in TIPC's public API:
@@ -77,11 +78,37 @@
77 78
78#define TIPC_MEDIA_ADDR_OFFSET 5 79#define TIPC_MEDIA_ADDR_OFFSET 5
79 80
81/**
82 * TIPC message buffer code
83 *
84 * TIPC message buffer headroom reserves space for the worst-case
85 * link-level device header (in case the message is sent off-node).
86 *
87 * Note: Headroom should be a multiple of 4 to ensure the TIPC header fields
88 * are word aligned for quicker access
89 */
90#define BUF_HEADROOM LL_MAX_HEADER
91
92struct tipc_skb_cb {
93 void *handle;
94 struct sk_buff *tail;
95 bool deferred;
96 bool wakeup_pending;
97 bool bundling;
98 u16 chain_sz;
99 u16 chain_imp;
100};
101
102#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
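
TIPC_SKB_CB() overlays struct tipc_skb_cb on the socket buffer's control block, so the struct has to fit inside that fixed scratch area (48 bytes in kernels of this vintage). A compile-time check of that constraint against a local copy of the layout; both the 48-byte figure and the stand-in types are assumptions for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_SKB_CB_SIZE 48             /* assumed size of sk_buff::cb[] */

struct demo_sk_buff;                    /* opaque stand-in for struct sk_buff */

/* Local copy of the per-buffer state introduced by the patch. */
struct demo_tipc_skb_cb {
        void *handle;
        struct demo_sk_buff *tail;
        bool deferred;
        bool wakeup_pending;
        bool bundling;
        uint16_t chain_sz;
        uint16_t chain_imp;
};

_Static_assert(sizeof(struct demo_tipc_skb_cb) <= DEMO_SKB_CB_SIZE,
               "per-buffer TIPC state must fit in skb->cb[]");

int main(void)
{
        printf("tipc_skb_cb needs %zu of %d available bytes\n",
               sizeof(struct demo_tipc_skb_cb), DEMO_SKB_CB_SIZE);
        return 0;
}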
80 103
81struct tipc_msg { 104struct tipc_msg {
82 __be32 hdr[15]; 105 __be32 hdr[15];
83}; 106};
84 107
108static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
109{
110 return (struct tipc_msg *)skb->data;
111}
85 112
86static inline u32 msg_word(struct tipc_msg *m, u32 pos) 113static inline u32 msg_word(struct tipc_msg *m, u32 pos)
87{ 114{
@@ -721,27 +748,111 @@ static inline u32 msg_tot_origport(struct tipc_msg *m)
721 return msg_origport(m); 748 return msg_origport(m);
722} 749}
723 750
724bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err); 751struct sk_buff *tipc_buf_acquire(u32 size);
725 752bool tipc_msg_reverse(u32 own_addr, struct sk_buff *buf, u32 *dnode,
726int tipc_msg_eval(struct sk_buff *buf, u32 *dnode); 753 int err);
727 754void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type,
728void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize, 755 u32 hsize, u32 destnode);
729 u32 destnode);
730
731struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz, 756struct sk_buff *tipc_msg_create(uint user, uint type, uint hdr_sz,
732 uint data_sz, u32 dnode, u32 onode, 757 uint data_sz, u32 dnode, u32 onode,
733 u32 dport, u32 oport, int errcode); 758 u32 dport, u32 oport, int errcode);
734
735int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf); 759int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
736
737bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu); 760bool tipc_msg_bundle(struct sk_buff_head *list, struct sk_buff *skb, u32 mtu);
761bool tipc_msg_make_bundle(struct sk_buff_head *list,
762 struct sk_buff *skb, u32 mtu, u32 dnode);
763bool tipc_msg_extract(struct sk_buff *skb, struct sk_buff **iskb, int *pos);
764int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
765 int offset, int dsz, int mtu, struct sk_buff_head *list);
766bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, u32 *dnode,
767 int *err);
768struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list);
738 769
739bool tipc_msg_make_bundle(struct sk_buff_head *list, struct sk_buff *skb, 770/* tipc_skb_peek(): peek and reserve first buffer in list
740 u32 mtu, u32 dnode); 771 * @list: list to be peeked in
741 772 * Returns pointer to first buffer in list, if any
742int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m, int offset, 773 */
743 int dsz, int mtu, struct sk_buff_head *list); 774static inline struct sk_buff *tipc_skb_peek(struct sk_buff_head *list,
775 spinlock_t *lock)
776{
777 struct sk_buff *skb;
778
779 spin_lock_bh(lock);
780 skb = skb_peek(list);
781 if (skb)
782 skb_get(skb);
783 spin_unlock_bh(lock);
784 return skb;
785}
786
787/* tipc_skb_peek_port(): find a destination port, ignoring all destinations
788 * up to and including 'filter'.
789 * Note: ignoring previously tried destinations minimizes the risk of
790 * contention on the socket lock
791 * @list: list to be peeked in
792 * @filter: last destination to be ignored from search
 793 * Returns a destination port number, if applicable.
794 */
795static inline u32 tipc_skb_peek_port(struct sk_buff_head *list, u32 filter)
796{
797 struct sk_buff *skb;
798 u32 dport = 0;
799 bool ignore = true;
800
801 spin_lock_bh(&list->lock);
802 skb_queue_walk(list, skb) {
803 dport = msg_destport(buf_msg(skb));
804 if (!filter || skb_queue_is_last(list, skb))
805 break;
806 if (dport == filter)
807 ignore = false;
808 else if (!ignore)
809 break;
810 }
811 spin_unlock_bh(&list->lock);
812 return dport;
813}
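
With a zero filter the walk above simply reports the head of the queue; otherwise every destination up to and including 'filter' is skipped and the first different one after it is returned, falling back to the last entry when nothing else is left. The same walk over a plain array of port numbers:

#include <stdbool.h>
#include <stdio.h>

static unsigned int demo_peek_port(const unsigned int *ports, size_t n,
                                   unsigned int filter)
{
        unsigned int dport = 0;
        bool ignore = true;
        size_t i;

        for (i = 0; i < n; i++) {
                dport = ports[i];
                if (!filter || i == n - 1)
                        break;
                if (dport == filter)
                        ignore = false;
                else if (!ignore)
                        break;
        }
        return dport;
}

int main(void)
{
        unsigned int ports[] = { 100, 100, 200, 300 };

        printf("%u\n", demo_peek_port(ports, 4, 0));    /* 100: no filter      */
        printf("%u\n", demo_peek_port(ports, 4, 100));  /* 200: skip the 100s  */
        printf("%u\n", demo_peek_port(ports, 4, 300));  /* 300: last resort    */
        return 0;
}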
814
815/* tipc_skb_dequeue(): unlink first buffer with dest 'dport' from list
816 * @list: list to be unlinked from
817 * @dport: selection criteria for buffer to unlink
818 */
819static inline struct sk_buff *tipc_skb_dequeue(struct sk_buff_head *list,
820 u32 dport)
821{
822 struct sk_buff *_skb, *tmp, *skb = NULL;
823
824 spin_lock_bh(&list->lock);
825 skb_queue_walk_safe(list, _skb, tmp) {
826 if (msg_destport(buf_msg(_skb)) == dport) {
827 __skb_unlink(_skb, list);
828 skb = _skb;
829 break;
830 }
831 }
832 spin_unlock_bh(&list->lock);
833 return skb;
834}
835
836/* tipc_skb_queue_tail(): add buffer to tail of list;
837 * @list: list to be appended to
838 * @skb: buffer to append. Always appended
839 * @dport: the destination port of the buffer
 841 * returns true if dport differs from the tail's destination, the list was empty, or it already holds more than 32 buffers
841 */
842static inline bool tipc_skb_queue_tail(struct sk_buff_head *list,
843 struct sk_buff *skb, u32 dport)
844{
845 struct sk_buff *_skb = NULL;
846 bool rv = false;
744 847
745struct sk_buff *tipc_msg_reassemble(struct sk_buff_head *list); 848 spin_lock_bh(&list->lock);
849 _skb = skb_peek_tail(list);
850 if (!_skb || (msg_destport(buf_msg(_skb)) != dport) ||
851 (skb_queue_len(list) > 32))
852 rv = true;
853 __skb_queue_tail(list, skb);
854 spin_unlock_bh(&list->lock);
855 return rv;
856}
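
tipc_skb_queue_tail() always appends, and the boolean it hands back flags the three conditions visible above: the list was empty, the new buffer's destination differs from the current tail's, or the list has already grown past 32 buffers. Presumably the caller uses a true return to notify the receiving side; the decision rule itself, in isolation:

#include <stdbool.h>
#include <stdio.h>

/* tail_dport == 0 models an empty list; queue_len is the count before append. */
static bool demo_needs_notify(unsigned int tail_dport, unsigned int queue_len,
                              unsigned int dport)
{
        return !tail_dport || tail_dport != dport || queue_len > 32;
}

int main(void)
{
        printf("%d\n", demo_needs_notify(0, 0, 7));     /* 1: list was empty   */
        printf("%d\n", demo_needs_notify(7, 3, 7));     /* 0: same destination */
        printf("%d\n", demo_needs_notify(7, 3, 9));     /* 1: new destination  */
        printf("%d\n", demo_needs_notify(7, 40, 7));    /* 1: long queue       */
        return 0;
}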
746 857
747#endif 858#endif
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index ba6083dca95b..fcb07915aaac 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -68,29 +68,33 @@ static void publ_to_item(struct distr_item *i, struct publication *p)
68/** 68/**
69 * named_prepare_buf - allocate & initialize a publication message 69 * named_prepare_buf - allocate & initialize a publication message
70 */ 70 */
71static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest) 71static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
72 u32 dest)
72{ 73{
74 struct tipc_net *tn = net_generic(net, tipc_net_id);
73 struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size); 75 struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size);
74 struct tipc_msg *msg; 76 struct tipc_msg *msg;
75 77
76 if (buf != NULL) { 78 if (buf != NULL) {
77 msg = buf_msg(buf); 79 msg = buf_msg(buf);
78 tipc_msg_init(msg, NAME_DISTRIBUTOR, type, INT_H_SIZE, dest); 80 tipc_msg_init(tn->own_addr, msg, NAME_DISTRIBUTOR, type,
81 INT_H_SIZE, dest);
79 msg_set_size(msg, INT_H_SIZE + size); 82 msg_set_size(msg, INT_H_SIZE + size);
80 } 83 }
81 return buf; 84 return buf;
82} 85}
83 86
84void named_cluster_distribute(struct sk_buff *skb) 87void named_cluster_distribute(struct net *net, struct sk_buff *skb)
85{ 88{
89 struct tipc_net *tn = net_generic(net, tipc_net_id);
86 struct sk_buff *oskb; 90 struct sk_buff *oskb;
87 struct tipc_node *node; 91 struct tipc_node *node;
88 u32 dnode; 92 u32 dnode;
89 93
90 rcu_read_lock(); 94 rcu_read_lock();
91 list_for_each_entry_rcu(node, &tipc_node_list, list) { 95 list_for_each_entry_rcu(node, &tn->node_list, list) {
92 dnode = node->addr; 96 dnode = node->addr;
93 if (in_own_node(dnode)) 97 if (in_own_node(net, dnode))
94 continue; 98 continue;
95 if (!tipc_node_active_links(node)) 99 if (!tipc_node_active_links(node))
96 continue; 100 continue;
@@ -98,7 +102,7 @@ void named_cluster_distribute(struct sk_buff *skb)
98 if (!oskb) 102 if (!oskb)
99 break; 103 break;
100 msg_set_destnode(buf_msg(oskb), dnode); 104 msg_set_destnode(buf_msg(oskb), dnode);
101 tipc_link_xmit_skb(oskb, dnode, dnode); 105 tipc_link_xmit_skb(net, oskb, dnode, dnode);
102 } 106 }
103 rcu_read_unlock(); 107 rcu_read_unlock();
104 108
@@ -108,18 +112,19 @@ void named_cluster_distribute(struct sk_buff *skb)
108/** 112/**
109 * tipc_named_publish - tell other nodes about a new publication by this node 113 * tipc_named_publish - tell other nodes about a new publication by this node
110 */ 114 */
111struct sk_buff *tipc_named_publish(struct publication *publ) 115struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ)
112{ 116{
117 struct tipc_net *tn = net_generic(net, tipc_net_id);
113 struct sk_buff *buf; 118 struct sk_buff *buf;
114 struct distr_item *item; 119 struct distr_item *item;
115 120
116 list_add_tail_rcu(&publ->local_list, 121 list_add_tail_rcu(&publ->local_list,
117 &tipc_nametbl->publ_list[publ->scope]); 122 &tn->nametbl->publ_list[publ->scope]);
118 123
119 if (publ->scope == TIPC_NODE_SCOPE) 124 if (publ->scope == TIPC_NODE_SCOPE)
120 return NULL; 125 return NULL;
121 126
122 buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0); 127 buf = named_prepare_buf(net, PUBLICATION, ITEM_SIZE, 0);
123 if (!buf) { 128 if (!buf) {
124 pr_warn("Publication distribution failure\n"); 129 pr_warn("Publication distribution failure\n");
125 return NULL; 130 return NULL;
@@ -133,7 +138,7 @@ struct sk_buff *tipc_named_publish(struct publication *publ)
133/** 138/**
134 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node 139 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
135 */ 140 */
136struct sk_buff *tipc_named_withdraw(struct publication *publ) 141struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ)
137{ 142{
138 struct sk_buff *buf; 143 struct sk_buff *buf;
139 struct distr_item *item; 144 struct distr_item *item;
@@ -143,7 +148,7 @@ struct sk_buff *tipc_named_withdraw(struct publication *publ)
143 if (publ->scope == TIPC_NODE_SCOPE) 148 if (publ->scope == TIPC_NODE_SCOPE)
144 return NULL; 149 return NULL;
145 150
146 buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0); 151 buf = named_prepare_buf(net, WITHDRAWAL, ITEM_SIZE, 0);
147 if (!buf) { 152 if (!buf) {
148 pr_warn("Withdrawal distribution failure\n"); 153 pr_warn("Withdrawal distribution failure\n");
149 return NULL; 154 return NULL;
@@ -160,19 +165,21 @@ struct sk_buff *tipc_named_withdraw(struct publication *publ)
160 * @dnode: node to be updated 165 * @dnode: node to be updated
161 * @pls: linked list of publication items to be packed into buffer chain 166 * @pls: linked list of publication items to be packed into buffer chain
162 */ 167 */
163static void named_distribute(struct sk_buff_head *list, u32 dnode, 168static void named_distribute(struct net *net, struct sk_buff_head *list,
164 struct list_head *pls) 169 u32 dnode, struct list_head *pls)
165{ 170{
166 struct publication *publ; 171 struct publication *publ;
167 struct sk_buff *skb = NULL; 172 struct sk_buff *skb = NULL;
168 struct distr_item *item = NULL; 173 struct distr_item *item = NULL;
169 uint msg_dsz = (tipc_node_get_mtu(dnode, 0) / ITEM_SIZE) * ITEM_SIZE; 174 uint msg_dsz = (tipc_node_get_mtu(net, dnode, 0) / ITEM_SIZE) *
175 ITEM_SIZE;
170 uint msg_rem = msg_dsz; 176 uint msg_rem = msg_dsz;
171 177
172 list_for_each_entry(publ, pls, local_list) { 178 list_for_each_entry(publ, pls, local_list) {
173 /* Prepare next buffer: */ 179 /* Prepare next buffer: */
174 if (!skb) { 180 if (!skb) {
175 skb = named_prepare_buf(PUBLICATION, msg_rem, dnode); 181 skb = named_prepare_buf(net, PUBLICATION, msg_rem,
182 dnode);
176 if (!skb) { 183 if (!skb) {
177 pr_warn("Bulk publication failure\n"); 184 pr_warn("Bulk publication failure\n");
178 return; 185 return;
@@ -202,30 +209,32 @@ static void named_distribute(struct sk_buff_head *list, u32 dnode,
202/** 209/**
203 * tipc_named_node_up - tell specified node about all publications by this node 210 * tipc_named_node_up - tell specified node about all publications by this node
204 */ 211 */
205void tipc_named_node_up(u32 dnode) 212void tipc_named_node_up(struct net *net, u32 dnode)
206{ 213{
214 struct tipc_net *tn = net_generic(net, tipc_net_id);
207 struct sk_buff_head head; 215 struct sk_buff_head head;
208 216
209 __skb_queue_head_init(&head); 217 __skb_queue_head_init(&head);
210 218
211 rcu_read_lock(); 219 rcu_read_lock();
212 named_distribute(&head, dnode, 220 named_distribute(net, &head, dnode,
213 &tipc_nametbl->publ_list[TIPC_CLUSTER_SCOPE]); 221 &tn->nametbl->publ_list[TIPC_CLUSTER_SCOPE]);
214 named_distribute(&head, dnode, 222 named_distribute(net, &head, dnode,
215 &tipc_nametbl->publ_list[TIPC_ZONE_SCOPE]); 223 &tn->nametbl->publ_list[TIPC_ZONE_SCOPE]);
216 rcu_read_unlock(); 224 rcu_read_unlock();
217 225
218 tipc_link_xmit(&head, dnode, dnode); 226 tipc_link_xmit(net, &head, dnode, dnode);
219} 227}
220 228
221static void tipc_publ_subscribe(struct publication *publ, u32 addr) 229static void tipc_publ_subscribe(struct net *net, struct publication *publ,
230 u32 addr)
222{ 231{
223 struct tipc_node *node; 232 struct tipc_node *node;
224 233
225 if (in_own_node(addr)) 234 if (in_own_node(net, addr))
226 return; 235 return;
227 236
228 node = tipc_node_find(addr); 237 node = tipc_node_find(net, addr);
229 if (!node) { 238 if (!node) {
230 pr_warn("Node subscription rejected, unknown node 0x%x\n", 239 pr_warn("Node subscription rejected, unknown node 0x%x\n",
231 addr); 240 addr);
@@ -237,11 +246,12 @@ static void tipc_publ_subscribe(struct publication *publ, u32 addr)
237 tipc_node_unlock(node); 246 tipc_node_unlock(node);
238} 247}
239 248
240static void tipc_publ_unsubscribe(struct publication *publ, u32 addr) 249static void tipc_publ_unsubscribe(struct net *net, struct publication *publ,
250 u32 addr)
241{ 251{
242 struct tipc_node *node; 252 struct tipc_node *node;
243 253
244 node = tipc_node_find(addr); 254 node = tipc_node_find(net, addr);
245 if (!node) 255 if (!node)
246 return; 256 return;
247 257
@@ -256,16 +266,17 @@ static void tipc_publ_unsubscribe(struct publication *publ, u32 addr)
256 * Invoked for each publication issued by a newly failed node. 266 * Invoked for each publication issued by a newly failed node.
257 * Removes publication structure from name table & deletes it. 267 * Removes publication structure from name table & deletes it.
258 */ 268 */
259static void tipc_publ_purge(struct publication *publ, u32 addr) 269static void tipc_publ_purge(struct net *net, struct publication *publ, u32 addr)
260{ 270{
271 struct tipc_net *tn = net_generic(net, tipc_net_id);
261 struct publication *p; 272 struct publication *p;
262 273
263 spin_lock_bh(&tipc_nametbl_lock); 274 spin_lock_bh(&tn->nametbl_lock);
264 p = tipc_nametbl_remove_publ(publ->type, publ->lower, 275 p = tipc_nametbl_remove_publ(net, publ->type, publ->lower,
265 publ->node, publ->ref, publ->key); 276 publ->node, publ->ref, publ->key);
266 if (p) 277 if (p)
267 tipc_publ_unsubscribe(p, addr); 278 tipc_publ_unsubscribe(net, p, addr);
268 spin_unlock_bh(&tipc_nametbl_lock); 279 spin_unlock_bh(&tn->nametbl_lock);
269 280
270 if (p != publ) { 281 if (p != publ) {
271 pr_err("Unable to remove publication from failed node\n" 282 pr_err("Unable to remove publication from failed node\n"
@@ -277,12 +288,12 @@ static void tipc_publ_purge(struct publication *publ, u32 addr)
277 kfree_rcu(p, rcu); 288 kfree_rcu(p, rcu);
278} 289}
279 290
280void tipc_publ_notify(struct list_head *nsub_list, u32 addr) 291void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr)
281{ 292{
282 struct publication *publ, *tmp; 293 struct publication *publ, *tmp;
283 294
284 list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list) 295 list_for_each_entry_safe(publ, tmp, nsub_list, nodesub_list)
285 tipc_publ_purge(publ, addr); 296 tipc_publ_purge(net, publ, addr);
286} 297}
287 298
288/** 299/**
@@ -292,25 +303,28 @@ void tipc_publ_notify(struct list_head *nsub_list, u32 addr)
292 * tipc_nametbl_lock must be held. 303 * tipc_nametbl_lock must be held.
293 * Returns the publication item if successful, otherwise NULL. 304 * Returns the publication item if successful, otherwise NULL.
294 */ 305 */
295static bool tipc_update_nametbl(struct distr_item *i, u32 node, u32 dtype) 306static bool tipc_update_nametbl(struct net *net, struct distr_item *i,
307 u32 node, u32 dtype)
296{ 308{
297 struct publication *publ = NULL; 309 struct publication *publ = NULL;
298 310
299 if (dtype == PUBLICATION) { 311 if (dtype == PUBLICATION) {
300 publ = tipc_nametbl_insert_publ(ntohl(i->type), ntohl(i->lower), 312 publ = tipc_nametbl_insert_publ(net, ntohl(i->type),
313 ntohl(i->lower),
301 ntohl(i->upper), 314 ntohl(i->upper),
302 TIPC_CLUSTER_SCOPE, node, 315 TIPC_CLUSTER_SCOPE, node,
303 ntohl(i->ref), ntohl(i->key)); 316 ntohl(i->ref), ntohl(i->key));
304 if (publ) { 317 if (publ) {
305 tipc_publ_subscribe(publ, node); 318 tipc_publ_subscribe(net, publ, node);
306 return true; 319 return true;
307 } 320 }
308 } else if (dtype == WITHDRAWAL) { 321 } else if (dtype == WITHDRAWAL) {
309 publ = tipc_nametbl_remove_publ(ntohl(i->type), ntohl(i->lower), 322 publ = tipc_nametbl_remove_publ(net, ntohl(i->type),
323 ntohl(i->lower),
310 node, ntohl(i->ref), 324 node, ntohl(i->ref),
311 ntohl(i->key)); 325 ntohl(i->key));
312 if (publ) { 326 if (publ) {
313 tipc_publ_unsubscribe(publ, node); 327 tipc_publ_unsubscribe(net, publ, node);
314 kfree_rcu(publ, rcu); 328 kfree_rcu(publ, rcu);
315 return true; 329 return true;
316 } 330 }
@@ -343,7 +357,7 @@ static void tipc_named_add_backlog(struct distr_item *i, u32 type, u32 node)
343 * tipc_named_process_backlog - try to process any pending name table updates 357 * tipc_named_process_backlog - try to process any pending name table updates
344 * from the network. 358 * from the network.
345 */ 359 */
346void tipc_named_process_backlog(void) 360void tipc_named_process_backlog(struct net *net)
347{ 361{
348 struct distr_queue_item *e, *tmp; 362 struct distr_queue_item *e, *tmp;
349 char addr[16]; 363 char addr[16];
@@ -351,7 +365,7 @@ void tipc_named_process_backlog(void)
351 365
352 list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) { 366 list_for_each_entry_safe(e, tmp, &tipc_dist_queue, next) {
353 if (time_after(e->expires, now)) { 367 if (time_after(e->expires, now)) {
354 if (!tipc_update_nametbl(&e->i, e->node, e->dtype)) 368 if (!tipc_update_nametbl(net, &e->i, e->node, e->dtype))
355 continue; 369 continue;
356 } else { 370 } else {
357 tipc_addr_string_fill(addr, e->node); 371 tipc_addr_string_fill(addr, e->node);
@@ -367,24 +381,34 @@ void tipc_named_process_backlog(void)
367} 381}
368 382
369/** 383/**
370 * tipc_named_rcv - process name table update message sent by another node 384 * tipc_named_rcv - process name table update messages sent by another node
371 */ 385 */
372void tipc_named_rcv(struct sk_buff *buf) 386void tipc_named_rcv(struct net *net, struct sk_buff_head *inputq)
373{ 387{
374 struct tipc_msg *msg = buf_msg(buf); 388 struct tipc_net *tn = net_generic(net, tipc_net_id);
375 struct distr_item *item = (struct distr_item *)msg_data(msg); 389 struct tipc_msg *msg;
376 u32 count = msg_data_sz(msg) / ITEM_SIZE; 390 struct distr_item *item;
377 u32 node = msg_orignode(msg); 391 uint count;
378 392 u32 node;
379 spin_lock_bh(&tipc_nametbl_lock); 393 struct sk_buff *skb;
380 while (count--) { 394 int mtype;
381 if (!tipc_update_nametbl(item, node, msg_type(msg))) 395
382 tipc_named_add_backlog(item, msg_type(msg), node); 396 spin_lock_bh(&tn->nametbl_lock);
383 item++; 397 for (skb = skb_dequeue(inputq); skb; skb = skb_dequeue(inputq)) {
398 msg = buf_msg(skb);
399 mtype = msg_type(msg);
400 item = (struct distr_item *)msg_data(msg);
401 count = msg_data_sz(msg) / ITEM_SIZE;
402 node = msg_orignode(msg);
403 while (count--) {
404 if (!tipc_update_nametbl(net, item, node, mtype))
405 tipc_named_add_backlog(item, mtype, node);
406 item++;
407 }
408 kfree_skb(skb);
409 tipc_named_process_backlog(net);
384 } 410 }
385 tipc_named_process_backlog(); 411 spin_unlock_bh(&tn->nametbl_lock);
386 spin_unlock_bh(&tipc_nametbl_lock);
387 kfree_skb(buf);
388} 412}
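The rewritten receive path just above no longer takes a single buffer: callers hand over a whole sk_buff_head, and tipc_named_rcv() drains it under the per-namespace nametbl_lock, running the backlog after each message. A minimal sketch of that drain pattern, with process_item() standing in for the per-item name table update (the helper name is illustrative, not from the patch):

    #include <linux/skbuff.h>
    #include <linux/spinlock.h>

    /* Sketch only: drain an input queue of messages under a BH-safe lock.
     * process_item() is a hypothetical stand-in for tipc_update_nametbl(). */
    static void drain_input_queue(spinlock_t *lock, struct sk_buff_head *inputq)
    {
            struct sk_buff *skb;

            spin_lock_bh(lock);
            while ((skb = skb_dequeue(inputq)) != NULL) {
                    process_item(skb);      /* handle one NAME_DISTRIBUTOR message */
                    kfree_skb(skb);         /* consume the buffer once handled */
            }
            spin_unlock_bh(lock);
    }
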
389 413
390/** 414/**
@@ -394,17 +418,18 @@ void tipc_named_rcv(struct sk_buff *buf)
394 * All name table entries published by this node are updated to reflect 418 * All name table entries published by this node are updated to reflect
395 * the node's new network address. 419 * the node's new network address.
396 */ 420 */
397void tipc_named_reinit(void) 421void tipc_named_reinit(struct net *net)
398{ 422{
423 struct tipc_net *tn = net_generic(net, tipc_net_id);
399 struct publication *publ; 424 struct publication *publ;
400 int scope; 425 int scope;
401 426
402 spin_lock_bh(&tipc_nametbl_lock); 427 spin_lock_bh(&tn->nametbl_lock);
403 428
404 for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_NODE_SCOPE; scope++) 429 for (scope = TIPC_ZONE_SCOPE; scope <= TIPC_NODE_SCOPE; scope++)
405 list_for_each_entry_rcu(publ, &tipc_nametbl->publ_list[scope], 430 list_for_each_entry_rcu(publ, &tn->nametbl->publ_list[scope],
406 local_list) 431 local_list)
407 publ->node = tipc_own_addr; 432 publ->node = tn->own_addr;
408 433
409 spin_unlock_bh(&tipc_nametbl_lock); 434 spin_unlock_bh(&tn->nametbl_lock);
410} 435}
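The pattern that repeats throughout this file, and in name_table.c, net.c and netlink.c further down, is that the old globals (tipc_own_addr, tipc_nametbl, tipc_nametbl_lock) move into a per-namespace struct tipc_net that is looked up with net_generic(). A minimal sketch of the lookup, assuming only the fields visible in the hunks above:

    #include <net/net_namespace.h>
    #include <net/netns/generic.h>

    /* Sketch: a converted function receives the namespace and derives its
     * state from it instead of touching file-scope globals. */
    static void example_with_netns_state(struct net *net)
    {
            struct tipc_net *tn = net_generic(net, tipc_net_id);

            spin_lock_bh(&tn->nametbl_lock);    /* was the global tipc_nametbl_lock */
            /* ... operate on tn->nametbl and tn->own_addr ... */
            spin_unlock_bh(&tn->nametbl_lock);
    }
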
diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h
index cef55cedcfb2..dd2d9fd80da2 100644
--- a/net/tipc/name_distr.h
+++ b/net/tipc/name_distr.h
@@ -67,13 +67,13 @@ struct distr_item {
67 __be32 key; 67 __be32 key;
68}; 68};
69 69
70struct sk_buff *tipc_named_publish(struct publication *publ); 70struct sk_buff *tipc_named_publish(struct net *net, struct publication *publ);
71struct sk_buff *tipc_named_withdraw(struct publication *publ); 71struct sk_buff *tipc_named_withdraw(struct net *net, struct publication *publ);
72void named_cluster_distribute(struct sk_buff *buf); 72void named_cluster_distribute(struct net *net, struct sk_buff *buf);
73void tipc_named_node_up(u32 dnode); 73void tipc_named_node_up(struct net *net, u32 dnode);
74void tipc_named_rcv(struct sk_buff *buf); 74void tipc_named_rcv(struct net *net, struct sk_buff_head *msg_queue);
75void tipc_named_reinit(void); 75void tipc_named_reinit(struct net *net);
76void tipc_named_process_backlog(void); 76void tipc_named_process_backlog(struct net *net);
77void tipc_publ_notify(struct list_head *nsub_list, u32 addr); 77void tipc_publ_notify(struct net *net, struct list_head *nsub_list, u32 addr);
78 78
79#endif 79#endif
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index c8df0223371a..105ba7adf06f 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/name_table.c: TIPC name table code 2 * net/tipc/name_table.c: TIPC name table code
3 * 3 *
4 * Copyright (c) 2000-2006, 2014, Ericsson AB 4 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
5 * Copyright (c) 2004-2008, 2010-2014, Wind River Systems 5 * Copyright (c) 2004-2008, 2010-2014, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -34,11 +34,15 @@
34 * POSSIBILITY OF SUCH DAMAGE. 34 * POSSIBILITY OF SUCH DAMAGE.
35 */ 35 */
36 36
37#include <net/sock.h>
37#include "core.h" 38#include "core.h"
38#include "config.h" 39#include "netlink.h"
39#include "name_table.h" 40#include "name_table.h"
40#include "name_distr.h" 41#include "name_distr.h"
41#include "subscr.h" 42#include "subscr.h"
43#include "bcast.h"
44#include "addr.h"
45#include <net/genetlink.h>
42 46
43#define TIPC_NAMETBL_SIZE 1024 /* must be a power of 2 */ 47#define TIPC_NAMETBL_SIZE 1024 /* must be a power of 2 */
44 48
@@ -105,9 +109,6 @@ struct name_seq {
105 struct rcu_head rcu; 109 struct rcu_head rcu;
106}; 110};
107 111
108struct name_table *tipc_nametbl;
109DEFINE_SPINLOCK(tipc_nametbl_lock);
110
111static int hash(int x) 112static int hash(int x)
112{ 113{
113 return x & (TIPC_NAMETBL_SIZE - 1); 114 return x & (TIPC_NAMETBL_SIZE - 1);
@@ -228,9 +229,11 @@ static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
228/** 229/**
229 * tipc_nameseq_insert_publ 230 * tipc_nameseq_insert_publ
230 */ 231 */
231static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq, 232static struct publication *tipc_nameseq_insert_publ(struct net *net,
232 u32 type, u32 lower, u32 upper, 233 struct name_seq *nseq,
233 u32 scope, u32 node, u32 port, u32 key) 234 u32 type, u32 lower,
235 u32 upper, u32 scope,
236 u32 node, u32 port, u32 key)
234{ 237{
235 struct tipc_subscription *s; 238 struct tipc_subscription *s;
236 struct tipc_subscription *st; 239 struct tipc_subscription *st;
@@ -315,12 +318,12 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
315 list_add(&publ->zone_list, &info->zone_list); 318 list_add(&publ->zone_list, &info->zone_list);
316 info->zone_list_size++; 319 info->zone_list_size++;
317 320
318 if (in_own_cluster(node)) { 321 if (in_own_cluster(net, node)) {
319 list_add(&publ->cluster_list, &info->cluster_list); 322 list_add(&publ->cluster_list, &info->cluster_list);
320 info->cluster_list_size++; 323 info->cluster_list_size++;
321 } 324 }
322 325
323 if (in_own_node(node)) { 326 if (in_own_node(net, node)) {
324 list_add(&publ->node_list, &info->node_list); 327 list_add(&publ->node_list, &info->node_list);
325 info->node_list_size++; 328 info->node_list_size++;
326 } 329 }
@@ -349,8 +352,10 @@ static struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
349 * A failed withdraw request simply returns a failure indication and lets the 352 * A failed withdraw request simply returns a failure indication and lets the
350 * caller issue any error or warning messages associated with such a problem. 353 * caller issue any error or warning messages associated with such a problem.
351 */ 354 */
352static struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst, 355static struct publication *tipc_nameseq_remove_publ(struct net *net,
353 u32 node, u32 ref, u32 key) 356 struct name_seq *nseq,
357 u32 inst, u32 node,
358 u32 ref, u32 key)
354{ 359{
355 struct publication *publ; 360 struct publication *publ;
356 struct sub_seq *sseq = nameseq_find_subseq(nseq, inst); 361 struct sub_seq *sseq = nameseq_find_subseq(nseq, inst);
@@ -378,13 +383,13 @@ found:
378 info->zone_list_size--; 383 info->zone_list_size--;
379 384
380 /* Remove publication from cluster scope list, if present */ 385 /* Remove publication from cluster scope list, if present */
381 if (in_own_cluster(node)) { 386 if (in_own_cluster(net, node)) {
382 list_del(&publ->cluster_list); 387 list_del(&publ->cluster_list);
383 info->cluster_list_size--; 388 info->cluster_list_size--;
384 } 389 }
385 390
386 /* Remove publication from node scope list, if present */ 391 /* Remove publication from node scope list, if present */
387 if (in_own_node(node)) { 392 if (in_own_node(net, node)) {
388 list_del(&publ->node_list); 393 list_del(&publ->node_list);
389 info->node_list_size--; 394 info->node_list_size--;
390 } 395 }
@@ -447,12 +452,13 @@ static void tipc_nameseq_subscribe(struct name_seq *nseq,
447 } 452 }
448} 453}
449 454
450static struct name_seq *nametbl_find_seq(u32 type) 455static struct name_seq *nametbl_find_seq(struct net *net, u32 type)
451{ 456{
457 struct tipc_net *tn = net_generic(net, tipc_net_id);
452 struct hlist_head *seq_head; 458 struct hlist_head *seq_head;
453 struct name_seq *ns; 459 struct name_seq *ns;
454 460
455 seq_head = &tipc_nametbl->seq_hlist[hash(type)]; 461 seq_head = &tn->nametbl->seq_hlist[hash(type)];
456 hlist_for_each_entry_rcu(ns, seq_head, ns_list) { 462 hlist_for_each_entry_rcu(ns, seq_head, ns_list) {
457 if (ns->type == type) 463 if (ns->type == type)
458 return ns; 464 return ns;
@@ -461,11 +467,13 @@ static struct name_seq *nametbl_find_seq(u32 type)
461 return NULL; 467 return NULL;
462}; 468};
463 469
464struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper, 470struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type,
465 u32 scope, u32 node, u32 port, u32 key) 471 u32 lower, u32 upper, u32 scope,
472 u32 node, u32 port, u32 key)
466{ 473{
474 struct tipc_net *tn = net_generic(net, tipc_net_id);
467 struct publication *publ; 475 struct publication *publ;
468 struct name_seq *seq = nametbl_find_seq(type); 476 struct name_seq *seq = nametbl_find_seq(net, type);
469 int index = hash(type); 477 int index = hash(type);
470 478
471 if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) || 479 if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) ||
@@ -476,29 +484,29 @@ struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
476 } 484 }
477 485
478 if (!seq) 486 if (!seq)
479 seq = tipc_nameseq_create(type, 487 seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]);
480 &tipc_nametbl->seq_hlist[index]);
481 if (!seq) 488 if (!seq)
482 return NULL; 489 return NULL;
483 490
484 spin_lock_bh(&seq->lock); 491 spin_lock_bh(&seq->lock);
485 publ = tipc_nameseq_insert_publ(seq, type, lower, upper, 492 publ = tipc_nameseq_insert_publ(net, seq, type, lower, upper,
486 scope, node, port, key); 493 scope, node, port, key);
487 spin_unlock_bh(&seq->lock); 494 spin_unlock_bh(&seq->lock);
488 return publ; 495 return publ;
489} 496}
490 497
491struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower, 498struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type,
492 u32 node, u32 ref, u32 key) 499 u32 lower, u32 node, u32 ref,
500 u32 key)
493{ 501{
494 struct publication *publ; 502 struct publication *publ;
495 struct name_seq *seq = nametbl_find_seq(type); 503 struct name_seq *seq = nametbl_find_seq(net, type);
496 504
497 if (!seq) 505 if (!seq)
498 return NULL; 506 return NULL;
499 507
500 spin_lock_bh(&seq->lock); 508 spin_lock_bh(&seq->lock);
501 publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key); 509 publ = tipc_nameseq_remove_publ(net, seq, lower, node, ref, key);
502 if (!seq->first_free && list_empty(&seq->subscriptions)) { 510 if (!seq->first_free && list_empty(&seq->subscriptions)) {
503 hlist_del_init_rcu(&seq->ns_list); 511 hlist_del_init_rcu(&seq->ns_list);
504 kfree(seq->sseqs); 512 kfree(seq->sseqs);
@@ -523,8 +531,10 @@ struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
523 * - if name translation is attempted and fails, sets 'destnode' to 0 531 * - if name translation is attempted and fails, sets 'destnode' to 0
524 * and returns 0 532 * and returns 0
525 */ 533 */
526u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode) 534u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance,
535 u32 *destnode)
527{ 536{
537 struct tipc_net *tn = net_generic(net, tipc_net_id);
528 struct sub_seq *sseq; 538 struct sub_seq *sseq;
529 struct name_info *info; 539 struct name_info *info;
530 struct publication *publ; 540 struct publication *publ;
@@ -532,11 +542,11 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
532 u32 ref = 0; 542 u32 ref = 0;
533 u32 node = 0; 543 u32 node = 0;
534 544
535 if (!tipc_in_scope(*destnode, tipc_own_addr)) 545 if (!tipc_in_scope(*destnode, tn->own_addr))
536 return 0; 546 return 0;
537 547
538 rcu_read_lock(); 548 rcu_read_lock();
539 seq = nametbl_find_seq(type); 549 seq = nametbl_find_seq(net, type);
540 if (unlikely(!seq)) 550 if (unlikely(!seq))
541 goto not_found; 551 goto not_found;
542 spin_lock_bh(&seq->lock); 552 spin_lock_bh(&seq->lock);
@@ -569,13 +579,13 @@ u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
569 } 579 }
570 580
571 /* Round-Robin Algorithm */ 581 /* Round-Robin Algorithm */
572 else if (*destnode == tipc_own_addr) { 582 else if (*destnode == tn->own_addr) {
573 if (list_empty(&info->node_list)) 583 if (list_empty(&info->node_list))
574 goto no_match; 584 goto no_match;
575 publ = list_first_entry(&info->node_list, struct publication, 585 publ = list_first_entry(&info->node_list, struct publication,
576 node_list); 586 node_list);
577 list_move_tail(&publ->node_list, &info->node_list); 587 list_move_tail(&publ->node_list, &info->node_list);
578 } else if (in_own_cluster_exact(*destnode)) { 588 } else if (in_own_cluster_exact(net, *destnode)) {
579 if (list_empty(&info->cluster_list)) 589 if (list_empty(&info->cluster_list))
580 goto no_match; 590 goto no_match;
581 publ = list_first_entry(&info->cluster_list, struct publication, 591 publ = list_first_entry(&info->cluster_list, struct publication,
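The hunk above preserves the round-robin behaviour of tipc_nametbl_translate(): the first publication on the matching list is returned and then rotated to the tail with list_move_tail(), so repeated lookups spread traffic across all publishers of the name. The rotation in isolation, as a sketch:

    #include <linux/list.h>

    /* Sketch: pick the head of a publication list and rotate it to the tail,
     * so the next lookup returns a different publisher. */
    static struct publication *round_robin_pick(struct list_head *head)
    {
            struct publication *publ;

            if (list_empty(head))
                    return NULL;
            publ = list_first_entry(head, struct publication, node_list);
            list_move_tail(&publ->node_list, head);
            return publ;
    }
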
@@ -609,8 +619,8 @@ not_found:
609 * 619 *
610 * Returns non-zero if any off-node ports overlap 620 * Returns non-zero if any off-node ports overlap
611 */ 621 */
612int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, 622int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
613 struct tipc_port_list *dports) 623 u32 limit, struct tipc_plist *dports)
614{ 624{
615 struct name_seq *seq; 625 struct name_seq *seq;
616 struct sub_seq *sseq; 626 struct sub_seq *sseq;
@@ -619,7 +629,7 @@ int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
619 int res = 0; 629 int res = 0;
620 630
621 rcu_read_lock(); 631 rcu_read_lock();
622 seq = nametbl_find_seq(type); 632 seq = nametbl_find_seq(net, type);
623 if (!seq) 633 if (!seq)
624 goto exit; 634 goto exit;
625 635
@@ -635,7 +645,7 @@ int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
635 info = sseq->info; 645 info = sseq->info;
636 list_for_each_entry(publ, &info->node_list, node_list) { 646 list_for_each_entry(publ, &info->node_list, node_list) {
637 if (publ->scope <= limit) 647 if (publ->scope <= limit)
638 tipc_port_list_add(dports, publ->ref); 648 tipc_plist_push(dports, publ->ref);
639 } 649 }
640 650
641 if (info->cluster_list_size != info->node_list_size) 651 if (info->cluster_list_size != info->node_list_size)
@@ -650,50 +660,55 @@ exit:
650/* 660/*
651 * tipc_nametbl_publish - add name publication to network name tables 661 * tipc_nametbl_publish - add name publication to network name tables
652 */ 662 */
653struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper, 663struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower,
654 u32 scope, u32 port_ref, u32 key) 664 u32 upper, u32 scope, u32 port_ref,
665 u32 key)
655{ 666{
656 struct publication *publ; 667 struct publication *publ;
657 struct sk_buff *buf = NULL; 668 struct sk_buff *buf = NULL;
669 struct tipc_net *tn = net_generic(net, tipc_net_id);
658 670
659 spin_lock_bh(&tipc_nametbl_lock); 671 spin_lock_bh(&tn->nametbl_lock);
660 if (tipc_nametbl->local_publ_count >= TIPC_MAX_PUBLICATIONS) { 672 if (tn->nametbl->local_publ_count >= TIPC_MAX_PUBLICATIONS) {
661 pr_warn("Publication failed, local publication limit reached (%u)\n", 673 pr_warn("Publication failed, local publication limit reached (%u)\n",
662 TIPC_MAX_PUBLICATIONS); 674 TIPC_MAX_PUBLICATIONS);
663 spin_unlock_bh(&tipc_nametbl_lock); 675 spin_unlock_bh(&tn->nametbl_lock);
664 return NULL; 676 return NULL;
665 } 677 }
666 678
667 publ = tipc_nametbl_insert_publ(type, lower, upper, scope, 679 publ = tipc_nametbl_insert_publ(net, type, lower, upper, scope,
668 tipc_own_addr, port_ref, key); 680 tn->own_addr, port_ref, key);
669 if (likely(publ)) { 681 if (likely(publ)) {
670 tipc_nametbl->local_publ_count++; 682 tn->nametbl->local_publ_count++;
671 buf = tipc_named_publish(publ); 683 buf = tipc_named_publish(net, publ);
672 /* Any pending external events? */ 684 /* Any pending external events? */
673 tipc_named_process_backlog(); 685 tipc_named_process_backlog(net);
674 } 686 }
675 spin_unlock_bh(&tipc_nametbl_lock); 687 spin_unlock_bh(&tn->nametbl_lock);
676 688
677 if (buf) 689 if (buf)
678 named_cluster_distribute(buf); 690 named_cluster_distribute(net, buf);
679 return publ; 691 return publ;
680} 692}
681 693
682/** 694/**
683 * tipc_nametbl_withdraw - withdraw name publication from network name tables 695 * tipc_nametbl_withdraw - withdraw name publication from network name tables
684 */ 696 */
685int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key) 697int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref,
698 u32 key)
686{ 699{
687 struct publication *publ; 700 struct publication *publ;
688 struct sk_buff *skb = NULL; 701 struct sk_buff *skb = NULL;
702 struct tipc_net *tn = net_generic(net, tipc_net_id);
689 703
690 spin_lock_bh(&tipc_nametbl_lock); 704 spin_lock_bh(&tn->nametbl_lock);
691 publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key); 705 publ = tipc_nametbl_remove_publ(net, type, lower, tn->own_addr,
706 ref, key);
692 if (likely(publ)) { 707 if (likely(publ)) {
693 tipc_nametbl->local_publ_count--; 708 tn->nametbl->local_publ_count--;
694 skb = tipc_named_withdraw(publ); 709 skb = tipc_named_withdraw(net, publ);
695 /* Any pending external events? */ 710 /* Any pending external events? */
696 tipc_named_process_backlog(); 711 tipc_named_process_backlog(net);
697 list_del_init(&publ->pport_list); 712 list_del_init(&publ->pport_list);
698 kfree_rcu(publ, rcu); 713 kfree_rcu(publ, rcu);
699 } else { 714 } else {
@@ -701,10 +716,10 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
701 "(type=%u, lower=%u, ref=%u, key=%u)\n", 716 "(type=%u, lower=%u, ref=%u, key=%u)\n",
702 type, lower, ref, key); 717 type, lower, ref, key);
703 } 718 }
704 spin_unlock_bh(&tipc_nametbl_lock); 719 spin_unlock_bh(&tn->nametbl_lock);
705 720
706 if (skb) { 721 if (skb) {
707 named_cluster_distribute(skb); 722 named_cluster_distribute(net, skb);
708 return 1; 723 return 1;
709 } 724 }
710 return 0; 725 return 0;
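With the signatures above, anything that binds a name (for instance the socket layer) now has to carry its namespace through both publish and withdraw. A hedged usage sketch; the type and instance values are made up for illustration:

    /* Sketch: bind and later unbind a name within one namespace.
     * 1000/{1,1} is an arbitrary example name, not mandated by the patch. */
    static int example_bind_unbind(struct net *net, u32 port_ref, u32 key)
    {
            struct publication *publ;

            publ = tipc_nametbl_publish(net, 1000, 1, 1, TIPC_NODE_SCOPE,
                                        port_ref, key);
            if (!publ)
                    return -ENOMEM;     /* table limit reached or bad scope */

            /* ... name is now visible; on unbind: ... */
            tipc_nametbl_withdraw(net, 1000, 1, port_ref, key);
            return 0;
    }
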
@@ -715,15 +730,15 @@ int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
715 */ 730 */
716void tipc_nametbl_subscribe(struct tipc_subscription *s) 731void tipc_nametbl_subscribe(struct tipc_subscription *s)
717{ 732{
733 struct tipc_net *tn = net_generic(s->net, tipc_net_id);
718 u32 type = s->seq.type; 734 u32 type = s->seq.type;
719 int index = hash(type); 735 int index = hash(type);
720 struct name_seq *seq; 736 struct name_seq *seq;
721 737
722 spin_lock_bh(&tipc_nametbl_lock); 738 spin_lock_bh(&tn->nametbl_lock);
723 seq = nametbl_find_seq(type); 739 seq = nametbl_find_seq(s->net, type);
724 if (!seq) 740 if (!seq)
725 seq = tipc_nameseq_create(type, 741 seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]);
726 &tipc_nametbl->seq_hlist[index]);
727 if (seq) { 742 if (seq) {
728 spin_lock_bh(&seq->lock); 743 spin_lock_bh(&seq->lock);
729 tipc_nameseq_subscribe(seq, s); 744 tipc_nameseq_subscribe(seq, s);
@@ -732,7 +747,7 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s)
732 pr_warn("Failed to create subscription for {%u,%u,%u}\n", 747 pr_warn("Failed to create subscription for {%u,%u,%u}\n",
733 s->seq.type, s->seq.lower, s->seq.upper); 748 s->seq.type, s->seq.lower, s->seq.upper);
734 } 749 }
735 spin_unlock_bh(&tipc_nametbl_lock); 750 spin_unlock_bh(&tn->nametbl_lock);
736} 751}
737 752
738/** 753/**
@@ -740,10 +755,11 @@ void tipc_nametbl_subscribe(struct tipc_subscription *s)
740 */ 755 */
741void tipc_nametbl_unsubscribe(struct tipc_subscription *s) 756void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
742{ 757{
758 struct tipc_net *tn = net_generic(s->net, tipc_net_id);
743 struct name_seq *seq; 759 struct name_seq *seq;
744 760
745 spin_lock_bh(&tipc_nametbl_lock); 761 spin_lock_bh(&tn->nametbl_lock);
746 seq = nametbl_find_seq(s->seq.type); 762 seq = nametbl_find_seq(s->net, s->seq.type);
747 if (seq != NULL) { 763 if (seq != NULL) {
748 spin_lock_bh(&seq->lock); 764 spin_lock_bh(&seq->lock);
749 list_del_init(&s->nameseq_list); 765 list_del_init(&s->nameseq_list);
@@ -756,193 +772,13 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
756 spin_unlock_bh(&seq->lock); 772 spin_unlock_bh(&seq->lock);
757 } 773 }
758 } 774 }
759 spin_unlock_bh(&tipc_nametbl_lock); 775 spin_unlock_bh(&tn->nametbl_lock);
760}
761
762/**
763 * subseq_list - print specified sub-sequence contents into the given buffer
764 */
765static int subseq_list(struct sub_seq *sseq, char *buf, int len, u32 depth,
766 u32 index)
767{
768 char portIdStr[27];
769 const char *scope_str[] = {"", " zone", " cluster", " node"};
770 struct publication *publ;
771 struct name_info *info;
772 int ret;
773
774 ret = tipc_snprintf(buf, len, "%-10u %-10u ", sseq->lower, sseq->upper);
775
776 if (depth == 2) {
777 ret += tipc_snprintf(buf - ret, len + ret, "\n");
778 return ret;
779 }
780
781 info = sseq->info;
782
783 list_for_each_entry(publ, &info->zone_list, zone_list) {
784 sprintf(portIdStr, "<%u.%u.%u:%u>",
785 tipc_zone(publ->node), tipc_cluster(publ->node),
786 tipc_node(publ->node), publ->ref);
787 ret += tipc_snprintf(buf + ret, len - ret, "%-26s ", portIdStr);
788 if (depth > 3) {
789 ret += tipc_snprintf(buf + ret, len - ret, "%-10u %s",
790 publ->key, scope_str[publ->scope]);
791 }
792 if (!list_is_last(&publ->zone_list, &info->zone_list))
793 ret += tipc_snprintf(buf + ret, len - ret,
794 "\n%33s", " ");
795 }
796
797 ret += tipc_snprintf(buf + ret, len - ret, "\n");
798 return ret;
799}
800
801/**
802 * nameseq_list - print specified name sequence contents into the given buffer
803 */
804static int nameseq_list(struct name_seq *seq, char *buf, int len, u32 depth,
805 u32 type, u32 lowbound, u32 upbound, u32 index)
806{
807 struct sub_seq *sseq;
808 char typearea[11];
809 int ret = 0;
810
811 if (seq->first_free == 0)
812 return 0;
813
814 sprintf(typearea, "%-10u", seq->type);
815
816 if (depth == 1) {
817 ret += tipc_snprintf(buf, len, "%s\n", typearea);
818 return ret;
819 }
820
821 for (sseq = seq->sseqs; sseq != &seq->sseqs[seq->first_free]; sseq++) {
822 if ((lowbound <= sseq->upper) && (upbound >= sseq->lower)) {
823 ret += tipc_snprintf(buf + ret, len - ret, "%s ",
824 typearea);
825 spin_lock_bh(&seq->lock);
826 ret += subseq_list(sseq, buf + ret, len - ret,
827 depth, index);
828 spin_unlock_bh(&seq->lock);
829 sprintf(typearea, "%10s", " ");
830 }
831 }
832 return ret;
833}
834
835/**
836 * nametbl_header - print name table header into the given buffer
837 */
838static int nametbl_header(char *buf, int len, u32 depth)
839{
840 const char *header[] = {
841 "Type ",
842 "Lower Upper ",
843 "Port Identity ",
844 "Publication Scope"
845 };
846
847 int i;
848 int ret = 0;
849
850 if (depth > 4)
851 depth = 4;
852 for (i = 0; i < depth; i++)
853 ret += tipc_snprintf(buf + ret, len - ret, header[i]);
854 ret += tipc_snprintf(buf + ret, len - ret, "\n");
855 return ret;
856}
857
858/**
859 * nametbl_list - print specified name table contents into the given buffer
860 */
861static int nametbl_list(char *buf, int len, u32 depth_info,
862 u32 type, u32 lowbound, u32 upbound)
863{
864 struct hlist_head *seq_head;
865 struct name_seq *seq;
866 int all_types;
867 int ret = 0;
868 u32 depth;
869 u32 i;
870
871 all_types = (depth_info & TIPC_NTQ_ALLTYPES);
872 depth = (depth_info & ~TIPC_NTQ_ALLTYPES);
873
874 if (depth == 0)
875 return 0;
876
877 if (all_types) {
878 /* display all entries in name table to specified depth */
879 ret += nametbl_header(buf, len, depth);
880 lowbound = 0;
881 upbound = ~0;
882 for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
883 seq_head = &tipc_nametbl->seq_hlist[i];
884 hlist_for_each_entry_rcu(seq, seq_head, ns_list) {
885 ret += nameseq_list(seq, buf + ret, len - ret,
886 depth, seq->type,
887 lowbound, upbound, i);
888 }
889 }
890 } else {
891 /* display only the sequence that matches the specified type */
892 if (upbound < lowbound) {
893 ret += tipc_snprintf(buf + ret, len - ret,
894 "invalid name sequence specified\n");
895 return ret;
896 }
897 ret += nametbl_header(buf + ret, len - ret, depth);
898 i = hash(type);
899 seq_head = &tipc_nametbl->seq_hlist[i];
900 hlist_for_each_entry_rcu(seq, seq_head, ns_list) {
901 if (seq->type == type) {
902 ret += nameseq_list(seq, buf + ret, len - ret,
903 depth, type,
904 lowbound, upbound, i);
905 break;
906 }
907 }
908 }
909 return ret;
910} 776}
911 777
912struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space) 778int tipc_nametbl_init(struct net *net)
913{
914 struct sk_buff *buf;
915 struct tipc_name_table_query *argv;
916 struct tlv_desc *rep_tlv;
917 char *pb;
918 int pb_len;
919 int str_len;
920
921 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NAME_TBL_QUERY))
922 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
923
924 buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
925 if (!buf)
926 return NULL;
927
928 rep_tlv = (struct tlv_desc *)buf->data;
929 pb = TLV_DATA(rep_tlv);
930 pb_len = ULTRA_STRING_MAX_LEN;
931 argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area);
932 rcu_read_lock();
933 str_len = nametbl_list(pb, pb_len, ntohl(argv->depth),
934 ntohl(argv->type),
935 ntohl(argv->lowbound), ntohl(argv->upbound));
936 rcu_read_unlock();
937 str_len += 1; /* for "\0" */
938 skb_put(buf, TLV_SPACE(str_len));
939 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
940
941 return buf;
942}
943
944int tipc_nametbl_init(void)
945{ 779{
780 struct tipc_net *tn = net_generic(net, tipc_net_id);
781 struct name_table *tipc_nametbl;
946 int i; 782 int i;
947 783
948 tipc_nametbl = kzalloc(sizeof(*tipc_nametbl), GFP_ATOMIC); 784 tipc_nametbl = kzalloc(sizeof(*tipc_nametbl), GFP_ATOMIC);
@@ -955,6 +791,8 @@ int tipc_nametbl_init(void)
955 INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_ZONE_SCOPE]); 791 INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_ZONE_SCOPE]);
956 INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_CLUSTER_SCOPE]); 792 INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_CLUSTER_SCOPE]);
957 INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_NODE_SCOPE]); 793 INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_NODE_SCOPE]);
794 tn->nametbl = tipc_nametbl;
795 spin_lock_init(&tn->nametbl_lock);
958 return 0; 796 return 0;
959} 797}
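tipc_nametbl_init() and tipc_nametbl_stop() now take the namespace and keep the table in tn->nametbl. The per-namespace hook that calls them lives in core.c, outside the hunks shown here; the usual wiring for this kind of conversion looks roughly as below (tipc_init_net and tipc_exit_net are assumed names for those callbacks):

    /* Sketch of typical pernet wiring; the real hook is in core.c (not shown).
     * The .id/.size pair is what makes net_generic(net, tipc_net_id) work. */
    static struct pernet_operations tipc_net_ops = {
            .init = tipc_init_net,      /* assumed: calls tipc_nametbl_init(net) */
            .exit = tipc_exit_net,      /* assumed: calls tipc_nametbl_stop(net) */
            .id   = &tipc_net_id,
            .size = sizeof(struct tipc_net),
    };

    /* registered once at module load, e.g.: */
    /* err = register_pernet_subsys(&tipc_net_ops); */
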
960 798
@@ -963,7 +801,7 @@ int tipc_nametbl_init(void)
963 * 801 *
964 * tipc_nametbl_lock must be held when calling this function 802 * tipc_nametbl_lock must be held when calling this function
965 */ 803 */
966static void tipc_purge_publications(struct name_seq *seq) 804static void tipc_purge_publications(struct net *net, struct name_seq *seq)
967{ 805{
968 struct publication *publ, *safe; 806 struct publication *publ, *safe;
969 struct sub_seq *sseq; 807 struct sub_seq *sseq;
@@ -973,8 +811,8 @@ static void tipc_purge_publications(struct name_seq *seq)
973 sseq = seq->sseqs; 811 sseq = seq->sseqs;
974 info = sseq->info; 812 info = sseq->info;
975 list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) { 813 list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
976 tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node, 814 tipc_nametbl_remove_publ(net, publ->type, publ->lower,
977 publ->ref, publ->key); 815 publ->node, publ->ref, publ->key);
978 kfree_rcu(publ, rcu); 816 kfree_rcu(publ, rcu);
979 } 817 }
980 hlist_del_init_rcu(&seq->ns_list); 818 hlist_del_init_rcu(&seq->ns_list);
@@ -984,25 +822,27 @@ static void tipc_purge_publications(struct name_seq *seq)
984 kfree_rcu(seq, rcu); 822 kfree_rcu(seq, rcu);
985} 823}
986 824
987void tipc_nametbl_stop(void) 825void tipc_nametbl_stop(struct net *net)
988{ 826{
989 u32 i; 827 u32 i;
990 struct name_seq *seq; 828 struct name_seq *seq;
991 struct hlist_head *seq_head; 829 struct hlist_head *seq_head;
830 struct tipc_net *tn = net_generic(net, tipc_net_id);
831 struct name_table *tipc_nametbl = tn->nametbl;
992 832
993 /* Verify name table is empty and purge any lingering 833 /* Verify name table is empty and purge any lingering
994 * publications, then release the name table 834 * publications, then release the name table
995 */ 835 */
996 spin_lock_bh(&tipc_nametbl_lock); 836 spin_lock_bh(&tn->nametbl_lock);
997 for (i = 0; i < TIPC_NAMETBL_SIZE; i++) { 837 for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
998 if (hlist_empty(&tipc_nametbl->seq_hlist[i])) 838 if (hlist_empty(&tipc_nametbl->seq_hlist[i]))
999 continue; 839 continue;
1000 seq_head = &tipc_nametbl->seq_hlist[i]; 840 seq_head = &tipc_nametbl->seq_hlist[i];
1001 hlist_for_each_entry_rcu(seq, seq_head, ns_list) { 841 hlist_for_each_entry_rcu(seq, seq_head, ns_list) {
1002 tipc_purge_publications(seq); 842 tipc_purge_publications(net, seq);
1003 } 843 }
1004 } 844 }
1005 spin_unlock_bh(&tipc_nametbl_lock); 845 spin_unlock_bh(&tn->nametbl_lock);
1006 846
1007 synchronize_net(); 847 synchronize_net();
1008 kfree(tipc_nametbl); 848 kfree(tipc_nametbl);
@@ -1033,7 +873,7 @@ static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg,
1033 *last_publ = p->key; 873 *last_publ = p->key;
1034 874
1035 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, 875 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq,
1036 &tipc_genl_v2_family, NLM_F_MULTI, 876 &tipc_genl_family, NLM_F_MULTI,
1037 TIPC_NL_NAME_TABLE_GET); 877 TIPC_NL_NAME_TABLE_GET);
1038 if (!hdr) 878 if (!hdr)
1039 return -EMSGSIZE; 879 return -EMSGSIZE;
@@ -1106,9 +946,10 @@ static int __tipc_nl_subseq_list(struct tipc_nl_msg *msg, struct name_seq *seq,
1106 return 0; 946 return 0;
1107} 947}
1108 948
1109static int __tipc_nl_seq_list(struct tipc_nl_msg *msg, u32 *last_type, 949static int tipc_nl_seq_list(struct net *net, struct tipc_nl_msg *msg,
1110 u32 *last_lower, u32 *last_publ) 950 u32 *last_type, u32 *last_lower, u32 *last_publ)
1111{ 951{
952 struct tipc_net *tn = net_generic(net, tipc_net_id);
1112 struct hlist_head *seq_head; 953 struct hlist_head *seq_head;
1113 struct name_seq *seq = NULL; 954 struct name_seq *seq = NULL;
1114 int err; 955 int err;
@@ -1120,10 +961,10 @@ static int __tipc_nl_seq_list(struct tipc_nl_msg *msg, u32 *last_type,
1120 i = 0; 961 i = 0;
1121 962
1122 for (; i < TIPC_NAMETBL_SIZE; i++) { 963 for (; i < TIPC_NAMETBL_SIZE; i++) {
1123 seq_head = &tipc_nametbl->seq_hlist[i]; 964 seq_head = &tn->nametbl->seq_hlist[i];
1124 965
1125 if (*last_type) { 966 if (*last_type) {
1126 seq = nametbl_find_seq(*last_type); 967 seq = nametbl_find_seq(net, *last_type);
1127 if (!seq) 968 if (!seq)
1128 return -EPIPE; 969 return -EPIPE;
1129 } else { 970 } else {
@@ -1157,6 +998,7 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
1157 u32 last_type = cb->args[0]; 998 u32 last_type = cb->args[0];
1158 u32 last_lower = cb->args[1]; 999 u32 last_lower = cb->args[1];
1159 u32 last_publ = cb->args[2]; 1000 u32 last_publ = cb->args[2];
1001 struct net *net = sock_net(skb->sk);
1160 struct tipc_nl_msg msg; 1002 struct tipc_nl_msg msg;
1161 1003
1162 if (done) 1004 if (done)
@@ -1167,7 +1009,7 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
1167 msg.seq = cb->nlh->nlmsg_seq; 1009 msg.seq = cb->nlh->nlmsg_seq;
1168 1010
1169 rcu_read_lock(); 1011 rcu_read_lock();
1170 err = __tipc_nl_seq_list(&msg, &last_type, &last_lower, &last_publ); 1012 err = tipc_nl_seq_list(net, &msg, &last_type, &last_lower, &last_publ);
1171 if (!err) { 1013 if (!err) {
1172 done = 1; 1014 done = 1;
1173 } else if (err != -EMSGSIZE) { 1015 } else if (err != -EMSGSIZE) {
@@ -1188,3 +1030,41 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
1188 1030
1189 return skb->len; 1031 return skb->len;
1190} 1032}
1033
1034void tipc_plist_push(struct tipc_plist *pl, u32 port)
1035{
1036 struct tipc_plist *nl;
1037
1038 if (likely(!pl->port)) {
1039 pl->port = port;
1040 return;
1041 }
1042 if (pl->port == port)
1043 return;
1044 list_for_each_entry(nl, &pl->list, list) {
1045 if (nl->port == port)
1046 return;
1047 }
1048 nl = kmalloc(sizeof(*nl), GFP_ATOMIC);
1049 if (nl) {
1050 nl->port = port;
1051 list_add(&nl->list, &pl->list);
1052 }
1053}
1054
1055u32 tipc_plist_pop(struct tipc_plist *pl)
1056{
1057 struct tipc_plist *nl;
1058 u32 port = 0;
1059
1060 if (likely(list_empty(&pl->list))) {
1061 port = pl->port;
1062 pl->port = 0;
1063 return port;
1064 }
1065 nl = list_first_entry(&pl->list, typeof(*nl), list);
1066 port = nl->port;
1067 list_del(&nl->list);
1068 kfree(nl);
1069 return port;
1070}
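tipc_plist is the small, partly inline port list that replaces tipc_port_list: the first port is stored in the list head itself and duplicates are dropped, so the common single-destination case needs no allocation. A hedged sketch of how a multicast lookup might fill and drain it, built only from the interfaces above (deliver_to_port() is a hypothetical consumer):

    /* Sketch: collect destination ports for a multicast name lookup, then
     * drain the list; tipc_plist_pop() returns 0 once the list is empty. */
    static void example_mc_lookup(struct net *net, u32 type, u32 lower, u32 upper)
    {
            struct tipc_plist dports;
            u32 port;

            tipc_plist_init(&dports);
            tipc_nametbl_mc_translate(net, type, lower, upper,
                                      TIPC_CLUSTER_SCOPE, &dports);
            while ((port = tipc_plist_pop(&dports)))
                    deliver_to_port(net, port);     /* hypothetical consumer */
    }
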
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index 5f0dee92010d..1524a73830f7 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/name_table.h: Include file for TIPC name table code 2 * net/tipc/name_table.h: Include file for TIPC name table code
3 * 3 *
4 * Copyright (c) 2000-2006, 2014, Ericsson AB 4 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
5 * Copyright (c) 2004-2005, 2010-2011, Wind River Systems 5 * Copyright (c) 2004-2005, 2010-2011, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -38,7 +38,7 @@
38#define _TIPC_NAME_TABLE_H 38#define _TIPC_NAME_TABLE_H
39 39
40struct tipc_subscription; 40struct tipc_subscription;
41struct tipc_port_list; 41struct tipc_plist;
42 42
43/* 43/*
44 * TIPC name types reserved for internal TIPC use (both current and planned) 44 * TIPC name types reserved for internal TIPC use (both current and planned)
@@ -95,26 +95,39 @@ struct name_table {
95 u32 local_publ_count; 95 u32 local_publ_count;
96}; 96};
97 97
98extern spinlock_t tipc_nametbl_lock;
99extern struct name_table *tipc_nametbl;
100
101int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb); 98int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb);
102 99
103struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space); 100u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance, u32 *node);
104u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node); 101int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
105int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit, 102 u32 limit, struct tipc_plist *dports);
106 struct tipc_port_list *dports); 103struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower,
107struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper, 104 u32 upper, u32 scope, u32 port_ref,
108 u32 scope, u32 port_ref, u32 key); 105 u32 key);
109int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key); 106int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref,
110struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper, 107 u32 key);
111 u32 scope, u32 node, u32 ref, 108struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type,
109 u32 lower, u32 upper, u32 scope,
110 u32 node, u32 ref, u32 key);
111struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type,
112 u32 lower, u32 node, u32 ref,
112 u32 key); 113 u32 key);
113struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower, u32 node,
114 u32 ref, u32 key);
115void tipc_nametbl_subscribe(struct tipc_subscription *s); 114void tipc_nametbl_subscribe(struct tipc_subscription *s);
116void tipc_nametbl_unsubscribe(struct tipc_subscription *s); 115void tipc_nametbl_unsubscribe(struct tipc_subscription *s);
117int tipc_nametbl_init(void); 116int tipc_nametbl_init(struct net *net);
118void tipc_nametbl_stop(void); 117void tipc_nametbl_stop(struct net *net);
118
119struct tipc_plist {
120 struct list_head list;
121 u32 port;
122};
123
124static inline void tipc_plist_init(struct tipc_plist *pl)
125{
126 INIT_LIST_HEAD(&pl->list);
127 pl->port = 0;
128}
129
130void tipc_plist_push(struct tipc_plist *pl, u32 port);
131u32 tipc_plist_pop(struct tipc_plist *pl);
119 132
120#endif 133#endif
diff --git a/net/tipc/net.c b/net/tipc/net.c
index cf13df3cde8f..a54f3cbe2246 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -40,7 +40,6 @@
40#include "subscr.h" 40#include "subscr.h"
41#include "socket.h" 41#include "socket.h"
42#include "node.h" 42#include "node.h"
43#include "config.h"
44 43
45static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = { 44static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
46 [TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC }, 45 [TIPC_NLA_NET_UNSPEC] = { .type = NLA_UNSPEC },
@@ -108,48 +107,54 @@ static const struct nla_policy tipc_nl_net_policy[TIPC_NLA_NET_MAX + 1] = {
108 * - A local spin_lock protecting the queue of subscriber events. 107 * - A local spin_lock protecting the queue of subscriber events.
109*/ 108*/
110 109
111int tipc_net_start(u32 addr) 110int tipc_net_start(struct net *net, u32 addr)
112{ 111{
112 struct tipc_net *tn = net_generic(net, tipc_net_id);
113 char addr_string[16]; 113 char addr_string[16];
114 int res; 114 int res;
115 115
116 tipc_own_addr = addr; 116 tn->own_addr = addr;
117 tipc_named_reinit(); 117 tipc_named_reinit(net);
118 tipc_sk_reinit(); 118 tipc_sk_reinit(net);
119 res = tipc_bclink_init(); 119 res = tipc_bclink_init(net);
120 if (res) 120 if (res)
121 return res; 121 return res;
122 122
123 tipc_nametbl_publish(TIPC_CFG_SRV, tipc_own_addr, tipc_own_addr, 123 tipc_nametbl_publish(net, TIPC_CFG_SRV, tn->own_addr, tn->own_addr,
124 TIPC_ZONE_SCOPE, 0, tipc_own_addr); 124 TIPC_ZONE_SCOPE, 0, tn->own_addr);
125 125
126 pr_info("Started in network mode\n"); 126 pr_info("Started in network mode\n");
127 pr_info("Own node address %s, network identity %u\n", 127 pr_info("Own node address %s, network identity %u\n",
128 tipc_addr_string_fill(addr_string, tipc_own_addr), tipc_net_id); 128 tipc_addr_string_fill(addr_string, tn->own_addr),
129 tn->net_id);
129 return 0; 130 return 0;
130} 131}
131 132
132void tipc_net_stop(void) 133void tipc_net_stop(struct net *net)
133{ 134{
134 if (!tipc_own_addr) 135 struct tipc_net *tn = net_generic(net, tipc_net_id);
136
137 if (!tn->own_addr)
135 return; 138 return;
136 139
137 tipc_nametbl_withdraw(TIPC_CFG_SRV, tipc_own_addr, 0, tipc_own_addr); 140 tipc_nametbl_withdraw(net, TIPC_CFG_SRV, tn->own_addr, 0,
141 tn->own_addr);
138 rtnl_lock(); 142 rtnl_lock();
139 tipc_bearer_stop(); 143 tipc_bearer_stop(net);
140 tipc_bclink_stop(); 144 tipc_bclink_stop(net);
141 tipc_node_stop(); 145 tipc_node_stop(net);
142 rtnl_unlock(); 146 rtnl_unlock();
143 147
144 pr_info("Left network mode\n"); 148 pr_info("Left network mode\n");
145} 149}
146 150
147static int __tipc_nl_add_net(struct tipc_nl_msg *msg) 151static int __tipc_nl_add_net(struct net *net, struct tipc_nl_msg *msg)
148{ 152{
153 struct tipc_net *tn = net_generic(net, tipc_net_id);
149 void *hdr; 154 void *hdr;
150 struct nlattr *attrs; 155 struct nlattr *attrs;
151 156
152 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family, 157 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
153 NLM_F_MULTI, TIPC_NL_NET_GET); 158 NLM_F_MULTI, TIPC_NL_NET_GET);
154 if (!hdr) 159 if (!hdr)
155 return -EMSGSIZE; 160 return -EMSGSIZE;
@@ -158,7 +163,7 @@ static int __tipc_nl_add_net(struct tipc_nl_msg *msg)
158 if (!attrs) 163 if (!attrs)
159 goto msg_full; 164 goto msg_full;
160 165
161 if (nla_put_u32(msg->skb, TIPC_NLA_NET_ID, tipc_net_id)) 166 if (nla_put_u32(msg->skb, TIPC_NLA_NET_ID, tn->net_id))
162 goto attr_msg_full; 167 goto attr_msg_full;
163 168
164 nla_nest_end(msg->skb, attrs); 169 nla_nest_end(msg->skb, attrs);
@@ -176,6 +181,7 @@ msg_full:
176 181
177int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb) 182int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb)
178{ 183{
184 struct net *net = sock_net(skb->sk);
179 int err; 185 int err;
180 int done = cb->args[0]; 186 int done = cb->args[0];
181 struct tipc_nl_msg msg; 187 struct tipc_nl_msg msg;
@@ -187,7 +193,7 @@ int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb)
187 msg.portid = NETLINK_CB(cb->skb).portid; 193 msg.portid = NETLINK_CB(cb->skb).portid;
188 msg.seq = cb->nlh->nlmsg_seq; 194 msg.seq = cb->nlh->nlmsg_seq;
189 195
190 err = __tipc_nl_add_net(&msg); 196 err = __tipc_nl_add_net(net, &msg);
191 if (err) 197 if (err)
192 goto out; 198 goto out;
193 199
@@ -200,8 +206,10 @@ out:
200 206
201int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info) 207int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
202{ 208{
203 int err; 209 struct net *net = sock_net(skb->sk);
210 struct tipc_net *tn = net_generic(net, tipc_net_id);
204 struct nlattr *attrs[TIPC_NLA_NET_MAX + 1]; 211 struct nlattr *attrs[TIPC_NLA_NET_MAX + 1];
212 int err;
205 213
206 if (!info->attrs[TIPC_NLA_NET]) 214 if (!info->attrs[TIPC_NLA_NET])
207 return -EINVAL; 215 return -EINVAL;
@@ -216,21 +224,21 @@ int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
216 u32 val; 224 u32 val;
217 225
218 /* Can't change net id once TIPC has joined a network */ 226 /* Can't change net id once TIPC has joined a network */
219 if (tipc_own_addr) 227 if (tn->own_addr)
220 return -EPERM; 228 return -EPERM;
221 229
222 val = nla_get_u32(attrs[TIPC_NLA_NET_ID]); 230 val = nla_get_u32(attrs[TIPC_NLA_NET_ID]);
223 if (val < 1 || val > 9999) 231 if (val < 1 || val > 9999)
224 return -EINVAL; 232 return -EINVAL;
225 233
226 tipc_net_id = val; 234 tn->net_id = val;
227 } 235 }
228 236
229 if (attrs[TIPC_NLA_NET_ADDR]) { 237 if (attrs[TIPC_NLA_NET_ADDR]) {
230 u32 addr; 238 u32 addr;
231 239
232 /* Can't change net addr once TIPC has joined a network */ 240 /* Can't change net addr once TIPC has joined a network */
233 if (tipc_own_addr) 241 if (tn->own_addr)
234 return -EPERM; 242 return -EPERM;
235 243
236 addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]); 244 addr = nla_get_u32(attrs[TIPC_NLA_NET_ADDR]);
@@ -238,7 +246,7 @@ int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info)
238 return -EINVAL; 246 return -EINVAL;
239 247
240 rtnl_lock(); 248 rtnl_lock();
241 tipc_net_start(addr); 249 tipc_net_start(net, addr);
242 rtnl_unlock(); 250 rtnl_unlock();
243 } 251 }
244 252
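Because the generic netlink family becomes namespace aware (see the netlink.c hunk below), the dump and set handlers above resolve the caller's namespace from the requesting socket instead of assuming init_net. The pattern on its own, as a sketch:

    /* Sketch: resolve the requester's namespace at the top of a handler. */
    static int example_nl_dump(struct sk_buff *skb, struct netlink_callback *cb)
    {
            struct net *net = sock_net(skb->sk);
            struct tipc_net *tn = net_generic(net, tipc_net_id);

            /* ... dump state reachable from tn into skb ... */
            return skb->len;
    }
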
diff --git a/net/tipc/net.h b/net/tipc/net.h
index a81c1b9eb150..77a7a118911d 100644
--- a/net/tipc/net.h
+++ b/net/tipc/net.h
@@ -39,9 +39,9 @@
39 39
40#include <net/genetlink.h> 40#include <net/genetlink.h>
41 41
42int tipc_net_start(u32 addr); 42int tipc_net_start(struct net *net, u32 addr);
43 43
44void tipc_net_stop(void); 44void tipc_net_stop(struct net *net);
45 45
46int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb); 46int tipc_nl_net_dump(struct sk_buff *skb, struct netlink_callback *cb);
47int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info); 47int tipc_nl_net_set(struct sk_buff *skb, struct genl_info *info);
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index b891e3905bc4..7f6475efc984 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -35,7 +35,6 @@
35 */ 35 */
36 36
37#include "core.h" 37#include "core.h"
38#include "config.h"
39#include "socket.h" 38#include "socket.h"
40#include "name_table.h" 39#include "name_table.h"
41#include "bearer.h" 40#include "bearer.h"
@@ -44,36 +43,6 @@
44#include "net.h" 43#include "net.h"
45#include <net/genetlink.h> 44#include <net/genetlink.h>
46 45
47static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
48{
49 struct sk_buff *rep_buf;
50 struct nlmsghdr *rep_nlh;
51 struct nlmsghdr *req_nlh = info->nlhdr;
52 struct tipc_genlmsghdr *req_userhdr = info->userhdr;
53 int hdr_space = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN);
54 u16 cmd;
55
56 if ((req_userhdr->cmd & 0xC000) && (!netlink_capable(skb, CAP_NET_ADMIN)))
57 cmd = TIPC_CMD_NOT_NET_ADMIN;
58 else
59 cmd = req_userhdr->cmd;
60
61 rep_buf = tipc_cfg_do_cmd(req_userhdr->dest, cmd,
62 nlmsg_data(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN,
63 nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN),
64 hdr_space);
65
66 if (rep_buf) {
67 skb_push(rep_buf, hdr_space);
68 rep_nlh = nlmsg_hdr(rep_buf);
69 memcpy(rep_nlh, req_nlh, hdr_space);
70 rep_nlh->nlmsg_len = rep_buf->len;
71 genlmsg_unicast(&init_net, rep_buf, NETLINK_CB(skb).portid);
72 }
73
74 return 0;
75}
76
77static const struct nla_policy tipc_nl_policy[TIPC_NLA_MAX + 1] = { 46static const struct nla_policy tipc_nl_policy[TIPC_NLA_MAX + 1] = {
78 [TIPC_NLA_UNSPEC] = { .type = NLA_UNSPEC, }, 47 [TIPC_NLA_UNSPEC] = { .type = NLA_UNSPEC, },
79 [TIPC_NLA_BEARER] = { .type = NLA_NESTED, }, 48 [TIPC_NLA_BEARER] = { .type = NLA_NESTED, },
@@ -86,32 +55,16 @@ static const struct nla_policy tipc_nl_policy[TIPC_NLA_MAX + 1] = {
86 [TIPC_NLA_NAME_TABLE] = { .type = NLA_NESTED, } 55 [TIPC_NLA_NAME_TABLE] = { .type = NLA_NESTED, }
87}; 56};
88 57
89/* Legacy ASCII API */
90static struct genl_family tipc_genl_family = {
91 .id = GENL_ID_GENERATE,
92 .name = TIPC_GENL_NAME,
93 .version = TIPC_GENL_VERSION,
94 .hdrsize = TIPC_GENL_HDRLEN,
95 .maxattr = 0,
96};
97
98/* Legacy ASCII API */
99static struct genl_ops tipc_genl_ops[] = {
100 {
101 .cmd = TIPC_GENL_CMD,
102 .doit = handle_cmd,
103 },
104};
105
106/* Users of the legacy API (tipc-config) can't handle that we add operations, 58/* Users of the legacy API (tipc-config) can't handle that we add operations,
107 * so we have a separate genl handling for the new API. 59 * so we have a separate genl handling for the new API.
108 */ 60 */
109struct genl_family tipc_genl_v2_family = { 61struct genl_family tipc_genl_family = {
110 .id = GENL_ID_GENERATE, 62 .id = GENL_ID_GENERATE,
111 .name = TIPC_GENL_V2_NAME, 63 .name = TIPC_GENL_V2_NAME,
112 .version = TIPC_GENL_V2_VERSION, 64 .version = TIPC_GENL_V2_VERSION,
113 .hdrsize = 0, 65 .hdrsize = 0,
114 .maxattr = TIPC_NLA_MAX, 66 .maxattr = TIPC_NLA_MAX,
67 .netnsok = true,
115}; 68};
116 69
117static const struct genl_ops tipc_genl_v2_ops[] = { 70static const struct genl_ops tipc_genl_v2_ops[] = {
@@ -197,9 +150,9 @@ static const struct genl_ops tipc_genl_v2_ops[] = {
197 150
198int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***attr) 151int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***attr)
199{ 152{
200 u32 maxattr = tipc_genl_v2_family.maxattr; 153 u32 maxattr = tipc_genl_family.maxattr;
201 154
202 *attr = tipc_genl_v2_family.attrbuf; 155 *attr = tipc_genl_family.attrbuf;
203 if (!*attr) 156 if (!*attr)
204 return -EOPNOTSUPP; 157 return -EOPNOTSUPP;
205 158
@@ -210,13 +163,7 @@ int tipc_netlink_start(void)
210{ 163{
211 int res; 164 int res;
212 165
213 res = genl_register_family_with_ops(&tipc_genl_family, tipc_genl_ops); 166 res = genl_register_family_with_ops(&tipc_genl_family,
214 if (res) {
215 pr_err("Failed to register legacy interface\n");
216 return res;
217 }
218
219 res = genl_register_family_with_ops(&tipc_genl_v2_family,
220 tipc_genl_v2_ops); 167 tipc_genl_v2_ops);
221 if (res) { 168 if (res) {
222 pr_err("Failed to register netlink interface\n"); 169 pr_err("Failed to register netlink interface\n");
@@ -228,5 +175,4 @@ int tipc_netlink_start(void)
228void tipc_netlink_stop(void) 175void tipc_netlink_stop(void)
229{ 176{
230 genl_unregister_family(&tipc_genl_family); 177 genl_unregister_family(&tipc_genl_family);
231 genl_unregister_family(&tipc_genl_v2_family);
232} 178}
diff --git a/net/tipc/netlink.h b/net/tipc/netlink.h
index 1425c6869de0..08a1db67b927 100644
--- a/net/tipc/netlink.h
+++ b/net/tipc/netlink.h
@@ -36,7 +36,7 @@
36#ifndef _TIPC_NETLINK_H 36#ifndef _TIPC_NETLINK_H
37#define _TIPC_NETLINK_H 37#define _TIPC_NETLINK_H
38 38
39extern struct genl_family tipc_genl_v2_family; 39extern struct genl_family tipc_genl_family;
40int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***buf); 40int tipc_nlmsg_parse(const struct nlmsghdr *nlh, struct nlattr ***buf);
41 41
42struct tipc_nl_msg { 42struct tipc_nl_msg {
@@ -45,4 +45,9 @@ struct tipc_nl_msg {
45 u32 seq; 45 u32 seq;
46}; 46};
47 47
48int tipc_netlink_start(void);
49int tipc_netlink_compat_start(void);
50void tipc_netlink_stop(void);
51void tipc_netlink_compat_stop(void);
52
48#endif 53#endif
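The header now exposes separate start/stop entry points for the new API and for the compat layer added below. A sketch of how the module init path is expected to pair them (the actual caller is core.c, outside these hunks, so the error handling here is illustrative):

    /* Sketch of the expected init-time pairing (real caller: core.c, not shown). */
    static int example_netlink_init(void)
    {
            int err;

            err = tipc_netlink_start();
            if (err)
                    return err;
            err = tipc_netlink_compat_start();
            if (err) {
                    tipc_netlink_stop();
                    return err;
            }
            /* on module exit: tipc_netlink_compat_stop(); tipc_netlink_stop(); */
            return 0;
    }
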
diff --git a/net/tipc/netlink_compat.c b/net/tipc/netlink_compat.c
new file mode 100644
index 000000000000..ce9121e8e990
--- /dev/null
+++ b/net/tipc/netlink_compat.c
@@ -0,0 +1,1084 @@
1/*
2 * Copyright (c) 2014, Ericsson AB
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are met:
7 *
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the names of the copyright holders nor the names of its
14 * contributors may be used to endorse or promote products derived from
15 * this software without specific prior written permission.
16 *
17 * Alternatively, this software may be distributed under the terms of the
18 * GNU General Public License ("GPL") version 2 as published by the Free
19 * Software Foundation.
20 *
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
22 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
25 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
26 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
29 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
31 * POSSIBILITY OF SUCH DAMAGE.
32 */
33
34#include "core.h"
35#include "bearer.h"
36#include "link.h"
37#include "name_table.h"
38#include "socket.h"
39#include "node.h"
40#include "net.h"
41#include <net/genetlink.h>
42#include <linux/tipc_config.h>
43
44/* The legacy API had an artificial message length limit called
45 * ULTRA_STRING_MAX_LEN.
46 */
47#define ULTRA_STRING_MAX_LEN 32768
48
49#define TIPC_SKB_MAX TLV_SPACE(ULTRA_STRING_MAX_LEN)
50
51#define REPLY_TRUNCATED "<truncated>\n"
52
53struct tipc_nl_compat_msg {
54 u16 cmd;
55 int rep_type;
56 int rep_size;
57 int req_type;
58 struct sk_buff *rep;
59 struct tlv_desc *req;
60 struct sock *dst_sk;
61};
62
63struct tipc_nl_compat_cmd_dump {
64 int (*header)(struct tipc_nl_compat_msg *);
65 int (*dumpit)(struct sk_buff *, struct netlink_callback *);
66 int (*format)(struct tipc_nl_compat_msg *msg, struct nlattr **attrs);
67};
68
69struct tipc_nl_compat_cmd_doit {
70 int (*doit)(struct sk_buff *skb, struct genl_info *info);
71 int (*transcode)(struct sk_buff *skb, struct tipc_nl_compat_msg *msg);
72};
73
74static int tipc_skb_tailroom(struct sk_buff *skb)
75{
76 int tailroom;
77 int limit;
78
79 tailroom = skb_tailroom(skb);
80 limit = TIPC_SKB_MAX - skb->len;
81
82 if (tailroom < limit)
83 return tailroom;
84
85 return limit;
86}
87
88static int tipc_add_tlv(struct sk_buff *skb, u16 type, void *data, u16 len)
89{
90 struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(skb);
91
92 if (tipc_skb_tailroom(skb) < TLV_SPACE(len))
93 return -EMSGSIZE;
94
95 skb_put(skb, TLV_SPACE(len));
96 tlv->tlv_type = htons(type);
97 tlv->tlv_len = htons(TLV_LENGTH(len));
98 if (len && data)
99 memcpy(TLV_DATA(tlv), data, len);
100
101 return 0;
102}
103
104static void tipc_tlv_init(struct sk_buff *skb, u16 type)
105{
106 struct tlv_desc *tlv = (struct tlv_desc *)skb->data;
107
108 TLV_SET_LEN(tlv, 0);
109 TLV_SET_TYPE(tlv, type);
110 skb_put(skb, sizeof(struct tlv_desc));
111}
112
113static int tipc_tlv_sprintf(struct sk_buff *skb, const char *fmt, ...)
114{
115 int n;
116 u16 len;
117 u32 rem;
118 char *buf;
119 struct tlv_desc *tlv;
120 va_list args;
121
122 rem = tipc_skb_tailroom(skb);
123
124 tlv = (struct tlv_desc *)skb->data;
125 len = TLV_GET_LEN(tlv);
126 buf = TLV_DATA(tlv) + len;
127
128 va_start(args, fmt);
129 n = vscnprintf(buf, rem, fmt, args);
130 va_end(args);
131
132 TLV_SET_LEN(tlv, n + len);
133 skb_put(skb, n);
134
135 return n;
136}
137
138static struct sk_buff *tipc_tlv_alloc(int size)
139{
140 int hdr_len;
141 struct sk_buff *buf;
142
143 size = TLV_SPACE(size);
144 hdr_len = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN);
145
146 buf = alloc_skb(hdr_len + size, GFP_KERNEL);
147 if (!buf)
148 return NULL;
149
150 skb_reserve(buf, hdr_len);
151
152 return buf;
153}
154
155static struct sk_buff *tipc_get_err_tlv(char *str)
156{
157 int str_len = strlen(str) + 1;
158 struct sk_buff *buf;
159
160 buf = tipc_tlv_alloc(TLV_SPACE(str_len));
161 if (buf)
162 tipc_add_tlv(buf, TIPC_TLV_ERROR_STRING, str, str_len);
163
164 return buf;
165}
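The helpers above recreate the legacy TLV wire format: tipc_tlv_alloc() leaves room for the netlink and genetlink headers, tipc_tlv_init() writes an empty descriptor, and tipc_tlv_sprintf() appends text while keeping the descriptor length in sync with the skb. A hedged sketch of a ->format() callback built on them (the callback name is illustrative):

    /* Sketch: append one human-readable line to the reply TLV that
     * tipc_nl_compat_dumpit() initialised in msg->rep. */
    static int example_format(struct tipc_nl_compat_msg *msg, struct nlattr **attrs)
    {
            tipc_tlv_sprintf(msg->rep, "example line\n");
            return 0;
    }
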
166
167static int __tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
168 struct tipc_nl_compat_msg *msg,
169 struct sk_buff *arg)
170{
171 int len = 0;
172 int err;
173 struct sk_buff *buf;
174 struct nlmsghdr *nlmsg;
175 struct netlink_callback cb;
176
177 memset(&cb, 0, sizeof(cb));
178 cb.nlh = (struct nlmsghdr *)arg->data;
179 cb.skb = arg;
180
181 buf = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
182 if (!buf)
183 return -ENOMEM;
184
185 buf->sk = msg->dst_sk;
186
187 do {
188 int rem;
189
190 len = (*cmd->dumpit)(buf, &cb);
191
192 nlmsg_for_each_msg(nlmsg, nlmsg_hdr(buf), len, rem) {
193 struct nlattr **attrs;
194
195 err = tipc_nlmsg_parse(nlmsg, &attrs);
196 if (err)
197 goto err_out;
198
199 err = (*cmd->format)(msg, attrs);
200 if (err)
201 goto err_out;
202
203 if (tipc_skb_tailroom(msg->rep) <= 1) {
204 err = -EMSGSIZE;
205 goto err_out;
206 }
207 }
208
209 skb_reset_tail_pointer(buf);
210 buf->len = 0;
211
212 } while (len);
213
214 err = 0;
215
216err_out:
217 kfree_skb(buf);
218
219 if (err == -EMSGSIZE) {
220 /* The legacy API only considered messages filling
221 * "ULTRA_STRING_MAX_LEN" to be truncated.
222 */
223 if ((TIPC_SKB_MAX - msg->rep->len) <= 1) {
224 char *tail = skb_tail_pointer(msg->rep);
225
226 if (*tail != '\0')
227 sprintf(tail - sizeof(REPLY_TRUNCATED) - 1,
228 REPLY_TRUNCATED);
229 }
230
231 return 0;
232 }
233
234 return err;
235}
236
237static int tipc_nl_compat_dumpit(struct tipc_nl_compat_cmd_dump *cmd,
238 struct tipc_nl_compat_msg *msg)
239{
240 int err;
241 struct sk_buff *arg;
242
243 if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
244 return -EINVAL;
245
246 msg->rep = tipc_tlv_alloc(msg->rep_size);
247 if (!msg->rep)
248 return -ENOMEM;
249
250 if (msg->rep_type)
251 tipc_tlv_init(msg->rep, msg->rep_type);
252
253 if (cmd->header)
254 (*cmd->header)(msg);
255
256 arg = nlmsg_new(0, GFP_KERNEL);
257 if (!arg) {
258 kfree_skb(msg->rep);
259 return -ENOMEM;
260 }
261
262 err = __tipc_nl_compat_dumpit(cmd, msg, arg);
263 if (err)
264 kfree_skb(msg->rep);
265
266 kfree_skb(arg);
267
268 return err;
269}
270
271static int __tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
272 struct tipc_nl_compat_msg *msg)
273{
274 int err;
275 struct sk_buff *doit_buf;
276 struct sk_buff *trans_buf;
277 struct nlattr **attrbuf;
278 struct genl_info info;
279
280 trans_buf = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
281 if (!trans_buf)
282 return -ENOMEM;
283
284 err = (*cmd->transcode)(trans_buf, msg);
285 if (err)
286 goto trans_out;
287
288 attrbuf = kmalloc((tipc_genl_family.maxattr + 1) *
289 sizeof(struct nlattr *), GFP_KERNEL);
290 if (!attrbuf) {
291 err = -ENOMEM;
292 goto trans_out;
293 }
294
295 err = nla_parse(attrbuf, tipc_genl_family.maxattr,
296 (const struct nlattr *)trans_buf->data,
297 trans_buf->len, NULL);
298 if (err)
299 goto parse_out;
300
301 doit_buf = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
302 if (!doit_buf) {
303 err = -ENOMEM;
304 goto parse_out;
305 }
306
307 doit_buf->sk = msg->dst_sk;
308
309 memset(&info, 0, sizeof(info));
310 info.attrs = attrbuf;
311
312 err = (*cmd->doit)(doit_buf, &info);
313
314 kfree_skb(doit_buf);
315parse_out:
316 kfree(attrbuf);
317trans_out:
318 kfree_skb(trans_buf);
319
320 return err;
321}
322
323static int tipc_nl_compat_doit(struct tipc_nl_compat_cmd_doit *cmd,
324 struct tipc_nl_compat_msg *msg)
325{
326 int err;
327
328 if (msg->req_type && !TLV_CHECK_TYPE(msg->req, msg->req_type))
329 return -EINVAL;
330
331 err = __tipc_nl_compat_doit(cmd, msg);
332 if (err)
333 return err;
334
335 /* The legacy API considered an empty message a success message */
336 msg->rep = tipc_tlv_alloc(0);
337 if (!msg->rep)
338 return -ENOMEM;
339
340 return 0;
341}
342
343static int tipc_nl_compat_bearer_dump(struct tipc_nl_compat_msg *msg,
344 struct nlattr **attrs)
345{
346 struct nlattr *bearer[TIPC_NLA_BEARER_MAX + 1];
347
348 nla_parse_nested(bearer, TIPC_NLA_BEARER_MAX, attrs[TIPC_NLA_BEARER],
349 NULL);
350
351 return tipc_add_tlv(msg->rep, TIPC_TLV_BEARER_NAME,
352 nla_data(bearer[TIPC_NLA_BEARER_NAME]),
353 nla_len(bearer[TIPC_NLA_BEARER_NAME]));
354}
355
356static int tipc_nl_compat_bearer_enable(struct sk_buff *skb,
357 struct tipc_nl_compat_msg *msg)
358{
359 struct nlattr *prop;
360 struct nlattr *bearer;
361 struct tipc_bearer_config *b;
362
363 b = (struct tipc_bearer_config *)TLV_DATA(msg->req);
364
365 bearer = nla_nest_start(skb, TIPC_NLA_BEARER);
366 if (!bearer)
367 return -EMSGSIZE;
368
369 if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, b->name))
370 return -EMSGSIZE;
371
372 if (nla_put_u32(skb, TIPC_NLA_BEARER_DOMAIN, ntohl(b->disc_domain)))
373 return -EMSGSIZE;
374
375 if (ntohl(b->priority) <= TIPC_MAX_LINK_PRI) {
376 prop = nla_nest_start(skb, TIPC_NLA_BEARER_PROP);
377 if (!prop)
378 return -EMSGSIZE;
379 if (nla_put_u32(skb, TIPC_NLA_PROP_PRIO, ntohl(b->priority)))
380 return -EMSGSIZE;
381 nla_nest_end(skb, prop);
382 }
383 nla_nest_end(skb, bearer);
384
385 return 0;
386}
387
388static int tipc_nl_compat_bearer_disable(struct sk_buff *skb,
389 struct tipc_nl_compat_msg *msg)
390{
391 char *name;
392 struct nlattr *bearer;
393
394 name = (char *)TLV_DATA(msg->req);
395
396 bearer = nla_nest_start(skb, TIPC_NLA_BEARER);
397 if (!bearer)
398 return -EMSGSIZE;
399
400 if (nla_put_string(skb, TIPC_NLA_BEARER_NAME, name))
401 return -EMSGSIZE;
402
403 nla_nest_end(skb, bearer);
404
405 return 0;
406}
407
408static inline u32 perc(u32 count, u32 total)
409{
410 return (count * 100 + (total / 2)) / total;
411}
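
perc() rounds to the nearest whole percent by adding half of the total before the integer division; the message-length profile lines printed further down rely on that rounding. A tiny standalone check, reusing the same formula purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* same formula as perc() above: rounded integer percentage */
static uint32_t perc(uint32_t count, uint32_t total)
{
	return (count * 100 + (total / 2)) / total;
}

int main(void)
{
	printf("perc(1, 3) = %u\n", perc(1, 3));   /* 33.3... -> 33 */
	printf("perc(2, 3) = %u\n", perc(2, 3));   /* 66.6... -> 67 */
	printf("perc(7, 8) = %u\n", perc(7, 8));   /* 87.5    -> 88 */
	return 0;
}
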
412
413static void __fill_bc_link_stat(struct tipc_nl_compat_msg *msg,
414 struct nlattr *prop[], struct nlattr *stats[])
415{
416 tipc_tlv_sprintf(msg->rep, " Window:%u packets\n",
417 nla_get_u32(prop[TIPC_NLA_PROP_WIN]));
418
419 tipc_tlv_sprintf(msg->rep,
420 " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
421 nla_get_u32(stats[TIPC_NLA_STATS_RX_INFO]),
422 nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTS]),
423 nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTED]),
424 nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLES]),
425 nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLED]));
426
427 tipc_tlv_sprintf(msg->rep,
428 " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
429 nla_get_u32(stats[TIPC_NLA_STATS_TX_INFO]),
430 nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTS]),
431 nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTED]),
432 nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLES]),
433 nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLED]));
434
435 tipc_tlv_sprintf(msg->rep, " RX naks:%u defs:%u dups:%u\n",
436 nla_get_u32(stats[TIPC_NLA_STATS_RX_NACKS]),
437 nla_get_u32(stats[TIPC_NLA_STATS_RX_DEFERRED]),
438 nla_get_u32(stats[TIPC_NLA_STATS_DUPLICATES]));
439
440 tipc_tlv_sprintf(msg->rep, " TX naks:%u acks:%u dups:%u\n",
441 nla_get_u32(stats[TIPC_NLA_STATS_TX_NACKS]),
442 nla_get_u32(stats[TIPC_NLA_STATS_TX_ACKS]),
443 nla_get_u32(stats[TIPC_NLA_STATS_RETRANSMITTED]));
444
445 tipc_tlv_sprintf(msg->rep,
446 " Congestion link:%u Send queue max:%u avg:%u",
447 nla_get_u32(stats[TIPC_NLA_STATS_LINK_CONGS]),
448 nla_get_u32(stats[TIPC_NLA_STATS_MAX_QUEUE]),
449 nla_get_u32(stats[TIPC_NLA_STATS_AVG_QUEUE]));
450}
451
452static int tipc_nl_compat_link_stat_dump(struct tipc_nl_compat_msg *msg,
453 struct nlattr **attrs)
454{
455 char *name;
456 struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
457 struct nlattr *prop[TIPC_NLA_PROP_MAX + 1];
458 struct nlattr *stats[TIPC_NLA_STATS_MAX + 1];
459
460 nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL);
461
462 nla_parse_nested(prop, TIPC_NLA_PROP_MAX, link[TIPC_NLA_LINK_PROP],
463 NULL);
464
465 nla_parse_nested(stats, TIPC_NLA_STATS_MAX, link[TIPC_NLA_LINK_STATS],
466 NULL);
467
468 name = (char *)TLV_DATA(msg->req);
469 if (strcmp(name, nla_data(link[TIPC_NLA_LINK_NAME])) != 0)
470 return 0;
471
472 tipc_tlv_sprintf(msg->rep, "\nLink <%s>\n",
473 nla_data(link[TIPC_NLA_LINK_NAME]));
474
475 if (link[TIPC_NLA_LINK_BROADCAST]) {
476 __fill_bc_link_stat(msg, prop, stats);
477 return 0;
478 }
479
480 if (link[TIPC_NLA_LINK_ACTIVE])
481 tipc_tlv_sprintf(msg->rep, " ACTIVE");
482 else if (link[TIPC_NLA_LINK_UP])
483 tipc_tlv_sprintf(msg->rep, " STANDBY");
484 else
485 tipc_tlv_sprintf(msg->rep, " DEFUNCT");
486
487 tipc_tlv_sprintf(msg->rep, " MTU:%u Priority:%u",
488 nla_get_u32(link[TIPC_NLA_LINK_MTU]),
489 nla_get_u32(prop[TIPC_NLA_PROP_PRIO]));
490
491 tipc_tlv_sprintf(msg->rep, " Tolerance:%u ms Window:%u packets\n",
492 nla_get_u32(prop[TIPC_NLA_PROP_TOL]),
493 nla_get_u32(prop[TIPC_NLA_PROP_WIN]));
494
495 tipc_tlv_sprintf(msg->rep,
496 " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
497 nla_get_u32(link[TIPC_NLA_LINK_RX]) -
498 nla_get_u32(stats[TIPC_NLA_STATS_RX_INFO]),
499 nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTS]),
500 nla_get_u32(stats[TIPC_NLA_STATS_RX_FRAGMENTED]),
501 nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLES]),
502 nla_get_u32(stats[TIPC_NLA_STATS_RX_BUNDLED]));
503
504 tipc_tlv_sprintf(msg->rep,
505 " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
506 nla_get_u32(link[TIPC_NLA_LINK_TX]) -
507 nla_get_u32(stats[TIPC_NLA_STATS_TX_INFO]),
508 nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTS]),
509 nla_get_u32(stats[TIPC_NLA_STATS_TX_FRAGMENTED]),
510 nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLES]),
511 nla_get_u32(stats[TIPC_NLA_STATS_TX_BUNDLED]));
512
513 tipc_tlv_sprintf(msg->rep,
514 " TX profile sample:%u packets average:%u octets\n",
515 nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_CNT]),
516 nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_TOT]) /
517 nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT]));
518
519 tipc_tlv_sprintf(msg->rep,
520 " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% ",
521 perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P0]),
522 nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
523 perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P1]),
524 nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
525 perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P2]),
526 nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
527 perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P3]),
528 nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])));
529
530 tipc_tlv_sprintf(msg->rep, "-16384:%u%% -32768:%u%% -66000:%u%%\n",
531 perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P4]),
532 nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
533 perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P5]),
534 nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])),
535 perc(nla_get_u32(stats[TIPC_NLA_STATS_MSG_LEN_P6]),
536 nla_get_u32(stats[TIPC_NLA_STATS_MSG_PROF_TOT])));
537
538 tipc_tlv_sprintf(msg->rep,
539 " RX states:%u probes:%u naks:%u defs:%u dups:%u\n",
540 nla_get_u32(stats[TIPC_NLA_STATS_RX_STATES]),
541 nla_get_u32(stats[TIPC_NLA_STATS_RX_PROBES]),
542 nla_get_u32(stats[TIPC_NLA_STATS_RX_NACKS]),
543 nla_get_u32(stats[TIPC_NLA_STATS_RX_DEFERRED]),
544 nla_get_u32(stats[TIPC_NLA_STATS_DUPLICATES]));
545
546 tipc_tlv_sprintf(msg->rep,
547 " TX states:%u probes:%u naks:%u acks:%u dups:%u\n",
548 nla_get_u32(stats[TIPC_NLA_STATS_TX_STATES]),
549 nla_get_u32(stats[TIPC_NLA_STATS_TX_PROBES]),
550 nla_get_u32(stats[TIPC_NLA_STATS_TX_NACKS]),
551 nla_get_u32(stats[TIPC_NLA_STATS_TX_ACKS]),
552 nla_get_u32(stats[TIPC_NLA_STATS_RETRANSMITTED]));
553
554 tipc_tlv_sprintf(msg->rep,
555 " Congestion link:%u Send queue max:%u avg:%u",
556 nla_get_u32(stats[TIPC_NLA_STATS_LINK_CONGS]),
557 nla_get_u32(stats[TIPC_NLA_STATS_MAX_QUEUE]),
558 nla_get_u32(stats[TIPC_NLA_STATS_AVG_QUEUE]));
559
560 return 0;
561}
562
563static int tipc_nl_compat_link_dump(struct tipc_nl_compat_msg *msg,
564 struct nlattr **attrs)
565{
566 struct nlattr *link[TIPC_NLA_LINK_MAX + 1];
567 struct tipc_link_info link_info;
568
569 nla_parse_nested(link, TIPC_NLA_LINK_MAX, attrs[TIPC_NLA_LINK], NULL);
570
571 link_info.dest = nla_get_flag(link[TIPC_NLA_LINK_DEST]);
572 link_info.up = htonl(nla_get_flag(link[TIPC_NLA_LINK_UP]));
573 strcpy(link_info.str, nla_data(link[TIPC_NLA_LINK_NAME]));
574
575 return tipc_add_tlv(msg->rep, TIPC_TLV_LINK_INFO,
576 &link_info, sizeof(link_info));
577}
578
579static int tipc_nl_compat_link_set(struct sk_buff *skb,
580 struct tipc_nl_compat_msg *msg)
581{
582 struct nlattr *link;
583 struct nlattr *prop;
584 struct tipc_link_config *lc;
585
586 lc = (struct tipc_link_config *)TLV_DATA(msg->req);
587
588 link = nla_nest_start(skb, TIPC_NLA_LINK);
589 if (!link)
590 return -EMSGSIZE;
591
592 if (nla_put_string(skb, TIPC_NLA_LINK_NAME, lc->name))
593 return -EMSGSIZE;
594
595 prop = nla_nest_start(skb, TIPC_NLA_LINK_PROP);
596 if (!prop)
597 return -EMSGSIZE;
598
599 if (msg->cmd == TIPC_CMD_SET_LINK_PRI) {
600 if (nla_put_u32(skb, TIPC_NLA_PROP_PRIO, ntohl(lc->value)))
601 return -EMSGSIZE;
602 } else if (msg->cmd == TIPC_CMD_SET_LINK_TOL) {
603 if (nla_put_u32(skb, TIPC_NLA_PROP_TOL, ntohl(lc->value)))
604 return -EMSGSIZE;
605 } else if (msg->cmd == TIPC_CMD_SET_LINK_WINDOW) {
606 if (nla_put_u32(skb, TIPC_NLA_PROP_WIN, ntohl(lc->value)))
607 return -EMSGSIZE;
608 }
609
610 nla_nest_end(skb, prop);
611 nla_nest_end(skb, link);
612
613 return 0;
614}
615
616static int tipc_nl_compat_link_reset_stats(struct sk_buff *skb,
617 struct tipc_nl_compat_msg *msg)
618{
619 char *name;
620 struct nlattr *link;
621
622 name = (char *)TLV_DATA(msg->req);
623
624 link = nla_nest_start(skb, TIPC_NLA_LINK);
625 if (!link)
626 return -EMSGSIZE;
627
628 if (nla_put_string(skb, TIPC_NLA_LINK_NAME, name))
629 return -EMSGSIZE;
630
631 nla_nest_end(skb, link);
632
633 return 0;
634}
635
636static int tipc_nl_compat_name_table_dump_header(struct tipc_nl_compat_msg *msg)
637{
638 int i;
639 u32 depth;
640 struct tipc_name_table_query *ntq;
641 static const char * const header[] = {
642 "Type ",
643 "Lower Upper ",
644 "Port Identity ",
645 "Publication Scope"
646 };
647
648 ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
649
650 depth = ntohl(ntq->depth);
651
652 if (depth > 4)
653 depth = 4;
654 for (i = 0; i < depth; i++)
655 tipc_tlv_sprintf(msg->rep, header[i]);
656 tipc_tlv_sprintf(msg->rep, "\n");
657
658 return 0;
659}
660
661static int tipc_nl_compat_name_table_dump(struct tipc_nl_compat_msg *msg,
662 struct nlattr **attrs)
663{
664 char port_str[27];
665 struct tipc_name_table_query *ntq;
666 struct nlattr *nt[TIPC_NLA_NAME_TABLE_MAX + 1];
667 struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1];
668 u32 node, depth, type, lowbound, upbound;
669 static const char * const scope_str[] = {"", " zone", " cluster",
670 " node"};
671
672 nla_parse_nested(nt, TIPC_NLA_NAME_TABLE_MAX,
673 attrs[TIPC_NLA_NAME_TABLE], NULL);
674
675 nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, nt[TIPC_NLA_NAME_TABLE_PUBL],
676 NULL);
677
678 ntq = (struct tipc_name_table_query *)TLV_DATA(msg->req);
679
680 depth = ntohl(ntq->depth);
681 type = ntohl(ntq->type);
682 lowbound = ntohl(ntq->lowbound);
683 upbound = ntohl(ntq->upbound);
684
685 if (!(depth & TIPC_NTQ_ALLTYPES) &&
686 (type != nla_get_u32(publ[TIPC_NLA_PUBL_TYPE])))
687 return 0;
688 if (lowbound && (lowbound > nla_get_u32(publ[TIPC_NLA_PUBL_UPPER])))
689 return 0;
690 if (upbound && (upbound < nla_get_u32(publ[TIPC_NLA_PUBL_LOWER])))
691 return 0;
692
693 tipc_tlv_sprintf(msg->rep, "%-10u ",
694 nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]));
695
696 if (depth == 1)
697 goto out;
698
699 tipc_tlv_sprintf(msg->rep, "%-10u %-10u ",
700 nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]),
701 nla_get_u32(publ[TIPC_NLA_PUBL_UPPER]));
702
703 if (depth == 2)
704 goto out;
705
706 node = nla_get_u32(publ[TIPC_NLA_PUBL_NODE]);
707 sprintf(port_str, "<%u.%u.%u:%u>", tipc_zone(node), tipc_cluster(node),
708 tipc_node(node), nla_get_u32(publ[TIPC_NLA_PUBL_REF]));
709 tipc_tlv_sprintf(msg->rep, "%-26s ", port_str);
710
711 if (depth == 3)
712 goto out;
713
714 tipc_tlv_sprintf(msg->rep, "%-10u %s",
715 nla_get_u32(publ[TIPC_NLA_PUBL_REF]),
716 scope_str[nla_get_u32(publ[TIPC_NLA_PUBL_SCOPE])]);
717out:
718 tipc_tlv_sprintf(msg->rep, "\n");
719
720 return 0;
721}
722
723static int __tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg,
724 struct nlattr **attrs)
725{
726 u32 type, lower, upper;
727 struct nlattr *publ[TIPC_NLA_PUBL_MAX + 1];
728
729 nla_parse_nested(publ, TIPC_NLA_PUBL_MAX, attrs[TIPC_NLA_PUBL], NULL);
730
731 type = nla_get_u32(publ[TIPC_NLA_PUBL_TYPE]);
732 lower = nla_get_u32(publ[TIPC_NLA_PUBL_LOWER]);
733 upper = nla_get_u32(publ[TIPC_NLA_PUBL_UPPER]);
734
735 if (lower == upper)
736 tipc_tlv_sprintf(msg->rep, " {%u,%u}", type, lower);
737 else
738 tipc_tlv_sprintf(msg->rep, " {%u,%u,%u}", type, lower, upper);
739
740 return 0;
741}
742
743static int tipc_nl_compat_publ_dump(struct tipc_nl_compat_msg *msg, u32 sock)
744{
745 int err;
746 void *hdr;
747 struct nlattr *nest;
748 struct sk_buff *args;
749 struct tipc_nl_compat_cmd_dump dump;
750
751 args = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
752 if (!args)
753 return -ENOMEM;
754
755 hdr = genlmsg_put(args, 0, 0, &tipc_genl_family, NLM_F_MULTI,
756 TIPC_NL_PUBL_GET);
757
758 nest = nla_nest_start(args, TIPC_NLA_SOCK);
759 if (!nest) {
760 kfree_skb(args);
761 return -EMSGSIZE;
762 }
763
764 if (nla_put_u32(args, TIPC_NLA_SOCK_REF, sock)) {
765 kfree_skb(args);
766 return -EMSGSIZE;
767 }
768
769 nla_nest_end(args, nest);
770 genlmsg_end(args, hdr);
771
772 dump.dumpit = tipc_nl_publ_dump;
773 dump.format = __tipc_nl_compat_publ_dump;
774
775 err = __tipc_nl_compat_dumpit(&dump, msg, args);
776
777 kfree_skb(args);
778
779 return err;
780}
781
782static int tipc_nl_compat_sk_dump(struct tipc_nl_compat_msg *msg,
783 struct nlattr **attrs)
784{
785 int err;
786 u32 sock_ref;
787 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
788
789 nla_parse_nested(sock, TIPC_NLA_SOCK_MAX, attrs[TIPC_NLA_SOCK], NULL);
790
791 sock_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
792 tipc_tlv_sprintf(msg->rep, "%u:", sock_ref);
793
794 if (sock[TIPC_NLA_SOCK_CON]) {
795 u32 node;
796 struct nlattr *con[TIPC_NLA_CON_MAX + 1];
797
798 nla_parse_nested(con, TIPC_NLA_CON_MAX, sock[TIPC_NLA_SOCK_CON],
799 NULL);
800
801 node = nla_get_u32(con[TIPC_NLA_CON_NODE]);
802 tipc_tlv_sprintf(msg->rep, " connected to <%u.%u.%u:%u>",
803 tipc_zone(node),
804 tipc_cluster(node),
805 tipc_node(node),
806 nla_get_u32(con[TIPC_NLA_CON_SOCK]));
807
808 if (con[TIPC_NLA_CON_FLAG])
809 tipc_tlv_sprintf(msg->rep, " via {%u,%u}\n",
810 nla_get_u32(con[TIPC_NLA_CON_TYPE]),
811 nla_get_u32(con[TIPC_NLA_CON_INST]));
812 else
813 tipc_tlv_sprintf(msg->rep, "\n");
814 } else if (sock[TIPC_NLA_SOCK_HAS_PUBL]) {
815 tipc_tlv_sprintf(msg->rep, " bound to");
816
817 err = tipc_nl_compat_publ_dump(msg, sock_ref);
818 if (err)
819 return err;
820 }
821 tipc_tlv_sprintf(msg->rep, "\n");
822
823 return 0;
824}
825
826static int tipc_nl_compat_media_dump(struct tipc_nl_compat_msg *msg,
827 struct nlattr **attrs)
828{
829 struct nlattr *media[TIPC_NLA_MEDIA_MAX + 1];
830
831 nla_parse_nested(media, TIPC_NLA_MEDIA_MAX, attrs[TIPC_NLA_MEDIA],
832 NULL);
833
834 return tipc_add_tlv(msg->rep, TIPC_TLV_MEDIA_NAME,
835 nla_data(media[TIPC_NLA_MEDIA_NAME]),
836 nla_len(media[TIPC_NLA_MEDIA_NAME]));
837}
838
839static int tipc_nl_compat_node_dump(struct tipc_nl_compat_msg *msg,
840 struct nlattr **attrs)
841{
842 struct tipc_node_info node_info;
843 struct nlattr *node[TIPC_NLA_NODE_MAX + 1];
844
845 nla_parse_nested(node, TIPC_NLA_NODE_MAX, attrs[TIPC_NLA_NODE], NULL);
846
847 node_info.addr = htonl(nla_get_u32(node[TIPC_NLA_NODE_ADDR]));
848 node_info.up = htonl(nla_get_flag(node[TIPC_NLA_NODE_UP]));
849
850 return tipc_add_tlv(msg->rep, TIPC_TLV_NODE_INFO, &node_info,
851 sizeof(node_info));
852}
853
854static int tipc_nl_compat_net_set(struct sk_buff *skb,
855 struct tipc_nl_compat_msg *msg)
856{
857 u32 val;
858 struct nlattr *net;
859
860 val = ntohl(*(__be32 *)TLV_DATA(msg->req));
861
862 net = nla_nest_start(skb, TIPC_NLA_NET);
863 if (!net)
864 return -EMSGSIZE;
865
866 if (msg->cmd == TIPC_CMD_SET_NODE_ADDR) {
867 if (nla_put_u32(skb, TIPC_NLA_NET_ADDR, val))
868 return -EMSGSIZE;
869 } else if (msg->cmd == TIPC_CMD_SET_NETID) {
870 if (nla_put_u32(skb, TIPC_NLA_NET_ID, val))
871 return -EMSGSIZE;
872 }
873 nla_nest_end(skb, net);
874
875 return 0;
876}
877
878static int tipc_nl_compat_net_dump(struct tipc_nl_compat_msg *msg,
879 struct nlattr **attrs)
880{
881 __be32 id;
882 struct nlattr *net[TIPC_NLA_NET_MAX + 1];
883
884 nla_parse_nested(net, TIPC_NLA_NET_MAX, attrs[TIPC_NLA_NET], NULL);
885 id = htonl(nla_get_u32(net[TIPC_NLA_NET_ID]));
886
887 return tipc_add_tlv(msg->rep, TIPC_TLV_UNSIGNED, &id, sizeof(id));
888}
889
890static int tipc_cmd_show_stats_compat(struct tipc_nl_compat_msg *msg)
891{
892 msg->rep = tipc_tlv_alloc(ULTRA_STRING_MAX_LEN);
893 if (!msg->rep)
894 return -ENOMEM;
895
896 tipc_tlv_init(msg->rep, TIPC_TLV_ULTRA_STRING);
897 tipc_tlv_sprintf(msg->rep, "TIPC version " TIPC_MOD_VER "\n");
898
899 return 0;
900}
901
902static int tipc_nl_compat_handle(struct tipc_nl_compat_msg *msg)
903{
904 struct tipc_nl_compat_cmd_dump dump;
905 struct tipc_nl_compat_cmd_doit doit;
906
907 memset(&dump, 0, sizeof(dump));
908 memset(&doit, 0, sizeof(doit));
909
910 switch (msg->cmd) {
911 case TIPC_CMD_NOOP:
912 msg->rep = tipc_tlv_alloc(0);
913 if (!msg->rep)
914 return -ENOMEM;
915 return 0;
916 case TIPC_CMD_GET_BEARER_NAMES:
917 msg->rep_size = MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME);
918 dump.dumpit = tipc_nl_bearer_dump;
919 dump.format = tipc_nl_compat_bearer_dump;
920 return tipc_nl_compat_dumpit(&dump, msg);
921 case TIPC_CMD_ENABLE_BEARER:
922 msg->req_type = TIPC_TLV_BEARER_CONFIG;
923 doit.doit = tipc_nl_bearer_enable;
924 doit.transcode = tipc_nl_compat_bearer_enable;
925 return tipc_nl_compat_doit(&doit, msg);
926 case TIPC_CMD_DISABLE_BEARER:
927 msg->req_type = TIPC_TLV_BEARER_NAME;
928 doit.doit = tipc_nl_bearer_disable;
929 doit.transcode = tipc_nl_compat_bearer_disable;
930 return tipc_nl_compat_doit(&doit, msg);
931 case TIPC_CMD_SHOW_LINK_STATS:
932 msg->req_type = TIPC_TLV_LINK_NAME;
933 msg->rep_size = ULTRA_STRING_MAX_LEN;
934 msg->rep_type = TIPC_TLV_ULTRA_STRING;
935 dump.dumpit = tipc_nl_link_dump;
936 dump.format = tipc_nl_compat_link_stat_dump;
937 return tipc_nl_compat_dumpit(&dump, msg);
938 case TIPC_CMD_GET_LINKS:
939 msg->req_type = TIPC_TLV_NET_ADDR;
940 msg->rep_size = ULTRA_STRING_MAX_LEN;
941 dump.dumpit = tipc_nl_link_dump;
942 dump.format = tipc_nl_compat_link_dump;
943 return tipc_nl_compat_dumpit(&dump, msg);
944 case TIPC_CMD_SET_LINK_TOL:
945 case TIPC_CMD_SET_LINK_PRI:
946 case TIPC_CMD_SET_LINK_WINDOW:
947 msg->req_type = TIPC_TLV_LINK_CONFIG;
948 doit.doit = tipc_nl_link_set;
949 doit.transcode = tipc_nl_compat_link_set;
950 return tipc_nl_compat_doit(&doit, msg);
951 case TIPC_CMD_RESET_LINK_STATS:
952 msg->req_type = TIPC_TLV_LINK_NAME;
953 doit.doit = tipc_nl_link_reset_stats;
954 doit.transcode = tipc_nl_compat_link_reset_stats;
955 return tipc_nl_compat_doit(&doit, msg);
956 case TIPC_CMD_SHOW_NAME_TABLE:
957 msg->req_type = TIPC_TLV_NAME_TBL_QUERY;
958 msg->rep_size = ULTRA_STRING_MAX_LEN;
959 msg->rep_type = TIPC_TLV_ULTRA_STRING;
960 dump.header = tipc_nl_compat_name_table_dump_header;
961 dump.dumpit = tipc_nl_name_table_dump;
962 dump.format = tipc_nl_compat_name_table_dump;
963 return tipc_nl_compat_dumpit(&dump, msg);
964 case TIPC_CMD_SHOW_PORTS:
965 msg->rep_size = ULTRA_STRING_MAX_LEN;
966 msg->rep_type = TIPC_TLV_ULTRA_STRING;
967 dump.dumpit = tipc_nl_sk_dump;
968 dump.format = tipc_nl_compat_sk_dump;
969 return tipc_nl_compat_dumpit(&dump, msg);
970 case TIPC_CMD_GET_MEDIA_NAMES:
971 msg->rep_size = MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME);
972 dump.dumpit = tipc_nl_media_dump;
973 dump.format = tipc_nl_compat_media_dump;
974 return tipc_nl_compat_dumpit(&dump, msg);
975 case TIPC_CMD_GET_NODES:
976 msg->rep_size = ULTRA_STRING_MAX_LEN;
977 dump.dumpit = tipc_nl_node_dump;
978 dump.format = tipc_nl_compat_node_dump;
979 return tipc_nl_compat_dumpit(&dump, msg);
980 case TIPC_CMD_SET_NODE_ADDR:
981 msg->req_type = TIPC_TLV_NET_ADDR;
982 doit.doit = tipc_nl_net_set;
983 doit.transcode = tipc_nl_compat_net_set;
984 return tipc_nl_compat_doit(&doit, msg);
985 case TIPC_CMD_SET_NETID:
986 msg->req_type = TIPC_TLV_UNSIGNED;
987 doit.doit = tipc_nl_net_set;
988 doit.transcode = tipc_nl_compat_net_set;
989 return tipc_nl_compat_doit(&doit, msg);
990 case TIPC_CMD_GET_NETID:
991 msg->rep_size = sizeof(u32);
992 dump.dumpit = tipc_nl_net_dump;
993 dump.format = tipc_nl_compat_net_dump;
994 return tipc_nl_compat_dumpit(&dump, msg);
995 case TIPC_CMD_SHOW_STATS:
996 return tipc_cmd_show_stats_compat(msg);
997 }
998
999 return -EOPNOTSUPP;
1000}
1001
1002static int tipc_nl_compat_recv(struct sk_buff *skb, struct genl_info *info)
1003{
1004 int err;
1005 int len;
1006 struct tipc_nl_compat_msg msg;
1007 struct nlmsghdr *req_nlh;
1008 struct nlmsghdr *rep_nlh;
1009 struct tipc_genlmsghdr *req_userhdr = info->userhdr;
1010 struct net *net = genl_info_net(info);
1011
1012 memset(&msg, 0, sizeof(msg));
1013
1014 req_nlh = (struct nlmsghdr *)skb->data;
1015 msg.req = nlmsg_data(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN;
1016 msg.cmd = req_userhdr->cmd;
1017 msg.dst_sk = info->dst_sk;
1018
1019 if ((msg.cmd & 0xC000) && (!netlink_net_capable(skb, CAP_NET_ADMIN))) {
1020 msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_NET_ADMIN);
1021 err = -EACCES;
1022 goto send;
1023 }
1024
1025 len = nlmsg_attrlen(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN);
1026 if (TLV_GET_LEN(msg.req) && !TLV_OK(msg.req, len)) {
1027 msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
1028 err = -EOPNOTSUPP;
1029 goto send;
1030 }
1031
1032 err = tipc_nl_compat_handle(&msg);
1033 if (err == -EOPNOTSUPP)
1034 msg.rep = tipc_get_err_tlv(TIPC_CFG_NOT_SUPPORTED);
1035 else if (err == -EINVAL)
1036 msg.rep = tipc_get_err_tlv(TIPC_CFG_TLV_ERROR);
1037send:
1038 if (!msg.rep)
1039 return err;
1040
1041 len = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN);
1042 skb_push(msg.rep, len);
1043 rep_nlh = nlmsg_hdr(msg.rep);
1044 memcpy(rep_nlh, info->nlhdr, len);
1045 rep_nlh->nlmsg_len = msg.rep->len;
1046 genlmsg_unicast(net, msg.rep, NETLINK_CB(skb).portid);
1047
1048 return err;
1049}
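
tipc_nl_compat_recv() above expects the legacy command to sit in the TIPC-specific user header that follows the generic netlink header, with at most one TLV behind it as the argument. Below is a hedged user-space sketch of that request layout for the simplest case, TIPC_CMD_NOOP; the struct tipc_genlmsghdr layout and the TIPC_GENL_CMD, TIPC_GENL_VERSION and TIPC_CMD_NOOP values are assumptions recalled from the legacy tipc_config.h uapi header, and resolving the "TIPC" family id and actually sending the message over a NETLINK_GENERIC socket are left out:

#include <linux/netlink.h>
#include <linux/genetlink.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* legacy TIPC user header carried after the genlmsghdr (assumed layout) */
struct tipc_genlmsghdr {
	uint32_t dest;           /* destination address */
	uint16_t cmd;            /* legacy TIPC_CMD_* value */
	uint16_t reserved;
};

#define TIPC_GENL_CMD     0x1      /* assumed value */
#define TIPC_GENL_VERSION 0x1      /* assumed value */
#define TIPC_CMD_NOOP     0x0000   /* assumed value */

int main(void)
{
	/* member offsets happen to coincide with the aligned netlink layout */
	struct legacy_req {
		struct nlmsghdr nlh;           /* nlmsg_type = resolved "TIPC" family id */
		struct genlmsghdr genl;        /* always TIPC_GENL_CMD on the compat path */
		struct tipc_genlmsghdr tipc;   /* legacy command lives here */
		/* a single TLV argument, if the command takes one, would follow */
	} req;

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = sizeof(req);
	req.nlh.nlmsg_flags = NLM_F_REQUEST;
	req.genl.cmd = TIPC_GENL_CMD;
	req.genl.version = TIPC_GENL_VERSION;
	req.tipc.cmd = TIPC_CMD_NOOP;

	printf("legacy NOOP request: %u bytes before any TLV payload\n",
	       req.nlh.nlmsg_len);
	return 0;
}
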
1050
1051static struct genl_family tipc_genl_compat_family = {
1052 .id = GENL_ID_GENERATE,
1053 .name = TIPC_GENL_NAME,
1054 .version = TIPC_GENL_VERSION,
1055 .hdrsize = TIPC_GENL_HDRLEN,
1056 .maxattr = 0,
1057 .netnsok = true,
1058};
1059
1060static struct genl_ops tipc_genl_compat_ops[] = {
1061 {
1062 .cmd = TIPC_GENL_CMD,
1063 .doit = tipc_nl_compat_recv,
1064 },
1065};
1066
1067int tipc_netlink_compat_start(void)
1068{
1069 int res;
1070
1071 res = genl_register_family_with_ops(&tipc_genl_compat_family,
1072 tipc_genl_compat_ops);
1073 if (res) {
1074 pr_err("Failed to register legacy compat interface\n");
1075 return res;
1076 }
1077
1078 return 0;
1079}
1080
1081void tipc_netlink_compat_stop(void)
1082{
1083 genl_unregister_family(&tipc_genl_compat_family);
1084}
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 8d353ec77a66..86152de8248d 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -35,22 +35,14 @@
35 */ 35 */
36 36
37#include "core.h" 37#include "core.h"
38#include "config.h" 38#include "link.h"
39#include "node.h" 39#include "node.h"
40#include "name_distr.h" 40#include "name_distr.h"
41#include "socket.h" 41#include "socket.h"
42 42
43#define NODE_HTABLE_SIZE 512
44
45static void node_lost_contact(struct tipc_node *n_ptr); 43static void node_lost_contact(struct tipc_node *n_ptr);
46static void node_established_contact(struct tipc_node *n_ptr); 44static void node_established_contact(struct tipc_node *n_ptr);
47 45
48static struct hlist_head node_htable[NODE_HTABLE_SIZE];
49LIST_HEAD(tipc_node_list);
50static u32 tipc_num_nodes;
51static u32 tipc_num_links;
52static DEFINE_SPINLOCK(node_list_lock);
53
54struct tipc_sock_conn { 46struct tipc_sock_conn {
55 u32 port; 47 u32 port;
56 u32 peer_port; 48 u32 peer_port;
@@ -78,15 +70,17 @@ static unsigned int tipc_hashfn(u32 addr)
78/* 70/*
79 * tipc_node_find - locate specified node object, if it exists 71 * tipc_node_find - locate specified node object, if it exists
80 */ 72 */
81struct tipc_node *tipc_node_find(u32 addr) 73struct tipc_node *tipc_node_find(struct net *net, u32 addr)
82{ 74{
75 struct tipc_net *tn = net_generic(net, tipc_net_id);
83 struct tipc_node *node; 76 struct tipc_node *node;
84 77
85 if (unlikely(!in_own_cluster_exact(addr))) 78 if (unlikely(!in_own_cluster_exact(net, addr)))
86 return NULL; 79 return NULL;
87 80
88 rcu_read_lock(); 81 rcu_read_lock();
89 hlist_for_each_entry_rcu(node, &node_htable[tipc_hashfn(addr)], hash) { 82 hlist_for_each_entry_rcu(node, &tn->node_htable[tipc_hashfn(addr)],
83 hash) {
90 if (node->addr == addr) { 84 if (node->addr == addr) {
91 rcu_read_unlock(); 85 rcu_read_unlock();
92 return node; 86 return node;
@@ -96,72 +90,68 @@ struct tipc_node *tipc_node_find(u32 addr)
96 return NULL; 90 return NULL;
97} 91}
98 92
99struct tipc_node *tipc_node_create(u32 addr) 93struct tipc_node *tipc_node_create(struct net *net, u32 addr)
100{ 94{
95 struct tipc_net *tn = net_generic(net, tipc_net_id);
101 struct tipc_node *n_ptr, *temp_node; 96 struct tipc_node *n_ptr, *temp_node;
102 97
103 spin_lock_bh(&node_list_lock); 98 spin_lock_bh(&tn->node_list_lock);
104 99 n_ptr = tipc_node_find(net, addr);
100 if (n_ptr)
101 goto exit;
105 n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC); 102 n_ptr = kzalloc(sizeof(*n_ptr), GFP_ATOMIC);
106 if (!n_ptr) { 103 if (!n_ptr) {
107 spin_unlock_bh(&node_list_lock);
108 pr_warn("Node creation failed, no memory\n"); 104 pr_warn("Node creation failed, no memory\n");
109 return NULL; 105 goto exit;
110 } 106 }
111
112 n_ptr->addr = addr; 107 n_ptr->addr = addr;
108 n_ptr->net = net;
113 spin_lock_init(&n_ptr->lock); 109 spin_lock_init(&n_ptr->lock);
114 INIT_HLIST_NODE(&n_ptr->hash); 110 INIT_HLIST_NODE(&n_ptr->hash);
115 INIT_LIST_HEAD(&n_ptr->list); 111 INIT_LIST_HEAD(&n_ptr->list);
116 INIT_LIST_HEAD(&n_ptr->publ_list); 112 INIT_LIST_HEAD(&n_ptr->publ_list);
117 INIT_LIST_HEAD(&n_ptr->conn_sks); 113 INIT_LIST_HEAD(&n_ptr->conn_sks);
118 skb_queue_head_init(&n_ptr->waiting_sks);
119 __skb_queue_head_init(&n_ptr->bclink.deferred_queue); 114 __skb_queue_head_init(&n_ptr->bclink.deferred_queue);
120 115 hlist_add_head_rcu(&n_ptr->hash, &tn->node_htable[tipc_hashfn(addr)]);
121 hlist_add_head_rcu(&n_ptr->hash, &node_htable[tipc_hashfn(addr)]); 116 list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
122
123 list_for_each_entry_rcu(temp_node, &tipc_node_list, list) {
124 if (n_ptr->addr < temp_node->addr) 117 if (n_ptr->addr < temp_node->addr)
125 break; 118 break;
126 } 119 }
127 list_add_tail_rcu(&n_ptr->list, &temp_node->list); 120 list_add_tail_rcu(&n_ptr->list, &temp_node->list);
128 n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN; 121 n_ptr->action_flags = TIPC_WAIT_PEER_LINKS_DOWN;
129 n_ptr->signature = INVALID_NODE_SIG; 122 n_ptr->signature = INVALID_NODE_SIG;
130 123exit:
131 tipc_num_nodes++; 124 spin_unlock_bh(&tn->node_list_lock);
132
133 spin_unlock_bh(&node_list_lock);
134 return n_ptr; 125 return n_ptr;
135} 126}
136 127
137static void tipc_node_delete(struct tipc_node *n_ptr) 128static void tipc_node_delete(struct tipc_net *tn, struct tipc_node *n_ptr)
138{ 129{
139 list_del_rcu(&n_ptr->list); 130 list_del_rcu(&n_ptr->list);
140 hlist_del_rcu(&n_ptr->hash); 131 hlist_del_rcu(&n_ptr->hash);
141 kfree_rcu(n_ptr, rcu); 132 kfree_rcu(n_ptr, rcu);
142
143 tipc_num_nodes--;
144} 133}
145 134
146void tipc_node_stop(void) 135void tipc_node_stop(struct net *net)
147{ 136{
137 struct tipc_net *tn = net_generic(net, tipc_net_id);
148 struct tipc_node *node, *t_node; 138 struct tipc_node *node, *t_node;
149 139
150 spin_lock_bh(&node_list_lock); 140 spin_lock_bh(&tn->node_list_lock);
151 list_for_each_entry_safe(node, t_node, &tipc_node_list, list) 141 list_for_each_entry_safe(node, t_node, &tn->node_list, list)
152 tipc_node_delete(node); 142 tipc_node_delete(tn, node);
153 spin_unlock_bh(&node_list_lock); 143 spin_unlock_bh(&tn->node_list_lock);
154} 144}
155 145
156int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port) 146int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port)
157{ 147{
158 struct tipc_node *node; 148 struct tipc_node *node;
159 struct tipc_sock_conn *conn; 149 struct tipc_sock_conn *conn;
160 150
161 if (in_own_node(dnode)) 151 if (in_own_node(net, dnode))
162 return 0; 152 return 0;
163 153
164 node = tipc_node_find(dnode); 154 node = tipc_node_find(net, dnode);
165 if (!node) { 155 if (!node) {
166 pr_warn("Connecting sock to node 0x%x failed\n", dnode); 156 pr_warn("Connecting sock to node 0x%x failed\n", dnode);
167 return -EHOSTUNREACH; 157 return -EHOSTUNREACH;
@@ -179,15 +169,15 @@ int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port)
179 return 0; 169 return 0;
180} 170}
181 171
182void tipc_node_remove_conn(u32 dnode, u32 port) 172void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port)
183{ 173{
184 struct tipc_node *node; 174 struct tipc_node *node;
185 struct tipc_sock_conn *conn, *safe; 175 struct tipc_sock_conn *conn, *safe;
186 176
187 if (in_own_node(dnode)) 177 if (in_own_node(net, dnode))
188 return; 178 return;
189 179
190 node = tipc_node_find(dnode); 180 node = tipc_node_find(net, dnode);
191 if (!node) 181 if (!node)
192 return; 182 return;
193 183
@@ -201,23 +191,6 @@ void tipc_node_remove_conn(u32 dnode, u32 port)
201 tipc_node_unlock(node); 191 tipc_node_unlock(node);
202} 192}
203 193
204void tipc_node_abort_sock_conns(struct list_head *conns)
205{
206 struct tipc_sock_conn *conn, *safe;
207 struct sk_buff *buf;
208
209 list_for_each_entry_safe(conn, safe, conns, list) {
210 buf = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
211 SHORT_H_SIZE, 0, tipc_own_addr,
212 conn->peer_node, conn->port,
213 conn->peer_port, TIPC_ERR_NO_NODE);
214 if (likely(buf))
215 tipc_sk_rcv(buf);
216 list_del(&conn->list);
217 kfree(conn);
218 }
219}
220
221/** 194/**
222 * tipc_node_link_up - handle addition of link 195 * tipc_node_link_up - handle addition of link
223 * 196 *
@@ -231,8 +204,8 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
231 n_ptr->action_flags |= TIPC_NOTIFY_LINK_UP; 204 n_ptr->action_flags |= TIPC_NOTIFY_LINK_UP;
232 n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id; 205 n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;
233 206
234 pr_info("Established link <%s> on network plane %c\n", 207 pr_debug("Established link <%s> on network plane %c\n",
235 l_ptr->name, l_ptr->net_plane); 208 l_ptr->name, l_ptr->net_plane);
236 209
237 if (!active[0]) { 210 if (!active[0]) {
238 active[0] = active[1] = l_ptr; 211 active[0] = active[1] = l_ptr;
@@ -240,7 +213,7 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
240 goto exit; 213 goto exit;
241 } 214 }
242 if (l_ptr->priority < active[0]->priority) { 215 if (l_ptr->priority < active[0]->priority) {
243 pr_info("New link <%s> becomes standby\n", l_ptr->name); 216 pr_debug("New link <%s> becomes standby\n", l_ptr->name);
244 goto exit; 217 goto exit;
245 } 218 }
246 tipc_link_dup_queue_xmit(active[0], l_ptr); 219 tipc_link_dup_queue_xmit(active[0], l_ptr);
@@ -248,9 +221,9 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
248 active[0] = l_ptr; 221 active[0] = l_ptr;
249 goto exit; 222 goto exit;
250 } 223 }
251 pr_info("Old link <%s> becomes standby\n", active[0]->name); 224 pr_debug("Old link <%s> becomes standby\n", active[0]->name);
252 if (active[1] != active[0]) 225 if (active[1] != active[0])
253 pr_info("Old link <%s> becomes standby\n", active[1]->name); 226 pr_debug("Old link <%s> becomes standby\n", active[1]->name);
254 active[0] = active[1] = l_ptr; 227 active[0] = active[1] = l_ptr;
255exit: 228exit:
256 /* Leave room for changeover header when returning 'mtu' to users: */ 229 /* Leave room for changeover header when returning 'mtu' to users: */
@@ -290,6 +263,7 @@ static void node_select_active_links(struct tipc_node *n_ptr)
290 */ 263 */
291void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr) 264void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
292{ 265{
266 struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
293 struct tipc_link **active; 267 struct tipc_link **active;
294 268
295 n_ptr->working_links--; 269 n_ptr->working_links--;
@@ -297,12 +271,12 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
297 n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id; 271 n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;
298 272
299 if (!tipc_link_is_active(l_ptr)) { 273 if (!tipc_link_is_active(l_ptr)) {
300 pr_info("Lost standby link <%s> on network plane %c\n", 274 pr_debug("Lost standby link <%s> on network plane %c\n",
301 l_ptr->name, l_ptr->net_plane); 275 l_ptr->name, l_ptr->net_plane);
302 return; 276 return;
303 } 277 }
304 pr_info("Lost link <%s> on network plane %c\n", 278 pr_debug("Lost link <%s> on network plane %c\n",
305 l_ptr->name, l_ptr->net_plane); 279 l_ptr->name, l_ptr->net_plane);
306 280
307 active = &n_ptr->active_links[0]; 281 active = &n_ptr->active_links[0];
308 if (active[0] == l_ptr) 282 if (active[0] == l_ptr)
@@ -324,7 +298,7 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
324 } 298 }
325 299
326 /* Loopback link went down? No fragmentation needed from now on. */ 300 /* Loopback link went down? No fragmentation needed from now on. */
327 if (n_ptr->addr == tipc_own_addr) { 301 if (n_ptr->addr == tn->own_addr) {
328 n_ptr->act_mtus[0] = MAX_MSG_SIZE; 302 n_ptr->act_mtus[0] = MAX_MSG_SIZE;
329 n_ptr->act_mtus[1] = MAX_MSG_SIZE; 303 n_ptr->act_mtus[1] = MAX_MSG_SIZE;
330 } 304 }
@@ -343,9 +317,6 @@ int tipc_node_is_up(struct tipc_node *n_ptr)
343void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr) 317void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
344{ 318{
345 n_ptr->links[l_ptr->bearer_id] = l_ptr; 319 n_ptr->links[l_ptr->bearer_id] = l_ptr;
346 spin_lock_bh(&node_list_lock);
347 tipc_num_links++;
348 spin_unlock_bh(&node_list_lock);
349 n_ptr->link_cnt++; 320 n_ptr->link_cnt++;
350} 321}
351 322
@@ -357,9 +328,6 @@ void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
357 if (l_ptr != n_ptr->links[i]) 328 if (l_ptr != n_ptr->links[i])
358 continue; 329 continue;
359 n_ptr->links[i] = NULL; 330 n_ptr->links[i] = NULL;
360 spin_lock_bh(&node_list_lock);
361 tipc_num_links--;
362 spin_unlock_bh(&node_list_lock);
363 n_ptr->link_cnt--; 331 n_ptr->link_cnt--;
364 } 332 }
365} 333}
@@ -368,17 +336,21 @@ static void node_established_contact(struct tipc_node *n_ptr)
368{ 336{
369 n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP; 337 n_ptr->action_flags |= TIPC_NOTIFY_NODE_UP;
370 n_ptr->bclink.oos_state = 0; 338 n_ptr->bclink.oos_state = 0;
371 n_ptr->bclink.acked = tipc_bclink_get_last_sent(); 339 n_ptr->bclink.acked = tipc_bclink_get_last_sent(n_ptr->net);
372 tipc_bclink_add_node(n_ptr->addr); 340 tipc_bclink_add_node(n_ptr->net, n_ptr->addr);
373} 341}
374 342
375static void node_lost_contact(struct tipc_node *n_ptr) 343static void node_lost_contact(struct tipc_node *n_ptr)
376{ 344{
377 char addr_string[16]; 345 char addr_string[16];
378 u32 i; 346 struct tipc_sock_conn *conn, *safe;
347 struct list_head *conns = &n_ptr->conn_sks;
348 struct sk_buff *skb;
349 struct tipc_net *tn = net_generic(n_ptr->net, tipc_net_id);
350 uint i;
379 351
380 pr_info("Lost contact with %s\n", 352 pr_debug("Lost contact with %s\n",
381 tipc_addr_string_fill(addr_string, n_ptr->addr)); 353 tipc_addr_string_fill(addr_string, n_ptr->addr));
382 354
383 /* Flush broadcast link info associated with lost node */ 355 /* Flush broadcast link info associated with lost node */
384 if (n_ptr->bclink.recv_permitted) { 356 if (n_ptr->bclink.recv_permitted) {
@@ -389,7 +361,7 @@ static void node_lost_contact(struct tipc_node *n_ptr)
389 n_ptr->bclink.reasm_buf = NULL; 361 n_ptr->bclink.reasm_buf = NULL;
390 } 362 }
391 363
392 tipc_bclink_remove_node(n_ptr->addr); 364 tipc_bclink_remove_node(n_ptr->net, n_ptr->addr);
393 tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ); 365 tipc_bclink_acknowledge(n_ptr, INVALID_LINK_SEQ);
394 366
395 n_ptr->bclink.recv_permitted = false; 367 n_ptr->bclink.recv_permitted = false;
@@ -403,126 +375,33 @@ static void node_lost_contact(struct tipc_node *n_ptr)
403 l_ptr->reset_checkpoint = l_ptr->next_in_no; 375 l_ptr->reset_checkpoint = l_ptr->next_in_no;
404 l_ptr->exp_msg_count = 0; 376 l_ptr->exp_msg_count = 0;
405 tipc_link_reset_fragments(l_ptr); 377 tipc_link_reset_fragments(l_ptr);
406 }
407
408 n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN;
409
410 /* Notify subscribers and prevent re-contact with node until
411 * cleanup is done.
412 */
413 n_ptr->action_flags |= TIPC_WAIT_PEER_LINKS_DOWN |
414 TIPC_NOTIFY_NODE_DOWN;
415}
416 378
417struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space) 379 /* Link marked for deletion after failover? => do it now */
418{ 380 if (l_ptr->flags & LINK_STOPPED)
419 u32 domain; 381 tipc_link_delete(l_ptr);
420 struct sk_buff *buf;
421 struct tipc_node *n_ptr;
422 struct tipc_node_info node_info;
423 u32 payload_size;
424
425 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
426 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
427
428 domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
429 if (!tipc_addr_domain_valid(domain))
430 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
431 " (network address)");
432
433 spin_lock_bh(&node_list_lock);
434 if (!tipc_num_nodes) {
435 spin_unlock_bh(&node_list_lock);
436 return tipc_cfg_reply_none();
437 } 382 }
438 383
439 /* For now, get space for all other nodes */ 384 n_ptr->action_flags &= ~TIPC_WAIT_OWN_LINKS_DOWN;
440 payload_size = TLV_SPACE(sizeof(node_info)) * tipc_num_nodes;
441 if (payload_size > 32768u) {
442 spin_unlock_bh(&node_list_lock);
443 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
444 " (too many nodes)");
445 }
446 spin_unlock_bh(&node_list_lock);
447
448 buf = tipc_cfg_reply_alloc(payload_size);
449 if (!buf)
450 return NULL;
451
452 /* Add TLVs for all nodes in scope */
453 rcu_read_lock();
454 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
455 if (!tipc_in_scope(domain, n_ptr->addr))
456 continue;
457 node_info.addr = htonl(n_ptr->addr);
458 node_info.up = htonl(tipc_node_is_up(n_ptr));
459 tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
460 &node_info, sizeof(node_info));
461 }
462 rcu_read_unlock();
463 return buf;
464}
465
466struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
467{
468 u32 domain;
469 struct sk_buff *buf;
470 struct tipc_node *n_ptr;
471 struct tipc_link_info link_info;
472 u32 payload_size;
473
474 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
475 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
476
477 domain = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
478 if (!tipc_addr_domain_valid(domain))
479 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
480 " (network address)");
481
482 if (!tipc_own_addr)
483 return tipc_cfg_reply_none();
484
485 spin_lock_bh(&node_list_lock);
486 /* Get space for all unicast links + broadcast link */
487 payload_size = TLV_SPACE((sizeof(link_info)) * (tipc_num_links + 1));
488 if (payload_size > 32768u) {
489 spin_unlock_bh(&node_list_lock);
490 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
491 " (too many links)");
492 }
493 spin_unlock_bh(&node_list_lock);
494
495 buf = tipc_cfg_reply_alloc(payload_size);
496 if (!buf)
497 return NULL;
498 385
499 /* Add TLV for broadcast link */ 386 /* Prevent re-contact with node until cleanup is done */
500 link_info.dest = htonl(tipc_cluster_mask(tipc_own_addr)); 387 n_ptr->action_flags |= TIPC_WAIT_PEER_LINKS_DOWN;
501 link_info.up = htonl(1);
502 strlcpy(link_info.str, tipc_bclink_name, TIPC_MAX_LINK_NAME);
503 tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
504 388
505 /* Add TLVs for any other links in scope */ 389 /* Notify publications from this node */
506 rcu_read_lock(); 390 n_ptr->action_flags |= TIPC_NOTIFY_NODE_DOWN;
507 list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
508 u32 i;
509 391
510 if (!tipc_in_scope(domain, n_ptr->addr)) 392 /* Notify sockets connected to node */
511 continue; 393 list_for_each_entry_safe(conn, safe, conns, list) {
512 tipc_node_lock(n_ptr); 394 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG,
513 for (i = 0; i < MAX_BEARERS; i++) { 395 SHORT_H_SIZE, 0, tn->own_addr,
514 if (!n_ptr->links[i]) 396 conn->peer_node, conn->port,
515 continue; 397 conn->peer_port, TIPC_ERR_NO_NODE);
516 link_info.dest = htonl(n_ptr->addr); 398 if (likely(skb)) {
517 link_info.up = htonl(tipc_link_is_up(n_ptr->links[i])); 399 skb_queue_tail(n_ptr->inputq, skb);
518 strcpy(link_info.str, n_ptr->links[i]->name); 400 n_ptr->action_flags |= TIPC_MSG_EVT;
519 tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO,
520 &link_info, sizeof(link_info));
521 } 401 }
522 tipc_node_unlock(n_ptr); 402 list_del(&conn->list);
403 kfree(conn);
523 } 404 }
524 rcu_read_unlock();
525 return buf;
526} 405}
527 406
528/** 407/**
@@ -534,10 +413,11 @@ struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
534 * 413 *
535 * Returns 0 on success 414 * Returns 0 on success
536 */ 415 */
537int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len) 416int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 addr,
417 char *linkname, size_t len)
538{ 418{
539 struct tipc_link *link; 419 struct tipc_link *link;
540 struct tipc_node *node = tipc_node_find(addr); 420 struct tipc_node *node = tipc_node_find(net, addr);
541 421
542 if ((bearer_id >= MAX_BEARERS) || !node) 422 if ((bearer_id >= MAX_BEARERS) || !node)
543 return -EINVAL; 423 return -EINVAL;
@@ -554,58 +434,60 @@ int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len)
554 434
555void tipc_node_unlock(struct tipc_node *node) 435void tipc_node_unlock(struct tipc_node *node)
556{ 436{
557 LIST_HEAD(nsub_list); 437 struct net *net = node->net;
558 LIST_HEAD(conn_sks);
559 struct sk_buff_head waiting_sks;
560 u32 addr = 0; 438 u32 addr = 0;
561 int flags = node->action_flags; 439 u32 flags = node->action_flags;
562 u32 link_id = 0; 440 u32 link_id = 0;
441 struct list_head *publ_list;
442 struct sk_buff_head *inputq = node->inputq;
443 struct sk_buff_head *namedq;
563 444
564 if (likely(!flags)) { 445 if (likely(!flags || (flags == TIPC_MSG_EVT))) {
446 node->action_flags = 0;
565 spin_unlock_bh(&node->lock); 447 spin_unlock_bh(&node->lock);
448 if (flags == TIPC_MSG_EVT)
449 tipc_sk_rcv(net, inputq);
566 return; 450 return;
567 } 451 }
568 452
569 addr = node->addr; 453 addr = node->addr;
570 link_id = node->link_id; 454 link_id = node->link_id;
571 __skb_queue_head_init(&waiting_sks); 455 namedq = node->namedq;
456 publ_list = &node->publ_list;
572 457
573 if (flags & TIPC_WAKEUP_USERS) 458 node->action_flags &= ~(TIPC_MSG_EVT |
574 skb_queue_splice_init(&node->waiting_sks, &waiting_sks); 459 TIPC_NOTIFY_NODE_DOWN | TIPC_NOTIFY_NODE_UP |
575 460 TIPC_NOTIFY_LINK_DOWN | TIPC_NOTIFY_LINK_UP |
576 if (flags & TIPC_NOTIFY_NODE_DOWN) { 461 TIPC_WAKEUP_BCAST_USERS | TIPC_BCAST_MSG_EVT |
577 list_replace_init(&node->publ_list, &nsub_list); 462 TIPC_NAMED_MSG_EVT);
578 list_replace_init(&node->conn_sks, &conn_sks);
579 }
580 node->action_flags &= ~(TIPC_WAKEUP_USERS | TIPC_NOTIFY_NODE_DOWN |
581 TIPC_NOTIFY_NODE_UP | TIPC_NOTIFY_LINK_UP |
582 TIPC_NOTIFY_LINK_DOWN |
583 TIPC_WAKEUP_BCAST_USERS);
584 463
585 spin_unlock_bh(&node->lock); 464 spin_unlock_bh(&node->lock);
586 465
587 while (!skb_queue_empty(&waiting_sks)) 466 if (flags & TIPC_NOTIFY_NODE_DOWN)
588 tipc_sk_rcv(__skb_dequeue(&waiting_sks)); 467 tipc_publ_notify(net, publ_list, addr);
589
590 if (!list_empty(&conn_sks))
591 tipc_node_abort_sock_conns(&conn_sks);
592
593 if (!list_empty(&nsub_list))
594 tipc_publ_notify(&nsub_list, addr);
595 468
596 if (flags & TIPC_WAKEUP_BCAST_USERS) 469 if (flags & TIPC_WAKEUP_BCAST_USERS)
597 tipc_bclink_wakeup_users(); 470 tipc_bclink_wakeup_users(net);
598 471
599 if (flags & TIPC_NOTIFY_NODE_UP) 472 if (flags & TIPC_NOTIFY_NODE_UP)
600 tipc_named_node_up(addr); 473 tipc_named_node_up(net, addr);
601 474
602 if (flags & TIPC_NOTIFY_LINK_UP) 475 if (flags & TIPC_NOTIFY_LINK_UP)
603 tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr, 476 tipc_nametbl_publish(net, TIPC_LINK_STATE, addr, addr,
604 TIPC_NODE_SCOPE, link_id, addr); 477 TIPC_NODE_SCOPE, link_id, addr);
605 478
606 if (flags & TIPC_NOTIFY_LINK_DOWN) 479 if (flags & TIPC_NOTIFY_LINK_DOWN)
607 tipc_nametbl_withdraw(TIPC_LINK_STATE, addr, 480 tipc_nametbl_withdraw(net, TIPC_LINK_STATE, addr,
608 link_id, addr); 481 link_id, addr);
482
483 if (flags & TIPC_MSG_EVT)
484 tipc_sk_rcv(net, inputq);
485
486 if (flags & TIPC_NAMED_MSG_EVT)
487 tipc_named_rcv(net, namedq);
488
489 if (flags & TIPC_BCAST_MSG_EVT)
490 tipc_bclink_input(net);
609} 491}
610 492
611/* Caller should hold node lock for the passed node */ 493/* Caller should hold node lock for the passed node */
@@ -614,7 +496,7 @@ static int __tipc_nl_add_node(struct tipc_nl_msg *msg, struct tipc_node *node)
614 void *hdr; 496 void *hdr;
615 struct nlattr *attrs; 497 struct nlattr *attrs;
616 498
617 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_v2_family, 499 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
618 NLM_F_MULTI, TIPC_NL_NODE_GET); 500 NLM_F_MULTI, TIPC_NL_NODE_GET);
619 if (!hdr) 501 if (!hdr)
620 return -EMSGSIZE; 502 return -EMSGSIZE;
@@ -645,6 +527,8 @@ msg_full:
645int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb) 527int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
646{ 528{
647 int err; 529 int err;
530 struct net *net = sock_net(skb->sk);
531 struct tipc_net *tn = net_generic(net, tipc_net_id);
648 int done = cb->args[0]; 532 int done = cb->args[0];
649 int last_addr = cb->args[1]; 533 int last_addr = cb->args[1];
650 struct tipc_node *node; 534 struct tipc_node *node;
@@ -659,7 +543,7 @@ int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
659 543
660 rcu_read_lock(); 544 rcu_read_lock();
661 545
662 if (last_addr && !tipc_node_find(last_addr)) { 546 if (last_addr && !tipc_node_find(net, last_addr)) {
663 rcu_read_unlock(); 547 rcu_read_unlock();
664 /* We never set seq or call nl_dump_check_consistent() this 548 /* We never set seq or call nl_dump_check_consistent() this
665 * means that setting prev_seq here will cause the consistence 549 * means that setting prev_seq here will cause the consistence
@@ -671,7 +555,7 @@ int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb)
671 return -EPIPE; 555 return -EPIPE;
672 } 556 }
673 557
674 list_for_each_entry_rcu(node, &tipc_node_list, list) { 558 list_for_each_entry_rcu(node, &tn->node_list, list) {
675 if (last_addr) { 559 if (last_addr) {
676 if (node->addr == last_addr) 560 if (node->addr == last_addr)
677 last_addr = 0; 561 last_addr = 0;
diff --git a/net/tipc/node.h b/net/tipc/node.h
index cbe0e950f1cc..3d18c66b7f78 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/node.h: Include file for TIPC node management routines 2 * net/tipc/node.h: Include file for TIPC node management routines
3 * 3 *
4 * Copyright (c) 2000-2006, 2014, Ericsson AB 4 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
5 * Copyright (c) 2005, 2010-2014, Wind River Systems 5 * Copyright (c) 2005, 2010-2014, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -42,10 +42,10 @@
42#include "bearer.h" 42#include "bearer.h"
43#include "msg.h" 43#include "msg.h"
44 44
45/* 45/* Out-of-range value for node signature */
46 * Out-of-range value for node signature 46#define INVALID_NODE_SIG 0x10000
47 */ 47
48#define INVALID_NODE_SIG 0x10000 48#define NODE_HTABLE_SIZE 512
49 49
50/* Flags used to take different actions according to flag type 50/* Flags used to take different actions according to flag type
51 * TIPC_WAIT_PEER_LINKS_DOWN: wait to see that peer's links are down 51 * TIPC_WAIT_PEER_LINKS_DOWN: wait to see that peer's links are down
@@ -55,14 +55,16 @@
55 * TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type 55 * TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type
56 */ 56 */
57enum { 57enum {
58 TIPC_MSG_EVT = 1,
58 TIPC_WAIT_PEER_LINKS_DOWN = (1 << 1), 59 TIPC_WAIT_PEER_LINKS_DOWN = (1 << 1),
59 TIPC_WAIT_OWN_LINKS_DOWN = (1 << 2), 60 TIPC_WAIT_OWN_LINKS_DOWN = (1 << 2),
60 TIPC_NOTIFY_NODE_DOWN = (1 << 3), 61 TIPC_NOTIFY_NODE_DOWN = (1 << 3),
61 TIPC_NOTIFY_NODE_UP = (1 << 4), 62 TIPC_NOTIFY_NODE_UP = (1 << 4),
62 TIPC_WAKEUP_USERS = (1 << 5), 63 TIPC_WAKEUP_BCAST_USERS = (1 << 5),
63 TIPC_WAKEUP_BCAST_USERS = (1 << 6), 64 TIPC_NOTIFY_LINK_UP = (1 << 6),
64 TIPC_NOTIFY_LINK_UP = (1 << 7), 65 TIPC_NOTIFY_LINK_DOWN = (1 << 7),
65 TIPC_NOTIFY_LINK_DOWN = (1 << 8) 66 TIPC_NAMED_MSG_EVT = (1 << 8),
67 TIPC_BCAST_MSG_EVT = (1 << 9)
66}; 68};
67 69
68/** 70/**
@@ -73,6 +75,7 @@ enum {
73 * @oos_state: state tracker for handling OOS b'cast messages 75 * @oos_state: state tracker for handling OOS b'cast messages
74 * @deferred_queue: deferred queue saved OOS b'cast message received from node 76 * @deferred_queue: deferred queue saved OOS b'cast message received from node
75 * @reasm_buf: broadcast reassembly queue head from node 77 * @reasm_buf: broadcast reassembly queue head from node
 78 * @inputq_map: bitmap indicating which input queues should be kicked
76 * @recv_permitted: true if node is allowed to receive b'cast messages 79 * @recv_permitted: true if node is allowed to receive b'cast messages
77 */ 80 */
78struct tipc_node_bclink { 81struct tipc_node_bclink {
@@ -83,6 +86,7 @@ struct tipc_node_bclink {
83 u32 deferred_size; 86 u32 deferred_size;
84 struct sk_buff_head deferred_queue; 87 struct sk_buff_head deferred_queue;
85 struct sk_buff *reasm_buf; 88 struct sk_buff *reasm_buf;
89 int inputq_map;
86 bool recv_permitted; 90 bool recv_permitted;
87}; 91};
88 92
@@ -90,7 +94,11 @@ struct tipc_node_bclink {
90 * struct tipc_node - TIPC node structure 94 * struct tipc_node - TIPC node structure
91 * @addr: network address of node 95 * @addr: network address of node
92 * @lock: spinlock governing access to structure 96 * @lock: spinlock governing access to structure
97 * @net: the applicable net namespace
93 * @hash: links to adjacent nodes in unsorted hash chain 98 * @hash: links to adjacent nodes in unsorted hash chain
99 * @inputq: pointer to input queue containing messages for msg event
100 * @namedq: pointer to name table input queue with name table messages
101 * @curr_link: the link holding the node lock, if any
94 * @active_links: pointers to active links to node 102 * @active_links: pointers to active links to node
95 * @links: pointers to all links to node 103 * @links: pointers to all links to node
96 * @action_flags: bit mask of different types of node actions 104 * @action_flags: bit mask of different types of node actions
@@ -106,11 +114,14 @@ struct tipc_node_bclink {
106struct tipc_node { 114struct tipc_node {
107 u32 addr; 115 u32 addr;
108 spinlock_t lock; 116 spinlock_t lock;
117 struct net *net;
109 struct hlist_node hash; 118 struct hlist_node hash;
119 struct sk_buff_head *inputq;
120 struct sk_buff_head *namedq;
110 struct tipc_link *active_links[2]; 121 struct tipc_link *active_links[2];
111 u32 act_mtus[2]; 122 u32 act_mtus[2];
112 struct tipc_link *links[MAX_BEARERS]; 123 struct tipc_link *links[MAX_BEARERS];
113 unsigned int action_flags; 124 int action_flags;
114 struct tipc_node_bclink bclink; 125 struct tipc_node_bclink bclink;
115 struct list_head list; 126 struct list_head list;
116 int link_cnt; 127 int link_cnt;
@@ -118,28 +129,24 @@ struct tipc_node {
118 u32 signature; 129 u32 signature;
119 u32 link_id; 130 u32 link_id;
120 struct list_head publ_list; 131 struct list_head publ_list;
121 struct sk_buff_head waiting_sks;
122 struct list_head conn_sks; 132 struct list_head conn_sks;
123 struct rcu_head rcu; 133 struct rcu_head rcu;
124}; 134};
125 135
126extern struct list_head tipc_node_list; 136struct tipc_node *tipc_node_find(struct net *net, u32 addr);
127 137struct tipc_node *tipc_node_create(struct net *net, u32 addr);
128struct tipc_node *tipc_node_find(u32 addr); 138void tipc_node_stop(struct net *net);
129struct tipc_node *tipc_node_create(u32 addr);
130void tipc_node_stop(void);
131void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr); 139void tipc_node_attach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
132void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr); 140void tipc_node_detach_link(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
133void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr); 141void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
134void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr); 142void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr);
135int tipc_node_active_links(struct tipc_node *n_ptr); 143int tipc_node_active_links(struct tipc_node *n_ptr);
136int tipc_node_is_up(struct tipc_node *n_ptr); 144int tipc_node_is_up(struct tipc_node *n_ptr);
137struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space); 145int tipc_node_get_linkname(struct net *net, u32 bearer_id, u32 node,
138struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space); 146 char *linkname, size_t len);
139int tipc_node_get_linkname(u32 bearer_id, u32 node, char *linkname, size_t len);
140void tipc_node_unlock(struct tipc_node *node); 147void tipc_node_unlock(struct tipc_node *node);
141int tipc_node_add_conn(u32 dnode, u32 port, u32 peer_port); 148int tipc_node_add_conn(struct net *net, u32 dnode, u32 port, u32 peer_port);
142void tipc_node_remove_conn(u32 dnode, u32 port); 149void tipc_node_remove_conn(struct net *net, u32 dnode, u32 port);
143 150
144int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb); 151int tipc_nl_node_dump(struct sk_buff *skb, struct netlink_callback *cb);
145 152
@@ -154,12 +161,12 @@ static inline bool tipc_node_blocked(struct tipc_node *node)
154 TIPC_NOTIFY_NODE_DOWN | TIPC_WAIT_OWN_LINKS_DOWN)); 161 TIPC_NOTIFY_NODE_DOWN | TIPC_WAIT_OWN_LINKS_DOWN));
155} 162}
156 163
157static inline uint tipc_node_get_mtu(u32 addr, u32 selector) 164static inline uint tipc_node_get_mtu(struct net *net, u32 addr, u32 selector)
158{ 165{
159 struct tipc_node *node; 166 struct tipc_node *node;
160 u32 mtu; 167 u32 mtu;
161 168
162 node = tipc_node_find(addr); 169 node = tipc_node_find(net, addr);
163 170
164 if (likely(node)) 171 if (likely(node))
165 mtu = node->act_mtus[selector & 1]; 172 mtu = node->act_mtus[selector & 1];
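
The node API above now threads struct net * through every entry point (find/create/stop, connection tracking, MTU lookup), replacing reliance on global state. A minimal caller-side sketch of the new convention, with illustrative names only; the namespace is derived from whatever object the caller already holds, here a socket:

	#include <net/sock.h>		/* sock_net() */
	#include "node.h"		/* tipc_node_get_mtu() as declared above */

	/* Illustrative only: node lookups are now scoped by network namespace,
	 * so callers derive 'net' from their context instead of using globals.
	 */
	static u32 example_conn_mtu(struct sock *sk, u32 peer_node, u32 portid)
	{
		struct net *net = sock_net(sk);

		return tipc_node_get_mtu(net, peer_node, portid);
	}
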
diff --git a/net/tipc/server.c b/net/tipc/server.c
index a538a02f869b..eadd4ed45905 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -35,6 +35,7 @@
35 35
36#include "server.h" 36#include "server.h"
37#include "core.h" 37#include "core.h"
38#include "socket.h"
38#include <net/sock.h> 39#include <net/sock.h>
39 40
40/* Number of messages to send before rescheduling */ 41/* Number of messages to send before rescheduling */
@@ -255,7 +256,8 @@ static int tipc_receive_from_sock(struct tipc_conn *con)
255 goto out_close; 256 goto out_close;
256 } 257 }
257 258
258 s->tipc_conn_recvmsg(con->conid, &addr, con->usr_data, buf, ret); 259 s->tipc_conn_recvmsg(sock_net(con->sock->sk), con->conid, &addr,
260 con->usr_data, buf, ret);
259 261
260 kmem_cache_free(s->rcvbuf_cache, buf); 262 kmem_cache_free(s->rcvbuf_cache, buf);
261 263
@@ -307,7 +309,7 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
307 struct socket *sock = NULL; 309 struct socket *sock = NULL;
308 int ret; 310 int ret;
309 311
310 ret = tipc_sock_create_local(s->type, &sock); 312 ret = tipc_sock_create_local(s->net, s->type, &sock);
311 if (ret < 0) 313 if (ret < 0)
312 return NULL; 314 return NULL;
313 ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE, 315 ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE,
diff --git a/net/tipc/server.h b/net/tipc/server.h
index be817b0b547e..9015faedb1b0 100644
--- a/net/tipc/server.h
+++ b/net/tipc/server.h
@@ -36,7 +36,9 @@
36#ifndef _TIPC_SERVER_H 36#ifndef _TIPC_SERVER_H
37#define _TIPC_SERVER_H 37#define _TIPC_SERVER_H
38 38
39#include "core.h" 39#include <linux/idr.h>
40#include <linux/tipc.h>
41#include <net/net_namespace.h>
40 42
41#define TIPC_SERVER_NAME_LEN 32 43#define TIPC_SERVER_NAME_LEN 32
42 44
@@ -45,6 +47,7 @@
45 * @conn_idr: identifier set of connection 47 * @conn_idr: identifier set of connection
46 * @idr_lock: protect the connection identifier set 48 * @idr_lock: protect the connection identifier set
47 * @idr_in_use: amount of allocated identifier entry 49 * @idr_in_use: amount of allocated identifier entry
 50 * @net: network namespace instance
48 * @rcvbuf_cache: memory cache of server receive buffer 51 * @rcvbuf_cache: memory cache of server receive buffer
49 * @rcv_wq: receive workqueue 52 * @rcv_wq: receive workqueue
50 * @send_wq: send workqueue 53 * @send_wq: send workqueue
@@ -61,16 +64,18 @@ struct tipc_server {
61 struct idr conn_idr; 64 struct idr conn_idr;
62 spinlock_t idr_lock; 65 spinlock_t idr_lock;
63 int idr_in_use; 66 int idr_in_use;
67 struct net *net;
64 struct kmem_cache *rcvbuf_cache; 68 struct kmem_cache *rcvbuf_cache;
65 struct workqueue_struct *rcv_wq; 69 struct workqueue_struct *rcv_wq;
66 struct workqueue_struct *send_wq; 70 struct workqueue_struct *send_wq;
67 int max_rcvbuf_size; 71 int max_rcvbuf_size;
68 void *(*tipc_conn_new) (int conid); 72 void *(*tipc_conn_new)(int conid);
69 void (*tipc_conn_shutdown) (int conid, void *usr_data); 73 void (*tipc_conn_shutdown)(int conid, void *usr_data);
70 void (*tipc_conn_recvmsg) (int conid, struct sockaddr_tipc *addr, 74 void (*tipc_conn_recvmsg)(struct net *net, int conid,
71 void *usr_data, void *buf, size_t len); 75 struct sockaddr_tipc *addr, void *usr_data,
76 void *buf, size_t len);
72 struct sockaddr_tipc *saddr; 77 struct sockaddr_tipc *saddr;
73 const char name[TIPC_SERVER_NAME_LEN]; 78 char name[TIPC_SERVER_NAME_LEN];
74 int imp; 79 int imp;
75 int type; 80 int type;
76}; 81};
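
struct tipc_server now records its namespace and the receive callback gains a struct net * argument, which tipc_receive_from_sock() fills in from the connection's socket (see the server.c hunk above). A hedged sketch of a callback written against the new prototype; all example_* names are illustrative and not part of the patch:

	#include <linux/tipc.h>			/* struct sockaddr_tipc */
	#include <net/net_namespace.h>

	/* Illustrative callback matching the new tipc_conn_recvmsg() prototype. */
	static void example_conn_recvmsg(struct net *net, int conid,
					 struct sockaddr_tipc *addr,
					 void *usr_data, void *buf, size_t len)
	{
		/* interpret 'len' bytes at 'buf' for connection 'conid' in 'net' */
	}

	/* Hooked up through the fields declared above, e.g.:
	 *	srv->net = net;
	 *	srv->tipc_conn_recvmsg = example_conn_recvmsg;
	 */
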
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 4731cad99d1c..f73e975af80b 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * net/tipc/socket.c: TIPC socket API 2 * net/tipc/socket.c: TIPC socket API
3 * 3 *
4 * Copyright (c) 2001-2007, 2012-2014, Ericsson AB 4 * Copyright (c) 2001-2007, 2012-2015, Ericsson AB
5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems 5 * Copyright (c) 2004-2008, 2010-2013, Wind River Systems
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
@@ -34,22 +34,25 @@
34 * POSSIBILITY OF SUCH DAMAGE. 34 * POSSIBILITY OF SUCH DAMAGE.
35 */ 35 */
36 36
37#include <linux/rhashtable.h>
38#include <linux/jhash.h>
37#include "core.h" 39#include "core.h"
38#include "name_table.h" 40#include "name_table.h"
39#include "node.h" 41#include "node.h"
40#include "link.h" 42#include "link.h"
41#include <linux/export.h> 43#include "name_distr.h"
42#include "config.h"
43#include "socket.h" 44#include "socket.h"
44 45
45#define SS_LISTENING -1 /* socket is listening */ 46#define SS_LISTENING -1 /* socket is listening */
46#define SS_READY -2 /* socket is connectionless */ 47#define SS_READY -2 /* socket is connectionless */
47 48
48#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */ 49#define CONN_TIMEOUT_DEFAULT 8000 /* default connect timeout = 8s */
49#define CONN_PROBING_INTERVAL 3600000 /* [ms] => 1 h */ 50#define CONN_PROBING_INTERVAL msecs_to_jiffies(3600000) /* [ms] => 1 h */
50#define TIPC_FWD_MSG 1 51#define TIPC_FWD_MSG 1
51#define TIPC_CONN_OK 0 52#define TIPC_CONN_OK 0
52#define TIPC_CONN_PROBING 1 53#define TIPC_CONN_PROBING 1
54#define TIPC_MAX_PORT 0xffffffff
55#define TIPC_MIN_PORT 1
53 56
54/** 57/**
55 * struct tipc_sock - TIPC socket structure 58 * struct tipc_sock - TIPC socket structure
@@ -59,21 +62,20 @@
59 * @conn_instance: TIPC instance used when connection was established 62 * @conn_instance: TIPC instance used when connection was established
60 * @published: non-zero if port has one or more associated names 63 * @published: non-zero if port has one or more associated names
61 * @max_pkt: maximum packet size "hint" used when building messages sent by port 64 * @max_pkt: maximum packet size "hint" used when building messages sent by port
62 * @ref: unique reference to port in TIPC object registry 65 * @portid: unique port identity in TIPC socket hash table
63 * @phdr: preformatted message header used when sending messages 66 * @phdr: preformatted message header used when sending messages
64 * @port_list: adjacent ports in TIPC's global list of ports 67 * @port_list: adjacent ports in TIPC's global list of ports
65 * @publications: list of publications for port 68 * @publications: list of publications for port
66 * @pub_count: total # of publications port has made during its lifetime 69 * @pub_count: total # of publications port has made during its lifetime
67 * @probing_state: 70 * @probing_state:
68 * @probing_interval: 71 * @probing_intv:
69 * @timer:
70 * @port: port - interacts with 'sk' and with the rest of the TIPC stack
71 * @peer_name: the peer of the connection, if any
72 * @conn_timeout: the time we can wait for an unresponded setup request 72 * @conn_timeout: the time we can wait for an unresponded setup request
73 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue 73 * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
74 * @link_cong: non-zero if owner must sleep because of link congestion 74 * @link_cong: non-zero if owner must sleep because of link congestion
75 * @sent_unacked: # messages sent by socket, and not yet acked by peer 75 * @sent_unacked: # messages sent by socket, and not yet acked by peer
76 * @rcv_unacked: # messages read by user, but not yet acked back to peer 76 * @rcv_unacked: # messages read by user, but not yet acked back to peer
77 * @node: hash table node
78 * @rcu: rcu struct for tipc_sock
77 */ 79 */
78struct tipc_sock { 80struct tipc_sock {
79 struct sock sk; 81 struct sock sk;
@@ -82,19 +84,20 @@ struct tipc_sock {
82 u32 conn_instance; 84 u32 conn_instance;
83 int published; 85 int published;
84 u32 max_pkt; 86 u32 max_pkt;
85 u32 ref; 87 u32 portid;
86 struct tipc_msg phdr; 88 struct tipc_msg phdr;
87 struct list_head sock_list; 89 struct list_head sock_list;
88 struct list_head publications; 90 struct list_head publications;
89 u32 pub_count; 91 u32 pub_count;
90 u32 probing_state; 92 u32 probing_state;
91 u32 probing_interval; 93 unsigned long probing_intv;
92 struct timer_list timer;
93 uint conn_timeout; 94 uint conn_timeout;
94 atomic_t dupl_rcvcnt; 95 atomic_t dupl_rcvcnt;
95 bool link_cong; 96 bool link_cong;
96 uint sent_unacked; 97 uint sent_unacked;
97 uint rcv_unacked; 98 uint rcv_unacked;
99 struct rhash_head node;
100 struct rcu_head rcu;
98}; 101};
99 102
100static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb); 103static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
@@ -103,16 +106,14 @@ static void tipc_write_space(struct sock *sk);
103static int tipc_release(struct socket *sock); 106static int tipc_release(struct socket *sock);
104static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags); 107static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
105static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p); 108static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
106static void tipc_sk_timeout(unsigned long ref); 109static void tipc_sk_timeout(unsigned long data);
107static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, 110static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
108 struct tipc_name_seq const *seq); 111 struct tipc_name_seq const *seq);
109static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, 112static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
110 struct tipc_name_seq const *seq); 113 struct tipc_name_seq const *seq);
111static u32 tipc_sk_ref_acquire(struct tipc_sock *tsk); 114static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid);
112static void tipc_sk_ref_discard(u32 ref); 115static int tipc_sk_insert(struct tipc_sock *tsk);
113static struct tipc_sock *tipc_sk_get(u32 ref); 116static void tipc_sk_remove(struct tipc_sock *tsk);
114static struct tipc_sock *tipc_sk_get_next(u32 *ref);
115static void tipc_sk_put(struct tipc_sock *tsk);
116 117
117static const struct proto_ops packet_ops; 118static const struct proto_ops packet_ops;
118static const struct proto_ops stream_ops; 119static const struct proto_ops stream_ops;
@@ -174,6 +175,11 @@ static const struct nla_policy tipc_nl_sock_policy[TIPC_NLA_SOCK_MAX + 1] = {
174 * - port reference 175 * - port reference
175 */ 176 */
176 177
178static u32 tsk_own_node(struct tipc_sock *tsk)
179{
180 return msg_prevnode(&tsk->phdr);
181}
182
177static u32 tsk_peer_node(struct tipc_sock *tsk) 183static u32 tsk_peer_node(struct tipc_sock *tsk)
178{ 184{
179 return msg_destnode(&tsk->phdr); 185 return msg_destnode(&tsk->phdr);
@@ -246,10 +252,11 @@ static void tsk_rej_rx_queue(struct sock *sk)
246{ 252{
247 struct sk_buff *skb; 253 struct sk_buff *skb;
248 u32 dnode; 254 u32 dnode;
255 u32 own_node = tsk_own_node(tipc_sk(sk));
249 256
250 while ((skb = __skb_dequeue(&sk->sk_receive_queue))) { 257 while ((skb = __skb_dequeue(&sk->sk_receive_queue))) {
251 if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT)) 258 if (tipc_msg_reverse(own_node, skb, &dnode, TIPC_ERR_NO_PORT))
252 tipc_link_xmit_skb(skb, dnode, 0); 259 tipc_link_xmit_skb(sock_net(sk), skb, dnode, 0);
253 } 260 }
254} 261}
255 262
@@ -260,6 +267,7 @@ static void tsk_rej_rx_queue(struct sock *sk)
260 */ 267 */
261static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg) 268static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
262{ 269{
270 struct tipc_net *tn = net_generic(sock_net(&tsk->sk), tipc_net_id);
263 u32 peer_port = tsk_peer_port(tsk); 271 u32 peer_port = tsk_peer_port(tsk);
264 u32 orig_node; 272 u32 orig_node;
265 u32 peer_node; 273 u32 peer_node;
@@ -276,10 +284,10 @@ static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
276 if (likely(orig_node == peer_node)) 284 if (likely(orig_node == peer_node))
277 return true; 285 return true;
278 286
279 if (!orig_node && (peer_node == tipc_own_addr)) 287 if (!orig_node && (peer_node == tn->own_addr))
280 return true; 288 return true;
281 289
282 if (!peer_node && (orig_node == tipc_own_addr)) 290 if (!peer_node && (orig_node == tn->own_addr))
283 return true; 291 return true;
284 292
285 return false; 293 return false;
@@ -300,12 +308,12 @@ static bool tsk_peer_msg(struct tipc_sock *tsk, struct tipc_msg *msg)
300static int tipc_sk_create(struct net *net, struct socket *sock, 308static int tipc_sk_create(struct net *net, struct socket *sock,
301 int protocol, int kern) 309 int protocol, int kern)
302{ 310{
311 struct tipc_net *tn;
303 const struct proto_ops *ops; 312 const struct proto_ops *ops;
304 socket_state state; 313 socket_state state;
305 struct sock *sk; 314 struct sock *sk;
306 struct tipc_sock *tsk; 315 struct tipc_sock *tsk;
307 struct tipc_msg *msg; 316 struct tipc_msg *msg;
308 u32 ref;
309 317
310 /* Validate arguments */ 318 /* Validate arguments */
311 if (unlikely(protocol != 0)) 319 if (unlikely(protocol != 0))
@@ -339,24 +347,23 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
339 return -ENOMEM; 347 return -ENOMEM;
340 348
341 tsk = tipc_sk(sk); 349 tsk = tipc_sk(sk);
342 ref = tipc_sk_ref_acquire(tsk);
343 if (!ref) {
344 pr_warn("Socket create failed; reference table exhausted\n");
345 return -ENOMEM;
346 }
347 tsk->max_pkt = MAX_PKT_DEFAULT; 350 tsk->max_pkt = MAX_PKT_DEFAULT;
348 tsk->ref = ref;
349 INIT_LIST_HEAD(&tsk->publications); 351 INIT_LIST_HEAD(&tsk->publications);
350 msg = &tsk->phdr; 352 msg = &tsk->phdr;
351 tipc_msg_init(msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG, 353 tn = net_generic(sock_net(sk), tipc_net_id);
354 tipc_msg_init(tn->own_addr, msg, TIPC_LOW_IMPORTANCE, TIPC_NAMED_MSG,
352 NAMED_H_SIZE, 0); 355 NAMED_H_SIZE, 0);
353 msg_set_origport(msg, ref);
354 356
355 /* Finish initializing socket data structures */ 357 /* Finish initializing socket data structures */
356 sock->ops = ops; 358 sock->ops = ops;
357 sock->state = state; 359 sock->state = state;
358 sock_init_data(sock, sk); 360 sock_init_data(sock, sk);
359 k_init_timer(&tsk->timer, (Handler)tipc_sk_timeout, ref); 361 if (tipc_sk_insert(tsk)) {
 362 pr_warn("Socket create failed; port number exhausted\n");
363 return -EINVAL;
364 }
365 msg_set_origport(msg, tsk->portid);
366 setup_timer(&sk->sk_timer, tipc_sk_timeout, (unsigned long)tsk);
360 sk->sk_backlog_rcv = tipc_backlog_rcv; 367 sk->sk_backlog_rcv = tipc_backlog_rcv;
361 sk->sk_rcvbuf = sysctl_tipc_rmem[1]; 368 sk->sk_rcvbuf = sysctl_tipc_rmem[1];
362 sk->sk_data_ready = tipc_data_ready; 369 sk->sk_data_ready = tipc_data_ready;
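
The private k_init_timer()/k_start_timer()/k_term_timer() wrappers give way to the socket's own sk_timer: it is bound at create time and re-armed with jiffies-based expiries (note CONN_PROBING_INTERVAL is now expressed via msecs_to_jiffies() above). A minimal sketch of that idiom, independent of the TIPC specifics; example_* names are illustrative, and the real handler receives the tipc_sock pointer rather than the sock:

	#include <net/sock.h>
	#include <linux/jiffies.h>
	#include <linux/timer.h>

	static void example_sk_timeout(unsigned long data)
	{
		struct sock *sk = (struct sock *)data;

		bh_lock_sock(sk);
		/* ... probe the peer, or re-arm with sk_reset_timer() ... */
		bh_unlock_sock(sk);
		sock_put(sk);	/* release the hold taken when the timer was armed */
	}

	static void example_arm(struct sock *sk, unsigned long intv_ms)
	{
		setup_timer(&sk->sk_timer, example_sk_timeout, (unsigned long)sk);
		/* sk_reset_timer() holds a socket reference while the timer is pending */
		sk_reset_timer(sk, &sk->sk_timer, jiffies + msecs_to_jiffies(intv_ms));
	}
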
@@ -384,7 +391,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
384 * 391 *
385 * Returns 0 on success, errno otherwise 392 * Returns 0 on success, errno otherwise
386 */ 393 */
387int tipc_sock_create_local(int type, struct socket **res) 394int tipc_sock_create_local(struct net *net, int type, struct socket **res)
388{ 395{
389 int rc; 396 int rc;
390 397
@@ -393,7 +400,7 @@ int tipc_sock_create_local(int type, struct socket **res)
393 pr_err("Failed to create kernel socket\n"); 400 pr_err("Failed to create kernel socket\n");
394 return rc; 401 return rc;
395 } 402 }
396 tipc_sk_create(&init_net, *res, 0, 1); 403 tipc_sk_create(net, *res, 0, 1);
397 404
398 return 0; 405 return 0;
399} 406}
@@ -442,6 +449,13 @@ int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
442 return ret; 449 return ret;
443} 450}
444 451
452static void tipc_sk_callback(struct rcu_head *head)
453{
454 struct tipc_sock *tsk = container_of(head, struct tipc_sock, rcu);
455
456 sock_put(&tsk->sk);
457}
458
445/** 459/**
446 * tipc_release - destroy a TIPC socket 460 * tipc_release - destroy a TIPC socket
447 * @sock: socket to destroy 461 * @sock: socket to destroy
@@ -461,9 +475,10 @@ int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
461static int tipc_release(struct socket *sock) 475static int tipc_release(struct socket *sock)
462{ 476{
463 struct sock *sk = sock->sk; 477 struct sock *sk = sock->sk;
478 struct net *net;
464 struct tipc_sock *tsk; 479 struct tipc_sock *tsk;
465 struct sk_buff *skb; 480 struct sk_buff *skb;
466 u32 dnode; 481 u32 dnode, probing_state;
467 482
468 /* 483 /*
469 * Exit if socket isn't fully initialized (occurs when a failed accept() 484 * Exit if socket isn't fully initialized (occurs when a failed accept()
@@ -472,6 +487,7 @@ static int tipc_release(struct socket *sock)
472 if (sk == NULL) 487 if (sk == NULL)
473 return 0; 488 return 0;
474 489
490 net = sock_net(sk);
475 tsk = tipc_sk(sk); 491 tsk = tipc_sk(sk);
476 lock_sock(sk); 492 lock_sock(sk);
477 493
@@ -491,26 +507,29 @@ static int tipc_release(struct socket *sock)
491 (sock->state == SS_CONNECTED)) { 507 (sock->state == SS_CONNECTED)) {
492 sock->state = SS_DISCONNECTING; 508 sock->state = SS_DISCONNECTING;
493 tsk->connected = 0; 509 tsk->connected = 0;
494 tipc_node_remove_conn(dnode, tsk->ref); 510 tipc_node_remove_conn(net, dnode, tsk->portid);
495 } 511 }
496 if (tipc_msg_reverse(skb, &dnode, TIPC_ERR_NO_PORT)) 512 if (tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode,
497 tipc_link_xmit_skb(skb, dnode, 0); 513 TIPC_ERR_NO_PORT))
514 tipc_link_xmit_skb(net, skb, dnode, 0);
498 } 515 }
499 } 516 }
500 517
501 tipc_sk_withdraw(tsk, 0, NULL); 518 tipc_sk_withdraw(tsk, 0, NULL);
502 tipc_sk_ref_discard(tsk->ref); 519 probing_state = tsk->probing_state;
503 k_cancel_timer(&tsk->timer); 520 if (del_timer_sync(&sk->sk_timer) &&
521 probing_state != TIPC_CONN_PROBING)
522 sock_put(sk);
523 tipc_sk_remove(tsk);
504 if (tsk->connected) { 524 if (tsk->connected) {
505 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, 525 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
506 SHORT_H_SIZE, 0, dnode, tipc_own_addr, 526 TIPC_CONN_MSG, SHORT_H_SIZE, 0, dnode,
507 tsk_peer_port(tsk), 527 tsk_own_node(tsk), tsk_peer_port(tsk),
508 tsk->ref, TIPC_ERR_NO_PORT); 528 tsk->portid, TIPC_ERR_NO_PORT);
509 if (skb) 529 if (skb)
510 tipc_link_xmit_skb(skb, dnode, tsk->ref); 530 tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
511 tipc_node_remove_conn(dnode, tsk->ref); 531 tipc_node_remove_conn(net, dnode, tsk->portid);
512 } 532 }
513 k_term_timer(&tsk->timer);
514 533
515 /* Discard any remaining (connection-based) messages in receive queue */ 534 /* Discard any remaining (connection-based) messages in receive queue */
516 __skb_queue_purge(&sk->sk_receive_queue); 535 __skb_queue_purge(&sk->sk_receive_queue);
@@ -518,7 +537,8 @@ static int tipc_release(struct socket *sock)
518 /* Reject any messages that accumulated in backlog queue */ 537 /* Reject any messages that accumulated in backlog queue */
519 sock->state = SS_DISCONNECTING; 538 sock->state = SS_DISCONNECTING;
520 release_sock(sk); 539 release_sock(sk);
521 sock_put(sk); 540
541 call_rcu(&tsk->rcu, tipc_sk_callback);
522 sock->sk = NULL; 542 sock->sk = NULL;
523 543
524 return 0; 544 return 0;
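
Release no longer ends in a direct sock_put(): the final reference is dropped from an RCU callback, because lookups against the new hash table run under rcu_read_lock() and may still reach the socket briefly after it is unlinked. A generic sketch of the idiom; the embedded rcu_head mirrors the new struct tipc_sock field, and the names are illustrative:

	#include <linux/rcupdate.h>
	#include <net/sock.h>

	struct example_sock {
		struct sock sk;		/* kept first, as in struct tipc_sock */
		struct rcu_head rcu;
	};

	static void example_sock_free(struct rcu_head *head)
	{
		struct example_sock *es = container_of(head, struct example_sock, rcu);

		sock_put(&es->sk);	/* no RCU reader can still find the entry now */
	}

	/* teardown path: unlink from the lookup structure first, then defer:
	 *	call_rcu(&es->rcu, example_sock_free);
	 */
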
@@ -602,6 +622,7 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
602{ 622{
603 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr; 623 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
604 struct tipc_sock *tsk = tipc_sk(sock->sk); 624 struct tipc_sock *tsk = tipc_sk(sock->sk);
625 struct tipc_net *tn = net_generic(sock_net(sock->sk), tipc_net_id);
605 626
606 memset(addr, 0, sizeof(*addr)); 627 memset(addr, 0, sizeof(*addr));
607 if (peer) { 628 if (peer) {
@@ -611,8 +632,8 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
611 addr->addr.id.ref = tsk_peer_port(tsk); 632 addr->addr.id.ref = tsk_peer_port(tsk);
612 addr->addr.id.node = tsk_peer_node(tsk); 633 addr->addr.id.node = tsk_peer_node(tsk);
613 } else { 634 } else {
614 addr->addr.id.ref = tsk->ref; 635 addr->addr.id.ref = tsk->portid;
615 addr->addr.id.node = tipc_own_addr; 636 addr->addr.id.node = tn->own_addr;
616 } 637 }
617 638
618 *uaddr_len = sizeof(*addr); 639 *uaddr_len = sizeof(*addr);
@@ -711,8 +732,11 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
711 struct msghdr *msg, size_t dsz, long timeo) 732 struct msghdr *msg, size_t dsz, long timeo)
712{ 733{
713 struct sock *sk = sock->sk; 734 struct sock *sk = sock->sk;
714 struct tipc_msg *mhdr = &tipc_sk(sk)->phdr; 735 struct tipc_sock *tsk = tipc_sk(sk);
715 struct sk_buff_head head; 736 struct net *net = sock_net(sk);
737 struct tipc_msg *mhdr = &tsk->phdr;
738 struct sk_buff_head *pktchain = &sk->sk_write_queue;
739 struct iov_iter save = msg->msg_iter;
716 uint mtu; 740 uint mtu;
717 int rc; 741 int rc;
718 742
@@ -727,83 +751,97 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
727 751
728new_mtu: 752new_mtu:
729 mtu = tipc_bclink_get_mtu(); 753 mtu = tipc_bclink_get_mtu();
730 __skb_queue_head_init(&head); 754 rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, pktchain);
731 rc = tipc_msg_build(mhdr, msg, 0, dsz, mtu, &head);
732 if (unlikely(rc < 0)) 755 if (unlikely(rc < 0))
733 return rc; 756 return rc;
734 757
735 do { 758 do {
736 rc = tipc_bclink_xmit(&head); 759 rc = tipc_bclink_xmit(net, pktchain);
737 if (likely(rc >= 0)) { 760 if (likely(rc >= 0)) {
738 rc = dsz; 761 rc = dsz;
739 break; 762 break;
740 } 763 }
741 if (rc == -EMSGSIZE) 764 if (rc == -EMSGSIZE) {
765 msg->msg_iter = save;
742 goto new_mtu; 766 goto new_mtu;
767 }
743 if (rc != -ELINKCONG) 768 if (rc != -ELINKCONG)
744 break; 769 break;
745 tipc_sk(sk)->link_cong = 1; 770 tipc_sk(sk)->link_cong = 1;
746 rc = tipc_wait_for_sndmsg(sock, &timeo); 771 rc = tipc_wait_for_sndmsg(sock, &timeo);
747 if (rc) 772 if (rc)
748 __skb_queue_purge(&head); 773 __skb_queue_purge(pktchain);
749 } while (!rc); 774 } while (!rc);
750 return rc; 775 return rc;
751} 776}
752 777
753/* tipc_sk_mcast_rcv - Deliver multicast message to all destination sockets 778/**
779 * tipc_sk_mcast_rcv - Deliver multicast messages to all destination sockets
780 * @arrvq: queue with arriving messages, to be cloned after destination lookup
781 * @inputq: queue with cloned messages, delivered to socket after dest lookup
782 *
783 * Multi-threaded: parallel calls with reference to same queues may occur
754 */ 784 */
755void tipc_sk_mcast_rcv(struct sk_buff *buf) 785void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
786 struct sk_buff_head *inputq)
756{ 787{
757 struct tipc_msg *msg = buf_msg(buf); 788 struct tipc_msg *msg;
758 struct tipc_port_list dports = {0, NULL, }; 789 struct tipc_plist dports;
759 struct tipc_port_list *item; 790 u32 portid;
760 struct sk_buff *b;
761 uint i, last, dst = 0;
762 u32 scope = TIPC_CLUSTER_SCOPE; 791 u32 scope = TIPC_CLUSTER_SCOPE;
763 792 struct sk_buff_head tmpq;
764 if (in_own_node(msg_orignode(msg))) 793 uint hsz;
765 scope = TIPC_NODE_SCOPE; 794 struct sk_buff *skb, *_skb;
766 795
767 /* Create destination port list: */ 796 __skb_queue_head_init(&tmpq);
768 tipc_nametbl_mc_translate(msg_nametype(msg), 797 tipc_plist_init(&dports);
769 msg_namelower(msg), 798
770 msg_nameupper(msg), 799 skb = tipc_skb_peek(arrvq, &inputq->lock);
771 scope, 800 for (; skb; skb = tipc_skb_peek(arrvq, &inputq->lock)) {
772 &dports); 801 msg = buf_msg(skb);
773 last = dports.count; 802 hsz = skb_headroom(skb) + msg_hdr_sz(msg);
774 if (!last) { 803
775 kfree_skb(buf); 804 if (in_own_node(net, msg_orignode(msg)))
776 return; 805 scope = TIPC_NODE_SCOPE;
777 } 806
778 807 /* Create destination port list and message clones: */
779 for (item = &dports; item; item = item->next) { 808 tipc_nametbl_mc_translate(net,
780 for (i = 0; i < PLSIZE && ++dst <= last; i++) { 809 msg_nametype(msg), msg_namelower(msg),
781 b = (dst != last) ? skb_clone(buf, GFP_ATOMIC) : buf; 810 msg_nameupper(msg), scope, &dports);
782 if (!b) { 811 portid = tipc_plist_pop(&dports);
783 pr_warn("Failed do clone mcast rcv buffer\n"); 812 for (; portid; portid = tipc_plist_pop(&dports)) {
813 _skb = __pskb_copy(skb, hsz, GFP_ATOMIC);
814 if (_skb) {
815 msg_set_destport(buf_msg(_skb), portid);
816 __skb_queue_tail(&tmpq, _skb);
784 continue; 817 continue;
785 } 818 }
786 msg_set_destport(msg, item->ports[i]); 819 pr_warn("Failed to clone mcast rcv buffer\n");
787 tipc_sk_rcv(b);
788 } 820 }
821 /* Append to inputq if not already done by other thread */
822 spin_lock_bh(&inputq->lock);
823 if (skb_peek(arrvq) == skb) {
824 skb_queue_splice_tail_init(&tmpq, inputq);
825 kfree_skb(__skb_dequeue(arrvq));
826 }
827 spin_unlock_bh(&inputq->lock);
828 __skb_queue_purge(&tmpq);
829 kfree_skb(skb);
789 } 830 }
790 tipc_port_list_free(&dports); 831 tipc_sk_rcv(net, inputq);
791} 832}
792 833
793/** 834/**
794 * tipc_sk_proto_rcv - receive a connection mng protocol message 835 * tipc_sk_proto_rcv - receive a connection mng protocol message
795 * @tsk: receiving socket 836 * @tsk: receiving socket
796 * @dnode: node to send response message to, if any 837 * @skb: pointer to message buffer. Set to NULL if buffer is consumed.
797 * @buf: buffer containing protocol message
798 * Returns 0 (TIPC_OK) if message was consumed, 1 (TIPC_FWD_MSG) if
799 * (CONN_PROBE_REPLY) message should be forwarded.
800 */ 838 */
801static int tipc_sk_proto_rcv(struct tipc_sock *tsk, u32 *dnode, 839static void tipc_sk_proto_rcv(struct tipc_sock *tsk, struct sk_buff **skb)
802 struct sk_buff *buf)
803{ 840{
804 struct tipc_msg *msg = buf_msg(buf); 841 struct tipc_msg *msg = buf_msg(*skb);
805 int conn_cong; 842 int conn_cong;
806 843 u32 dnode;
844 u32 own_node = tsk_own_node(tsk);
807 /* Ignore if connection cannot be validated: */ 845 /* Ignore if connection cannot be validated: */
808 if (!tsk_peer_msg(tsk, msg)) 846 if (!tsk_peer_msg(tsk, msg))
809 goto exit; 847 goto exit;
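
tipc_sk_mcast_rcv() may now run in several threads against the same arrival and input queues: each caller clones per-destination copies onto a private list and only publishes them if its buffer is still at the head of the arrival queue. A hedged sketch of that publish step, assuming (as the pairing of tipc_skb_peek() with the trailing kfree_skb() in the hunk above suggests) that the peek helper takes its own reference on the buffer:

	#include <linux/skbuff.h>

	/* Illustrative: publish privately prepared clones exactly once, even when
	 * several threads race over the same arrival queue. The input queue's lock
	 * serializes access to both queues here, mirroring the code above.
	 */
	static void example_mcast_publish(struct sk_buff *skb,
					  struct sk_buff_head *arrvq,
					  struct sk_buff_head *inputq,
					  struct sk_buff_head *tmpq)
	{
		spin_lock_bh(&inputq->lock);
		if (skb_peek(arrvq) == skb) {
			/* this thread won: hand over the clones, retire the original */
			skb_queue_splice_tail_init(tmpq, inputq);
			kfree_skb(__skb_dequeue(arrvq));
		}
		spin_unlock_bh(&inputq->lock);
		__skb_queue_purge(tmpq);	/* empty if the clones were spliced */
		kfree_skb(skb);			/* drop the reference taken at peek time */
	}
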
@@ -816,15 +854,15 @@ static int tipc_sk_proto_rcv(struct tipc_sock *tsk, u32 *dnode,
816 if (conn_cong) 854 if (conn_cong)
817 tsk->sk.sk_write_space(&tsk->sk); 855 tsk->sk.sk_write_space(&tsk->sk);
818 } else if (msg_type(msg) == CONN_PROBE) { 856 } else if (msg_type(msg) == CONN_PROBE) {
819 if (!tipc_msg_reverse(buf, dnode, TIPC_OK)) 857 if (tipc_msg_reverse(own_node, *skb, &dnode, TIPC_OK)) {
820 return TIPC_OK; 858 msg_set_type(msg, CONN_PROBE_REPLY);
821 msg_set_type(msg, CONN_PROBE_REPLY); 859 return;
822 return TIPC_FWD_MSG; 860 }
823 } 861 }
824 /* Do nothing if msg_type() == CONN_PROBE_REPLY */ 862 /* Do nothing if msg_type() == CONN_PROBE_REPLY */
825exit: 863exit:
826 kfree_skb(buf); 864 kfree_skb(*skb);
827 return TIPC_OK; 865 *skb = NULL;
828} 866}
829 867
830static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p) 868static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
@@ -872,11 +910,13 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
872 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 910 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
873 struct sock *sk = sock->sk; 911 struct sock *sk = sock->sk;
874 struct tipc_sock *tsk = tipc_sk(sk); 912 struct tipc_sock *tsk = tipc_sk(sk);
913 struct net *net = sock_net(sk);
875 struct tipc_msg *mhdr = &tsk->phdr; 914 struct tipc_msg *mhdr = &tsk->phdr;
876 u32 dnode, dport; 915 u32 dnode, dport;
877 struct sk_buff_head head; 916 struct sk_buff_head *pktchain = &sk->sk_write_queue;
878 struct sk_buff *skb; 917 struct sk_buff *skb;
879 struct tipc_name_seq *seq = &dest->addr.nameseq; 918 struct tipc_name_seq *seq = &dest->addr.nameseq;
919 struct iov_iter save;
880 u32 mtu; 920 u32 mtu;
881 long timeo; 921 long timeo;
882 int rc; 922 int rc;
@@ -929,7 +969,7 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
929 msg_set_nametype(mhdr, type); 969 msg_set_nametype(mhdr, type);
930 msg_set_nameinst(mhdr, inst); 970 msg_set_nameinst(mhdr, inst);
931 msg_set_lookup_scope(mhdr, tipc_addr_scope(domain)); 971 msg_set_lookup_scope(mhdr, tipc_addr_scope(domain));
932 dport = tipc_nametbl_translate(type, inst, &dnode); 972 dport = tipc_nametbl_translate(net, type, inst, &dnode);
933 msg_set_destnode(mhdr, dnode); 973 msg_set_destnode(mhdr, dnode);
934 msg_set_destport(mhdr, dport); 974 msg_set_destport(mhdr, dport);
935 if (unlikely(!dport && !dnode)) { 975 if (unlikely(!dport && !dnode)) {
@@ -945,31 +985,33 @@ static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
945 msg_set_hdr_sz(mhdr, BASIC_H_SIZE); 985 msg_set_hdr_sz(mhdr, BASIC_H_SIZE);
946 } 986 }
947 987
988 save = m->msg_iter;
948new_mtu: 989new_mtu:
949 mtu = tipc_node_get_mtu(dnode, tsk->ref); 990 mtu = tipc_node_get_mtu(net, dnode, tsk->portid);
950 __skb_queue_head_init(&head); 991 rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, pktchain);
951 rc = tipc_msg_build(mhdr, m, 0, dsz, mtu, &head);
952 if (rc < 0) 992 if (rc < 0)
953 goto exit; 993 goto exit;
954 994
955 do { 995 do {
956 skb = skb_peek(&head); 996 skb = skb_peek(pktchain);
957 TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong; 997 TIPC_SKB_CB(skb)->wakeup_pending = tsk->link_cong;
958 rc = tipc_link_xmit(&head, dnode, tsk->ref); 998 rc = tipc_link_xmit(net, pktchain, dnode, tsk->portid);
959 if (likely(rc >= 0)) { 999 if (likely(rc >= 0)) {
960 if (sock->state != SS_READY) 1000 if (sock->state != SS_READY)
961 sock->state = SS_CONNECTING; 1001 sock->state = SS_CONNECTING;
962 rc = dsz; 1002 rc = dsz;
963 break; 1003 break;
964 } 1004 }
965 if (rc == -EMSGSIZE) 1005 if (rc == -EMSGSIZE) {
1006 m->msg_iter = save;
966 goto new_mtu; 1007 goto new_mtu;
1008 }
967 if (rc != -ELINKCONG) 1009 if (rc != -ELINKCONG)
968 break; 1010 break;
969 tsk->link_cong = 1; 1011 tsk->link_cong = 1;
970 rc = tipc_wait_for_sndmsg(sock, &timeo); 1012 rc = tipc_wait_for_sndmsg(sock, &timeo);
971 if (rc) 1013 if (rc)
972 __skb_queue_purge(&head); 1014 __skb_queue_purge(pktchain);
973 } while (!rc); 1015 } while (!rc);
974exit: 1016exit:
975 if (iocb) 1017 if (iocb)
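
Both the unicast and multicast send paths now snapshot msg_iter before building the packet chain and restore it when -EMSGSIZE is reported, so the retry re-reads the same user data against the refreshed MTU. A minimal sketch of that rewind-and-retry pattern; example_build() is a hypothetical stand-in for tipc_msg_build() and is assumed to advance the iterator:

	#include <linux/socket.h>
	#include <linux/uio.h>
	#include <linux/errno.h>

	static int example_build(struct msghdr *m, size_t dsz);	/* hypothetical */

	static int example_send(struct msghdr *m, size_t dsz)
	{
		struct iov_iter save = m->msg_iter;	/* iov_iter is a value type */
		int rc;

	again:
		rc = example_build(m, dsz);	/* consumes m->msg_iter on each attempt */
		if (rc == -EMSGSIZE) {
			/* the real code refreshes the MTU here so the retry can succeed */
			m->msg_iter = save;
			goto again;
		}
		return rc;
	}
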
@@ -1024,15 +1066,17 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
1024 struct msghdr *m, size_t dsz) 1066 struct msghdr *m, size_t dsz)
1025{ 1067{
1026 struct sock *sk = sock->sk; 1068 struct sock *sk = sock->sk;
1069 struct net *net = sock_net(sk);
1027 struct tipc_sock *tsk = tipc_sk(sk); 1070 struct tipc_sock *tsk = tipc_sk(sk);
1028 struct tipc_msg *mhdr = &tsk->phdr; 1071 struct tipc_msg *mhdr = &tsk->phdr;
1029 struct sk_buff_head head; 1072 struct sk_buff_head *pktchain = &sk->sk_write_queue;
1030 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name); 1073 DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
1031 u32 ref = tsk->ref; 1074 u32 portid = tsk->portid;
1032 int rc = -EINVAL; 1075 int rc = -EINVAL;
1033 long timeo; 1076 long timeo;
1034 u32 dnode; 1077 u32 dnode;
1035 uint mtu, send, sent = 0; 1078 uint mtu, send, sent = 0;
1079 struct iov_iter save;
1036 1080
1037 /* Handle implied connection establishment */ 1081 /* Handle implied connection establishment */
1038 if (unlikely(dest)) { 1082 if (unlikely(dest)) {
@@ -1059,15 +1103,15 @@ static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
1059 dnode = tsk_peer_node(tsk); 1103 dnode = tsk_peer_node(tsk);
1060 1104
1061next: 1105next:
1106 save = m->msg_iter;
1062 mtu = tsk->max_pkt; 1107 mtu = tsk->max_pkt;
1063 send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE); 1108 send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
1064 __skb_queue_head_init(&head); 1109 rc = tipc_msg_build(mhdr, m, sent, send, mtu, pktchain);
1065 rc = tipc_msg_build(mhdr, m, sent, send, mtu, &head);
1066 if (unlikely(rc < 0)) 1110 if (unlikely(rc < 0))
1067 goto exit; 1111 goto exit;
1068 do { 1112 do {
1069 if (likely(!tsk_conn_cong(tsk))) { 1113 if (likely(!tsk_conn_cong(tsk))) {
1070 rc = tipc_link_xmit(&head, dnode, ref); 1114 rc = tipc_link_xmit(net, pktchain, dnode, portid);
1071 if (likely(!rc)) { 1115 if (likely(!rc)) {
1072 tsk->sent_unacked++; 1116 tsk->sent_unacked++;
1073 sent += send; 1117 sent += send;
@@ -1076,7 +1120,9 @@ next:
1076 goto next; 1120 goto next;
1077 } 1121 }
1078 if (rc == -EMSGSIZE) { 1122 if (rc == -EMSGSIZE) {
1079 tsk->max_pkt = tipc_node_get_mtu(dnode, ref); 1123 tsk->max_pkt = tipc_node_get_mtu(net, dnode,
1124 portid);
1125 m->msg_iter = save;
1080 goto next; 1126 goto next;
1081 } 1127 }
1082 if (rc != -ELINKCONG) 1128 if (rc != -ELINKCONG)
@@ -1085,7 +1131,7 @@ next:
1085 } 1131 }
1086 rc = tipc_wait_for_sndpkt(sock, &timeo); 1132 rc = tipc_wait_for_sndpkt(sock, &timeo);
1087 if (rc) 1133 if (rc)
1088 __skb_queue_purge(&head); 1134 __skb_queue_purge(pktchain);
1089 } while (!rc); 1135 } while (!rc);
1090exit: 1136exit:
1091 if (iocb) 1137 if (iocb)
@@ -1118,6 +1164,8 @@ static int tipc_send_packet(struct kiocb *iocb, struct socket *sock,
1118static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port, 1164static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1119 u32 peer_node) 1165 u32 peer_node)
1120{ 1166{
1167 struct sock *sk = &tsk->sk;
1168 struct net *net = sock_net(sk);
1121 struct tipc_msg *msg = &tsk->phdr; 1169 struct tipc_msg *msg = &tsk->phdr;
1122 1170
1123 msg_set_destnode(msg, peer_node); 1171 msg_set_destnode(msg, peer_node);
@@ -1126,12 +1174,12 @@ static void tipc_sk_finish_conn(struct tipc_sock *tsk, u32 peer_port,
1126 msg_set_lookup_scope(msg, 0); 1174 msg_set_lookup_scope(msg, 0);
1127 msg_set_hdr_sz(msg, SHORT_H_SIZE); 1175 msg_set_hdr_sz(msg, SHORT_H_SIZE);
1128 1176
1129 tsk->probing_interval = CONN_PROBING_INTERVAL; 1177 tsk->probing_intv = CONN_PROBING_INTERVAL;
1130 tsk->probing_state = TIPC_CONN_OK; 1178 tsk->probing_state = TIPC_CONN_OK;
1131 tsk->connected = 1; 1179 tsk->connected = 1;
1132 k_start_timer(&tsk->timer, tsk->probing_interval); 1180 sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv);
1133 tipc_node_add_conn(peer_node, tsk->ref, peer_port); 1181 tipc_node_add_conn(net, peer_node, tsk->portid, peer_port);
1134 tsk->max_pkt = tipc_node_get_mtu(peer_node, tsk->ref); 1182 tsk->max_pkt = tipc_node_get_mtu(net, peer_node, tsk->portid);
1135} 1183}
1136 1184
1137/** 1185/**
@@ -1230,6 +1278,7 @@ static int tipc_sk_anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
1230 1278
1231static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack) 1279static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
1232{ 1280{
1281 struct net *net = sock_net(&tsk->sk);
1233 struct sk_buff *skb = NULL; 1282 struct sk_buff *skb = NULL;
1234 struct tipc_msg *msg; 1283 struct tipc_msg *msg;
1235 u32 peer_port = tsk_peer_port(tsk); 1284 u32 peer_port = tsk_peer_port(tsk);
@@ -1237,13 +1286,14 @@ static void tipc_sk_send_ack(struct tipc_sock *tsk, uint ack)
1237 1286
1238 if (!tsk->connected) 1287 if (!tsk->connected)
1239 return; 1288 return;
1240 skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0, dnode, 1289 skb = tipc_msg_create(CONN_MANAGER, CONN_ACK, INT_H_SIZE, 0,
1241 tipc_own_addr, peer_port, tsk->ref, TIPC_OK); 1290 dnode, tsk_own_node(tsk), peer_port,
1291 tsk->portid, TIPC_OK);
1242 if (!skb) 1292 if (!skb)
1243 return; 1293 return;
1244 msg = buf_msg(skb); 1294 msg = buf_msg(skb);
1245 msg_set_msgcnt(msg, ack); 1295 msg_set_msgcnt(msg, ack);
1246 tipc_link_xmit_skb(skb, dnode, msg_link_selector(msg)); 1296 tipc_link_xmit_skb(net, skb, dnode, msg_link_selector(msg));
1247} 1297}
1248 1298
1249static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop) 1299static int tipc_wait_for_rcvmsg(struct socket *sock, long *timeop)
@@ -1529,15 +1579,16 @@ static void tipc_data_ready(struct sock *sk)
1529/** 1579/**
1530 * filter_connect - Handle all incoming messages for a connection-based socket 1580 * filter_connect - Handle all incoming messages for a connection-based socket
1531 * @tsk: TIPC socket 1581 * @tsk: TIPC socket
1532 * @msg: message 1582 * @skb: pointer to message buffer. Set to NULL if buffer is consumed
1533 * 1583 *
1534 * Returns 0 (TIPC_OK) if everything ok, -TIPC_ERR_NO_PORT otherwise 1584 * Returns 0 (TIPC_OK) if everything ok, -TIPC_ERR_NO_PORT otherwise
1535 */ 1585 */
1536static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf) 1586static int filter_connect(struct tipc_sock *tsk, struct sk_buff **skb)
1537{ 1587{
1538 struct sock *sk = &tsk->sk; 1588 struct sock *sk = &tsk->sk;
1589 struct net *net = sock_net(sk);
1539 struct socket *sock = sk->sk_socket; 1590 struct socket *sock = sk->sk_socket;
1540 struct tipc_msg *msg = buf_msg(*buf); 1591 struct tipc_msg *msg = buf_msg(*skb);
1541 int retval = -TIPC_ERR_NO_PORT; 1592 int retval = -TIPC_ERR_NO_PORT;
1542 1593
1543 if (msg_mcast(msg)) 1594 if (msg_mcast(msg))
@@ -1551,8 +1602,8 @@ static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
1551 sock->state = SS_DISCONNECTING; 1602 sock->state = SS_DISCONNECTING;
1552 tsk->connected = 0; 1603 tsk->connected = 0;
1553 /* let timer expire on it's own */ 1604 /* let timer expire on it's own */
1554 tipc_node_remove_conn(tsk_peer_node(tsk), 1605 tipc_node_remove_conn(net, tsk_peer_node(tsk),
1555 tsk->ref); 1606 tsk->portid);
1556 } 1607 }
1557 retval = TIPC_OK; 1608 retval = TIPC_OK;
1558 } 1609 }
@@ -1587,8 +1638,8 @@ static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
1587 * connect() routine if sleeping. 1638 * connect() routine if sleeping.
1588 */ 1639 */
1589 if (msg_data_sz(msg) == 0) { 1640 if (msg_data_sz(msg) == 0) {
1590 kfree_skb(*buf); 1641 kfree_skb(*skb);
1591 *buf = NULL; 1642 *skb = NULL;
1592 if (waitqueue_active(sk_sleep(sk))) 1643 if (waitqueue_active(sk_sleep(sk)))
1593 wake_up_interruptible(sk_sleep(sk)); 1644 wake_up_interruptible(sk_sleep(sk));
1594 } 1645 }
@@ -1640,32 +1691,33 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
1640/** 1691/**
1641 * filter_rcv - validate incoming message 1692 * filter_rcv - validate incoming message
1642 * @sk: socket 1693 * @sk: socket
1643 * @buf: message 1694 * @skb: pointer to message. Set to NULL if buffer is consumed.
1644 * 1695 *
1645 * Enqueues message on receive queue if acceptable; optionally handles 1696 * Enqueues message on receive queue if acceptable; optionally handles
1646 * disconnect indication for a connected socket. 1697 * disconnect indication for a connected socket.
1647 * 1698 *
1648 * Called with socket lock already taken; port lock may also be taken. 1699 * Called with socket lock already taken
1649 * 1700 *
1650 * Returns 0 (TIPC_OK) if message was consumed, -TIPC error code if message 1701 * Returns 0 (TIPC_OK) if message was ok, -TIPC error code if rejected
1651 * to be rejected, 1 (TIPC_FWD_MSG) if (CONN_MANAGER) message to be forwarded
1652 */ 1702 */
1653static int filter_rcv(struct sock *sk, struct sk_buff *buf) 1703static int filter_rcv(struct sock *sk, struct sk_buff **skb)
1654{ 1704{
1655 struct socket *sock = sk->sk_socket; 1705 struct socket *sock = sk->sk_socket;
1656 struct tipc_sock *tsk = tipc_sk(sk); 1706 struct tipc_sock *tsk = tipc_sk(sk);
1657 struct tipc_msg *msg = buf_msg(buf); 1707 struct tipc_msg *msg = buf_msg(*skb);
1658 unsigned int limit = rcvbuf_limit(sk, buf); 1708 unsigned int limit = rcvbuf_limit(sk, *skb);
1659 u32 onode;
1660 int rc = TIPC_OK; 1709 int rc = TIPC_OK;
1661 1710
1662 if (unlikely(msg_user(msg) == CONN_MANAGER)) 1711 if (unlikely(msg_user(msg) == CONN_MANAGER)) {
1663 return tipc_sk_proto_rcv(tsk, &onode, buf); 1712 tipc_sk_proto_rcv(tsk, skb);
1713 return TIPC_OK;
1714 }
1664 1715
1665 if (unlikely(msg_user(msg) == SOCK_WAKEUP)) { 1716 if (unlikely(msg_user(msg) == SOCK_WAKEUP)) {
1666 kfree_skb(buf); 1717 kfree_skb(*skb);
1667 tsk->link_cong = 0; 1718 tsk->link_cong = 0;
1668 sk->sk_write_space(sk); 1719 sk->sk_write_space(sk);
1720 *skb = NULL;
1669 return TIPC_OK; 1721 return TIPC_OK;
1670 } 1722 }
1671 1723
@@ -1677,21 +1729,22 @@ static int filter_rcv(struct sock *sk, struct sk_buff *buf)
1677 if (msg_connected(msg)) 1729 if (msg_connected(msg))
1678 return -TIPC_ERR_NO_PORT; 1730 return -TIPC_ERR_NO_PORT;
1679 } else { 1731 } else {
1680 rc = filter_connect(tsk, &buf); 1732 rc = filter_connect(tsk, skb);
1681 if (rc != TIPC_OK || buf == NULL) 1733 if (rc != TIPC_OK || !*skb)
1682 return rc; 1734 return rc;
1683 } 1735 }
1684 1736
1685 /* Reject message if there isn't room to queue it */ 1737 /* Reject message if there isn't room to queue it */
1686 if (sk_rmem_alloc_get(sk) + buf->truesize >= limit) 1738 if (sk_rmem_alloc_get(sk) + (*skb)->truesize >= limit)
1687 return -TIPC_ERR_OVERLOAD; 1739 return -TIPC_ERR_OVERLOAD;
1688 1740
1689 /* Enqueue message */ 1741 /* Enqueue message */
1690 TIPC_SKB_CB(buf)->handle = NULL; 1742 TIPC_SKB_CB(*skb)->handle = NULL;
1691 __skb_queue_tail(&sk->sk_receive_queue, buf); 1743 __skb_queue_tail(&sk->sk_receive_queue, *skb);
1692 skb_set_owner_r(buf, sk); 1744 skb_set_owner_r(*skb, sk);
1693 1745
1694 sk->sk_data_ready(sk); 1746 sk->sk_data_ready(sk);
1747 *skb = NULL;
1695 return TIPC_OK; 1748 return TIPC_OK;
1696} 1749}
1697 1750
@@ -1700,78 +1753,125 @@ static int filter_rcv(struct sock *sk, struct sk_buff *buf)
1700 * @sk: socket 1753 * @sk: socket
1701 * @skb: message 1754 * @skb: message
1702 * 1755 *
1703 * Caller must hold socket lock, but not port lock. 1756 * Caller must hold socket lock
1704 * 1757 *
1705 * Returns 0 1758 * Returns 0
1706 */ 1759 */
1707static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb) 1760static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb)
1708{ 1761{
1709 int rc; 1762 int err;
1710 u32 onode; 1763 atomic_t *dcnt;
1764 u32 dnode;
1711 struct tipc_sock *tsk = tipc_sk(sk); 1765 struct tipc_sock *tsk = tipc_sk(sk);
1766 struct net *net = sock_net(sk);
1712 uint truesize = skb->truesize; 1767 uint truesize = skb->truesize;
1713 1768
1714 rc = filter_rcv(sk, skb); 1769 err = filter_rcv(sk, &skb);
1715 1770 if (likely(!skb)) {
1716 if (likely(!rc)) { 1771 dcnt = &tsk->dupl_rcvcnt;
1717 if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT) 1772 if (atomic_read(dcnt) < TIPC_CONN_OVERLOAD_LIMIT)
1718 atomic_add(truesize, &tsk->dupl_rcvcnt); 1773 atomic_add(truesize, dcnt);
1719 return 0; 1774 return 0;
1720 } 1775 }
1776 if (!err || tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode, -err))
1777 tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
1778 return 0;
1779}
1721 1780
1722 if ((rc < 0) && !tipc_msg_reverse(skb, &onode, -rc)) 1781/**
1723 return 0; 1782 * tipc_sk_enqueue - extract all buffers with destination 'dport' from
1724 1783 * inputq and try adding them to socket or backlog queue
1725 tipc_link_xmit_skb(skb, onode, 0); 1784 * @inputq: list of incoming buffers with potentially different destinations
1785 * @sk: socket where the buffers should be enqueued
1786 * @dport: port number for the socket
1787 * @_skb: returned buffer to be forwarded or rejected, if applicable
1788 *
1789 * Caller must hold socket lock
1790 *
1791 * Returns TIPC_OK if all buffers enqueued, otherwise -TIPC_ERR_OVERLOAD
1792 * or -TIPC_ERR_NO_PORT
1793 */
1794static int tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
1795 u32 dport, struct sk_buff **_skb)
1796{
1797 unsigned int lim;
1798 atomic_t *dcnt;
1799 int err;
1800 struct sk_buff *skb;
1801 unsigned long time_limit = jiffies + 2;
1726 1802
1727 return 0; 1803 while (skb_queue_len(inputq)) {
1804 if (unlikely(time_after_eq(jiffies, time_limit)))
1805 return TIPC_OK;
1806 skb = tipc_skb_dequeue(inputq, dport);
1807 if (unlikely(!skb))
1808 return TIPC_OK;
1809 if (!sock_owned_by_user(sk)) {
1810 err = filter_rcv(sk, &skb);
1811 if (likely(!skb))
1812 continue;
1813 *_skb = skb;
1814 return err;
1815 }
1816 dcnt = &tipc_sk(sk)->dupl_rcvcnt;
1817 if (sk->sk_backlog.len)
1818 atomic_set(dcnt, 0);
1819 lim = rcvbuf_limit(sk, skb) + atomic_read(dcnt);
1820 if (likely(!sk_add_backlog(sk, skb, lim)))
1821 continue;
1822 *_skb = skb;
1823 return -TIPC_ERR_OVERLOAD;
1824 }
1825 return TIPC_OK;
1728} 1826}
1729 1827
1730/** 1828/**
1731 * tipc_sk_rcv - handle incoming message 1829 * tipc_sk_rcv - handle a chain of incoming buffers
1732 * @skb: buffer containing arriving message 1830 * @inputq: buffer list containing the buffers
1733 * Consumes buffer 1831 * Consumes all buffers in list until inputq is empty
1734 * Returns 0 if success, or errno: -EHOSTUNREACH 1832 * Note: may be called in multiple threads referring to the same queue
1833 * Returns 0 if last buffer was accepted, otherwise -EHOSTUNREACH
1834 * Only node local calls check the return value, sending single-buffer queues
1735 */ 1835 */
1736int tipc_sk_rcv(struct sk_buff *skb) 1836int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
1737{ 1837{
1838 u32 dnode, dport = 0;
1839 int err = -TIPC_ERR_NO_PORT;
1840 struct sk_buff *skb;
1738 struct tipc_sock *tsk; 1841 struct tipc_sock *tsk;
1842 struct tipc_net *tn;
1739 struct sock *sk; 1843 struct sock *sk;
1740 u32 dport = msg_destport(buf_msg(skb));
1741 int rc = TIPC_OK;
1742 uint limit;
1743 u32 dnode;
1744 1844
1745 /* Validate destination and message */ 1845 while (skb_queue_len(inputq)) {
1746 tsk = tipc_sk_get(dport); 1846 skb = NULL;
1747 if (unlikely(!tsk)) { 1847 dport = tipc_skb_peek_port(inputq, dport);
1748 rc = tipc_msg_eval(skb, &dnode); 1848 tsk = tipc_sk_lookup(net, dport);
1749 goto exit; 1849 if (likely(tsk)) {
1850 sk = &tsk->sk;
1851 if (likely(spin_trylock_bh(&sk->sk_lock.slock))) {
1852 err = tipc_sk_enqueue(inputq, sk, dport, &skb);
1853 spin_unlock_bh(&sk->sk_lock.slock);
1854 dport = 0;
1855 }
1856 sock_put(sk);
1857 } else {
1858 skb = tipc_skb_dequeue(inputq, dport);
1859 }
1860 if (likely(!skb))
1861 continue;
1862 if (tipc_msg_lookup_dest(net, skb, &dnode, &err))
1863 goto xmit;
1864 if (!err) {
1865 dnode = msg_destnode(buf_msg(skb));
1866 goto xmit;
1867 }
1868 tn = net_generic(net, tipc_net_id);
1869 if (!tipc_msg_reverse(tn->own_addr, skb, &dnode, -err))
1870 continue;
1871xmit:
1872 tipc_link_xmit_skb(net, skb, dnode, dport);
1750 } 1873 }
1751 sk = &tsk->sk; 1874 return err ? -EHOSTUNREACH : 0;
1752
1753 /* Queue message */
1754 spin_lock_bh(&sk->sk_lock.slock);
1755
1756 if (!sock_owned_by_user(sk)) {
1757 rc = filter_rcv(sk, skb);
1758 } else {
1759 if (sk->sk_backlog.len == 0)
1760 atomic_set(&tsk->dupl_rcvcnt, 0);
1761 limit = rcvbuf_limit(sk, skb) + atomic_read(&tsk->dupl_rcvcnt);
1762 if (sk_add_backlog(sk, skb, limit))
1763 rc = -TIPC_ERR_OVERLOAD;
1764 }
1765 spin_unlock_bh(&sk->sk_lock.slock);
1766 tipc_sk_put(tsk);
1767 if (likely(!rc))
1768 return 0;
1769exit:
1770 if ((rc < 0) && !tipc_msg_reverse(skb, &dnode, -rc))
1771 return -EHOSTUNREACH;
1772
1773 tipc_link_xmit_skb(skb, dnode, 0);
1774 return (rc < 0) ? -EHOSTUNREACH : 0;
1775} 1875}
1776 1876
1777static int tipc_wait_for_connect(struct socket *sock, long *timeo_p) 1877static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
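
tipc_sk_rcv() is now list-based and batch-oriented: buffers are grouped by destination port, the socket is looked up once per batch, and tipc_sk_enqueue() drains matching buffers under the socket spinlock within a small time budget. A hedged sketch of the caller side, assuming the buffer comes from the link layer and that only single-buffer, node-local callers inspect the return value, as the comment above notes:

	#include <linux/skbuff.h>
	#include <net/net_namespace.h>
	#include "socket.h"		/* tipc_sk_rcv() as declared in this series */

	/* Illustrative caller: hand a whole buffer list to the socket layer.
	 * tipc_sk_rcv() consumes every buffer on the list and may be entered by
	 * several threads for the same queue.
	 */
	static int example_deliver_one(struct net *net, struct sk_buff *skb)
	{
		struct sk_buff_head inputq;

		skb_queue_head_init(&inputq);	/* initializes the list and its lock */
		__skb_queue_tail(&inputq, skb);
		return tipc_sk_rcv(net, &inputq);	/* -EHOSTUNREACH if rejected */
	}
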
@@ -2027,6 +2127,7 @@ exit:
2027static int tipc_shutdown(struct socket *sock, int how) 2127static int tipc_shutdown(struct socket *sock, int how)
2028{ 2128{
2029 struct sock *sk = sock->sk; 2129 struct sock *sk = sock->sk;
2130 struct net *net = sock_net(sk);
2030 struct tipc_sock *tsk = tipc_sk(sk); 2131 struct tipc_sock *tsk = tipc_sk(sk);
2031 struct sk_buff *skb; 2132 struct sk_buff *skb;
2032 u32 dnode; 2133 u32 dnode;
@@ -2049,21 +2150,24 @@ restart:
2049 kfree_skb(skb); 2150 kfree_skb(skb);
2050 goto restart; 2151 goto restart;
2051 } 2152 }
2052 if (tipc_msg_reverse(skb, &dnode, TIPC_CONN_SHUTDOWN)) 2153 if (tipc_msg_reverse(tsk_own_node(tsk), skb, &dnode,
2053 tipc_link_xmit_skb(skb, dnode, tsk->ref); 2154 TIPC_CONN_SHUTDOWN))
2054 tipc_node_remove_conn(dnode, tsk->ref); 2155 tipc_link_xmit_skb(net, skb, dnode,
2156 tsk->portid);
2157 tipc_node_remove_conn(net, dnode, tsk->portid);
2055 } else { 2158 } else {
2056 dnode = tsk_peer_node(tsk); 2159 dnode = tsk_peer_node(tsk);
2160
2057 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, 2161 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
2058 TIPC_CONN_MSG, SHORT_H_SIZE, 2162 TIPC_CONN_MSG, SHORT_H_SIZE,
2059 0, dnode, tipc_own_addr, 2163 0, dnode, tsk_own_node(tsk),
2060 tsk_peer_port(tsk), 2164 tsk_peer_port(tsk),
2061 tsk->ref, TIPC_CONN_SHUTDOWN); 2165 tsk->portid, TIPC_CONN_SHUTDOWN);
2062 tipc_link_xmit_skb(skb, dnode, tsk->ref); 2166 tipc_link_xmit_skb(net, skb, dnode, tsk->portid);
2063 } 2167 }
2064 tsk->connected = 0; 2168 tsk->connected = 0;
2065 sock->state = SS_DISCONNECTING; 2169 sock->state = SS_DISCONNECTING;
2066 tipc_node_remove_conn(dnode, tsk->ref); 2170 tipc_node_remove_conn(net, dnode, tsk->portid);
2067 /* fall through */ 2171 /* fall through */
2068 2172
2069 case SS_DISCONNECTING: 2173 case SS_DISCONNECTING:
@@ -2084,18 +2188,14 @@ restart:
2084 return res; 2188 return res;
2085} 2189}
2086 2190
2087static void tipc_sk_timeout(unsigned long ref) 2191static void tipc_sk_timeout(unsigned long data)
2088{ 2192{
2089 struct tipc_sock *tsk; 2193 struct tipc_sock *tsk = (struct tipc_sock *)data;
2090 struct sock *sk; 2194 struct sock *sk = &tsk->sk;
2091 struct sk_buff *skb = NULL; 2195 struct sk_buff *skb = NULL;
2092 u32 peer_port, peer_node; 2196 u32 peer_port, peer_node;
2197 u32 own_node = tsk_own_node(tsk);
2093 2198
2094 tsk = tipc_sk_get(ref);
2095 if (!tsk)
2096 return;
2097
2098 sk = &tsk->sk;
2099 bh_lock_sock(sk); 2199 bh_lock_sock(sk);
2100 if (!tsk->connected) { 2200 if (!tsk->connected) {
2101 bh_unlock_sock(sk); 2201 bh_unlock_sock(sk);
@@ -2106,38 +2206,39 @@ static void tipc_sk_timeout(unsigned long ref)
2106 2206
2107 if (tsk->probing_state == TIPC_CONN_PROBING) { 2207 if (tsk->probing_state == TIPC_CONN_PROBING) {
2108 /* Previous probe not answered -> self abort */ 2208 /* Previous probe not answered -> self abort */
2109 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE, TIPC_CONN_MSG, 2209 skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
2110 SHORT_H_SIZE, 0, tipc_own_addr, 2210 TIPC_CONN_MSG, SHORT_H_SIZE, 0,
2111 peer_node, ref, peer_port, 2211 own_node, peer_node, tsk->portid,
2112 TIPC_ERR_NO_PORT); 2212 peer_port, TIPC_ERR_NO_PORT);
2113 } else { 2213 } else {
2114 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE, INT_H_SIZE, 2214 skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE,
2115 0, peer_node, tipc_own_addr, 2215 INT_H_SIZE, 0, peer_node, own_node,
2116 peer_port, ref, TIPC_OK); 2216 peer_port, tsk->portid, TIPC_OK);
2117 tsk->probing_state = TIPC_CONN_PROBING; 2217 tsk->probing_state = TIPC_CONN_PROBING;
2118 k_start_timer(&tsk->timer, tsk->probing_interval); 2218 sk_reset_timer(sk, &sk->sk_timer, jiffies + tsk->probing_intv);
2119 } 2219 }
2120 bh_unlock_sock(sk); 2220 bh_unlock_sock(sk);
2121 if (skb) 2221 if (skb)
2122 tipc_link_xmit_skb(skb, peer_node, ref); 2222 tipc_link_xmit_skb(sock_net(sk), skb, peer_node, tsk->portid);
2123exit: 2223exit:
2124 tipc_sk_put(tsk); 2224 sock_put(sk);
2125} 2225}
2126 2226
2127static int tipc_sk_publish(struct tipc_sock *tsk, uint scope, 2227static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
2128 struct tipc_name_seq const *seq) 2228 struct tipc_name_seq const *seq)
2129{ 2229{
2230 struct net *net = sock_net(&tsk->sk);
2130 struct publication *publ; 2231 struct publication *publ;
2131 u32 key; 2232 u32 key;
2132 2233
2133 if (tsk->connected) 2234 if (tsk->connected)
2134 return -EINVAL; 2235 return -EINVAL;
2135 key = tsk->ref + tsk->pub_count + 1; 2236 key = tsk->portid + tsk->pub_count + 1;
2136 if (key == tsk->ref) 2237 if (key == tsk->portid)
2137 return -EADDRINUSE; 2238 return -EADDRINUSE;
2138 2239
2139 publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper, 2240 publ = tipc_nametbl_publish(net, seq->type, seq->lower, seq->upper,
2140 scope, tsk->ref, key); 2241 scope, tsk->portid, key);
2141 if (unlikely(!publ)) 2242 if (unlikely(!publ))
2142 return -EINVAL; 2243 return -EINVAL;
2143 2244
@@ -2150,6 +2251,7 @@ static int tipc_sk_publish(struct tipc_sock *tsk, uint scope,
2150static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope, 2251static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
2151 struct tipc_name_seq const *seq) 2252 struct tipc_name_seq const *seq)
2152{ 2253{
2254 struct net *net = sock_net(&tsk->sk);
2153 struct publication *publ; 2255 struct publication *publ;
2154 struct publication *safe; 2256 struct publication *safe;
2155 int rc = -EINVAL; 2257 int rc = -EINVAL;
@@ -2164,12 +2266,12 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
2164 continue; 2266 continue;
2165 if (publ->upper != seq->upper) 2267 if (publ->upper != seq->upper)
2166 break; 2268 break;
2167 tipc_nametbl_withdraw(publ->type, publ->lower, 2269 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2168 publ->ref, publ->key); 2270 publ->ref, publ->key);
2169 rc = 0; 2271 rc = 0;
2170 break; 2272 break;
2171 } 2273 }
2172 tipc_nametbl_withdraw(publ->type, publ->lower, 2274 tipc_nametbl_withdraw(net, publ->type, publ->lower,
2173 publ->ref, publ->key); 2275 publ->ref, publ->key);
2174 rc = 0; 2276 rc = 0;
2175 } 2277 }
@@ -2178,336 +2280,105 @@ static int tipc_sk_withdraw(struct tipc_sock *tsk, uint scope,
2178 return rc; 2280 return rc;
2179} 2281}
2180 2282
2181static int tipc_sk_show(struct tipc_sock *tsk, char *buf,
2182 int len, int full_id)
2183{
2184 struct publication *publ;
2185 int ret;
2186
2187 if (full_id)
2188 ret = tipc_snprintf(buf, len, "<%u.%u.%u:%u>:",
2189 tipc_zone(tipc_own_addr),
2190 tipc_cluster(tipc_own_addr),
2191 tipc_node(tipc_own_addr), tsk->ref);
2192 else
2193 ret = tipc_snprintf(buf, len, "%-10u:", tsk->ref);
2194
2195 if (tsk->connected) {
2196 u32 dport = tsk_peer_port(tsk);
2197 u32 destnode = tsk_peer_node(tsk);
2198
2199 ret += tipc_snprintf(buf + ret, len - ret,
2200 " connected to <%u.%u.%u:%u>",
2201 tipc_zone(destnode),
2202 tipc_cluster(destnode),
2203 tipc_node(destnode), dport);
2204 if (tsk->conn_type != 0)
2205 ret += tipc_snprintf(buf + ret, len - ret,
2206 " via {%u,%u}", tsk->conn_type,
2207 tsk->conn_instance);
2208 } else if (tsk->published) {
2209 ret += tipc_snprintf(buf + ret, len - ret, " bound to");
2210 list_for_each_entry(publ, &tsk->publications, pport_list) {
2211 if (publ->lower == publ->upper)
2212 ret += tipc_snprintf(buf + ret, len - ret,
2213 " {%u,%u}", publ->type,
2214 publ->lower);
2215 else
2216 ret += tipc_snprintf(buf + ret, len - ret,
2217 " {%u,%u,%u}", publ->type,
2218 publ->lower, publ->upper);
2219 }
2220 }
2221 ret += tipc_snprintf(buf + ret, len - ret, "\n");
2222 return ret;
2223}
2224
2225struct sk_buff *tipc_sk_socks_show(void)
2226{
2227 struct sk_buff *buf;
2228 struct tlv_desc *rep_tlv;
2229 char *pb;
2230 int pb_len;
2231 struct tipc_sock *tsk;
2232 int str_len = 0;
2233 u32 ref = 0;
2234
2235 buf = tipc_cfg_reply_alloc(TLV_SPACE(ULTRA_STRING_MAX_LEN));
2236 if (!buf)
2237 return NULL;
2238 rep_tlv = (struct tlv_desc *)buf->data;
2239 pb = TLV_DATA(rep_tlv);
2240 pb_len = ULTRA_STRING_MAX_LEN;
2241
2242 tsk = tipc_sk_get_next(&ref);
2243 for (; tsk; tsk = tipc_sk_get_next(&ref)) {
2244 lock_sock(&tsk->sk);
2245 str_len += tipc_sk_show(tsk, pb + str_len,
2246 pb_len - str_len, 0);
2247 release_sock(&tsk->sk);
2248 tipc_sk_put(tsk);
2249 }
2250 str_len += 1; /* for "\0" */
2251 skb_put(buf, TLV_SPACE(str_len));
2252 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
2253
2254 return buf;
2255}
2256
2257/* tipc_sk_reinit: set non-zero address in all existing sockets 2283/* tipc_sk_reinit: set non-zero address in all existing sockets
2258 * when we go from standalone to network mode. 2284 * when we go from standalone to network mode.
2259 */ 2285 */
2260void tipc_sk_reinit(void) 2286void tipc_sk_reinit(struct net *net)
2261{ 2287{
2288 struct tipc_net *tn = net_generic(net, tipc_net_id);
2289 const struct bucket_table *tbl;
2290 struct rhash_head *pos;
2291 struct tipc_sock *tsk;
2262 struct tipc_msg *msg; 2292 struct tipc_msg *msg;
2263 u32 ref = 0; 2293 int i;
2264 struct tipc_sock *tsk = tipc_sk_get_next(&ref);
2265 2294
2266 for (; tsk; tsk = tipc_sk_get_next(&ref)) { 2295 rcu_read_lock();
2267 lock_sock(&tsk->sk); 2296 tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
2268 msg = &tsk->phdr; 2297 for (i = 0; i < tbl->size; i++) {
2269 msg_set_prevnode(msg, tipc_own_addr); 2298 rht_for_each_entry_rcu(tsk, pos, tbl, i, node) {
2270 msg_set_orignode(msg, tipc_own_addr); 2299 spin_lock_bh(&tsk->sk.sk_lock.slock);
2271 release_sock(&tsk->sk); 2300 msg = &tsk->phdr;
2272 tipc_sk_put(tsk); 2301 msg_set_prevnode(msg, tn->own_addr);
2302 msg_set_orignode(msg, tn->own_addr);
2303 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2304 }
2273 } 2305 }
2306 rcu_read_unlock();
2274} 2307}
2275 2308
2276/** 2309static struct tipc_sock *tipc_sk_lookup(struct net *net, u32 portid)
2277 * struct reference - TIPC socket reference entry
2278 * @tsk: pointer to socket associated with reference entry
2279 * @ref: reference value for socket (combines instance & array index info)
2280 */
2281struct reference {
2282 struct tipc_sock *tsk;
2283 u32 ref;
2284};
2285
2286/**
2287 * struct tipc_ref_table - table of TIPC socket reference entries
2288 * @entries: pointer to array of reference entries
2289 * @capacity: array index of first unusable entry
2290 * @init_point: array index of first uninitialized entry
2291 * @first_free: array index of first unused socket reference entry
2292 * @last_free: array index of last unused socket reference entry
2293 * @index_mask: bitmask for array index portion of reference values
2294 * @start_mask: initial value for instance value portion of reference values
2295 */
2296struct ref_table {
2297 struct reference *entries;
2298 u32 capacity;
2299 u32 init_point;
2300 u32 first_free;
2301 u32 last_free;
2302 u32 index_mask;
2303 u32 start_mask;
2304};
2305
2306/* Socket reference table consists of 2**N entries.
2307 *
2308 * State Socket ptr Reference
2309 * ----- ---------- ---------
2310 * In use non-NULL XXXX|own index
2311 * (XXXX changes each time entry is acquired)
2312 * Free NULL YYYY|next free index
2313 * (YYYY is one more than last used XXXX)
2314 * Uninitialized NULL 0
2315 *
2316 * Entry 0 is not used; this allows index 0 to denote the end of the free list.
2317 *
2318 * Note that a reference value of 0 does not necessarily indicate that an
2319 * entry is uninitialized, since the last entry in the free list could also
2320 * have a reference value of 0 (although this is unlikely).
2321 */
2322
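The layout comment above captures the scheme being retired: a reference packs a slot index in its low bits and an instance counter in its high bits, so a stale reference keeps failing lookups after its slot has been recycled. As a rough user-space illustration of that encoding only (fixed slot choice, no free-list handling, none of the identifiers below are the kernel's), consider:

	#include <stdint.h>
	#include <stdio.h>

	#define TBL_SIZE   16                     /* must be a power of two */
	#define INDEX_MASK (TBL_SIZE - 1)

	struct entry {
		void *obj;                        /* NULL when the slot is free */
		uint32_t ref;                     /* instance bits | slot index */
	};

	static struct entry tbl[TBL_SIZE];

	/* Hand out a reference whose low bits are the slot index and whose
	 * high bits are whatever instance value the slot currently carries.
	 */
	static uint32_t ref_acquire(void *obj, uint32_t slot)
	{
		tbl[slot].obj = obj;
		tbl[slot].ref = (tbl[slot].ref & ~INDEX_MASK) + slot;
		return tbl[slot].ref;
	}

	/* Free the slot and bump the instance part so old references miss. */
	static void ref_discard(uint32_t ref)
	{
		uint32_t slot = ref & INDEX_MASK;

		tbl[slot].obj = NULL;
		tbl[slot].ref = (ref & ~INDEX_MASK) + (INDEX_MASK + 1);
	}

	/* A lookup succeeds only if both the slot and the instance match. */
	static void *ref_lookup(uint32_t ref)
	{
		struct entry *e = &tbl[ref & INDEX_MASK];

		return (e->obj && e->ref == ref) ? e->obj : NULL;
	}

	int main(void)
	{
		int dummy = 42;
		uint32_t ref = ref_acquire(&dummy, 3);

		printf("before discard: %p\n", ref_lookup(ref));
		ref_discard(ref);
		printf("after discard:  %p\n", ref_lookup(ref)); /* NULL: stale */
		return 0;
	}

The patch removes this table entirely in favour of an rhashtable keyed by port id, so the generation trick is no longer needed; the code below is the implementation that goes away.
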
2323static struct ref_table tipc_ref_table;
2324
2325static DEFINE_RWLOCK(ref_table_lock);
2326
2327/**
2328 * tipc_ref_table_init - create reference table for sockets
2329 */
2330int tipc_sk_ref_table_init(u32 req_sz, u32 start)
2331{ 2310{
2332 struct reference *table; 2311 struct tipc_net *tn = net_generic(net, tipc_net_id);
2333 u32 actual_sz; 2312 struct tipc_sock *tsk;
2334
2335 /* account for unused entry, then round up size to a power of 2 */
2336
2337 req_sz++;
2338 for (actual_sz = 16; actual_sz < req_sz; actual_sz <<= 1) {
2339 /* do nothing */
2340 };
2341
2342 /* allocate table & mark all entries as uninitialized */
2343 table = vzalloc(actual_sz * sizeof(struct reference));
2344 if (table == NULL)
2345 return -ENOMEM;
2346
2347 tipc_ref_table.entries = table;
2348 tipc_ref_table.capacity = req_sz;
2349 tipc_ref_table.init_point = 1;
2350 tipc_ref_table.first_free = 0;
2351 tipc_ref_table.last_free = 0;
2352 tipc_ref_table.index_mask = actual_sz - 1;
2353 tipc_ref_table.start_mask = start & ~tipc_ref_table.index_mask;
2354 2313
2355 return 0; 2314 rcu_read_lock();
2356} 2315 tsk = rhashtable_lookup(&tn->sk_rht, &portid);
2316 if (tsk)
2317 sock_hold(&tsk->sk);
2318 rcu_read_unlock();
2357 2319
2358/** 2320 return tsk;
2359 * tipc_ref_table_stop - destroy reference table for sockets
2360 */
2361void tipc_sk_ref_table_stop(void)
2362{
2363 if (!tipc_ref_table.entries)
2364 return;
2365 vfree(tipc_ref_table.entries);
2366 tipc_ref_table.entries = NULL;
2367} 2321}
2368 2322
2369/* tipc_ref_acquire - create reference to a socket 2323static int tipc_sk_insert(struct tipc_sock *tsk)
2370 *
2371 * Register an socket pointer in the reference table.
2372 * Returns a unique reference value that is used from then on to retrieve the
2373 * socket pointer, or to determine if the socket has been deregistered.
2374 */
2375u32 tipc_sk_ref_acquire(struct tipc_sock *tsk)
2376{ 2324{
2377 u32 index; 2325 struct sock *sk = &tsk->sk;
2378 u32 index_mask; 2326 struct net *net = sock_net(sk);
2379 u32 next_plus_upper; 2327 struct tipc_net *tn = net_generic(net, tipc_net_id);
2380 u32 ref = 0; 2328 u32 remaining = (TIPC_MAX_PORT - TIPC_MIN_PORT) + 1;
2381 struct reference *entry; 2329 u32 portid = prandom_u32() % remaining + TIPC_MIN_PORT;
2382 2330
2383 if (unlikely(!tsk)) { 2331 while (remaining--) {
2384 pr_err("Attempt to acquire ref. to non-existent obj\n"); 2332 portid++;
2385 return 0; 2333 if ((portid < TIPC_MIN_PORT) || (portid > TIPC_MAX_PORT))
2386 } 2334 portid = TIPC_MIN_PORT;
2387 if (unlikely(!tipc_ref_table.entries)) { 2335 tsk->portid = portid;
2388 pr_err("Ref. table not found in acquisition attempt\n"); 2336 sock_hold(&tsk->sk);
2389 return 0; 2337 if (rhashtable_lookup_insert(&tn->sk_rht, &tsk->node))
2390 } 2338 return 0;
2391 2339 sock_put(&tsk->sk);
2392 /* Take a free entry, if available; otherwise initialize a new one */
2393 write_lock_bh(&ref_table_lock);
2394 index = tipc_ref_table.first_free;
2395 entry = &tipc_ref_table.entries[index];
2396
2397 if (likely(index)) {
2398 index = tipc_ref_table.first_free;
2399 entry = &tipc_ref_table.entries[index];
2400 index_mask = tipc_ref_table.index_mask;
2401 next_plus_upper = entry->ref;
2402 tipc_ref_table.first_free = next_plus_upper & index_mask;
2403 ref = (next_plus_upper & ~index_mask) + index;
2404 entry->tsk = tsk;
2405 } else if (tipc_ref_table.init_point < tipc_ref_table.capacity) {
2406 index = tipc_ref_table.init_point++;
2407 entry = &tipc_ref_table.entries[index];
2408 ref = tipc_ref_table.start_mask + index;
2409 } 2340 }
2410 2341
2411 if (ref) { 2342 return -1;
2412 entry->ref = ref;
2413 entry->tsk = tsk;
2414 }
2415 write_unlock_bh(&ref_table_lock);
2416 return ref;
2417} 2343}
2418 2344
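tipc_sk_insert() above picks a random starting point in the port range and walks forward with wrap-around until rhashtable_lookup_insert() accepts the id. A stand-alone sketch of just that allocation loop, with a boolean array standing in for the hash-table insert (the range size and all names are invented for brevity), could be:

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <time.h>

	#define MIN_PORT 1u
	#define MAX_PORT 1023u             /* small range so the demo finishes fast */

	static bool port_in_use[MAX_PORT + 1];  /* stand-in for the hash table */

	/* Start at a random id, walk forward, wrap around, and give up only
	 * after one full pass over the range - the same shape as the loop in
	 * tipc_sk_insert().
	 */
	static int64_t alloc_portid(void)
	{
		uint32_t remaining = MAX_PORT - MIN_PORT + 1;
		uint32_t portid = (uint32_t)rand() % remaining + MIN_PORT;

		while (remaining--) {
			portid++;
			if (portid < MIN_PORT || portid > MAX_PORT)
				portid = MIN_PORT;
			if (!port_in_use[portid]) {   /* "insert" succeeded */
				port_in_use[portid] = true;
				return portid;
			}
		}
		return -1;                            /* range exhausted */
	}

	int main(void)
	{
		srand((unsigned)time(NULL));
		for (int i = 0; i < 5; i++)
			printf("allocated port id %lld\n", (long long)alloc_portid());
		return 0;
	}
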
2419/* tipc_sk_ref_discard - invalidate reference to an socket 2345static void tipc_sk_remove(struct tipc_sock *tsk)
2420 *
2421 * Disallow future references to an socket and free up the entry for re-use.
2422 */
2423void tipc_sk_ref_discard(u32 ref)
2424{ 2346{
2425 struct reference *entry; 2347 struct sock *sk = &tsk->sk;
2426 u32 index; 2348 struct tipc_net *tn = net_generic(sock_net(sk), tipc_net_id);
2427 u32 index_mask;
2428
2429 if (unlikely(!tipc_ref_table.entries)) {
2430 pr_err("Ref. table not found during discard attempt\n");
2431 return;
2432 }
2433
2434 index_mask = tipc_ref_table.index_mask;
2435 index = ref & index_mask;
2436 entry = &tipc_ref_table.entries[index];
2437
2438 write_lock_bh(&ref_table_lock);
2439 2349
2440 if (unlikely(!entry->tsk)) { 2350 if (rhashtable_remove(&tn->sk_rht, &tsk->node)) {
2441 pr_err("Attempt to discard ref. to non-existent socket\n"); 2351 WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
2442 goto exit; 2352 __sock_put(sk);
2443 } 2353 }
2444 if (unlikely(entry->ref != ref)) {
2445 pr_err("Attempt to discard non-existent reference\n");
2446 goto exit;
2447 }
2448
2449 /* Mark entry as unused; increment instance part of entry's
2450 * reference to invalidate any subsequent references
2451 */
2452
2453 entry->tsk = NULL;
2454 entry->ref = (ref & ~index_mask) + (index_mask + 1);
2455
2456 /* Append entry to free entry list */
2457 if (unlikely(tipc_ref_table.first_free == 0))
2458 tipc_ref_table.first_free = index;
2459 else
2460 tipc_ref_table.entries[tipc_ref_table.last_free].ref |= index;
2461 tipc_ref_table.last_free = index;
2462exit:
2463 write_unlock_bh(&ref_table_lock);
2464} 2354}
2465 2355
2466/* tipc_sk_get - find referenced socket and return pointer to it 2356int tipc_sk_rht_init(struct net *net)
2467 */
2468struct tipc_sock *tipc_sk_get(u32 ref)
2469{ 2357{
2470 struct reference *entry; 2358 struct tipc_net *tn = net_generic(net, tipc_net_id);
2471 struct tipc_sock *tsk; 2359 struct rhashtable_params rht_params = {
2360 .nelem_hint = 192,
2361 .head_offset = offsetof(struct tipc_sock, node),
2362 .key_offset = offsetof(struct tipc_sock, portid),
2363 .key_len = sizeof(u32), /* portid */
2364 .hashfn = jhash,
2365 .max_shift = 20, /* 1M */
2366 .min_shift = 8, /* 256 */
2367 .grow_decision = rht_grow_above_75,
2368 .shrink_decision = rht_shrink_below_30,
2369 };
2472 2370
2473 if (unlikely(!tipc_ref_table.entries)) 2371 return rhashtable_init(&tn->sk_rht, &rht_params);
2474 return NULL;
2475 read_lock_bh(&ref_table_lock);
2476 entry = &tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
2477 tsk = entry->tsk;
2478 if (likely(tsk && (entry->ref == ref)))
2479 sock_hold(&tsk->sk);
2480 else
2481 tsk = NULL;
2482 read_unlock_bh(&ref_table_lock);
2483 return tsk;
2484} 2372}
2485 2373
2486/* tipc_sk_get_next - lock & return next socket after referenced one 2374void tipc_sk_rht_destroy(struct net *net)
2487*/
2488struct tipc_sock *tipc_sk_get_next(u32 *ref)
2489{ 2375{
2490 struct reference *entry; 2376 struct tipc_net *tn = net_generic(net, tipc_net_id);
2491 struct tipc_sock *tsk = NULL;
2492 uint index = *ref & tipc_ref_table.index_mask;
2493 2377
2494 read_lock_bh(&ref_table_lock); 2378 /* Wait for socket readers to complete */
2495 while (++index < tipc_ref_table.capacity) { 2379 synchronize_net();
2496 entry = &tipc_ref_table.entries[index];
2497 if (!entry->tsk)
2498 continue;
2499 tsk = entry->tsk;
2500 sock_hold(&tsk->sk);
2501 *ref = entry->ref;
2502 break;
2503 }
2504 read_unlock_bh(&ref_table_lock);
2505 return tsk;
2506}
2507 2380
2508static void tipc_sk_put(struct tipc_sock *tsk) 2381 rhashtable_destroy(&tn->sk_rht);
2509{
2510 sock_put(&tsk->sk);
2511} 2382}
2512 2383
2513/** 2384/**
@@ -2639,8 +2510,9 @@ static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
2639 return put_user(sizeof(value), ol); 2510 return put_user(sizeof(value), ol);
2640} 2511}
2641 2512
2642static int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg) 2513static int tipc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
2643{ 2514{
2515 struct sock *sk = sock->sk;
2644 struct tipc_sioc_ln_req lnr; 2516 struct tipc_sioc_ln_req lnr;
2645 void __user *argp = (void __user *)arg; 2517 void __user *argp = (void __user *)arg;
2646 2518
@@ -2648,7 +2520,8 @@ static int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg)
2648 case SIOCGETLINKNAME: 2520 case SIOCGETLINKNAME:
2649 if (copy_from_user(&lnr, argp, sizeof(lnr))) 2521 if (copy_from_user(&lnr, argp, sizeof(lnr)))
2650 return -EFAULT; 2522 return -EFAULT;
2651 if (!tipc_node_get_linkname(lnr.bearer_id & 0xffff, lnr.peer, 2523 if (!tipc_node_get_linkname(sock_net(sk),
2524 lnr.bearer_id & 0xffff, lnr.peer,
2652 lnr.linkname, TIPC_MAX_LINK_NAME)) { 2525 lnr.linkname, TIPC_MAX_LINK_NAME)) {
2653 if (copy_to_user(argp, &lnr, sizeof(lnr))) 2526 if (copy_to_user(argp, &lnr, sizeof(lnr)))
2654 return -EFAULT; 2527 return -EFAULT;
@@ -2820,18 +2693,20 @@ static int __tipc_nl_add_sk(struct sk_buff *skb, struct netlink_callback *cb,
2820 int err; 2693 int err;
2821 void *hdr; 2694 void *hdr;
2822 struct nlattr *attrs; 2695 struct nlattr *attrs;
2696 struct net *net = sock_net(skb->sk);
2697 struct tipc_net *tn = net_generic(net, tipc_net_id);
2823 2698
2824 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 2699 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
2825 &tipc_genl_v2_family, NLM_F_MULTI, TIPC_NL_SOCK_GET); 2700 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_SOCK_GET);
2826 if (!hdr) 2701 if (!hdr)
2827 goto msg_cancel; 2702 goto msg_cancel;
2828 2703
2829 attrs = nla_nest_start(skb, TIPC_NLA_SOCK); 2704 attrs = nla_nest_start(skb, TIPC_NLA_SOCK);
2830 if (!attrs) 2705 if (!attrs)
2831 goto genlmsg_cancel; 2706 goto genlmsg_cancel;
2832 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->ref)) 2707 if (nla_put_u32(skb, TIPC_NLA_SOCK_REF, tsk->portid))
2833 goto attr_msg_cancel; 2708 goto attr_msg_cancel;
2834 if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tipc_own_addr)) 2709 if (nla_put_u32(skb, TIPC_NLA_SOCK_ADDR, tn->own_addr))
2835 goto attr_msg_cancel; 2710 goto attr_msg_cancel;
2836 2711
2837 if (tsk->connected) { 2712 if (tsk->connected) {
@@ -2859,22 +2734,37 @@ int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb)
2859{ 2734{
2860 int err; 2735 int err;
2861 struct tipc_sock *tsk; 2736 struct tipc_sock *tsk;
2862 u32 prev_ref = cb->args[0]; 2737 const struct bucket_table *tbl;
2863 u32 ref = prev_ref; 2738 struct rhash_head *pos;
2864 2739 struct net *net = sock_net(skb->sk);
2865 tsk = tipc_sk_get_next(&ref); 2740 struct tipc_net *tn = net_generic(net, tipc_net_id);
2866 for (; tsk; tsk = tipc_sk_get_next(&ref)) { 2741 u32 tbl_id = cb->args[0];
2867 lock_sock(&tsk->sk); 2742 u32 prev_portid = cb->args[1];
2868 err = __tipc_nl_add_sk(skb, cb, tsk);
2869 release_sock(&tsk->sk);
2870 tipc_sk_put(tsk);
2871 if (err)
2872 break;
2873 2743
2874 prev_ref = ref; 2744 rcu_read_lock();
2875 } 2745 tbl = rht_dereference_rcu((&tn->sk_rht)->tbl, &tn->sk_rht);
2746 for (; tbl_id < tbl->size; tbl_id++) {
2747 rht_for_each_entry_rcu(tsk, pos, tbl, tbl_id, node) {
2748 spin_lock_bh(&tsk->sk.sk_lock.slock);
2749 if (prev_portid && prev_portid != tsk->portid) {
2750 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2751 continue;
2752 }
2876 2753
2877 cb->args[0] = prev_ref; 2754 err = __tipc_nl_add_sk(skb, cb, tsk);
2755 if (err) {
2756 prev_portid = tsk->portid;
2757 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2758 goto out;
2759 }
2760 prev_portid = 0;
2761 spin_unlock_bh(&tsk->sk.sk_lock.slock);
2762 }
2763 }
2764out:
2765 rcu_read_unlock();
2766 cb->args[0] = tbl_id;
2767 cb->args[1] = prev_portid;
2878 2768
2879 return skb->len; 2769 return skb->len;
2880} 2770}
@@ -2888,7 +2778,7 @@ static int __tipc_nl_add_sk_publ(struct sk_buff *skb,
2888 struct nlattr *attrs; 2778 struct nlattr *attrs;
2889 2779
2890 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, 2780 hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
2891 &tipc_genl_v2_family, NLM_F_MULTI, TIPC_NL_PUBL_GET); 2781 &tipc_genl_family, NLM_F_MULTI, TIPC_NL_PUBL_GET);
2892 if (!hdr) 2782 if (!hdr)
2893 goto msg_cancel; 2783 goto msg_cancel;
2894 2784
@@ -2962,12 +2852,13 @@ static int __tipc_nl_list_sk_publ(struct sk_buff *skb,
2962int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb) 2852int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
2963{ 2853{
2964 int err; 2854 int err;
2965 u32 tsk_ref = cb->args[0]; 2855 u32 tsk_portid = cb->args[0];
2966 u32 last_publ = cb->args[1]; 2856 u32 last_publ = cb->args[1];
2967 u32 done = cb->args[2]; 2857 u32 done = cb->args[2];
2858 struct net *net = sock_net(skb->sk);
2968 struct tipc_sock *tsk; 2859 struct tipc_sock *tsk;
2969 2860
2970 if (!tsk_ref) { 2861 if (!tsk_portid) {
2971 struct nlattr **attrs; 2862 struct nlattr **attrs;
2972 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1]; 2863 struct nlattr *sock[TIPC_NLA_SOCK_MAX + 1];
2973 2864
@@ -2984,13 +2875,13 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
2984 if (!sock[TIPC_NLA_SOCK_REF]) 2875 if (!sock[TIPC_NLA_SOCK_REF])
2985 return -EINVAL; 2876 return -EINVAL;
2986 2877
2987 tsk_ref = nla_get_u32(sock[TIPC_NLA_SOCK_REF]); 2878 tsk_portid = nla_get_u32(sock[TIPC_NLA_SOCK_REF]);
2988 } 2879 }
2989 2880
2990 if (done) 2881 if (done)
2991 return 0; 2882 return 0;
2992 2883
2993 tsk = tipc_sk_get(tsk_ref); 2884 tsk = tipc_sk_lookup(net, tsk_portid);
2994 if (!tsk) 2885 if (!tsk)
2995 return -EINVAL; 2886 return -EINVAL;
2996 2887
@@ -2999,9 +2890,9 @@ int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb)
2999 if (!err) 2890 if (!err)
3000 done = 1; 2891 done = 1;
3001 release_sock(&tsk->sk); 2892 release_sock(&tsk->sk);
3002 tipc_sk_put(tsk); 2893 sock_put(&tsk->sk);
3003 2894
3004 cb->args[0] = tsk_ref; 2895 cb->args[0] = tsk_portid;
3005 cb->args[1] = last_publ; 2896 cb->args[1] = last_publ;
3006 cb->args[2] = done; 2897 cb->args[2] = done;
3007 2898
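tipc_nl_sk_dump() above is re-entered until the netlink reply buffer is exhausted, so it parks its position between calls: the bucket index goes into cb->args[0] and the port id that did not fit into cb->args[1]. Stripped of the RCU and netlink machinery, the resumable-iteration idea looks roughly like this (the cursor type, batch size and names are made up for the illustration):

	#include <stdio.h>

	#define N_ITEMS  10
	#define BATCH_SZ 3      /* pretend the reply buffer holds three records */

	struct cursor {
		int next;       /* plays the role of cb->args[]: where to resume */
	};

	/* Emit up to BATCH_SZ items, remember where we stopped, and report
	 * whether anything is left.  A netlink dump callback works the same
	 * way: it returns data while more remains and signals completion
	 * once the iteration is done.
	 */
	static int dump_batch(struct cursor *c)
	{
		int emitted = 0;

		for (; c->next < N_ITEMS; c->next++) {
			if (emitted == BATCH_SZ)
				return 1;     /* buffer "full", come back later */
			printf("record %d\n", c->next);
			emitted++;
		}
		return 0;                     /* iteration complete */
	}

	int main(void)
	{
		struct cursor c = { 0 };

		while (dump_batch(&c))
			printf("-- batch boundary, resuming at %d --\n", c.next);
		return 0;
	}
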
diff --git a/net/tipc/socket.h b/net/tipc/socket.h
index d34089387006..238f1b7bd9bd 100644
--- a/net/tipc/socket.h
+++ b/net/tipc/socket.h
@@ -1,6 +1,6 @@
1/* net/tipc/socket.h: Include file for TIPC socket code 1/* net/tipc/socket.h: Include file for TIPC socket code
2 * 2 *
3 * Copyright (c) 2014, Ericsson AB 3 * Copyright (c) 2014-2015, Ericsson AB
4 * All rights reserved. 4 * All rights reserved.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
@@ -42,12 +42,18 @@
42#define TIPC_FLOWCTRL_WIN (TIPC_CONNACK_INTV * 2) 42#define TIPC_FLOWCTRL_WIN (TIPC_CONNACK_INTV * 2)
43#define TIPC_CONN_OVERLOAD_LIMIT ((TIPC_FLOWCTRL_WIN * 2 + 1) * \ 43#define TIPC_CONN_OVERLOAD_LIMIT ((TIPC_FLOWCTRL_WIN * 2 + 1) * \
44 SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE)) 44 SKB_TRUESIZE(TIPC_MAX_USER_MSG_SIZE))
45int tipc_sk_rcv(struct sk_buff *buf); 45int tipc_socket_init(void);
46struct sk_buff *tipc_sk_socks_show(void); 46void tipc_socket_stop(void);
47void tipc_sk_mcast_rcv(struct sk_buff *buf); 47int tipc_sock_create_local(struct net *net, int type, struct socket **res);
48void tipc_sk_reinit(void); 48void tipc_sock_release_local(struct socket *sock);
49int tipc_sk_ref_table_init(u32 requested_size, u32 start); 49int tipc_sock_accept_local(struct socket *sock, struct socket **newsock,
50void tipc_sk_ref_table_stop(void); 50 int flags);
51int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq);
52void tipc_sk_mcast_rcv(struct net *net, struct sk_buff_head *arrvq,
53 struct sk_buff_head *inputq);
54void tipc_sk_reinit(struct net *net);
55int tipc_sk_rht_init(struct net *net);
56void tipc_sk_rht_destroy(struct net *net);
51int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb); 57int tipc_nl_sk_dump(struct sk_buff *skb, struct netlink_callback *cb);
52int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb); 58int tipc_nl_publ_dump(struct sk_buff *skb, struct netlink_callback *cb);
53 59
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 0344206b984f..72c339e432aa 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -50,33 +50,6 @@ struct tipc_subscriber {
50 struct list_head subscription_list; 50 struct list_head subscription_list;
51}; 51};
52 52
53static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr,
54 void *usr_data, void *buf, size_t len);
55static void *subscr_named_msg_event(int conid);
56static void subscr_conn_shutdown_event(int conid, void *usr_data);
57
58static atomic_t subscription_count = ATOMIC_INIT(0);
59
60static struct sockaddr_tipc topsrv_addr __read_mostly = {
61 .family = AF_TIPC,
62 .addrtype = TIPC_ADDR_NAMESEQ,
63 .addr.nameseq.type = TIPC_TOP_SRV,
64 .addr.nameseq.lower = TIPC_TOP_SRV,
65 .addr.nameseq.upper = TIPC_TOP_SRV,
66 .scope = TIPC_NODE_SCOPE
67};
68
69static struct tipc_server topsrv __read_mostly = {
70 .saddr = &topsrv_addr,
71 .imp = TIPC_CRITICAL_IMPORTANCE,
72 .type = SOCK_SEQPACKET,
73 .max_rcvbuf_size = sizeof(struct tipc_subscr),
74 .name = "topology_server",
75 .tipc_conn_recvmsg = subscr_conn_msg_event,
76 .tipc_conn_new = subscr_named_msg_event,
77 .tipc_conn_shutdown = subscr_conn_shutdown_event,
78};
79
80/** 53/**
81 * htohl - convert value to endianness used by destination 54 * htohl - convert value to endianness used by destination
82 * @in: value to convert 55 * @in: value to convert
@@ -93,6 +66,7 @@ static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower,
93 u32 found_upper, u32 event, u32 port_ref, 66 u32 found_upper, u32 event, u32 port_ref,
94 u32 node) 67 u32 node)
95{ 68{
69 struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
96 struct tipc_subscriber *subscriber = sub->subscriber; 70 struct tipc_subscriber *subscriber = sub->subscriber;
97 struct kvec msg_sect; 71 struct kvec msg_sect;
98 72
@@ -103,8 +77,8 @@ static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower,
103 sub->evt.found_upper = htohl(found_upper, sub->swap); 77 sub->evt.found_upper = htohl(found_upper, sub->swap);
104 sub->evt.port.ref = htohl(port_ref, sub->swap); 78 sub->evt.port.ref = htohl(port_ref, sub->swap);
105 sub->evt.port.node = htohl(node, sub->swap); 79 sub->evt.port.node = htohl(node, sub->swap);
106 tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL, msg_sect.iov_base, 80 tipc_conn_sendmsg(tn->topsrv, subscriber->conid, NULL,
107 msg_sect.iov_len); 81 msg_sect.iov_base, msg_sect.iov_len);
108} 82}
109 83
110/** 84/**
@@ -141,9 +115,11 @@ void tipc_subscr_report_overlap(struct tipc_subscription *sub, u32 found_lower,
141 subscr_send_event(sub, found_lower, found_upper, event, port_ref, node); 115 subscr_send_event(sub, found_lower, found_upper, event, port_ref, node);
142} 116}
143 117
144static void subscr_timeout(struct tipc_subscription *sub) 118static void subscr_timeout(unsigned long data)
145{ 119{
120 struct tipc_subscription *sub = (struct tipc_subscription *)data;
146 struct tipc_subscriber *subscriber = sub->subscriber; 121 struct tipc_subscriber *subscriber = sub->subscriber;
122 struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
147 123
148 /* The spin lock per subscriber is used to protect its members */ 124 /* The spin lock per subscriber is used to protect its members */
149 spin_lock_bh(&subscriber->lock); 125 spin_lock_bh(&subscriber->lock);
@@ -167,9 +143,8 @@ static void subscr_timeout(struct tipc_subscription *sub)
167 TIPC_SUBSCR_TIMEOUT, 0, 0); 143 TIPC_SUBSCR_TIMEOUT, 0, 0);
168 144
169 /* Now destroy subscription */ 145 /* Now destroy subscription */
170 k_term_timer(&sub->timer);
171 kfree(sub); 146 kfree(sub);
172 atomic_dec(&subscription_count); 147 atomic_dec(&tn->subscription_count);
173} 148}
174 149
175/** 150/**
@@ -179,10 +154,12 @@ static void subscr_timeout(struct tipc_subscription *sub)
179 */ 154 */
180static void subscr_del(struct tipc_subscription *sub) 155static void subscr_del(struct tipc_subscription *sub)
181{ 156{
157 struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
158
182 tipc_nametbl_unsubscribe(sub); 159 tipc_nametbl_unsubscribe(sub);
183 list_del(&sub->subscription_list); 160 list_del(&sub->subscription_list);
184 kfree(sub); 161 kfree(sub);
185 atomic_dec(&subscription_count); 162 atomic_dec(&tn->subscription_count);
186} 163}
187 164
188/** 165/**
@@ -190,9 +167,12 @@ static void subscr_del(struct tipc_subscription *sub)
190 * 167 *
191 * Note: Must call it in process context since it might sleep. 168 * Note: Must call it in process context since it might sleep.
192 */ 169 */
193static void subscr_terminate(struct tipc_subscriber *subscriber) 170static void subscr_terminate(struct tipc_subscription *sub)
194{ 171{
195 tipc_conn_terminate(&topsrv, subscriber->conid); 172 struct tipc_subscriber *subscriber = sub->subscriber;
173 struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
174
175 tipc_conn_terminate(tn->topsrv, subscriber->conid);
196} 176}
197 177
198static void subscr_release(struct tipc_subscriber *subscriber) 178static void subscr_release(struct tipc_subscriber *subscriber)
@@ -207,8 +187,7 @@ static void subscr_release(struct tipc_subscriber *subscriber)
207 subscription_list) { 187 subscription_list) {
208 if (sub->timeout != TIPC_WAIT_FOREVER) { 188 if (sub->timeout != TIPC_WAIT_FOREVER) {
209 spin_unlock_bh(&subscriber->lock); 189 spin_unlock_bh(&subscriber->lock);
210 k_cancel_timer(&sub->timer); 190 del_timer_sync(&sub->timer);
211 k_term_timer(&sub->timer);
212 spin_lock_bh(&subscriber->lock); 191 spin_lock_bh(&subscriber->lock);
213 } 192 }
214 subscr_del(sub); 193 subscr_del(sub);
@@ -250,8 +229,7 @@ static void subscr_cancel(struct tipc_subscr *s,
250 if (sub->timeout != TIPC_WAIT_FOREVER) { 229 if (sub->timeout != TIPC_WAIT_FOREVER) {
251 sub->timeout = TIPC_WAIT_FOREVER; 230 sub->timeout = TIPC_WAIT_FOREVER;
252 spin_unlock_bh(&subscriber->lock); 231 spin_unlock_bh(&subscriber->lock);
253 k_cancel_timer(&sub->timer); 232 del_timer_sync(&sub->timer);
254 k_term_timer(&sub->timer);
255 spin_lock_bh(&subscriber->lock); 233 spin_lock_bh(&subscriber->lock);
256 } 234 }
257 subscr_del(sub); 235 subscr_del(sub);
@@ -262,9 +240,11 @@ static void subscr_cancel(struct tipc_subscr *s,
262 * 240 *
263 * Called with subscriber lock held. 241 * Called with subscriber lock held.
264 */ 242 */
265static int subscr_subscribe(struct tipc_subscr *s, 243static int subscr_subscribe(struct net *net, struct tipc_subscr *s,
266 struct tipc_subscriber *subscriber, 244 struct tipc_subscriber *subscriber,
267 struct tipc_subscription **sub_p) { 245 struct tipc_subscription **sub_p)
246{
247 struct tipc_net *tn = net_generic(net, tipc_net_id);
268 struct tipc_subscription *sub; 248 struct tipc_subscription *sub;
269 int swap; 249 int swap;
270 250
@@ -279,7 +259,7 @@ static int subscr_subscribe(struct tipc_subscr *s,
279 } 259 }
280 260
281 /* Refuse subscription if global limit exceeded */ 261 /* Refuse subscription if global limit exceeded */
282 if (atomic_read(&subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) { 262 if (atomic_read(&tn->subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
283 pr_warn("Subscription rejected, limit reached (%u)\n", 263 pr_warn("Subscription rejected, limit reached (%u)\n",
284 TIPC_MAX_SUBSCRIPTIONS); 264 TIPC_MAX_SUBSCRIPTIONS);
285 return -EINVAL; 265 return -EINVAL;
@@ -293,10 +273,11 @@ static int subscr_subscribe(struct tipc_subscr *s,
293 } 273 }
294 274
295 /* Initialize subscription object */ 275 /* Initialize subscription object */
276 sub->net = net;
296 sub->seq.type = htohl(s->seq.type, swap); 277 sub->seq.type = htohl(s->seq.type, swap);
297 sub->seq.lower = htohl(s->seq.lower, swap); 278 sub->seq.lower = htohl(s->seq.lower, swap);
298 sub->seq.upper = htohl(s->seq.upper, swap); 279 sub->seq.upper = htohl(s->seq.upper, swap);
299 sub->timeout = htohl(s->timeout, swap); 280 sub->timeout = msecs_to_jiffies(htohl(s->timeout, swap));
300 sub->filter = htohl(s->filter, swap); 281 sub->filter = htohl(s->filter, swap);
301 if ((!(sub->filter & TIPC_SUB_PORTS) == 282 if ((!(sub->filter & TIPC_SUB_PORTS) ==
302 !(sub->filter & TIPC_SUB_SERVICE)) || 283 !(sub->filter & TIPC_SUB_SERVICE)) ||
@@ -309,11 +290,10 @@ static int subscr_subscribe(struct tipc_subscr *s,
309 sub->subscriber = subscriber; 290 sub->subscriber = subscriber;
310 sub->swap = swap; 291 sub->swap = swap;
311 memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr)); 292 memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr));
312 atomic_inc(&subscription_count); 293 atomic_inc(&tn->subscription_count);
313 if (sub->timeout != TIPC_WAIT_FOREVER) { 294 if (sub->timeout != TIPC_WAIT_FOREVER) {
314 k_init_timer(&sub->timer, 295 setup_timer(&sub->timer, subscr_timeout, (unsigned long)sub);
315 (Handler)subscr_timeout, (unsigned long)sub); 296 mod_timer(&sub->timer, jiffies + sub->timeout);
316 k_start_timer(&sub->timer, sub->timeout);
317 } 297 }
318 *sub_p = sub; 298 *sub_p = sub;
319 return 0; 299 return 0;
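The k_init_timer()/k_start_timer()/k_term_timer() wrappers give way to the stock kernel timer API used in the hunk above: the callback takes an unsigned long cookie, the timeout is kept in jiffies via msecs_to_jiffies(), and tear-down is a plain del_timer_sync(). A condensed sketch of that pattern outside TIPC (the struct and function names here are hypothetical) might read:

	#include <linux/jiffies.h>
	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include <linux/timer.h>
	#include <linux/types.h>

	struct demo_sub {
		struct timer_list timer;
		unsigned long timeout;          /* in jiffies, not milliseconds */
	};

	/* Timer callbacks receive the cookie that was passed to setup_timer(). */
	static void demo_timeout(unsigned long data)
	{
		struct demo_sub *sub = (struct demo_sub *)data;

		pr_info("subscription %p expired\n", sub);
	}

	static struct demo_sub *demo_sub_create(u32 timeout_ms)
	{
		struct demo_sub *sub = kzalloc(sizeof(*sub), GFP_KERNEL);

		if (!sub)
			return NULL;
		sub->timeout = msecs_to_jiffies(timeout_ms);
		setup_timer(&sub->timer, demo_timeout, (unsigned long)sub);
		mod_timer(&sub->timer, jiffies + sub->timeout);
		return sub;
	}

	static void demo_sub_destroy(struct demo_sub *sub)
	{
		del_timer_sync(&sub->timer);  /* wait for a running callback */
		kfree(sub);
	}
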
@@ -326,16 +306,18 @@ static void subscr_conn_shutdown_event(int conid, void *usr_data)
326} 306}
327 307
328/* Handle one request to create a new subscription for the subscriber */ 308/* Handle one request to create a new subscription for the subscriber */
329static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr, 309static void subscr_conn_msg_event(struct net *net, int conid,
330 void *usr_data, void *buf, size_t len) 310 struct sockaddr_tipc *addr, void *usr_data,
311 void *buf, size_t len)
331{ 312{
332 struct tipc_subscriber *subscriber = usr_data; 313 struct tipc_subscriber *subscriber = usr_data;
333 struct tipc_subscription *sub = NULL; 314 struct tipc_subscription *sub = NULL;
334 315
335 spin_lock_bh(&subscriber->lock); 316 spin_lock_bh(&subscriber->lock);
336 if (subscr_subscribe((struct tipc_subscr *)buf, subscriber, &sub) < 0) { 317 if (subscr_subscribe(net, (struct tipc_subscr *)buf, subscriber,
318 &sub) < 0) {
337 spin_unlock_bh(&subscriber->lock); 319 spin_unlock_bh(&subscriber->lock);
338 subscr_terminate(subscriber); 320 subscr_terminate(sub);
339 return; 321 return;
340 } 322 }
341 if (sub) 323 if (sub)
@@ -343,7 +325,6 @@ static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr,
343 spin_unlock_bh(&subscriber->lock); 325 spin_unlock_bh(&subscriber->lock);
344} 326}
345 327
346
347/* Handle one request to establish a new subscriber */ 328/* Handle one request to establish a new subscriber */
348static void *subscr_named_msg_event(int conid) 329static void *subscr_named_msg_event(int conid)
349{ 330{
@@ -362,12 +343,50 @@ static void *subscr_named_msg_event(int conid)
362 return (void *)subscriber; 343 return (void *)subscriber;
363} 344}
364 345
365int tipc_subscr_start(void) 346int tipc_subscr_start(struct net *net)
366{ 347{
367 return tipc_server_start(&topsrv); 348 struct tipc_net *tn = net_generic(net, tipc_net_id);
349 const char name[] = "topology_server";
350 struct tipc_server *topsrv;
351 struct sockaddr_tipc *saddr;
352
353 saddr = kzalloc(sizeof(*saddr), GFP_ATOMIC);
354 if (!saddr)
355 return -ENOMEM;
356 saddr->family = AF_TIPC;
357 saddr->addrtype = TIPC_ADDR_NAMESEQ;
358 saddr->addr.nameseq.type = TIPC_TOP_SRV;
359 saddr->addr.nameseq.lower = TIPC_TOP_SRV;
360 saddr->addr.nameseq.upper = TIPC_TOP_SRV;
361 saddr->scope = TIPC_NODE_SCOPE;
362
363 topsrv = kzalloc(sizeof(*topsrv), GFP_ATOMIC);
364 if (!topsrv) {
365 kfree(saddr);
366 return -ENOMEM;
367 }
368 topsrv->net = net;
369 topsrv->saddr = saddr;
370 topsrv->imp = TIPC_CRITICAL_IMPORTANCE;
371 topsrv->type = SOCK_SEQPACKET;
372 topsrv->max_rcvbuf_size = sizeof(struct tipc_subscr);
373 topsrv->tipc_conn_recvmsg = subscr_conn_msg_event;
374 topsrv->tipc_conn_new = subscr_named_msg_event;
375 topsrv->tipc_conn_shutdown = subscr_conn_shutdown_event;
376
377 strncpy(topsrv->name, name, strlen(name) + 1);
378 tn->topsrv = topsrv;
379 atomic_set(&tn->subscription_count, 0);
380
381 return tipc_server_start(topsrv);
368} 382}
369 383
370void tipc_subscr_stop(void) 384void tipc_subscr_stop(struct net *net)
371{ 385{
372 tipc_server_stop(&topsrv); 386 struct tipc_net *tn = net_generic(net, tipc_net_id);
387 struct tipc_server *topsrv = tn->topsrv;
388
389 tipc_server_stop(topsrv);
390 kfree(topsrv->saddr);
391 kfree(topsrv);
373} 392}
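Across the whole patch, formerly global state — own_addr, the socket hash table, the topology server and its subscription counter — moves into a per-namespace structure reached with net_generic(net, tipc_net_id). The generic shape of that pattern, reduced to a toy subsystem (every identifier below is illustrative, not TIPC's actual definitions), is roughly:

	#include <linux/atomic.h>
	#include <linux/module.h>
	#include <net/net_namespace.h>
	#include <net/netns/generic.h>

	/* Per-namespace private data; one instance exists for every netns. */
	struct demo_net {
		atomic_t object_count;
	};

	static int demo_net_id __read_mostly;

	static int __net_init demo_init_net(struct net *net)
	{
		struct demo_net *dn = net_generic(net, demo_net_id);

		atomic_set(&dn->object_count, 0);
		return 0;
	}

	static void __net_exit demo_exit_net(struct net *net)
	{
		/* per-namespace teardown would go here */
	}

	static struct pernet_operations demo_net_ops = {
		.init = demo_init_net,
		.exit = demo_exit_net,
		.id   = &demo_net_id,
		.size = sizeof(struct demo_net),  /* allocated per netns by the core */
	};

	static int __init demo_init(void)
	{
		return register_pernet_subsys(&demo_net_ops);
	}

	static void __exit demo_exit(void)
	{
		unregister_pernet_subsys(&demo_net_ops);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");
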
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index 393e417bee3f..33488bd9fe3c 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -39,6 +39,9 @@
39 39
40#include "server.h" 40#include "server.h"
41 41
42#define TIPC_MAX_SUBSCRIPTIONS 65535
43#define TIPC_MAX_PUBLICATIONS 65535
44
42struct tipc_subscription; 45struct tipc_subscription;
43struct tipc_subscriber; 46struct tipc_subscriber;
44 47
@@ -46,6 +49,7 @@ struct tipc_subscriber;
46 * struct tipc_subscription - TIPC network topology subscription object 49 * struct tipc_subscription - TIPC network topology subscription object
47 * @subscriber: pointer to its subscriber 50 * @subscriber: pointer to its subscriber
48 * @seq: name sequence associated with subscription 51 * @seq: name sequence associated with subscription
52 * @net: point to network namespace
49 * @timeout: duration of subscription (in ms) 53 * @timeout: duration of subscription (in ms)
50 * @filter: event filtering to be done for subscription 54 * @filter: event filtering to be done for subscription
51 * @timer: timer governing subscription duration (optional) 55 * @timer: timer governing subscription duration (optional)
@@ -58,7 +62,8 @@ struct tipc_subscriber;
58struct tipc_subscription { 62struct tipc_subscription {
59 struct tipc_subscriber *subscriber; 63 struct tipc_subscriber *subscriber;
60 struct tipc_name_seq seq; 64 struct tipc_name_seq seq;
61 u32 timeout; 65 struct net *net;
66 unsigned long timeout;
62 u32 filter; 67 u32 filter;
63 struct timer_list timer; 68 struct timer_list timer;
64 struct list_head nameseq_list; 69 struct list_head nameseq_list;
@@ -69,13 +74,10 @@ struct tipc_subscription {
69 74
70int tipc_subscr_overlap(struct tipc_subscription *sub, u32 found_lower, 75int tipc_subscr_overlap(struct tipc_subscription *sub, u32 found_lower,
71 u32 found_upper); 76 u32 found_upper);
72
73void tipc_subscr_report_overlap(struct tipc_subscription *sub, u32 found_lower, 77void tipc_subscr_report_overlap(struct tipc_subscription *sub, u32 found_lower,
74 u32 found_upper, u32 event, u32 port_ref, 78 u32 found_upper, u32 event, u32 port_ref,
75 u32 node, int must); 79 u32 node, int must);
76 80int tipc_subscr_start(struct net *net);
77int tipc_subscr_start(void); 81void tipc_subscr_stop(struct net *net);
78
79void tipc_subscr_stop(void);
80 82
81#endif 83#endif