aboutsummaryrefslogtreecommitdiffstats
path: root/net/tipc
diff options
context:
space:
mode:
authorSteve French <sfrench@us.ibm.com>2006-01-17 22:49:59 -0500
committerSteve French <sfrench@us.ibm.com>2006-01-17 22:49:59 -0500
commitd65177c1ae7f085723154105c5dc8d9e16ae8265 (patch)
tree14408129d880d89cc5e937f2810f243ed1e6fcde /net/tipc
parentd41f084a74de860fe879403fbbad13abdf7aea8e (diff)
parent15578eeb6cd4b74492f26e60624aa1a9a52ddd7b (diff)
Merge with /pub/scm/linux/kernel/git/torvalds/linux-2.6.git
Signed-off-by: Steve French <sfrench@us.ibm.com>
Diffstat (limited to 'net/tipc')
-rw-r--r--net/tipc/Kconfig112
-rw-r--r--net/tipc/Makefile13
-rw-r--r--net/tipc/addr.c94
-rw-r--r--net/tipc/addr.h128
-rw-r--r--net/tipc/bcast.c806
-rw-r--r--net/tipc/bcast.h223
-rw-r--r--net/tipc/bearer.c692
-rw-r--r--net/tipc/bearer.h172
-rw-r--r--net/tipc/cluster.c576
-rw-r--r--net/tipc/cluster.h92
-rw-r--r--net/tipc/config.c718
-rw-r--r--net/tipc/config.h80
-rw-r--r--net/tipc/core.c285
-rw-r--r--net/tipc/core.h316
-rw-r--r--net/tipc/dbg.c395
-rw-r--r--net/tipc/dbg.h59
-rw-r--r--net/tipc/discover.c318
-rw-r--r--net/tipc/discover.h58
-rw-r--r--net/tipc/eth_media.c299
-rw-r--r--net/tipc/handler.c132
-rw-r--r--net/tipc/link.c3167
-rw-r--r--net/tipc/link.h296
-rw-r--r--net/tipc/msg.c334
-rw-r--r--net/tipc/msg.h818
-rw-r--r--net/tipc/name_distr.c309
-rw-r--r--net/tipc/name_distr.h48
-rw-r--r--net/tipc/name_table.c1079
-rw-r--r--net/tipc/name_table.h108
-rw-r--r--net/tipc/net.c311
-rw-r--r--net/tipc/net.h66
-rw-r--r--net/tipc/netlink.c112
-rw-r--r--net/tipc/node.c679
-rw-r--r--net/tipc/node.h144
-rw-r--r--net/tipc/node_subscr.c79
-rw-r--r--net/tipc/node_subscr.h63
-rw-r--r--net/tipc/port.c1708
-rw-r--r--net/tipc/port.h209
-rw-r--r--net/tipc/ref.c189
-rw-r--r--net/tipc/ref.h131
-rw-r--r--net/tipc/socket.c1726
-rw-r--r--net/tipc/subscr.c527
-rw-r--r--net/tipc/subscr.h80
-rw-r--r--net/tipc/user_reg.c265
-rw-r--r--net/tipc/user_reg.h48
-rw-r--r--net/tipc/zone.c169
-rw-r--r--net/tipc/zone.h71
46 files changed, 18304 insertions, 0 deletions
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
new file mode 100644
index 000000000000..05ab18e62dee
--- /dev/null
+++ b/net/tipc/Kconfig
@@ -0,0 +1,112 @@
1#
2# TIPC configuration
3#
4
5menu "TIPC Configuration (EXPERIMENTAL)"
6 depends on INET && EXPERIMENTAL
7
8config TIPC
9 tristate "The TIPC Protocol (EXPERIMENTAL)"
10 ---help---
11 TBD.
12
13 This protocol support is also available as a module ( = code which
14 can be inserted in and removed from the running kernel whenever you
15 want). The module will be called tipc. If you want to compile it
16 as a module, say M here and read <file:Documentation/modules.txt>.
17
18 If in doubt, say N.
19
20config TIPC_ADVANCED
21 bool "TIPC: Advanced configuration"
22 depends on TIPC
23 default n
24 help
25 Saying Y here will open some advanced configuration
26 for TIPC. Most users do not need to bother, so if
27 unsure, just say N.
28
29config TIPC_ZONES
30 int "Maximum number of zones in network"
31 depends on TIPC && TIPC_ADVANCED
32 default "3"
33 help
34 Max number of zones inside TIPC network. Max supported value
35 is 255 zones, minimum is 1
36
37 Default is 3 zones in a network; setting this to higher
38 allows more zones but might use more memory.
39
40config TIPC_CLUSTERS
41 int "Maximum number of clusters in a zone"
42 depends on TIPC && TIPC_ADVANCED
43 default "1"
44 help
45 ***Only 1 (one cluster in a zone) is supported by current code.
46 Any value set here will be overridden.***
47
48 (Max number of clusters inside TIPC zone. Max supported
49 value is 4095 clusters, minimum is 1.
50
51 Default is 1; setting this to smaller value might save
52 some memory, setting it to higher
53 allows more clusters and might consume more memory.)
54
55config TIPC_NODES
56 int "Maximum number of nodes in cluster"
57 depends on TIPC && TIPC_ADVANCED
58 default "255"
59 help
60 Maximum number of nodes inside a TIPC cluster. Maximum
61 supported value is 2047 nodes, minimum is 8.
62
63 Setting this to a smaller value saves some memory,
64 setting it to higher allows more nodes.
65
66config TIPC_SLAVE_NODES
67 int "Maximum number of slave nodes in cluster"
68 depends on TIPC && TIPC_ADVANCED
69 default "0"
70 help
71 ***This capability is not supported by current code.***
72
73 Maximum number of slave nodes inside a TIPC cluster. Maximum
74 supported value is 2047 nodes, minimum is 0.
75
76 Setting this to a smaller value saves some memory,
77 setting it to higher allows more nodes.
78
79config TIPC_PORTS
80 int "Maximum number of ports in a node"
81 depends on TIPC && TIPC_ADVANCED
82 default "8191"
83 help
84	  Maximum number of ports within a node. Maximum
85	  supported value is 65535 ports, minimum is 127.
86
87 Setting this to a smaller value saves some memory,
88 setting it to higher allows more ports.
89
90config TIPC_LOG
91 int "Size of log buffer"
92 depends on TIPC && TIPC_ADVANCED
93 default 0
94 help
95 Size (in bytes) of TIPC's internal log buffer, which records the
96 occurrence of significant events. Maximum supported value
97 is 32768 bytes, minimum is 0.
98
99 There is no need to enable the log buffer unless the node will be
100 managed remotely via TIPC.
101
102config TIPC_DEBUG
103 bool "Enable debugging support"
104 depends on TIPC
105 default n
106 help
107 This will enable debugging of TIPC.
108
109 Only say Y here if you are having trouble with TIPC. It will
110 enable the display of detailed information about what is going on.
111
112endmenu
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
new file mode 100644
index 000000000000..dceb7027946c
--- /dev/null
+++ b/net/tipc/Makefile
@@ -0,0 +1,13 @@
1#
2# Makefile for the Linux TIPC layer
3#
4
5obj-$(CONFIG_TIPC) := tipc.o
6
7tipc-y += addr.o bcast.o bearer.o config.o cluster.o \
8 core.o handler.o link.o discover.o msg.o \
9 name_distr.o subscr.o name_table.o net.o \
10 netlink.o node.o node_subscr.o port.o ref.o \
11 socket.o user_reg.o zone.o dbg.o eth_media.o
12
13# End of file
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
new file mode 100644
index 000000000000..eca22260c98c
--- /dev/null
+++ b/net/tipc/addr.c
@@ -0,0 +1,94 @@
1/*
2 * net/tipc/addr.c: TIPC address utility routines
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "dbg.h"
39#include "addr.h"
40#include "zone.h"
41#include "cluster.h"
42#include "net.h"
43
44u32 tipc_get_addr(void)
45{
46 return tipc_own_addr;
47}
48
49/**
50 * addr_domain_valid - validates a network domain address
51 *
52 * Accepts <Z.C.N>, <Z.C.0>, <Z.0.0>, and <0.0.0>,
53 * where Z, C, and N are non-zero and do not exceed the configured limits.
54 *
55 * Returns 1 if domain address is valid, otherwise 0
56 */
57
58int addr_domain_valid(u32 addr)
59{
60 u32 n = tipc_node(addr);
61 u32 c = tipc_cluster(addr);
62 u32 z = tipc_zone(addr);
63 u32 max_nodes = tipc_max_nodes;
64
65 if (is_slave(addr))
66 max_nodes = LOWEST_SLAVE + tipc_max_slaves;
67 if (n > max_nodes)
68 return 0;
69 if (c > tipc_max_clusters)
70 return 0;
71 if (z > tipc_max_zones)
72 return 0;
73
74 if (n && (!z || !c))
75 return 0;
76 if (c && !z)
77 return 0;
78 return 1;
79}
80
81/**
82 * addr_node_valid - validates a proposed network address for this node
83 *
84 * Accepts <Z.C.N>, where Z, C, and N are non-zero and do not exceed
85 * the configured limits.
86 *
87 * Returns 1 if address can be used, otherwise 0
88 */
89
90int addr_node_valid(u32 addr)
91{
92 return (addr_domain_valid(addr) && tipc_node(addr));
93}
94
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
new file mode 100644
index 000000000000..02ca71783e2e
--- /dev/null
+++ b/net/tipc/addr.h
@@ -0,0 +1,128 @@
1/*
2 * net/tipc/addr.h: Include file for TIPC address utility routines
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_ADDR_H
38#define _TIPC_ADDR_H
39
40static inline u32 own_node(void)
41{
42 return tipc_node(tipc_own_addr);
43}
44
45static inline u32 own_cluster(void)
46{
47 return tipc_cluster(tipc_own_addr);
48}
49
50static inline u32 own_zone(void)
51{
52 return tipc_zone(tipc_own_addr);
53}
54
55static inline int in_own_cluster(u32 addr)
56{
57 return !((addr ^ tipc_own_addr) >> 12);
58}
59
60static inline int in_own_zone(u32 addr)
61{
62 return !((addr ^ tipc_own_addr) >> 24);
63}
64
65static inline int is_slave(u32 addr)
66{
67 return addr & 0x800;
68}
69
70static inline int may_route(u32 addr)
71{
72 return(addr ^ tipc_own_addr) >> 11;
73}
74
75static inline int in_scope(u32 domain, u32 addr)
76{
77 if (!domain || (domain == addr))
78 return 1;
79 if (domain == (addr & 0xfffff000u)) /* domain <Z.C.0> */
80 return 1;
81 if (domain == (addr & 0xff000000u)) /* domain <Z.0.0> */
82 return 1;
83 return 0;
84}
85
86/**
87 * addr_scope - convert message lookup domain to equivalent 2-bit scope value
88 */
89
90static inline int addr_scope(u32 domain)
91{
92 if (likely(!domain))
93 return TIPC_ZONE_SCOPE;
94 if (tipc_node(domain))
95 return TIPC_NODE_SCOPE;
96 if (tipc_cluster(domain))
97 return TIPC_CLUSTER_SCOPE;
98 return TIPC_ZONE_SCOPE;
99}
100
101/**
102 * addr_domain - convert 2-bit scope value to equivalent message lookup domain
103 *
104 * Needed when address of a named message must be looked up a second time
105 * after a network hop.
106 */
107
108static inline int addr_domain(int sc)
109{
110 if (likely(sc == TIPC_NODE_SCOPE))
111 return tipc_own_addr;
112 if (sc == TIPC_CLUSTER_SCOPE)
113 return tipc_addr(tipc_zone(tipc_own_addr),
114 tipc_cluster(tipc_own_addr), 0);
115 return tipc_addr(tipc_zone(tipc_own_addr), 0, 0);
116}
117
118static inline char *addr_string_fill(char *string, u32 addr)
119{
120 snprintf(string, 16, "<%u.%u.%u>",
121 tipc_zone(addr), tipc_cluster(addr), tipc_node(addr));
122 return string;
123}
124
125int addr_domain_valid(u32);
126int addr_node_valid(u32 addr);
127
128#endif
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
new file mode 100644
index 000000000000..9713d622efb8
--- /dev/null
+++ b/net/tipc/bcast.c
@@ -0,0 +1,806 @@
1/*
2 * net/tipc/bcast.c: TIPC broadcast code
3 *
4 * Copyright (c) 2004-2006, Ericsson AB
5 * Copyright (c) 2004, Intel Corporation.
6 * Copyright (c) 2005, Wind River Systems
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions are met:
11 *
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the names of the copyright holders nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * Alternatively, this software may be distributed under the terms of the
22 * GNU General Public License ("GPL") version 2 as published by the Free
23 * Software Foundation.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
26 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
29 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38#include "core.h"
39#include "msg.h"
40#include "dbg.h"
41#include "link.h"
42#include "net.h"
43#include "node.h"
44#include "port.h"
45#include "addr.h"
46#include "node_subscr.h"
47#include "name_distr.h"
48#include "bearer.h"
49#include "name_table.h"
50#include "bcast.h"
51
52
53#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */
54
55#define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */
56
57#define BCLINK_LOG_BUF_SIZE 0
58
/**
 * struct bcbearer_pair - a pair of bearers used by broadcast link
 * @primary: pointer to primary bearer
 * @secondary: pointer to secondary bearer
 *
 * Bearers must have same priority and same set of reachable destinations
 * to be paired.
 */

struct bcbearer_pair {
	struct bearer *primary;
	struct bearer *secondary;
};
72
73/**
74 * struct bcbearer - bearer used by broadcast link
75 * @bearer: (non-standard) broadcast bearer structure
76 * @media: (non-standard) broadcast media structure
77 * @bpairs: array of bearer pairs
78 * @bpairs_temp: array of bearer pairs used during creation of "bpairs"
79 */
80
81struct bcbearer {
82 struct bearer bearer;
83 struct media media;
84 struct bcbearer_pair bpairs[MAX_BEARERS];
85 struct bcbearer_pair bpairs_temp[TIPC_NUM_LINK_PRI];
86};
87
88/**
89 * struct bclink - link used for broadcast messages
90 * @link: (non-standard) broadcast link structure
91 * @node: (non-standard) node structure representing b'cast link's peer node
92 *
93 * Handles sequence numbering, fragmentation, bundling, etc.
94 */
95
96struct bclink {
97 struct link link;
98 struct node node;
99};
100
101
102static struct bcbearer *bcbearer = NULL;
103static struct bclink *bclink = NULL;
104static struct link *bcl = NULL;
105static spinlock_t bc_lock = SPIN_LOCK_UNLOCKED;
106
107char bc_link_name[] = "multicast-link";
108
109
110static inline u32 buf_seqno(struct sk_buff *buf)
111{
112 return msg_seqno(buf_msg(buf));
113}
114
115static inline u32 bcbuf_acks(struct sk_buff *buf)
116{
117 return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
118}
119
120static inline void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
121{
122 TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
123}
124
/* bcbuf_decr_acks - one more node has acknowledged this broadcast buffer */
static inline void bcbuf_decr_acks(struct sk_buff *buf)
{
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}
129
130
131/**
132 * bclink_set_gap - set gap according to contents of current deferred pkt queue
133 *
134 * Called with 'node' locked, bc_lock unlocked
135 */
136
137static inline void bclink_set_gap(struct node *n_ptr)
138{
139 struct sk_buff *buf = n_ptr->bclink.deferred_head;
140
141 n_ptr->bclink.gap_after = n_ptr->bclink.gap_to =
142 mod(n_ptr->bclink.last_in);
143 if (unlikely(buf != NULL))
144 n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
145}
146
147/**
148 * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
149 *
150 * This mechanism endeavours to prevent all nodes in network from trying
151 * to ACK or NACK at the same time.
152 *
153 * Note: TIPC uses a different trigger to distribute ACKs than it does to
154 * distribute NACKs, but tries to use the same spacing (divide by 16).
155 */
156
157static inline int bclink_ack_allowed(u32 n)
158{
159 return((n % TIPC_MIN_LINK_WIN) == tipc_own_tag);
160}
161
162
163/**
164 * bclink_retransmit_pkt - retransmit broadcast packets
165 * @after: sequence number of last packet to *not* retransmit
166 * @to: sequence number of last packet to retransmit
167 *
168 * Called with 'node' locked, bc_lock unlocked
169 */
170
171static void bclink_retransmit_pkt(u32 after, u32 to)
172{
173 struct sk_buff *buf;
174
175 spin_lock_bh(&bc_lock);
176 buf = bcl->first_out;
177 while (buf && less_eq(buf_seqno(buf), after)) {
178 buf = buf->next;
179 }
180 if (buf != NULL)
181 link_retransmit(bcl, buf, mod(to - after));
182 spin_unlock_bh(&bc_lock);
183}
184
185/**
186 * bclink_acknowledge - handle acknowledgement of broadcast packets
187 * @n_ptr: node that sent acknowledgement info
188 * @acked: broadcast sequence # that has been acknowledged
189 *
190 * Node is locked, bc_lock unlocked.
191 */
192
193void bclink_acknowledge(struct node *n_ptr, u32 acked)
194{
195 struct sk_buff *crs;
196 struct sk_buff *next;
197 unsigned int released = 0;
198
199 if (less_eq(acked, n_ptr->bclink.acked))
200 return;
201
202 spin_lock_bh(&bc_lock);
203
204 /* Skip over packets that node has previously acknowledged */
205
206 crs = bcl->first_out;
207 while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked)) {
208 crs = crs->next;
209 }
210
211 /* Update packets that node is now acknowledging */
212
213 while (crs && less_eq(buf_seqno(crs), acked)) {
214 next = crs->next;
215 bcbuf_decr_acks(crs);
216 if (bcbuf_acks(crs) == 0) {
217 bcl->first_out = next;
218 bcl->out_queue_size--;
219 buf_discard(crs);
220 released = 1;
221 }
222 crs = next;
223 }
224 n_ptr->bclink.acked = acked;
225
226 /* Try resolving broadcast link congestion, if necessary */
227
228 if (unlikely(bcl->next_out))
229 link_push_queue(bcl);
230 if (unlikely(released && !list_empty(&bcl->waiting_ports)))
231 link_wakeup_ports(bcl, 0);
232 spin_unlock_bh(&bc_lock);
233}
234
235/**
236 * bclink_send_ack - unicast an ACK msg
237 *
238 * net_lock and node lock set
239 */
240
241static void bclink_send_ack(struct node *n_ptr)
242{
243 struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];
244
245 if (l_ptr != NULL)
246 link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
247}
248
249/**
250 * bclink_send_nack- broadcast a NACK msg
251 *
252 * net_lock and node lock set
253 */
254
255static void bclink_send_nack(struct node *n_ptr)
256{
257 struct sk_buff *buf;
258 struct tipc_msg *msg;
259
260 if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to))
261 return;
262
263 buf = buf_acquire(INT_H_SIZE);
264 if (buf) {
265 msg = buf_msg(buf);
266 msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
267 TIPC_OK, INT_H_SIZE, n_ptr->addr);
268 msg_set_mc_netid(msg, tipc_net_id);
269 msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
270 msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
271 msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
272 msg_set_bcast_tag(msg, tipc_own_tag);
273
274 if (bearer_send(&bcbearer->bearer, buf, 0)) {
275 bcl->stats.sent_nacks++;
276 buf_discard(buf);
277 } else {
278 bearer_schedule(bcl->b_ptr, bcl);
279 bcl->proto_msg_queue = buf;
280 bcl->stats.bearer_congs++;
281 }
282
283 /*
284 * Ensure we doesn't send another NACK msg to the node
285 * until 16 more deferred messages arrive from it
286 * (i.e. helps prevent all nodes from NACK'ing at same time)
287 */
288
289 n_ptr->bclink.nack_sync = tipc_own_tag;
290 }
291}
292
293/**
294 * bclink_check_gap - send a NACK if a sequence gap exists
295 *
296 * net_lock and node lock set
297 */
298
299void bclink_check_gap(struct node *n_ptr, u32 last_sent)
300{
301 if (!n_ptr->bclink.supported ||
302 less_eq(last_sent, mod(n_ptr->bclink.last_in)))
303 return;
304
305 bclink_set_gap(n_ptr);
306 if (n_ptr->bclink.gap_after == n_ptr->bclink.gap_to)
307 n_ptr->bclink.gap_to = last_sent;
308 bclink_send_nack(n_ptr);
309}
310
311/**
312 * bclink_peek_nack - process a NACK msg meant for another node
313 *
314 * Only net_lock set.
315 */
316
317void bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
318{
319 struct node *n_ptr = node_find(dest);
320 u32 my_after, my_to;
321
322 if (unlikely(!n_ptr || !node_is_up(n_ptr)))
323 return;
324 node_lock(n_ptr);
325 /*
326 * Modify gap to suppress unnecessary NACKs from this node
327 */
328 my_after = n_ptr->bclink.gap_after;
329 my_to = n_ptr->bclink.gap_to;
330
331 if (less_eq(gap_after, my_after)) {
332 if (less(my_after, gap_to) && less(gap_to, my_to))
333 n_ptr->bclink.gap_after = gap_to;
334 else if (less_eq(my_to, gap_to))
335 n_ptr->bclink.gap_to = n_ptr->bclink.gap_after;
336 } else if (less_eq(gap_after, my_to)) {
337 if (less_eq(my_to, gap_to))
338 n_ptr->bclink.gap_to = gap_after;
339 } else {
340 /*
341 * Expand gap if missing bufs not in deferred queue:
342 */
343 struct sk_buff *buf = n_ptr->bclink.deferred_head;
344 u32 prev = n_ptr->bclink.gap_to;
345
346 for (; buf; buf = buf->next) {
347 u32 seqno = buf_seqno(buf);
348
349 if (mod(seqno - prev) != 1)
350 buf = NULL;
351 if (seqno == gap_after)
352 break;
353 prev = seqno;
354 }
355 if (buf == NULL)
356 n_ptr->bclink.gap_to = gap_after;
357 }
358 /*
359 * Some nodes may send a complementary NACK now:
360 */
361 if (bclink_ack_allowed(sender_tag + 1)) {
362 if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
363 bclink_send_nack(n_ptr);
364 bclink_set_gap(n_ptr);
365 }
366 }
367 node_unlock(n_ptr);
368}
369
370/**
371 * bclink_send_msg - broadcast a packet to all nodes in cluster
372 */
373
374int bclink_send_msg(struct sk_buff *buf)
375{
376 int res;
377
378 spin_lock_bh(&bc_lock);
379
380 res = link_send_buf(bcl, buf);
381 if (unlikely(res == -ELINKCONG))
382 buf_discard(buf);
383 else
384 bcl->stats.sent_info++;
385
386 if (bcl->out_queue_size > bcl->stats.max_queue_sz)
387 bcl->stats.max_queue_sz = bcl->out_queue_size;
388 bcl->stats.queue_sz_counts++;
389 bcl->stats.accu_queue_sz += bcl->out_queue_size;
390
391 spin_unlock_bh(&bc_lock);
392 return res;
393}
394
395/**
396 * bclink_recv_pkt - receive a broadcast packet, and deliver upwards
397 *
398 * net_lock is read_locked, no other locks set
399 */
400
401void bclink_recv_pkt(struct sk_buff *buf)
402{
403 struct tipc_msg *msg = buf_msg(buf);
404 struct node* node = node_find(msg_prevnode(msg));
405 u32 next_in;
406 u32 seqno;
407 struct sk_buff *deferred;
408
409 msg_dbg(msg, "<BC<<<");
410
411 if (unlikely(!node || !node_is_up(node) || !node->bclink.supported ||
412 (msg_mc_netid(msg) != tipc_net_id))) {
413 buf_discard(buf);
414 return;
415 }
416
417 if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
418 msg_dbg(msg, "<BCNACK<<<");
419 if (msg_destnode(msg) == tipc_own_addr) {
420 node_lock(node);
421 bclink_acknowledge(node, msg_bcast_ack(msg));
422 node_unlock(node);
423 bcl->stats.recv_nacks++;
424 bclink_retransmit_pkt(msg_bcgap_after(msg),
425 msg_bcgap_to(msg));
426 } else {
427 bclink_peek_nack(msg_destnode(msg),
428 msg_bcast_tag(msg),
429 msg_bcgap_after(msg),
430 msg_bcgap_to(msg));
431 }
432 buf_discard(buf);
433 return;
434 }
435
436 node_lock(node);
437receive:
438 deferred = node->bclink.deferred_head;
439 next_in = mod(node->bclink.last_in + 1);
440 seqno = msg_seqno(msg);
441
442 if (likely(seqno == next_in)) {
443 bcl->stats.recv_info++;
444 node->bclink.last_in++;
445 bclink_set_gap(node);
446 if (unlikely(bclink_ack_allowed(seqno))) {
447 bclink_send_ack(node);
448 bcl->stats.sent_acks++;
449 }
450 if (likely(msg_isdata(msg))) {
451 node_unlock(node);
452 port_recv_mcast(buf, NULL);
453 } else if (msg_user(msg) == MSG_BUNDLER) {
454 bcl->stats.recv_bundles++;
455 bcl->stats.recv_bundled += msg_msgcnt(msg);
456 node_unlock(node);
457 link_recv_bundle(buf);
458 } else if (msg_user(msg) == MSG_FRAGMENTER) {
459 bcl->stats.recv_fragments++;
460 if (link_recv_fragment(&node->bclink.defragm,
461 &buf, &msg))
462 bcl->stats.recv_fragmented++;
463 node_unlock(node);
464 net_route_msg(buf);
465 } else {
466 node_unlock(node);
467 net_route_msg(buf);
468 }
469 if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
470 node_lock(node);
471 buf = deferred;
472 msg = buf_msg(buf);
473 node->bclink.deferred_head = deferred->next;
474 goto receive;
475 }
476 return;
477 } else if (less(next_in, seqno)) {
478 u32 gap_after = node->bclink.gap_after;
479 u32 gap_to = node->bclink.gap_to;
480
481 if (link_defer_pkt(&node->bclink.deferred_head,
482 &node->bclink.deferred_tail,
483 buf)) {
484 node->bclink.nack_sync++;
485 bcl->stats.deferred_recv++;
486 if (seqno == mod(gap_after + 1))
487 node->bclink.gap_after = seqno;
488 else if (less(gap_after, seqno) && less(seqno, gap_to))
489 node->bclink.gap_to = seqno;
490 }
491 if (bclink_ack_allowed(node->bclink.nack_sync)) {
492 if (gap_to != gap_after)
493 bclink_send_nack(node);
494 bclink_set_gap(node);
495 }
496 } else {
497 bcl->stats.duplicates++;
498 buf_discard(buf);
499 }
500 node_unlock(node);
501}
502
503u32 bclink_get_last_sent(void)
504{
505 u32 last_sent = mod(bcl->next_out_no - 1);
506
507 if (bcl->next_out)
508 last_sent = mod(buf_seqno(bcl->next_out) - 1);
509 return last_sent;
510}
511
512u32 bclink_acks_missing(struct node *n_ptr)
513{
514 return (n_ptr->bclink.supported &&
515 (bclink_get_last_sent() != n_ptr->bclink.acked));
516}
517
518
519/**
520 * bcbearer_send - send a packet through the broadcast pseudo-bearer
521 *
522 * Send through as many bearers as necessary to reach all nodes
523 * that support TIPC multicasting.
524 *
525 * Returns 0 if packet sent successfully, non-zero if not
526 */
527
528int bcbearer_send(struct sk_buff *buf,
529 struct tipc_bearer *unused1,
530 struct tipc_media_addr *unused2)
531{
532 static int send_count = 0;
533
534 struct node_map remains;
535 struct node_map remains_new;
536 int bp_index;
537 int swap_time;
538
539 /* Prepare buffer for broadcasting (if first time trying to send it) */
540
541 if (likely(!msg_non_seq(buf_msg(buf)))) {
542 struct tipc_msg *msg;
543
544 assert(cluster_bcast_nodes.count != 0);
545 bcbuf_set_acks(buf, cluster_bcast_nodes.count);
546 msg = buf_msg(buf);
547 msg_set_non_seq(msg);
548 msg_set_mc_netid(msg, tipc_net_id);
549 }
550
551 /* Determine if bearer pairs should be swapped following this attempt */
552
553 if ((swap_time = (++send_count >= 10)))
554 send_count = 0;
555
556 /* Send buffer over bearers until all targets reached */
557
558 remains = cluster_bcast_nodes;
559
560 for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
561 struct bearer *p = bcbearer->bpairs[bp_index].primary;
562 struct bearer *s = bcbearer->bpairs[bp_index].secondary;
563
564 if (!p)
565 break; /* no more bearers to try */
566
567 nmap_diff(&remains, &p->nodes, &remains_new);
568 if (remains_new.count == remains.count)
569 continue; /* bearer pair doesn't add anything */
570
571 if (!p->publ.blocked &&
572 !p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) {
573 if (swap_time && s && !s->publ.blocked)
574 goto swap;
575 else
576 goto update;
577 }
578
579 if (!s || s->publ.blocked ||
580 s->media->send_msg(buf, &s->publ, &s->media->bcast_addr))
581 continue; /* unable to send using bearer pair */
582swap:
583 bcbearer->bpairs[bp_index].primary = s;
584 bcbearer->bpairs[bp_index].secondary = p;
585update:
586 if (remains_new.count == 0)
587 return TIPC_OK;
588
589 remains = remains_new;
590 }
591
592 /* Unable to reach all targets */
593
594 bcbearer->bearer.publ.blocked = 1;
595 bcl->stats.bearer_congs++;
596 return ~TIPC_OK;
597}
598
599/**
600 * bcbearer_sort - create sets of bearer pairs used by broadcast bearer
601 */
602
603void bcbearer_sort(void)
604{
605 struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
606 struct bcbearer_pair *bp_curr;
607 int b_index;
608 int pri;
609
610 spin_lock_bh(&bc_lock);
611
612 /* Group bearers by priority (can assume max of two per priority) */
613
614 memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
615
616 for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
617 struct bearer *b = &bearers[b_index];
618
619 if (!b->active || !b->nodes.count)
620 continue;
621
622 if (!bp_temp[b->priority].primary)
623 bp_temp[b->priority].primary = b;
624 else
625 bp_temp[b->priority].secondary = b;
626 }
627
628 /* Create array of bearer pairs for broadcasting */
629
630 bp_curr = bcbearer->bpairs;
631 memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));
632
633 for (pri = (TIPC_NUM_LINK_PRI - 1); pri >= 0; pri--) {
634
635 if (!bp_temp[pri].primary)
636 continue;
637
638 bp_curr->primary = bp_temp[pri].primary;
639
640 if (bp_temp[pri].secondary) {
641 if (nmap_equal(&bp_temp[pri].primary->nodes,
642 &bp_temp[pri].secondary->nodes)) {
643 bp_curr->secondary = bp_temp[pri].secondary;
644 } else {
645 bp_curr++;
646 bp_curr->primary = bp_temp[pri].secondary;
647 }
648 }
649
650 bp_curr++;
651 }
652
653 spin_unlock_bh(&bc_lock);
654}
655
656/**
657 * bcbearer_push - resolve bearer congestion
658 *
659 * Forces bclink to push out any unsent packets, until all packets are gone
660 * or congestion reoccurs.
661 * No locks set when function called
662 */
663
664void bcbearer_push(void)
665{
666 struct bearer *b_ptr;
667
668 spin_lock_bh(&bc_lock);
669 b_ptr = &bcbearer->bearer;
670 if (b_ptr->publ.blocked) {
671 b_ptr->publ.blocked = 0;
672 bearer_lock_push(b_ptr);
673 }
674 spin_unlock_bh(&bc_lock);
675}
676
677
678int bclink_stats(char *buf, const u32 buf_size)
679{
680 struct print_buf pb;
681
682 if (!bcl)
683 return 0;
684
685 printbuf_init(&pb, buf, buf_size);
686
687 spin_lock_bh(&bc_lock);
688
689 tipc_printf(&pb, "Link <%s>\n"
690 " Window:%u packets\n",
691 bcl->name, bcl->queue_limit[0]);
692 tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
693 bcl->stats.recv_info,
694 bcl->stats.recv_fragments,
695 bcl->stats.recv_fragmented,
696 bcl->stats.recv_bundles,
697 bcl->stats.recv_bundled);
698 tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
699 bcl->stats.sent_info,
700 bcl->stats.sent_fragments,
701 bcl->stats.sent_fragmented,
702 bcl->stats.sent_bundles,
703 bcl->stats.sent_bundled);
704 tipc_printf(&pb, " RX naks:%u defs:%u dups:%u\n",
705 bcl->stats.recv_nacks,
706 bcl->stats.deferred_recv,
707 bcl->stats.duplicates);
708 tipc_printf(&pb, " TX naks:%u acks:%u dups:%u\n",
709 bcl->stats.sent_nacks,
710 bcl->stats.sent_acks,
711 bcl->stats.retransmitted);
712 tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n",
713 bcl->stats.bearer_congs,
714 bcl->stats.link_congs,
715 bcl->stats.max_queue_sz,
716 bcl->stats.queue_sz_counts
717 ? (bcl->stats.accu_queue_sz / bcl->stats.queue_sz_counts)
718 : 0);
719
720 spin_unlock_bh(&bc_lock);
721 return printbuf_validate(&pb);
722}
723
724int bclink_reset_stats(void)
725{
726 if (!bcl)
727 return -ENOPROTOOPT;
728
729 spin_lock_bh(&bc_lock);
730 memset(&bcl->stats, 0, sizeof(bcl->stats));
731 spin_unlock_bh(&bc_lock);
732 return TIPC_OK;
733}
734
735int bclink_set_queue_limits(u32 limit)
736{
737 if (!bcl)
738 return -ENOPROTOOPT;
739 if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
740 return -EINVAL;
741
742 spin_lock_bh(&bc_lock);
743 link_set_queue_limits(bcl, limit);
744 spin_unlock_bh(&bc_lock);
745 return TIPC_OK;
746}
747
748int bclink_init(void)
749{
750 bcbearer = kmalloc(sizeof(*bcbearer), GFP_ATOMIC);
751 bclink = kmalloc(sizeof(*bclink), GFP_ATOMIC);
752 if (!bcbearer || !bclink) {
753 nomem:
754 warn("Memory squeeze; Failed to create multicast link\n");
755 kfree(bcbearer);
756 bcbearer = NULL;
757 kfree(bclink);
758 bclink = NULL;
759 return -ENOMEM;
760 }
761
762 memset(bcbearer, 0, sizeof(struct bcbearer));
763 INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
764 bcbearer->bearer.media = &bcbearer->media;
765 bcbearer->media.send_msg = bcbearer_send;
766 sprintf(bcbearer->media.name, "tipc-multicast");
767
768 bcl = &bclink->link;
769 memset(bclink, 0, sizeof(struct bclink));
770 INIT_LIST_HEAD(&bcl->waiting_ports);
771 bcl->next_out_no = 1;
772 bclink->node.lock = SPIN_LOCK_UNLOCKED;
773 bcl->owner = &bclink->node;
774 bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
775 link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
776 bcl->b_ptr = &bcbearer->bearer;
777 bcl->state = WORKING_WORKING;
778 sprintf(bcl->name, bc_link_name);
779
780 if (BCLINK_LOG_BUF_SIZE) {
781 char *pb = kmalloc(BCLINK_LOG_BUF_SIZE, GFP_ATOMIC);
782
783 if (!pb)
784 goto nomem;
785 printbuf_init(&bcl->print_buf, pb, BCLINK_LOG_BUF_SIZE);
786 }
787
788 return TIPC_OK;
789}
790
791void bclink_stop(void)
792{
793 spin_lock_bh(&bc_lock);
794 if (bcbearer) {
795 link_stop(bcl);
796 if (BCLINK_LOG_BUF_SIZE)
797 kfree(bcl->print_buf.buf);
798 bcl = NULL;
799 kfree(bclink);
800 bclink = NULL;
801 kfree(bcbearer);
802 bcbearer = NULL;
803 }
804 spin_unlock_bh(&bc_lock);
805}
806
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
new file mode 100644
index 000000000000..5430e524b4f9
--- /dev/null
+++ b/net/tipc/bcast.h
@@ -0,0 +1,223 @@
1/*
2 * net/tipc/bcast.h: Include file for TIPC broadcast code
3 *
4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_BCAST_H
38#define _TIPC_BCAST_H
39
40#define MAX_NODES 4096
41#define WSIZE 32
42
43/**
44 * struct node_map - set of node identifiers
45 * @count: # of nodes in set
46 * @map: bitmap of node identifiers that are in the set
47 */
48
struct node_map {
	u32 count;			/* # of bits set in 'map' */
	u32 map[MAX_NODES / WSIZE];	/* 1 bit per node number, WSIZE bits per word */
};
53
54
55#define PLSIZE 32
56
57/**
58 * struct port_list - set of node local destination ports
59 * @count: # of ports in set (only valid for first entry in list)
60 * @next: pointer to next entry in list
61 * @ports: array of port references
62 */
63
struct port_list {
	int count;		/* total # of ports; only the first entry's
				   count is valid for the whole chain */
	struct port_list *next;	/* overflow entries are kmalloc'd by
				   port_list_add() and freed by port_list_free() */
	u32 ports[PLSIZE];
};
69
70
71struct node;
72
73extern char bc_link_name[];
74
75
76/**
77 * nmap_get - determine if node exists in a node map
78 */
79
80static inline int nmap_get(struct node_map *nm_ptr, u32 node)
81{
82 int n = tipc_node(node);
83 int w = n / WSIZE;
84 int b = n % WSIZE;
85
86 return nm_ptr->map[w] & (1 << b);
87}
88
89/**
90 * nmap_add - add a node to a node map
91 */
92
93static inline void nmap_add(struct node_map *nm_ptr, u32 node)
94{
95 int n = tipc_node(node);
96 int w = n / WSIZE;
97 u32 mask = (1 << (n % WSIZE));
98
99 if ((nm_ptr->map[w] & mask) == 0) {
100 nm_ptr->count++;
101 nm_ptr->map[w] |= mask;
102 }
103}
104
105/**
106 * nmap_remove - remove a node from a node map
107 */
108
109static inline void nmap_remove(struct node_map *nm_ptr, u32 node)
110{
111 int n = tipc_node(node);
112 int w = n / WSIZE;
113 u32 mask = (1 << (n % WSIZE));
114
115 if ((nm_ptr->map[w] & mask) != 0) {
116 nm_ptr->map[w] &= ~mask;
117 nm_ptr->count--;
118 }
119}
120
121/**
122 * nmap_equal - test for equality of node maps
123 */
124
125static inline int nmap_equal(struct node_map *nm_a, struct node_map *nm_b)
126{
127 return !memcmp(nm_a, nm_b, sizeof(*nm_a));
128}
129
130/**
131 * nmap_diff - find differences between node maps
132 * @nm_a: input node map A
133 * @nm_b: input node map B
134 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
135 */
136
137static inline void nmap_diff(struct node_map *nm_a, struct node_map *nm_b,
138 struct node_map *nm_diff)
139{
140 int stop = sizeof(nm_a->map) / sizeof(u32);
141 int w;
142 int b;
143 u32 map;
144
145 memset(nm_diff, 0, sizeof(*nm_diff));
146 for (w = 0; w < stop; w++) {
147 map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
148 nm_diff->map[w] = map;
149 if (map != 0) {
150 for (b = 0 ; b < WSIZE; b++) {
151 if (map & (1 << b))
152 nm_diff->count++;
153 }
154 }
155 }
156}
157
/**
 * port_list_add - add a port to a port list, ensuring no duplicates
 *
 * Scans the chain for @port; if absent, stores it in the first free slot,
 * growing the chain with a kmalloc'd block when the last one is full.
 * On allocation failure the port is dropped (with a warning), leaving the
 * list incomplete but still well-formed.
 */

static inline void port_list_add(struct port_list *pl_ptr, u32 port)
{
	struct port_list *item = pl_ptr;
	int i;
	int item_sz = PLSIZE;		/* # of used slots in current block */
	int cnt = pl_ptr->count;	/* # of ports not yet scanned */

	for (; ; cnt -= item_sz, item = item->next) {
		if (cnt < PLSIZE)
			item_sz = cnt;
		/* duplicate check within this block */
		for (i = 0; i < item_sz; i++)
			if (item->ports[i] == port)
				return;
		/* free slot in this block? (i == item_sz after the scan) */
		if (i < PLSIZE) {
			item->ports[i] = port;
			pl_ptr->count++;
			return;
		}
		/* block full: extend chain; new block is filled on next pass */
		if (!item->next) {
			item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
			if (!item->next) {
				warn("Memory squeeze: multicast destination port list is incomplete\n");
				return;
			}
			item->next->next = NULL;
		}
	}
}
190
191/**
192 * port_list_free - free dynamically created entries in port_list chain
193 *
194 * Note: First item is on stack, so it doesn't need to be released
195 */
196
197static inline void port_list_free(struct port_list *pl_ptr)
198{
199 struct port_list *item;
200 struct port_list *next;
201
202 for (item = pl_ptr->next; item; item = next) {
203 next = item->next;
204 kfree(item);
205 }
206}
207
208
209int bclink_init(void);
210void bclink_stop(void);
211void bclink_acknowledge(struct node *n_ptr, u32 acked);
212int bclink_send_msg(struct sk_buff *buf);
213void bclink_recv_pkt(struct sk_buff *buf);
214u32 bclink_get_last_sent(void);
215u32 bclink_acks_missing(struct node *n_ptr);
216void bclink_check_gap(struct node *n_ptr, u32 seqno);
217int bclink_stats(char *stats_buf, const u32 buf_size);
218int bclink_reset_stats(void);
219int bclink_set_queue_limits(u32 limit);
220void bcbearer_sort(void);
221void bcbearer_push(void);
222
223#endif
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
new file mode 100644
index 000000000000..3dd19fdc5a2c
--- /dev/null
+++ b/net/tipc/bearer.c
@@ -0,0 +1,692 @@
1/*
2 * net/tipc/bearer.c: TIPC bearer code
3 *
4 * Copyright (c) 1996-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "config.h"
39#include "dbg.h"
40#include "bearer.h"
41#include "link.h"
42#include "port.h"
43#include "discover.h"
44#include "bcast.h"
45
#define MAX_ADDR_STR 32		/* buffer size for addr2str() output */

/* table of registered media types; mutations are guarded by net_lock */
static struct media *media_list = 0;
static u32 media_count = 0;

/* table of MAX_BEARERS bearer slots; mutations are guarded by net_lock */
struct bearer *bearers = 0;
52
53/**
54 * media_name_valid - validate media name
55 *
56 * Returns 1 if media name is valid, otherwise 0.
57 */
58
59static int media_name_valid(const char *name)
60{
61 u32 len;
62
63 len = strlen(name);
64 if ((len + 1) > TIPC_MAX_MEDIA_NAME)
65 return 0;
66 return (strspn(name, tipc_alphabet) == len);
67}
68
69/**
70 * media_find - locates specified media object by name
71 */
72
73static struct media *media_find(const char *name)
74{
75 struct media *m_ptr;
76 u32 i;
77
78 for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
79 if (!strcmp(m_ptr->name, name))
80 return m_ptr;
81 }
82 return 0;
83}
84
85/**
86 * tipc_register_media - register a media type
87 *
88 * Bearers for this media type must be activated separately at a later stage.
89 */
90
int tipc_register_media(u32 media_type,
			char *name,
			int (*enable)(struct tipc_bearer *),
			void (*disable)(struct tipc_bearer *),
			int (*send_msg)(struct sk_buff *,
					struct tipc_bearer *,
					struct tipc_media_addr *),
			char *(*addr2str)(struct tipc_media_addr *a,
					  char *str_buf, int str_size),
			struct tipc_media_addr *bcast_addr,
			const u32 bearer_priority,
			const u32 link_tolerance, /* [ms] */
			const u32 send_window_limit)
{
	struct media *m_ptr;
	u32 media_id;
	u32 i;
	int res = -EINVAL;

	write_lock_bh(&net_lock);
	/* media_list is NULL until bearer_init() has run */
	if (!media_list)
		goto exit;

	/* validate all caller-supplied parameters before claiming a slot */
	if (!media_name_valid(name)) {
		warn("Media registration error: illegal name <%s>\n", name);
		goto exit;
	}
	if (!bcast_addr) {
		warn("Media registration error: no broadcast address supplied\n");
		goto exit;
	}
	if (bearer_priority >= TIPC_NUM_LINK_PRI) {
		warn("Media registration error: priority %u\n", bearer_priority);
		goto exit;
	}
	if ((link_tolerance < TIPC_MIN_LINK_TOL) ||
	    (link_tolerance > TIPC_MAX_LINK_TOL)) {
		warn("Media registration error: tolerance %u\n", link_tolerance);
		goto exit;
	}

	/* claim the next free slot; roll media_count back on any failure */
	media_id = media_count++;
	if (media_id >= MAX_MEDIA) {
		warn("Attempt to register more than %u media\n", MAX_MEDIA);
		media_count--;
		goto exit;
	}
	/* reject duplicate type ids and duplicate names */
	for (i = 0; i < media_id; i++) {
		if (media_list[i].type_id == media_type) {
			warn("Attempt to register second media with type %u\n",
			     media_type);
			media_count--;
			goto exit;
		}
		if (!strcmp(name, media_list[i].name)) {
			warn("Attempt to re-register media name <%s>\n", name);
			media_count--;
			goto exit;
		}
	}

	m_ptr = &media_list[media_id];
	m_ptr->type_id = media_type;
	m_ptr->send_msg = send_msg;
	m_ptr->enable_bearer = enable;
	m_ptr->disable_bearer = disable;
	m_ptr->addr2str = addr2str;
	memcpy(&m_ptr->bcast_addr, bcast_addr, sizeof(*bcast_addr));
	m_ptr->bcast = 1;	/* broadcast support is currently mandatory */
	strcpy(m_ptr->name, name);
	m_ptr->priority = bearer_priority;
	m_ptr->tolerance = link_tolerance;
	m_ptr->window = send_window_limit;
	dbg("Media <%s> registered\n", name);
	res = 0;
exit:
	write_unlock_bh(&net_lock);
	return res;
}
170
171/**
172 * media_addr_printf - record media address in print buffer
173 */
174
void media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
{
	struct media *m_ptr;
	u32 media_type;
	u32 i;

	/* the media address 'type' field is stored in network byte order */
	media_type = ntohl(a->type);
	for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
		if (m_ptr->type_id == media_type)
			break;
	}

	/* known media with a string converter: print "name(address)" */
	if ((i < media_count) && (m_ptr->addr2str != NULL)) {
		char addr_str[MAX_ADDR_STR];

		tipc_printf(pb, "%s(%s) ", m_ptr->name,
			    m_ptr->addr2str(a, addr_str, sizeof(addr_str)));
	} else {
		/* unknown media type: dump the raw address bytes in hex */
		unchar *addr = (unchar *)&a->dev_addr;

		tipc_printf(pb, "UNKNOWN(%u):", media_type);
		for (i = 0; i < (sizeof(*a) - sizeof(a->type)); i++) {
			tipc_printf(pb, "%02x ", addr[i]);
		}
	}
}
201
202/**
203 * media_get_names - record names of registered media in buffer
204 */
205
206struct sk_buff *media_get_names(void)
207{
208 struct sk_buff *buf;
209 struct media *m_ptr;
210 int i;
211
212 buf = cfg_reply_alloc(MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME));
213 if (!buf)
214 return NULL;
215
216 read_lock_bh(&net_lock);
217 for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
218 cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME, m_ptr->name,
219 strlen(m_ptr->name) + 1);
220 }
221 read_unlock_bh(&net_lock);
222 return buf;
223}
224
225/**
226 * bearer_name_validate - validate & (optionally) deconstruct bearer name
227 * @name - ptr to bearer name string
228 * @name_parts - ptr to area for bearer name components (or NULL if not needed)
229 *
230 * Returns 1 if bearer name is valid, otherwise 0.
231 */
232
static int bearer_name_validate(const char *name,
				struct bearer_name *name_parts)
{
	char name_copy[TIPC_MAX_BEARER_NAME];
	char *media_name;
	char *if_name;
	u32 media_len;
	u32 if_len;

	/* copy bearer name & ensure length is OK */

	name_copy[TIPC_MAX_BEARER_NAME - 1] = 0;
	/* need above in case non-Posix strncpy() doesn't pad with nulls */
	strncpy(name_copy, name, TIPC_MAX_BEARER_NAME);
	if (name_copy[TIPC_MAX_BEARER_NAME - 1] != 0)
		return 0;

	/* ensure all component parts of bearer name are present */

	media_name = name_copy;
	if ((if_name = strchr(media_name, ':')) == NULL)
		return 0;
	/* split "media:interface" in place by replacing ':' with NUL */
	*(if_name++) = 0;
	media_len = if_name - media_name;	/* length incl. terminating NUL */
	if_len = strlen(if_name) + 1;		/* length incl. terminating NUL */

	/* validate component parts of bearer name */

	if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) ||
	    (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME) ||
	    (strspn(media_name, tipc_alphabet) != (media_len - 1)) ||
	    (strspn(if_name, tipc_alphabet) != (if_len - 1)))
		return 0;

	/* return bearer name components, if necessary */

	if (name_parts) {
		strcpy(name_parts->media_name, media_name);
		strcpy(name_parts->if_name, if_name);
	}
	return 1;
}
275
276/**
277 * bearer_find - locates bearer object with matching bearer name
278 */
279
280static struct bearer *bearer_find(const char *name)
281{
282 struct bearer *b_ptr;
283 u32 i;
284
285 for (i = 0, b_ptr = bearers; i < MAX_BEARERS; i++, b_ptr++) {
286 if (b_ptr->active && (!strcmp(b_ptr->publ.name, name)))
287 return b_ptr;
288 }
289 return 0;
290}
291
/**
 * bearer_find_interface - locates bearer object with matching interface name
 *
 * Only the portion of the bearer name after the ':' separator is compared
 * against @if_name.
 */

struct bearer *bearer_find_interface(const char *if_name)
{
	struct bearer *b_ptr;
	char *b_if_name;
	u32 i;

	for (i = 0, b_ptr = bearers; i < MAX_BEARERS; i++, b_ptr++) {
		if (!b_ptr->active)
			continue;
		b_if_name = strchr(b_ptr->publ.name, ':') + 1;
		if (!strcmp(b_if_name, if_name))
			return b_ptr;
	}
	return 0;
}
311
312/**
313 * bearer_get_names - record names of bearers in buffer
314 */
315
316struct sk_buff *bearer_get_names(void)
317{
318 struct sk_buff *buf;
319 struct media *m_ptr;
320 struct bearer *b_ptr;
321 int i, j;
322
323 buf = cfg_reply_alloc(MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME));
324 if (!buf)
325 return NULL;
326
327 read_lock_bh(&net_lock);
328 for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
329 for (j = 0; j < MAX_BEARERS; j++) {
330 b_ptr = &bearers[j];
331 if (b_ptr->active && (b_ptr->media == m_ptr)) {
332 cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME,
333 b_ptr->publ.name,
334 strlen(b_ptr->publ.name) + 1);
335 }
336 }
337 }
338 read_unlock_bh(&net_lock);
339 return buf;
340}
341
/*
 * bearer_add_dest - record a newly reachable node on this bearer, then
 * refresh the bearer's discovery request and re-sort the broadcast bearers
 */
void bearer_add_dest(struct bearer *b_ptr, u32 dest)
{
	nmap_add(&b_ptr->nodes, dest);
	disc_update_link_req(b_ptr->link_req);
	bcbearer_sort();
}
348
/*
 * bearer_remove_dest - forget a no-longer-reachable node on this bearer,
 * then refresh the bearer's discovery request and re-sort the broadcast
 * bearers
 */
void bearer_remove_dest(struct bearer *b_ptr, u32 dest)
{
	nmap_remove(&b_ptr->nodes, dest);
	disc_update_link_req(b_ptr->link_req);
	bcbearer_sort();
}
355
/*
 * bearer_push(): Resolve bearer congestion. Force the waiting
 * links to push out their unsent packets, one packet per link
 * per iteration, until all packets are gone or congestion reoccurs.
 * 'net_lock' is read_locked when this function is called
 * bearer.lock must be taken before calling
 * Returns binary true(1) or false(0)
 */
static int bearer_push(struct bearer *b_ptr)
{
	u32 res = TIPC_OK;
	struct link *ln, *tln;

	/* a blocked bearer cannot make progress, so don't even try */
	if (b_ptr->publ.blocked)
		return 0;

	while (!list_empty(&b_ptr->cong_links) && (res != PUSH_FAILED)) {
		list_for_each_entry_safe(ln, tln, &b_ptr->cong_links, link_list) {
			res = link_push_packet(ln);
			if (res == PUSH_FAILED)
				break;
			/* link fully drained: move it back to the
			   uncongested list */
			if (res == PUSH_FINISHED)
				list_move_tail(&ln->link_list, &b_ptr->links);
		}
	}
	return list_empty(&b_ptr->cong_links);
}
383
384void bearer_lock_push(struct bearer *b_ptr)
385{
386 int res;
387
388 spin_lock_bh(&b_ptr->publ.lock);
389 res = bearer_push(b_ptr);
390 spin_unlock_bh(&b_ptr->publ.lock);
391 if (res)
392 bcbearer_push();
393}
394
395
/*
 * Interrupt enabling new requests after bearer congestion or blocking:
 * See bearer_send().
 */
void tipc_continue(struct tipc_bearer *tb_ptr)
{
	/* publ is the first member of struct bearer, so the cast is valid */
	struct bearer *b_ptr = (struct bearer *)tb_ptr;

	spin_lock_bh(&b_ptr->publ.lock);
	b_ptr->continue_count++;
	/* defer the actual push via k_signal() instead of pushing inline */
	if (!list_empty(&b_ptr->cong_links))
		k_signal((Handler)bearer_lock_push, (unsigned long)b_ptr);
	b_ptr->publ.blocked = 0;
	spin_unlock_bh(&b_ptr->publ.lock);
}
411
/*
 * Schedule link for sending of messages after the bearer
 * has been deblocked by 'continue()'. This method is called
 * when somebody tries to send a message via this link while
 * the bearer is congested. 'net_lock' is in read_lock here
 * bearer.lock is busy
 */

static void bearer_schedule_unlocked(struct bearer *b_ptr, struct link *l_ptr)
{
	/* move link onto the congested list; it is serviced again by
	   bearer_push() once the bearer recovers */
	list_move_tail(&l_ptr->link_list, &b_ptr->cong_links);
}
424
/*
 * Schedule link for sending of messages after the bearer
 * has been deblocked by 'continue()'. This method is called
 * when somebody tries to send a message via this link while
 * the bearer is congested. 'net_lock' is in read_lock here,
 * bearer.lock is free
 */

void bearer_schedule(struct bearer *b_ptr, struct link *l_ptr)
{
	/* locked wrapper around bearer_schedule_unlocked() */
	spin_lock_bh(&b_ptr->publ.lock);
	bearer_schedule_unlocked(b_ptr, l_ptr);
	spin_unlock_bh(&b_ptr->publ.lock);
}
439
440
441/*
442 * bearer_resolve_congestion(): Check if there is bearer congestion,
443 * and if there is, try to resolve it before returning.
444 * 'net_lock' is read_locked when this function is called
445 */
446int bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr)
447{
448 int res = 1;
449
450 if (list_empty(&b_ptr->cong_links))
451 return 1;
452 spin_lock_bh(&b_ptr->publ.lock);
453 if (!bearer_push(b_ptr)) {
454 bearer_schedule_unlocked(b_ptr, l_ptr);
455 res = 0;
456 }
457 spin_unlock_bh(&b_ptr->publ.lock);
458 return res;
459}
460
461
462/**
463 * tipc_enable_bearer - enable bearer with the given name
464 */
465
int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
{
	struct bearer *b_ptr;
	struct media *m_ptr;
	struct bearer_name b_name;
	char addr_string[16];
	u32 bearer_id;
	u32 with_this_prio;
	u32 i;
	int res = -EINVAL;

	if (tipc_mode != TIPC_NET_MODE)
		return -ENOPROTOOPT;
	/* priority == TIPC_NUM_LINK_PRI means "use the media's default" */
	if (!bearer_name_validate(name, &b_name) ||
	    !addr_domain_valid(bcast_scope) ||
	    !in_scope(bcast_scope, tipc_own_addr) ||
	    (priority > TIPC_NUM_LINK_PRI))
		return -EINVAL;

	write_lock_bh(&net_lock);
	if (!bearers)
		goto failed;

	m_ptr = media_find(b_name.media_name);
	if (!m_ptr) {
		warn("No media <%s>\n", b_name.media_name);
		goto failed;
	}
	if (priority == TIPC_NUM_LINK_PRI)
		priority = m_ptr->priority;

restart:
	/* find a free slot; at most two bearers may share a priority level,
	   so lower the requested priority and rescan when a third appears */
	bearer_id = MAX_BEARERS;
	with_this_prio = 1;
	for (i = MAX_BEARERS; i-- != 0; ) {
		if (!bearers[i].active) {
			bearer_id = i;
			continue;
		}
		if (!strcmp(name, bearers[i].publ.name)) {
			warn("Bearer <%s> already enabled\n", name);
			goto failed;
		}
		if ((bearers[i].priority == priority) &&
		    (++with_this_prio > 2)) {
			if (priority-- == 0) {
				/* NOTE(review): priority (u32) has wrapped to
				   0xffffffff here, so the "unable to lower to
				   %u" value below is misleading — confirm */
				warn("Third bearer <%s> with priority %u, unable to lower to %u\n",
				     name, priority + 1, priority);
				goto failed;
			}
			warn("Third bearer <%s> with priority %u, lowering to %u\n",
			     name, priority + 1, priority);
			goto restart;
		}
	}
	if (bearer_id >= MAX_BEARERS) {
		warn("Attempt to enable more than %d bearers\n", MAX_BEARERS);
		goto failed;
	}

	b_ptr = &bearers[bearer_id];
	memset(b_ptr, 0, sizeof(struct bearer));

	strcpy(b_ptr->publ.name, name);
	res = m_ptr->enable_bearer(&b_ptr->publ);
	if (res) {
		warn("Failed to enable bearer <%s>\n", name);
		goto failed;
	}

	b_ptr->identity = bearer_id;
	b_ptr->media = m_ptr;
	b_ptr->net_plane = bearer_id + 'A';
	b_ptr->active = 1;
	b_ptr->detect_scope = bcast_scope;
	b_ptr->priority = priority;
	INIT_LIST_HEAD(&b_ptr->cong_links);
	INIT_LIST_HEAD(&b_ptr->links);
	/* start periodic neighbor discovery on broadcast-capable media */
	if (m_ptr->bcast) {
		b_ptr->link_req = disc_init_link_req(b_ptr, &m_ptr->bcast_addr,
						     bcast_scope, 2);
	}
	b_ptr->publ.lock = SPIN_LOCK_UNLOCKED;
	write_unlock_bh(&net_lock);
	info("Enabled bearer <%s>, discovery domain %s\n",
	     name, addr_string_fill(addr_string, bcast_scope));
	return 0;
failed:
	write_unlock_bh(&net_lock);
	return res;
}
557
558/**
559 * tipc_block_bearer(): Block the bearer with the given name,
560 * and reset all its links
561 */
562
int tipc_block_bearer(const char *name)
{
	struct bearer *b_ptr = 0;
	struct link *l_ptr;
	struct link *temp_l_ptr;

	if (tipc_mode != TIPC_NET_MODE)
		return -ENOPROTOOPT;

	read_lock_bh(&net_lock);
	b_ptr = bearer_find(name);
	if (!b_ptr) {
		warn("Attempt to block unknown bearer <%s>\n", name);
		read_unlock_bh(&net_lock);
		return -EINVAL;
	}

	/* mark the bearer blocked, then reset every link running over it;
	   each link's owning node is locked while that link is reset */
	spin_lock_bh(&b_ptr->publ.lock);
	b_ptr->publ.blocked = 1;
	list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
		struct node *n_ptr = l_ptr->owner;

		spin_lock_bh(&n_ptr->lock);
		link_reset(l_ptr);
		spin_unlock_bh(&n_ptr->lock);
	}
	spin_unlock_bh(&b_ptr->publ.lock);
	read_unlock_bh(&net_lock);
	info("Blocked bearer <%s>\n", name);
	return TIPC_OK;
}
594
595/**
596 * bearer_disable -
597 *
598 * Note: This routine assumes caller holds net_lock.
599 */
600
static int bearer_disable(const char *name)
{
	struct bearer *b_ptr;
	struct link *l_ptr;
	struct link *temp_l_ptr;

	if (tipc_mode != TIPC_NET_MODE)
		return -ENOPROTOOPT;

	b_ptr = bearer_find(name);
	if (!b_ptr) {
		warn("Attempt to disable unknown bearer <%s>\n", name);
		return -EINVAL;
	}

	disc_stop_link_req(b_ptr->link_req);
	spin_lock_bh(&b_ptr->publ.lock);
	b_ptr->link_req = NULL;
	b_ptr->publ.blocked = 1;
	if (b_ptr->media->disable_bearer) {
		/* both the bearer lock and net_lock are dropped around the
		   media's disable routine and reacquired afterwards —
		   presumably because that routine may block; confirm the
		   bearer cannot be re-enabled in the window */
		spin_unlock_bh(&b_ptr->publ.lock);
		write_unlock_bh(&net_lock);
		b_ptr->media->disable_bearer(&b_ptr->publ);
		write_lock_bh(&net_lock);
		spin_lock_bh(&b_ptr->publ.lock);
	}
	list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
		link_delete(l_ptr);
	}
	spin_unlock_bh(&b_ptr->publ.lock);
	info("Disabled bearer <%s>\n", name);
	/* zeroing the structure also clears 'active', freeing the slot */
	memset(b_ptr, 0, sizeof(struct bearer));
	return TIPC_OK;
}
635
/*
 * tipc_disable_bearer - public wrapper for bearer_disable(); acquires
 * net_lock for writing, as bearer_disable() requires
 */
int tipc_disable_bearer(const char *name)
{
	int res;

	write_lock_bh(&net_lock);
	res = bearer_disable(name);
	write_unlock_bh(&net_lock);
	return res;
}
645
646
647
648int bearer_init(void)
649{
650 int res;
651
652 write_lock_bh(&net_lock);
653 bearers = kmalloc(MAX_BEARERS * sizeof(struct bearer), GFP_ATOMIC);
654 media_list = kmalloc(MAX_MEDIA * sizeof(struct media), GFP_ATOMIC);
655 if (bearers && media_list) {
656 memset(bearers, 0, MAX_BEARERS * sizeof(struct bearer));
657 memset(media_list, 0, MAX_MEDIA * sizeof(struct media));
658 res = TIPC_OK;
659 } else {
660 kfree(bearers);
661 kfree(media_list);
662 bearers = 0;
663 media_list = 0;
664 res = -ENOMEM;
665 }
666 write_unlock_bh(&net_lock);
667 return res;
668}
669
670void bearer_stop(void)
671{
672 u32 i;
673
674 if (!bearers)
675 return;
676
677 for (i = 0; i < MAX_BEARERS; i++) {
678 if (bearers[i].active)
679 bearers[i].publ.blocked = 1;
680 }
681 for (i = 0; i < MAX_BEARERS; i++) {
682 if (bearers[i].active)
683 bearer_disable(bearers[i].publ.name);
684 }
685 kfree(bearers);
686 kfree(media_list);
687 bearers = 0;
688 media_list = 0;
689 media_count = 0;
690}
691
692
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
new file mode 100644
index 000000000000..21e63d3f0183
--- /dev/null
+++ b/net/tipc/bearer.h
@@ -0,0 +1,172 @@
1/*
2 * net/tipc/bearer.h: Include file for TIPC bearer code
3 *
4 * Copyright (c) 1996-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_BEARER_H
38#define _TIPC_BEARER_H
39
40#include <net/tipc/tipc_bearer.h>
41#include "bcast.h"
42
43#define MAX_BEARERS 8
44#define MAX_MEDIA 4
45
46
47/**
48 * struct media - TIPC media information available to internal users
49 * @send_msg: routine which handles buffer transmission
50 * @enable_bearer: routine which enables a bearer
51 * @disable_bearer: routine which disables a bearer
52 * @addr2str: routine which converts bearer's address to string form
53 * @bcast_addr: media address used in broadcasting
54 * @bcast: non-zero if media supports broadcasting [currently mandatory]
55 * @priority: default link (and bearer) priority
56 * @tolerance: default time (in ms) before declaring link failure
57 * @window: default window (in packets) before declaring link congestion
58 * @type_id: TIPC media identifier [defined in tipc_bearer.h]
59 * @name: media name
60 */
61
struct media {
	/* send routine; must return 0 on success (see bearer_send()) */
	int (*send_msg)(struct sk_buff *buf,
			struct tipc_bearer *b_ptr,
			struct tipc_media_addr *dest);
	int (*enable_bearer)(struct tipc_bearer *b_ptr);
	void (*disable_bearer)(struct tipc_bearer *b_ptr);
	char *(*addr2str)(struct tipc_media_addr *a,
			  char *str_buf, int str_size);
	struct tipc_media_addr bcast_addr;
	int bcast;
	u32 priority;
	u32 tolerance;
	u32 window;
	u32 type_id;
	char name[TIPC_MAX_MEDIA_NAME];
};
78
/**
 * struct bearer - TIPC bearer information available to internal users
 * @publ: bearer information available to privileged users
 * @media: ptr to media structure associated with bearer
 * @priority: default link priority for bearer
 * @detect_scope: network address mask used during automatic link creation
 * @identity: array index of this bearer within TIPC bearer array
 * @link_req: ptr to (optional) structure making periodic link setup requests
 * @links: list of non-congested links associated with bearer
 * @cong_links: list of congested links associated with bearer
 * @continue_count: # of times bearer has resumed after congestion or blocking
 * @active: non-zero if bearer structure represents a bearer
 * @net_plane: network plane ('A' through 'H') currently associated with bearer
 * @nodes: indicates which nodes in cluster can be reached through bearer
 */

struct bearer {
	struct tipc_bearer publ;
	struct media *media;
	u32 priority;
	u32 detect_scope;
	u32 identity;
	struct link_req *link_req;
	struct list_head links;
	struct list_head cong_links;
	u32 continue_count;
	int active;
	char net_plane;
	struct node_map nodes;
};
109
/**
 * struct bearer_name - deconstructed bearer name, "media:interface"
 * (filled in by bearer_name_validate() in bearer.c)
 */

struct bearer_name {
	char media_name[TIPC_MAX_MEDIA_NAME];
	char if_name[TIPC_MAX_IF_NAME];
};
114
115struct link;
116
117extern struct bearer *bearers;
118
119void media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
120struct sk_buff *media_get_names(void);
121
122struct sk_buff *bearer_get_names(void);
123void bearer_add_dest(struct bearer *b_ptr, u32 dest);
124void bearer_remove_dest(struct bearer *b_ptr, u32 dest);
125void bearer_schedule(struct bearer *b_ptr, struct link *l_ptr);
126struct bearer *bearer_find_interface(const char *if_name);
127int bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr);
128int bearer_init(void);
129void bearer_stop(void);
130int bearer_broadcast(struct sk_buff *buf, struct tipc_bearer *b_ptr,
131 struct tipc_media_addr *dest);
132void bearer_lock_push(struct bearer *b_ptr);
133
134
135/**
136 * bearer_send- sends buffer to destination over bearer
137 *
138 * Returns true (1) if successful, or false (0) if unable to send
139 *
140 * IMPORTANT:
141 * The media send routine must not alter the buffer being passed in
142 * as it may be needed for later retransmission!
143 *
144 * If the media send routine returns a non-zero value (indicating that
145 * it was unable to send the buffer), it must:
146 * 1) mark the bearer as blocked,
147 * 2) call tipc_continue() once the bearer is able to send again.
148 * Media types that are unable to meet these two critera must ensure their
149 * send routine always returns success -- even if the buffer was not sent --
150 * and let TIPC's link code deal with the undelivered message.
151 */
152
153static inline int bearer_send(struct bearer *b_ptr, struct sk_buff *buf,
154 struct tipc_media_addr *dest)
155{
156 return !b_ptr->media->send_msg(buf, &b_ptr->publ, dest);
157}
158
159/**
160 * bearer_congested - determines if bearer is currently congested
161 */
162
163static inline int bearer_congested(struct bearer *b_ptr, struct link *l_ptr)
164{
165 if (unlikely(b_ptr->publ.blocked))
166 return 1;
167 if (likely(list_empty(&b_ptr->cong_links)))
168 return 0;
169 return !bearer_resolve_congestion(b_ptr, l_ptr);
170}
171
172#endif
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
new file mode 100644
index 000000000000..f0f7bac51d41
--- /dev/null
+++ b/net/tipc/cluster.c
@@ -0,0 +1,576 @@
1/*
2 * net/tipc/cluster.c: TIPC cluster management routines
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "cluster.h"
39#include "addr.h"
40#include "node_subscr.h"
41#include "link.h"
42#include "node.h"
43#include "net.h"
44#include "msg.h"
45#include "bearer.h"
46
/* Forward declarations of routines defined later in this file */
void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf,
		       u32 lower, u32 upper);
struct sk_buff *cluster_prepare_routing_msg(u32 data_size, u32 dest);

/* Node table of own cluster (exported via cluster.h) */
struct node **local_nodes = 0;
struct node_map cluster_bcast_nodes = {0,{0,}};
/* Highest slave-node slot permitted by current configuration */
u32 highest_allowed_slave = 0;
54
55struct cluster *cluster_create(u32 addr)
56{
57 struct _zone *z_ptr;
58 struct cluster *c_ptr;
59 int max_nodes;
60 int alloc;
61
62 c_ptr = (struct cluster *)kmalloc(sizeof(*c_ptr), GFP_ATOMIC);
63 if (c_ptr == NULL)
64 return 0;
65 memset(c_ptr, 0, sizeof(*c_ptr));
66
67 c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
68 if (in_own_cluster(addr))
69 max_nodes = LOWEST_SLAVE + tipc_max_slaves;
70 else
71 max_nodes = tipc_max_nodes + 1;
72 alloc = sizeof(void *) * (max_nodes + 1);
73 c_ptr->nodes = (struct node **)kmalloc(alloc, GFP_ATOMIC);
74 if (c_ptr->nodes == NULL) {
75 kfree(c_ptr);
76 return 0;
77 }
78 memset(c_ptr->nodes, 0, alloc);
79 if (in_own_cluster(addr))
80 local_nodes = c_ptr->nodes;
81 c_ptr->highest_slave = LOWEST_SLAVE - 1;
82 c_ptr->highest_node = 0;
83
84 z_ptr = zone_find(tipc_zone(addr));
85 if (z_ptr == NULL) {
86 z_ptr = zone_create(addr);
87 }
88 if (z_ptr != NULL) {
89 zone_attach_cluster(z_ptr, c_ptr);
90 c_ptr->owner = z_ptr;
91 }
92 else {
93 kfree(c_ptr);
94 c_ptr = 0;
95 }
96
97 return c_ptr;
98}
99
100void cluster_delete(struct cluster *c_ptr)
101{
102 u32 n_num;
103
104 if (!c_ptr)
105 return;
106 for (n_num = 1; n_num <= c_ptr->highest_node; n_num++) {
107 node_delete(c_ptr->nodes[n_num]);
108 }
109 for (n_num = LOWEST_SLAVE; n_num <= c_ptr->highest_slave; n_num++) {
110 node_delete(c_ptr->nodes[n_num]);
111 }
112 kfree(c_ptr->nodes);
113 kfree(c_ptr);
114}
115
116u32 cluster_next_node(struct cluster *c_ptr, u32 addr)
117{
118 struct node *n_ptr;
119 u32 n_num = tipc_node(addr) + 1;
120
121 if (!c_ptr)
122 return addr;
123 for (; n_num <= c_ptr->highest_node; n_num++) {
124 n_ptr = c_ptr->nodes[n_num];
125 if (n_ptr && node_has_active_links(n_ptr))
126 return n_ptr->addr;
127 }
128 for (n_num = 1; n_num < tipc_node(addr); n_num++) {
129 n_ptr = c_ptr->nodes[n_num];
130 if (n_ptr && node_has_active_links(n_ptr))
131 return n_ptr->addr;
132 }
133 return 0;
134}
135
/**
 * cluster_attach_node - add node to cluster's node table
 *
 * The slot for the node's number must still be vacant.
 */
void cluster_attach_node(struct cluster *c_ptr, struct node *n_ptr)
{
	u32 n_num = tipc_node(n_ptr->addr);
	u32 max_n_num = tipc_max_nodes;

	/* Nodes in own cluster may also occupy slave-node slots */
	if (in_own_cluster(n_ptr->addr))
		max_n_num = highest_allowed_slave;
	assert(n_num > 0);
	assert(n_num <= max_n_num);
	assert(c_ptr->nodes[n_num] == 0);
	c_ptr->nodes[n_num] = n_ptr;
	/* Track highest occupied slot to bound later table scans */
	if (n_num > c_ptr->highest_node)
		c_ptr->highest_node = n_num;
}
150
/**
 * cluster_select_router - select router to reach a remote cluster
 *
 * Uses deterministic and fair algorithm: the start slot is derived from
 * the caller-supplied reference value, so different callers spread their
 * traffic over different routers, while the same reference always picks
 * the same router.
 */

u32 cluster_select_router(struct cluster *c_ptr, u32 ref)
{
	u32 n_num;
	u32 ulim = c_ptr->highest_node;
	u32 mask;
	u32 tstart;

	assert(!in_own_cluster(c_ptr->addr));
	if (!ulim)
		return 0;

	/* Start entry must be random */
	/* NOTE(review): 'ref & mask' may be 0, so the first scan can begin
	 * at unused slot 0 -- confirm node_is_up() tolerates a NULL node
	 */
	mask = tipc_max_nodes;
	while (mask > ulim)
		mask >>= 1;
	tstart = ref & mask;
	n_num = tstart;

	/* Lookup upwards with wrap-around */
	do {
		if (node_is_up(c_ptr->nodes[n_num]))
			break;
	} while (++n_num <= ulim);
	if (n_num > ulim) {
		/* Wrapped: rescan from slot 1 up to (excluding) tstart */
		n_num = 1;
		do {
			if (node_is_up(c_ptr->nodes[n_num]))
				break;
		} while (++n_num < tstart);
		if (n_num == tstart)
			return 0;	/* no node is up */
	}
	assert(n_num <= ulim);
	return node_select_router(c_ptr->nodes[n_num], ref);
}
192
/**
 * cluster_select_node - select destination node within a remote cluster
 *
 * Uses deterministic and fair algorithm: start slot is derived from the
 * selector value, then the table is scanned upward with wrap-around for
 * the first node with active links.
 */

struct node *cluster_select_node(struct cluster *c_ptr, u32 selector)
{
	u32 n_num;
	u32 mask = tipc_max_nodes;
	u32 start_entry;

	assert(!in_own_cluster(c_ptr->addr));
	if (!c_ptr->highest_node)
		return 0;

	/* Start entry must be random */
	while (mask > c_ptr->highest_node) {
		mask >>= 1;
	}
	/* Slot 0 is never used, so force a zero start up to slot 1 */
	start_entry = (selector & mask) ? selector & mask : 1u;
	assert(start_entry <= c_ptr->highest_node);

	/* Lookup upwards with wrap-around */
	for (n_num = start_entry; n_num <= c_ptr->highest_node; n_num++) {
		if (node_has_active_links(c_ptr->nodes[n_num]))
			return c_ptr->nodes[n_num];
	}
	for (n_num = 1; n_num < start_entry; n_num++) {
		if (node_has_active_links(c_ptr->nodes[n_num]))
			return c_ptr->nodes[n_num];
	}
	return 0;
}
227
228/*
229 * Routing table management: See description in node.c
230 */
231
232struct sk_buff *cluster_prepare_routing_msg(u32 data_size, u32 dest)
233{
234 u32 size = INT_H_SIZE + data_size;
235 struct sk_buff *buf = buf_acquire(size);
236 struct tipc_msg *msg;
237
238 if (buf) {
239 msg = buf_msg(buf);
240 memset((char *)msg, 0, size);
241 msg_init(msg, ROUTE_DISTRIBUTOR, 0, TIPC_OK, INT_H_SIZE, dest);
242 }
243 return buf;
244}
245
246void cluster_bcast_new_route(struct cluster *c_ptr, u32 dest,
247 u32 lower, u32 upper)
248{
249 struct sk_buff *buf = cluster_prepare_routing_msg(0, c_ptr->addr);
250 struct tipc_msg *msg;
251
252 if (buf) {
253 msg = buf_msg(buf);
254 msg_set_remote_node(msg, dest);
255 msg_set_type(msg, ROUTE_ADDITION);
256 cluster_multicast(c_ptr, buf, lower, upper);
257 } else {
258 warn("Memory squeeze: broadcast of new route failed\n");
259 }
260}
261
262void cluster_bcast_lost_route(struct cluster *c_ptr, u32 dest,
263 u32 lower, u32 upper)
264{
265 struct sk_buff *buf = cluster_prepare_routing_msg(0, c_ptr->addr);
266 struct tipc_msg *msg;
267
268 if (buf) {
269 msg = buf_msg(buf);
270 msg_set_remote_node(msg, dest);
271 msg_set_type(msg, ROUTE_REMOVAL);
272 cluster_multicast(c_ptr, buf, lower, upper);
273 } else {
274 warn("Memory squeeze: broadcast of lost route failed\n");
275 }
276}
277
278void cluster_send_slave_routes(struct cluster *c_ptr, u32 dest)
279{
280 struct sk_buff *buf;
281 struct tipc_msg *msg;
282 u32 highest = c_ptr->highest_slave;
283 u32 n_num;
284 int send = 0;
285
286 assert(!is_slave(dest));
287 assert(in_own_cluster(dest));
288 assert(in_own_cluster(c_ptr->addr));
289 if (highest <= LOWEST_SLAVE)
290 return;
291 buf = cluster_prepare_routing_msg(highest - LOWEST_SLAVE + 1,
292 c_ptr->addr);
293 if (buf) {
294 msg = buf_msg(buf);
295 msg_set_remote_node(msg, c_ptr->addr);
296 msg_set_type(msg, SLAVE_ROUTING_TABLE);
297 for (n_num = LOWEST_SLAVE; n_num <= highest; n_num++) {
298 if (c_ptr->nodes[n_num] &&
299 node_has_active_links(c_ptr->nodes[n_num])) {
300 send = 1;
301 msg_set_dataoctet(msg, n_num);
302 }
303 }
304 if (send)
305 link_send(buf, dest, dest);
306 else
307 buf_discard(buf);
308 } else {
309 warn("Memory squeeze: broadcast of lost route failed\n");
310 }
311}
312
313void cluster_send_ext_routes(struct cluster *c_ptr, u32 dest)
314{
315 struct sk_buff *buf;
316 struct tipc_msg *msg;
317 u32 highest = c_ptr->highest_node;
318 u32 n_num;
319 int send = 0;
320
321 if (in_own_cluster(c_ptr->addr))
322 return;
323 assert(!is_slave(dest));
324 assert(in_own_cluster(dest));
325 highest = c_ptr->highest_node;
326 buf = cluster_prepare_routing_msg(highest + 1, c_ptr->addr);
327 if (buf) {
328 msg = buf_msg(buf);
329 msg_set_remote_node(msg, c_ptr->addr);
330 msg_set_type(msg, EXT_ROUTING_TABLE);
331 for (n_num = 1; n_num <= highest; n_num++) {
332 if (c_ptr->nodes[n_num] &&
333 node_has_active_links(c_ptr->nodes[n_num])) {
334 send = 1;
335 msg_set_dataoctet(msg, n_num);
336 }
337 }
338 if (send)
339 link_send(buf, dest, dest);
340 else
341 buf_discard(buf);
342 } else {
343 warn("Memory squeeze: broadcast of external route failed\n");
344 }
345}
346
347void cluster_send_local_routes(struct cluster *c_ptr, u32 dest)
348{
349 struct sk_buff *buf;
350 struct tipc_msg *msg;
351 u32 highest = c_ptr->highest_node;
352 u32 n_num;
353 int send = 0;
354
355 assert(is_slave(dest));
356 assert(in_own_cluster(c_ptr->addr));
357 buf = cluster_prepare_routing_msg(highest, c_ptr->addr);
358 if (buf) {
359 msg = buf_msg(buf);
360 msg_set_remote_node(msg, c_ptr->addr);
361 msg_set_type(msg, LOCAL_ROUTING_TABLE);
362 for (n_num = 1; n_num <= highest; n_num++) {
363 if (c_ptr->nodes[n_num] &&
364 node_has_active_links(c_ptr->nodes[n_num])) {
365 send = 1;
366 msg_set_dataoctet(msg, n_num);
367 }
368 }
369 if (send)
370 link_send(buf, dest, dest);
371 else
372 buf_discard(buf);
373 } else {
374 warn("Memory squeeze: broadcast of local route failed\n");
375 }
376}
377
/**
 * cluster_recv_routing_table - process incoming routing table message
 *
 * Updates this node's view of which nodes exist in the sending cluster
 * and which router can reach them, then releases the buffer.
 */
void cluster_recv_routing_table(struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct cluster *c_ptr;
	struct node *n_ptr;
	unchar *node_table;
	u32 table_size;
	u32 router;
	u32 rem_node = msg_remote_node(msg);
	u32 z_num;
	u32 c_num;
	u32 n_num;

	/* Create cluster structure on first contact with remote cluster */
	c_ptr = cluster_find(rem_node);
	if (!c_ptr) {
		c_ptr = cluster_create(rem_node);
		if (!c_ptr) {
			buf_discard(buf);
			return;
		}
	}

	node_table = buf->data + msg_hdr_sz(msg);
	table_size = msg_size(msg) - msg_hdr_sz(msg);
	router = msg_prevnode(msg);
	z_num = tipc_zone(rem_node);
	c_num = tipc_cluster(rem_node);

	switch (msg_type(msg)) {
	case LOCAL_ROUTING_TABLE:
		assert(is_slave(tipc_own_addr));
		/* fall through: table format is identical to external table */
	case EXT_ROUTING_TABLE:
		/* One octet per node slot; non-zero => node is reachable */
		for (n_num = 1; n_num < table_size; n_num++) {
			if (node_table[n_num]) {
				u32 addr = tipc_addr(z_num, c_num, n_num);
				n_ptr = c_ptr->nodes[n_num];
				if (!n_ptr) {
					n_ptr = node_create(addr);
				}
				if (n_ptr)
					node_add_router(n_ptr, router);
			}
		}
		break;
	case SLAVE_ROUTING_TABLE:
		assert(!is_slave(tipc_own_addr));
		assert(in_own_cluster(c_ptr->addr));
		/* Table octet index is offset by LOWEST_SLAVE */
		for (n_num = 1; n_num < table_size; n_num++) {
			if (node_table[n_num]) {
				u32 slave_num = n_num + LOWEST_SLAVE;
				u32 addr = tipc_addr(z_num, c_num, slave_num);
				n_ptr = c_ptr->nodes[slave_num];
				if (!n_ptr) {
					n_ptr = node_create(addr);
				}
				if (n_ptr)
					node_add_router(n_ptr, router);
			}
		}
		break;
	case ROUTE_ADDITION:
		/* Single-route update: create node entry if needed */
		if (!is_slave(tipc_own_addr)) {
			assert(!in_own_cluster(c_ptr->addr)
			       || is_slave(rem_node));
		} else {
			assert(in_own_cluster(c_ptr->addr)
			       && !is_slave(rem_node));
		}
		n_ptr = c_ptr->nodes[tipc_node(rem_node)];
		if (!n_ptr)
			n_ptr = node_create(rem_node);
		if (n_ptr)
			node_add_router(n_ptr, router);
		break;
	case ROUTE_REMOVAL:
		if (!is_slave(tipc_own_addr)) {
			assert(!in_own_cluster(c_ptr->addr)
			       || is_slave(rem_node));
		} else {
			assert(in_own_cluster(c_ptr->addr)
			       && !is_slave(rem_node));
		}
		n_ptr = c_ptr->nodes[tipc_node(rem_node)];
		if (n_ptr)
			node_remove_router(n_ptr, router);
		break;
	default:
		assert(!"Illegal routing manager message received\n");
	}
	buf_discard(buf);
}
469
470void cluster_remove_as_router(struct cluster *c_ptr, u32 router)
471{
472 u32 start_entry;
473 u32 tstop;
474 u32 n_num;
475
476 if (is_slave(router))
477 return; /* Slave nodes can not be routers */
478
479 if (in_own_cluster(c_ptr->addr)) {
480 start_entry = LOWEST_SLAVE;
481 tstop = c_ptr->highest_slave;
482 } else {
483 start_entry = 1;
484 tstop = c_ptr->highest_node;
485 }
486
487 for (n_num = start_entry; n_num <= tstop; n_num++) {
488 if (c_ptr->nodes[n_num]) {
489 node_remove_router(c_ptr->nodes[n_num], router);
490 }
491 }
492}
493
/**
 * cluster_multicast - multicast message to local nodes
 *
 * Sends a copy of 'buf' to every node in slot range [lower, upper] that
 * has active links, then releases the original buffer.
 */

void cluster_multicast(struct cluster *c_ptr, struct sk_buff *buf,
		       u32 lower, u32 upper)
{
	struct sk_buff *buf_copy;
	struct node *n_ptr;
	u32 n_num;
	u32 tstop;

	/* Range must lie entirely within either the standard-node
	 * slots or the slave-node slots
	 */
	assert(lower <= upper);
	assert(((lower >= 1) && (lower <= tipc_max_nodes)) ||
	       ((lower >= LOWEST_SLAVE) && (lower <= highest_allowed_slave)));
	assert(((upper >= 1) && (upper <= tipc_max_nodes)) ||
	       ((upper >= LOWEST_SLAVE) && (upper <= highest_allowed_slave)));
	assert(in_own_cluster(c_ptr->addr));

	/* Never scan past the highest slot actually in use */
	tstop = is_slave(upper) ? c_ptr->highest_slave : c_ptr->highest_node;
	if (tstop > upper)
		tstop = upper;
	for (n_num = lower; n_num <= tstop; n_num++) {
		n_ptr = c_ptr->nodes[n_num];
		if (n_ptr && node_has_active_links(n_ptr)) {
			buf_copy = skb_copy(buf, GFP_ATOMIC);
			if (buf_copy == NULL)
				break;	/* out of memory: stop sending */
			msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
			link_send(buf_copy, n_ptr->addr, n_ptr->addr);
		}
	}
	buf_discard(buf);
}
528
/**
 * cluster_broadcast - broadcast message to all nodes within cluster
 *
 * Sends a copy of 'buf' to every standard and slave node with active
 * links, then releases the original buffer. Does nothing (except
 * discard the buffer) when not in network mode.
 */

void cluster_broadcast(struct sk_buff *buf)
{
	struct sk_buff *buf_copy;
	struct cluster *c_ptr;
	struct node *n_ptr;
	u32 n_num;
	u32 tstart;
	u32 tstop;
	u32 node_type;

	if (tipc_mode == TIPC_NET_MODE) {
		c_ptr = cluster_find(tipc_own_addr);
		assert(in_own_cluster(c_ptr->addr));	/* For now */

		/* Send to standard nodes, then repeat loop sending to slaves */
		tstart = 1;
		tstop = c_ptr->highest_node;
		for (node_type = 1; node_type <= 2; node_type++) {
			for (n_num = tstart; n_num <= tstop; n_num++) {
				n_ptr = c_ptr->nodes[n_num];
				if (n_ptr && node_has_active_links(n_ptr)) {
					buf_copy = skb_copy(buf, GFP_ATOMIC);
					if (buf_copy == NULL)
						goto exit;
					msg_set_destnode(buf_msg(buf_copy),
							 n_ptr->addr);
					link_send(buf_copy, n_ptr->addr,
						  n_ptr->addr);
				}
			}
			/* Second pass covers the slave-node slot range */
			tstart = LOWEST_SLAVE;
			tstop = c_ptr->highest_slave;
		}
	}
exit:
	buf_discard(buf);
}
570
/**
 * cluster_init - create the cluster structure for own cluster at startup
 *
 * Returns TIPC_OK on success, -ENOMEM if creation fails.
 */
int cluster_init(void)
{
	highest_allowed_slave = LOWEST_SLAVE + tipc_max_slaves;
	return cluster_create(tipc_own_addr) ? TIPC_OK : -ENOMEM;
}
576
diff --git a/net/tipc/cluster.h b/net/tipc/cluster.h
new file mode 100644
index 000000000000..1ffb095991df
--- /dev/null
+++ b/net/tipc/cluster.h
@@ -0,0 +1,92 @@
1/*
2 * net/tipc/cluster.h: Include file for TIPC cluster management routines
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_CLUSTER_H
38#define _TIPC_CLUSTER_H
39
40#include "addr.h"
41#include "zone.h"
42
43#define LOWEST_SLAVE 2048u
44
/**
 * struct cluster - TIPC cluster structure
 * @addr: network address of cluster (node part is zero)
 * @owner: pointer to zone that cluster belongs to
 * @nodes: 1-based array of pointers to all nodes within cluster
 * @highest_node: id of highest numbered (standard) node within cluster
 * @highest_slave: id of highest numbered slave node
 *                 (used for secondary node support)
 */

struct cluster {
	u32 addr;
	struct _zone *owner;
	struct node **nodes;
	u32 highest_node;
	u32 highest_slave;
};
61
62
63extern struct node **local_nodes;
64extern u32 highest_allowed_slave;
65extern struct node_map cluster_bcast_nodes;
66
67void cluster_remove_as_router(struct cluster *c_ptr, u32 router);
68void cluster_send_ext_routes(struct cluster *c_ptr, u32 dest);
69struct node *cluster_select_node(struct cluster *c_ptr, u32 selector);
70u32 cluster_select_router(struct cluster *c_ptr, u32 ref);
71void cluster_recv_routing_table(struct sk_buff *buf);
72struct cluster *cluster_create(u32 addr);
73void cluster_delete(struct cluster *c_ptr);
74void cluster_attach_node(struct cluster *c_ptr, struct node *n_ptr);
75void cluster_send_slave_routes(struct cluster *c_ptr, u32 dest);
76void cluster_broadcast(struct sk_buff *buf);
77int cluster_init(void);
78u32 cluster_next_node(struct cluster *c_ptr, u32 addr);
79void cluster_bcast_new_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi);
80void cluster_send_local_routes(struct cluster *c_ptr, u32 dest);
81void cluster_bcast_lost_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi);
82
83static inline struct cluster *cluster_find(u32 addr)
84{
85 struct _zone *z_ptr = zone_find(addr);
86
87 if (z_ptr)
88 return z_ptr->clusters[1];
89 return 0;
90}
91
92#endif
diff --git a/net/tipc/config.c b/net/tipc/config.c
new file mode 100644
index 000000000000..8ddef4fce2c2
--- /dev/null
+++ b/net/tipc/config.c
@@ -0,0 +1,718 @@
1/*
2 * net/tipc/config.c: TIPC configuration management code
3 *
4 * Copyright (c) 2002-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "dbg.h"
39#include "bearer.h"
40#include "port.h"
41#include "link.h"
42#include "zone.h"
43#include "addr.h"
44#include "name_table.h"
45#include "node.h"
46#include "config.h"
47#include "discover.h"
48
/**
 * struct subscr_data - record of one remote link-event subscription
 * @usr_handle: opaque handle supplied by subscriber
 * @domain: network domain the subscription applies to
 * @port_ref: reference of port used to talk to the subscriber
 * @subd_list: chains entry into manager's subscriber list
 */
struct subscr_data {
	char usr_handle[8];
	u32 domain;
	u32 port_ref;
	struct list_head subd_list;
};
55
/**
 * struct manager - state of TIPC configuration service
 * @user_ref: TIPC user reference of configuration service
 * @port_ref: reference of port used by configuration service
 * @subscr_ref: reference used for subscription handling
 * @link_subscriptions: number of active link-event subscriptions
 * @link_subscribers: list of struct subscr_data entries
 */
struct manager {
	u32 user_ref;
	u32 port_ref;
	u32 subscr_ref;
	u32 link_subscriptions;
	struct list_head link_subscribers;
};
63
static struct manager mng = { 0};

/* Serializes all configuration command processing (see cfg_do_cmd) */
static spinlock_t config_lock = SPIN_LOCK_UNLOCKED;

static const void *req_tlv_area;	/* request message TLV area */
static int req_tlv_space;		/* request message TLV area size */
static int rep_headroom;		/* reply message headroom to use */
71
72
/**
 * cfg_link_event - notify subscribers of a link state change (stub)
 */
void cfg_link_event(u32 addr, char *name, int up)
{
	/* TIPC DOESN'T HANDLE LINK EVENT SUBSCRIPTIONS AT THE MOMENT */
}
77
78
79struct sk_buff *cfg_reply_alloc(int payload_size)
80{
81 struct sk_buff *buf;
82
83 buf = alloc_skb(rep_headroom + payload_size, GFP_ATOMIC);
84 if (buf)
85 skb_reserve(buf, rep_headroom);
86 return buf;
87}
88
/**
 * cfg_append_tlv - append a TLV (type/length/value) element to reply buffer
 *
 * Returns 1 if the TLV was appended, 0 if buffer lacks tailroom.
 */
int cfg_append_tlv(struct sk_buff *buf, int tlv_type,
		   void *tlv_data, int tlv_data_size)
{
	struct tlv_desc *tlv = (struct tlv_desc *)buf->tail;
	int new_tlv_space = TLV_SPACE(tlv_data_size);

	if (skb_tailroom(buf) < new_tlv_space) {
		dbg("cfg_append_tlv unable to append TLV\n");
		return 0;
	}
	skb_put(buf, new_tlv_space);
	tlv->tlv_type = htons(tlv_type);
	tlv->tlv_len = htons(TLV_LENGTH(tlv_data_size));
	/* NULL data pointer or zero size => descriptor only, no payload */
	if (tlv_data_size && tlv_data)
		memcpy(TLV_DATA(tlv), tlv_data, tlv_data_size);
	return 1;
}
106
107struct sk_buff *cfg_reply_unsigned_type(u16 tlv_type, u32 value)
108{
109 struct sk_buff *buf;
110 u32 value_net;
111
112 buf = cfg_reply_alloc(TLV_SPACE(sizeof(value)));
113 if (buf) {
114 value_net = htonl(value);
115 cfg_append_tlv(buf, tlv_type, &value_net,
116 sizeof(value_net));
117 }
118 return buf;
119}
120
121struct sk_buff *cfg_reply_string_type(u16 tlv_type, char *string)
122{
123 struct sk_buff *buf;
124 int string_len = strlen(string) + 1;
125
126 buf = cfg_reply_alloc(TLV_SPACE(string_len));
127 if (buf)
128 cfg_append_tlv(buf, tlv_type, string, string_len);
129 return buf;
130}
131
132
133
134
135#if 0
136
137/* Now obsolete code for handling commands not yet implemented the new way */
138
/* NOTE(review): compiled out via the surrounding #if 0 -- kept only as a
 * reference for commands not yet converted to the new TLV scheme; it
 * references fields (e.g. mng.conn_port_ref) that no longer exist.
 */
int tipc_cfg_cmd(const struct tipc_cmd_msg * msg,
		 char *data,
		 u32 sz,
		 u32 *ret_size,
		 struct tipc_portid *orig)
{
	int rv = -EINVAL;
	u32 cmd = msg->cmd;

	*ret_size = 0;
	switch (cmd) {
	case TIPC_REMOVE_LINK:
	case TIPC_CMD_BLOCK_LINK:
	case TIPC_CMD_UNBLOCK_LINK:
		if (!cfg_check_connection(orig))
			rv = link_control(msg->argv.link_name, msg->cmd, 0);
		break;
	case TIPC_ESTABLISH:
		{
			int connected;

			tipc_isconnected(mng.conn_port_ref, &connected);
			if (connected || !orig) {
				rv = TIPC_FAILURE;
				break;
			}
			rv = tipc_connect2port(mng.conn_port_ref, orig);
			if (rv == TIPC_OK)
				orig = 0;
			break;
		}
	case TIPC_GET_PEER_ADDRESS:
		*ret_size = link_peer_addr(msg->argv.link_name, data, sz);
		break;
	case TIPC_GET_ROUTES:
		rv = TIPC_OK;
		break;
	default: {}
	}
	if (*ret_size)
		rv = TIPC_OK;
	return rv;
}
182
/* NOTE(review): compiled out via the surrounding #if 0 -- legacy command
 * dispatcher kept for reference until all commands use the new TLV scheme.
 */
static void cfg_cmd_event(struct tipc_cmd_msg *msg,
			  char *data,
			  u32 sz,
			  struct tipc_portid const *orig)
{
	int rv = -EINVAL;
	struct tipc_cmd_result_msg rmsg;
	struct iovec msg_sect[2];
	int *arg;

	msg->cmd = ntohl(msg->cmd);

	cfg_prepare_res_msg(msg->cmd, msg->usr_handle, rv, &rmsg, msg_sect,
			    data, 0);
	if (ntohl(msg->magic) != TIPC_MAGIC)
		goto exit;

	switch (msg->cmd) {
	case TIPC_CREATE_LINK:
		if (!cfg_check_connection(orig))
			rv = disc_create_link(&msg->argv.create_link);
		break;
	case TIPC_LINK_SUBSCRIBE:
		{
			struct subscr_data *sub;

			/* Arbitrary cap on concurrent subscriptions */
			if (mng.link_subscriptions > 64)
				break;
			sub = (struct subscr_data *)kmalloc(sizeof(*sub),
							    GFP_ATOMIC);
			if (sub == NULL) {
				warn("Memory squeeze; dropped remote link subscription\n");
				break;
			}
			INIT_LIST_HEAD(&sub->subd_list);
			tipc_createport(mng.user_ref,
					(void *)sub,
					TIPC_HIGH_IMPORTANCE,
					0,
					0,
					(tipc_conn_shutdown_event)cfg_linksubscr_cancel,
					0,
					0,
					(tipc_conn_msg_event)cfg_linksubscr_cancel,
					0,
					&sub->port_ref);
			if (!sub->port_ref) {
				kfree(sub);
				break;
			}
			memcpy(sub->usr_handle,msg->usr_handle,
			       sizeof(sub->usr_handle));
			sub->domain = msg->argv.domain;
			list_add_tail(&sub->subd_list, &mng.link_subscribers);
			tipc_connect2port(sub->port_ref, orig);
			rmsg.retval = TIPC_OK;
			tipc_send(sub->port_ref, 2u, msg_sect);
			mng.link_subscriptions++;
			return;
		}
	default:
		rv = tipc_cfg_cmd(msg, data, sz, (u32 *)&msg_sect[1].iov_len, orig);
	}
 exit:
	rmsg.result_len = htonl(msg_sect[1].iov_len);
	rmsg.retval = htonl(rv);
	cfg_respond(msg_sect, 2u, orig);
}
251#endif
252
253static struct sk_buff *cfg_enable_bearer(void)
254{
255 struct tipc_bearer_config *args;
256
257 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_CONFIG))
258 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
259
260 args = (struct tipc_bearer_config *)TLV_DATA(req_tlv_area);
261 if (tipc_enable_bearer(args->name,
262 ntohl(args->detect_scope),
263 ntohl(args->priority)))
264 return cfg_reply_error_string("unable to enable bearer");
265
266 return cfg_reply_none();
267}
268
269static struct sk_buff *cfg_disable_bearer(void)
270{
271 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_NAME))
272 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
273
274 if (tipc_disable_bearer((char *)TLV_DATA(req_tlv_area)))
275 return cfg_reply_error_string("unable to disable bearer");
276
277 return cfg_reply_none();
278}
279
/**
 * cfg_set_own_addr - assign this node's TIPC network address
 *
 * The address can only be set once; later changes are rejected.
 */
static struct sk_buff *cfg_set_own_addr(void)
{
	u32 addr;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	addr = *(u32 *)TLV_DATA(req_tlv_area);
	addr = ntohl(addr);
	if (addr == tipc_own_addr)
		return cfg_reply_none();
	if (!addr_node_valid(addr))
		return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
					      " (node address)");
	if (tipc_own_addr)
		return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
					      " (cannot change node address once assigned)");

	/* NOTE(review): config_lock is dropped while networking restarts,
	 * presumably because stop_net()/start_net() may re-enter config
	 * code -- confirm
	 */
	spin_unlock_bh(&config_lock);
	stop_net();
	tipc_own_addr = addr;
	start_net();
	spin_lock_bh(&config_lock);
	return cfg_reply_none();
}
305
306static struct sk_buff *cfg_set_remote_mng(void)
307{
308 u32 value;
309
310 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
311 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
312
313 value = *(u32 *)TLV_DATA(req_tlv_area);
314 value = ntohl(value);
315 tipc_remote_management = (value != 0);
316 return cfg_reply_none();
317}
318
319static struct sk_buff *cfg_set_max_publications(void)
320{
321 u32 value;
322
323 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
324 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
325
326 value = *(u32 *)TLV_DATA(req_tlv_area);
327 value = ntohl(value);
328 if (value != delimit(value, 1, 65535))
329 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
330 " (max publications must be 1-65535)");
331 tipc_max_publications = value;
332 return cfg_reply_none();
333}
334
335static struct sk_buff *cfg_set_max_subscriptions(void)
336{
337 u32 value;
338
339 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
340 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
341
342 value = *(u32 *)TLV_DATA(req_tlv_area);
343 value = ntohl(value);
344 if (value != delimit(value, 1, 65535))
345 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
346 " (max subscriptions must be 1-65535");
347 tipc_max_subscriptions = value;
348 return cfg_reply_none();
349}
350
/**
 * cfg_set_max_ports - process "set max ports" configuration request
 *
 * Changing the limit requires a full TIPC restart, which is only
 * permitted while (almost) no other TIPC users exist.
 */
static struct sk_buff *cfg_set_max_ports(void)
{
	int orig_mode;
	u32 value;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
	value = *(u32 *)TLV_DATA(req_tlv_area);
	value = ntohl(value);
	if (value != delimit(value, 127, 65535))
		return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
					      " (max ports must be 127-65535)");

	if (value == tipc_max_ports)
		return cfg_reply_none();

	/* NOTE(review): threshold of 2 presumably allows for TIPC's own
	 * internal users -- confirm
	 */
	if (atomic_read(&tipc_user_count) > 2)
		return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
					      " (cannot change max ports while TIPC users exist)");

	/* config_lock is dropped across the restart; see cfg_do_cmd */
	spin_unlock_bh(&config_lock);
	orig_mode = tipc_get_mode();
	if (orig_mode == TIPC_NET_MODE)
		stop_net();
	stop_core();
	tipc_max_ports = value;
	start_core();
	if (orig_mode == TIPC_NET_MODE)
		start_net();
	spin_lock_bh(&config_lock);
	return cfg_reply_none();
}
383
384static struct sk_buff *set_net_max(int value, int *parameter)
385{
386 int orig_mode;
387
388 if (value != *parameter) {
389 orig_mode = tipc_get_mode();
390 if (orig_mode == TIPC_NET_MODE)
391 stop_net();
392 *parameter = value;
393 if (orig_mode == TIPC_NET_MODE)
394 start_net();
395 }
396
397 return cfg_reply_none();
398}
399
400static struct sk_buff *cfg_set_max_zones(void)
401{
402 u32 value;
403
404 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
405 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
406 value = *(u32 *)TLV_DATA(req_tlv_area);
407 value = ntohl(value);
408 if (value != delimit(value, 1, 255))
409 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
410 " (max zones must be 1-255)");
411 return set_net_max(value, &tipc_max_zones);
412}
413
414static struct sk_buff *cfg_set_max_clusters(void)
415{
416 u32 value;
417
418 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
419 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
420 value = *(u32 *)TLV_DATA(req_tlv_area);
421 value = ntohl(value);
422 if (value != 1)
423 return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
424 " (max clusters fixed at 1)");
425 return cfg_reply_none();
426}
427
428static struct sk_buff *cfg_set_max_nodes(void)
429{
430 u32 value;
431
432 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
433 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
434 value = *(u32 *)TLV_DATA(req_tlv_area);
435 value = ntohl(value);
436 if (value != delimit(value, 8, 2047))
437 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
438 " (max nodes must be 8-2047)");
439 return set_net_max(value, &tipc_max_nodes);
440}
441
442static struct sk_buff *cfg_set_max_slaves(void)
443{
444 u32 value;
445
446 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
447 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
448 value = *(u32 *)TLV_DATA(req_tlv_area);
449 value = ntohl(value);
450 if (value != 0)
451 return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
452 " (max secondary nodes fixed at 0)");
453 return cfg_reply_none();
454}
455
456static struct sk_buff *cfg_set_netid(void)
457{
458 u32 value;
459
460 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
461 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
462 value = *(u32 *)TLV_DATA(req_tlv_area);
463 value = ntohl(value);
464 if (value != delimit(value, 1, 9999))
465 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
466 " (network id must be 1-9999)");
467
468 if (tipc_own_addr)
469 return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
470 " (cannot change network id once part of network)");
471
472 return set_net_max(value, &tipc_net_id);
473}
474
/**
 * cfg_do_cmd - execute a single TIPC configuration command
 * @orig_node: TIPC address of the node that issued the command
 * @cmd: TIPC_CMD_* command code to execute
 * @request_area: pointer to the request's TLV area
 * @request_space: size of the request's TLV area
 * @reply_headroom: headroom to reserve when building the reply buffer
 *
 * Runs with config_lock held; the request details are stashed in
 * file-scope variables (req_tlv_area, req_tlv_space, rep_headroom) so the
 * per-command helper routines can access them without extra arguments.
 *
 * Returns a reply buffer (possibly containing an error-string TLV), or
 * NULL if the command code is unrecognized.
 */
struct sk_buff *cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
			   int request_space, int reply_headroom)
{
	struct sk_buff *rep_tlv_buf;

	spin_lock_bh(&config_lock);

	/* Save request and reply details in a well-known location */

	req_tlv_area = request_area;
	req_tlv_space = request_space;
	rep_headroom = reply_headroom;

	/*
	 * Check command authorization.  Tiers:
	 * - commands from this node itself are always permitted;
	 * - commands >= 0x8000 may only be issued locally;
	 * - any remote command requires tipc_remote_management to be enabled;
	 * - commands >= 0x4000 additionally require the originator to be
	 *   the zone master (looked up via TIPC_ZM_SRV in the name table).
	 */

	if (likely(orig_node == tipc_own_addr)) {
		/* command is permitted */
	} else if (cmd >= 0x8000) {
		rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
						     " (cannot be done remotely)");
		goto exit;
	} else if (!tipc_remote_management) {
		rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NO_REMOTE);
		goto exit;
	}
	else if (cmd >= 0x4000) {
		u32 domain = 0;

		if ((nametbl_translate(TIPC_ZM_SRV, 0, &domain) == 0) ||
		    (domain != orig_node)) {
			rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NOT_ZONE_MSTR);
			goto exit;
		}
	}

	/* Call appropriate processing routine */

	switch (cmd) {
	case TIPC_CMD_NOOP:
		rep_tlv_buf = cfg_reply_none();
		break;
	case TIPC_CMD_GET_NODES:
		rep_tlv_buf = node_get_nodes(req_tlv_area, req_tlv_space);
		break;
	case TIPC_CMD_GET_LINKS:
		rep_tlv_buf = node_get_links(req_tlv_area, req_tlv_space);
		break;
	case TIPC_CMD_SHOW_LINK_STATS:
		rep_tlv_buf = link_cmd_show_stats(req_tlv_area, req_tlv_space);
		break;
	case TIPC_CMD_RESET_LINK_STATS:
		rep_tlv_buf = link_cmd_reset_stats(req_tlv_area, req_tlv_space);
		break;
	case TIPC_CMD_SHOW_NAME_TABLE:
		rep_tlv_buf = nametbl_get(req_tlv_area, req_tlv_space);
		break;
	case TIPC_CMD_GET_BEARER_NAMES:
		rep_tlv_buf = bearer_get_names();
		break;
	case TIPC_CMD_GET_MEDIA_NAMES:
		rep_tlv_buf = media_get_names();
		break;
	case TIPC_CMD_SHOW_PORTS:
		rep_tlv_buf = port_get_ports();
		break;
#if 0
	case TIPC_CMD_SHOW_PORT_STATS:
		rep_tlv_buf = port_show_stats(req_tlv_area, req_tlv_space);
		break;
	case TIPC_CMD_RESET_PORT_STATS:
		rep_tlv_buf = cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED);
		break;
#endif
	case TIPC_CMD_SET_LOG_SIZE:
		rep_tlv_buf = log_resize(req_tlv_area, req_tlv_space);
		break;
	case TIPC_CMD_DUMP_LOG:
		rep_tlv_buf = log_dump();
		break;
	case TIPC_CMD_SET_LINK_TOL:
	case TIPC_CMD_SET_LINK_PRI:
	case TIPC_CMD_SET_LINK_WINDOW:
		rep_tlv_buf = link_cmd_config(req_tlv_area, req_tlv_space, cmd);
		break;
	case TIPC_CMD_ENABLE_BEARER:
		rep_tlv_buf = cfg_enable_bearer();
		break;
	case TIPC_CMD_DISABLE_BEARER:
		rep_tlv_buf = cfg_disable_bearer();
		break;
	case TIPC_CMD_SET_NODE_ADDR:
		rep_tlv_buf = cfg_set_own_addr();
		break;
	case TIPC_CMD_SET_REMOTE_MNG:
		rep_tlv_buf = cfg_set_remote_mng();
		break;
	case TIPC_CMD_SET_MAX_PORTS:
		rep_tlv_buf = cfg_set_max_ports();
		break;
	case TIPC_CMD_SET_MAX_PUBL:
		rep_tlv_buf = cfg_set_max_publications();
		break;
	case TIPC_CMD_SET_MAX_SUBSCR:
		rep_tlv_buf = cfg_set_max_subscriptions();
		break;
	case TIPC_CMD_SET_MAX_ZONES:
		rep_tlv_buf = cfg_set_max_zones();
		break;
	case TIPC_CMD_SET_MAX_CLUSTERS:
		rep_tlv_buf = cfg_set_max_clusters();
		break;
	case TIPC_CMD_SET_MAX_NODES:
		rep_tlv_buf = cfg_set_max_nodes();
		break;
	case TIPC_CMD_SET_MAX_SLAVES:
		rep_tlv_buf = cfg_set_max_slaves();
		break;
	case TIPC_CMD_SET_NETID:
		rep_tlv_buf = cfg_set_netid();
		break;
	case TIPC_CMD_GET_REMOTE_MNG:
		rep_tlv_buf = cfg_reply_unsigned(tipc_remote_management);
		break;
	case TIPC_CMD_GET_MAX_PORTS:
		rep_tlv_buf = cfg_reply_unsigned(tipc_max_ports);
		break;
	case TIPC_CMD_GET_MAX_PUBL:
		rep_tlv_buf = cfg_reply_unsigned(tipc_max_publications);
		break;
	case TIPC_CMD_GET_MAX_SUBSCR:
		rep_tlv_buf = cfg_reply_unsigned(tipc_max_subscriptions);
		break;
	case TIPC_CMD_GET_MAX_ZONES:
		rep_tlv_buf = cfg_reply_unsigned(tipc_max_zones);
		break;
	case TIPC_CMD_GET_MAX_CLUSTERS:
		rep_tlv_buf = cfg_reply_unsigned(tipc_max_clusters);
		break;
	case TIPC_CMD_GET_MAX_NODES:
		rep_tlv_buf = cfg_reply_unsigned(tipc_max_nodes);
		break;
	case TIPC_CMD_GET_MAX_SLAVES:
		rep_tlv_buf = cfg_reply_unsigned(tipc_max_slaves);
		break;
	case TIPC_CMD_GET_NETID:
		rep_tlv_buf = cfg_reply_unsigned(tipc_net_id);
		break;
	default:
		/* Unknown command: no reply buffer is generated */
		rep_tlv_buf = NULL;
		break;
	}

	/* Return reply buffer */
exit:
	spin_unlock_bh(&config_lock);
	return rep_tlv_buf;
}
632
/**
 * cfg_named_msg_event - process a configuration message sent to the cfg port
 *
 * Validates the configuration message header (silently discarding invalid
 * messages), executes the requested command via cfg_do_cmd(), and sends the
 * result back to the originator.  If no reply buffer could be generated,
 * the original request buffer is sent back instead; ownership of it is
 * taken from the caller by clearing *buf.
 */
static void cfg_named_msg_event(void *userdata,
				u32 port_ref,
				struct sk_buff **buf,
				const unchar *msg,
				u32 size,
				u32 importance,
				struct tipc_portid const *orig,
				struct tipc_name_seq const *dest)
{
	struct tipc_cfg_msg_hdr *req_hdr;
	struct tipc_cfg_msg_hdr *rep_hdr;
	struct sk_buff *rep_buf;

	/* Validate configuration message header (ignore invalid message) */

	req_hdr = (struct tipc_cfg_msg_hdr *)msg;
	if ((size < sizeof(*req_hdr)) ||
	    (size != TCM_ALIGN(ntohl(req_hdr->tcm_len))) ||
	    (ntohs(req_hdr->tcm_flags) != TCM_F_REQUEST)) {
		warn("discarded invalid configuration message\n");
		return;
	}

	/* Generate reply for request (if can't, return request) */

	rep_buf = cfg_do_cmd(orig->node,
			     ntohs(req_hdr->tcm_type),
			     msg + sizeof(*req_hdr),
			     size - sizeof(*req_hdr),
			     BUF_HEADROOM + MAX_H_SIZE + sizeof(*rep_hdr));
	if (rep_buf) {
		/* Prepend reply header: a copy of the request header with
		 * the length updated and the REQUEST flag cleared */
		skb_push(rep_buf, sizeof(*rep_hdr));
		rep_hdr = (struct tipc_cfg_msg_hdr *)rep_buf->data;
		memcpy(rep_hdr, req_hdr, sizeof(*rep_hdr));
		rep_hdr->tcm_len = htonl(rep_buf->len);
		rep_hdr->tcm_flags &= htons(~TCM_F_REQUEST);
	} else {
		/* No reply generated: bounce the request buffer back */
		rep_buf = *buf;
		*buf = NULL;
	}

	/* NEED TO ADD CODE TO HANDLE FAILED SEND (SUCH AS CONGESTION) */
	tipc_send_buf2port(port_ref, orig, rep_buf, rep_buf->len);
}
677
/**
 * cfg_init - create and publish the TIPC configuration service
 *
 * Attaches a TIPC user for the configuration manager, creates the port
 * that receives configuration messages (delivered to cfg_named_msg_event),
 * and publishes the port under the reserved TIPC_CFG_SRV name with this
 * node's own address as the instance.
 *
 * Returns 0 on success; on failure, detaches the partially-initialized
 * user and returns the error code from the step that failed.
 */
int cfg_init(void)
{
	struct tipc_name_seq seq;
	int res;

	memset(&mng, 0, sizeof(mng));
	INIT_LIST_HEAD(&mng.link_subscribers);

	res = tipc_attach(&mng.user_ref, 0, 0);
	if (res)
		goto failed;

	/* Only the named-message callback is needed; all others are NULL */
	res = tipc_createport(mng.user_ref, 0, TIPC_CRITICAL_IMPORTANCE,
			      NULL, NULL, NULL,
			      NULL, cfg_named_msg_event, NULL,
			      NULL, &mng.port_ref);
	if (res)
		goto failed;

	seq.type = TIPC_CFG_SRV;
	seq.lower = seq.upper = tipc_own_addr;
	res = nametbl_publish_rsv(mng.port_ref, TIPC_ZONE_SCOPE, &seq);
	if (res)
		goto failed;

	return 0;

failed:
	err("Unable to create configuration service\n");
	tipc_detach(mng.user_ref);
	mng.user_ref = 0;
	return res;
}
711
712void cfg_stop(void)
713{
714 if (mng.user_ref) {
715 tipc_detach(mng.user_ref);
716 mng.user_ref = 0;
717 }
718}
diff --git a/net/tipc/config.h b/net/tipc/config.h
new file mode 100644
index 000000000000..646377d40454
--- /dev/null
+++ b/net/tipc/config.h
@@ -0,0 +1,80 @@
1/*
2 * net/tipc/config.h: Include file for TIPC configuration service code
3 *
4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_CONFIG_H
38#define _TIPC_CONFIG_H
39
40/* ---------------------------------------------------------------------- */
41
42#include <linux/tipc.h>
43#include <linux/tipc_config.h>
44#include "link.h"
45
46struct sk_buff *cfg_reply_alloc(int payload_size);
47int cfg_append_tlv(struct sk_buff *buf, int tlv_type,
48 void *tlv_data, int tlv_data_size);
49struct sk_buff *cfg_reply_unsigned_type(u16 tlv_type, u32 value);
50struct sk_buff *cfg_reply_string_type(u16 tlv_type, char *string);
51
/* Builds an empty (payload-less) reply buffer, used to indicate success. */
static inline struct sk_buff *cfg_reply_none(void)
{
	return cfg_reply_alloc(0);
}
56
/* Builds a reply carrying @value in a TIPC_TLV_UNSIGNED TLV. */
static inline struct sk_buff *cfg_reply_unsigned(u32 value)
{
	return cfg_reply_unsigned_type(TIPC_TLV_UNSIGNED, value);
}
61
/* Builds a reply carrying @string in a TIPC_TLV_ERROR_STRING TLV. */
static inline struct sk_buff *cfg_reply_error_string(char *string)
{
	return cfg_reply_string_type(TIPC_TLV_ERROR_STRING, string);
}
66
/* Builds a reply carrying @string in a TIPC_TLV_ULTRA_STRING TLV. */
static inline struct sk_buff *cfg_reply_ultra_string(char *string)
{
	return cfg_reply_string_type(TIPC_TLV_ULTRA_STRING, string);
}
71
72struct sk_buff *cfg_do_cmd(u32 orig_node, u16 cmd,
73 const void *req_tlv_area, int req_tlv_space,
74 int headroom);
75
76void cfg_link_event(u32 addr, char *name, int up);
77int cfg_init(void);
78void cfg_stop(void);
79
80#endif
diff --git a/net/tipc/core.c b/net/tipc/core.c
new file mode 100644
index 000000000000..e83ac06e31ba
--- /dev/null
+++ b/net/tipc/core.c
@@ -0,0 +1,285 @@
1/*
2 * net/tipc/core.c: TIPC module code
3 *
4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <linux/init.h>
38#include <linux/module.h>
39#include <linux/kernel.h>
40#include <linux/version.h>
41#include <linux/random.h>
42
43#include "core.h"
44#include "dbg.h"
45#include "ref.h"
46#include "net.h"
47#include "user_reg.h"
48#include "name_table.h"
49#include "subscr.h"
50#include "config.h"
51
52int eth_media_start(void);
53void eth_media_stop(void);
54int handler_start(void);
55void handler_stop(void);
56int socket_init(void);
57void socket_stop(void);
58int netlink_start(void);
59void netlink_stop(void);
60
61#define MOD_NAME "tipc_start: "
62
63#ifndef CONFIG_TIPC_ZONES
64#define CONFIG_TIPC_ZONES 3
65#endif
66
67#ifndef CONFIG_TIPC_CLUSTERS
68#define CONFIG_TIPC_CLUSTERS 1
69#endif
70
71#ifndef CONFIG_TIPC_NODES
72#define CONFIG_TIPC_NODES 255
73#endif
74
75#ifndef CONFIG_TIPC_SLAVE_NODES
76#define CONFIG_TIPC_SLAVE_NODES 0
77#endif
78
79#ifndef CONFIG_TIPC_PORTS
80#define CONFIG_TIPC_PORTS 8191
81#endif
82
83#ifndef CONFIG_TIPC_LOG
84#define CONFIG_TIPC_LOG 0
85#endif
86
87/* global variables used by multiple sub-systems within TIPC */
88
89int tipc_mode = TIPC_NOT_RUNNING;
90int tipc_random;
91atomic_t tipc_user_count = ATOMIC_INIT(0);
92
93const char tipc_alphabet[] =
94 "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_";
95
96/* configurable TIPC parameters */
97
98u32 tipc_own_addr;
99int tipc_max_zones;
100int tipc_max_clusters;
101int tipc_max_nodes;
102int tipc_max_slaves;
103int tipc_max_ports;
104int tipc_max_subscriptions;
105int tipc_max_publications;
106int tipc_net_id;
107int tipc_remote_management;
108
109
/* Returns the current TIPC operating mode (e.g. TIPC_NOT_RUNNING,
 * TIPC_NODE_MODE, TIPC_NET_MODE). */
int tipc_get_mode(void)
{
	return tipc_mode;
}
114
/**
 * stop_net - shut down TIPC networking sub-systems
 *
 * Stops the Ethernet media layer first, then the generic networking layer
 * (the reverse of the order used by start_net()).
 */

void stop_net(void)
{
	eth_media_stop();
	tipc_stop_net();
}
124
/**
 * start_net - start TIPC networking sub-systems
 *
 * Brings up the generic networking layer, then the Ethernet media layer.
 * If either step fails, whatever was started is torn down again via
 * stop_net() and the error code is returned; returns 0 on success.
 */

int start_net(void)
{
	int res;

	if ((res = tipc_start_net()) ||
	    (res = eth_media_start())) {
		stop_net();
	}
	return res;
}
139
/**
 * stop_core - switch TIPC from SINGLE NODE to NOT RUNNING mode
 *
 * No-op unless TIPC is currently in single node mode.  The mode is
 * flipped to TIPC_NOT_RUNNING before the sub-systems are stopped;
 * networking is expected to have been shut down already (see stop_net()).
 * NOTE(review): the stop order here is not an exact mirror of the
 * start_core() order - confirm the ordering constraints are intentional.
 */

void stop_core(void)
{
	if (tipc_mode != TIPC_NODE_MODE)
		return;

	tipc_mode = TIPC_NOT_RUNNING;

	netlink_stop();
	handler_stop();
	cfg_stop();
	subscr_stop();
	reg_stop();
	nametbl_stop();
	ref_table_stop();
	socket_stop();
}
160
/**
 * start_core - switch TIPC from NOT RUNNING to SINGLE NODE mode
 *
 * Returns 0 on success.  Fails with -ENOPROTOOPT if TIPC is already
 * running; on any other failure the partially-started sub-systems are
 * torn down again via stop_core() and the error code is returned.
 */

int start_core(void)
{
	int res;

	if (tipc_mode != TIPC_NOT_RUNNING)
		return -ENOPROTOOPT;

	get_random_bytes(&tipc_random, sizeof(tipc_random));
	tipc_mode = TIPC_NODE_MODE;

	/* The reference table is sized for the worst case of every port and
	 * every subscription holding a reference simultaneously.
	 * subscr_start() and cfg_init() are deferred via k_signal() rather
	 * than called directly - presumably to run them from the TIPC
	 * handler context; confirm against handler.c.
	 */
	if ((res = handler_start()) ||
	    (res = ref_table_init(tipc_max_ports + tipc_max_subscriptions,
				  tipc_random)) ||
	    (res = reg_start()) ||
	    (res = nametbl_init()) ||
	    (res = k_signal((Handler)subscr_start, 0)) ||
	    (res = k_signal((Handler)cfg_init, 0)) ||
	    (res = netlink_start()) ||
	    (res = socket_init())) {
		stop_core();
	}
	return res;
}
188
189
190static int __init tipc_init(void)
191{
192 int res;
193
194 log_reinit(CONFIG_TIPC_LOG);
195 info("Activated (compiled " __DATE__ " " __TIME__ ")\n");
196
197 tipc_own_addr = 0;
198 tipc_remote_management = 1;
199 tipc_max_publications = 10000;
200 tipc_max_subscriptions = 2000;
201 tipc_max_ports = delimit(CONFIG_TIPC_PORTS, 127, 65536);
202 tipc_max_zones = delimit(CONFIG_TIPC_ZONES, 1, 511);
203 tipc_max_clusters = delimit(CONFIG_TIPC_CLUSTERS, 1, 1);
204 tipc_max_nodes = delimit(CONFIG_TIPC_NODES, 8, 2047);
205 tipc_max_slaves = delimit(CONFIG_TIPC_SLAVE_NODES, 0, 2047);
206 tipc_net_id = 4711;
207
208 if ((res = start_core()))
209 err("Unable to start in single node mode\n");
210 else
211 info("Started in single node mode\n");
212 return res;
213}
214
/**
 * tipc_exit - module exit point
 *
 * Shuts down networking first, then the core sub-systems (stop_core()
 * is a no-op unless TIPC is in single node mode), and finally releases
 * the log buffer.
 */
static void __exit tipc_exit(void)
{
	stop_net();
	stop_core();
	info("Deactivated\n");
	log_stop();
}
222
223module_init(tipc_init);
224module_exit(tipc_exit);
225
226MODULE_DESCRIPTION("TIPC: Transparent Inter Process Communication");
227MODULE_LICENSE("Dual BSD/GPL");
228
229/* Native TIPC API for kernel-space applications (see tipc.h) */
230
231EXPORT_SYMBOL(tipc_attach);
232EXPORT_SYMBOL(tipc_detach);
233EXPORT_SYMBOL(tipc_get_addr);
234EXPORT_SYMBOL(tipc_get_mode);
235EXPORT_SYMBOL(tipc_createport);
236EXPORT_SYMBOL(tipc_deleteport);
237EXPORT_SYMBOL(tipc_ownidentity);
238EXPORT_SYMBOL(tipc_portimportance);
239EXPORT_SYMBOL(tipc_set_portimportance);
240EXPORT_SYMBOL(tipc_portunreliable);
241EXPORT_SYMBOL(tipc_set_portunreliable);
242EXPORT_SYMBOL(tipc_portunreturnable);
243EXPORT_SYMBOL(tipc_set_portunreturnable);
244EXPORT_SYMBOL(tipc_publish);
245EXPORT_SYMBOL(tipc_withdraw);
246EXPORT_SYMBOL(tipc_connect2port);
247EXPORT_SYMBOL(tipc_disconnect);
248EXPORT_SYMBOL(tipc_shutdown);
249EXPORT_SYMBOL(tipc_isconnected);
250EXPORT_SYMBOL(tipc_peer);
251EXPORT_SYMBOL(tipc_ref_valid);
252EXPORT_SYMBOL(tipc_send);
253EXPORT_SYMBOL(tipc_send_buf);
254EXPORT_SYMBOL(tipc_send2name);
255EXPORT_SYMBOL(tipc_forward2name);
256EXPORT_SYMBOL(tipc_send_buf2name);
257EXPORT_SYMBOL(tipc_forward_buf2name);
258EXPORT_SYMBOL(tipc_send2port);
259EXPORT_SYMBOL(tipc_forward2port);
260EXPORT_SYMBOL(tipc_send_buf2port);
261EXPORT_SYMBOL(tipc_forward_buf2port);
262EXPORT_SYMBOL(tipc_multicast);
263/* EXPORT_SYMBOL(tipc_multicast_buf); not available yet */
264EXPORT_SYMBOL(tipc_ispublished);
265EXPORT_SYMBOL(tipc_available_nodes);
266
267/* TIPC API for external bearers (see tipc_bearer.h) */
268
269EXPORT_SYMBOL(tipc_block_bearer);
270EXPORT_SYMBOL(tipc_continue);
271EXPORT_SYMBOL(tipc_disable_bearer);
272EXPORT_SYMBOL(tipc_enable_bearer);
273EXPORT_SYMBOL(tipc_recv_msg);
274EXPORT_SYMBOL(tipc_register_media);
275
276/* TIPC API for external APIs (see tipc_port.h) */
277
278EXPORT_SYMBOL(tipc_createport_raw);
279EXPORT_SYMBOL(tipc_set_msg_option);
280EXPORT_SYMBOL(tipc_reject_msg);
281EXPORT_SYMBOL(tipc_send_buf_fast);
282EXPORT_SYMBOL(tipc_acknowledge);
283EXPORT_SYMBOL(tipc_get_port);
284EXPORT_SYMBOL(tipc_get_handle);
285
diff --git a/net/tipc/core.h b/net/tipc/core.h
new file mode 100644
index 000000000000..b69b60b2cc86
--- /dev/null
+++ b/net/tipc/core.h
@@ -0,0 +1,316 @@
1/*
2 * net/tipc/core.h: Include file for TIPC global declarations
3 *
4 * Copyright (c) 2005-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_CORE_H
38#define _TIPC_CORE_H
39
40#include <net/tipc/tipc.h>
41#include <linux/types.h>
42#include <linux/kernel.h>
43#include <linux/errno.h>
44#include <linux/mm.h>
45#include <linux/timer.h>
46#include <linux/string.h>
47#include <asm/uaccess.h>
48#include <linux/interrupt.h>
49#include <asm/atomic.h>
50#include <asm/hardirq.h>
51#include <linux/netdevice.h>
52#include <linux/in.h>
53#include <linux/list.h>
54#include <linux/vmalloc.h>
55
56/*
57 * TIPC debugging code
58 */
59
60#define assert(i) BUG_ON(!(i))
61
62struct tipc_msg;
63extern struct print_buf *CONS, *LOG;
64extern struct print_buf *TEE(struct print_buf *, struct print_buf *);
65void msg_print(struct print_buf*,struct tipc_msg *,const char*);
66void tipc_printf(struct print_buf *, const char *fmt, ...);
67void tipc_dump(struct print_buf*,const char *fmt, ...);
68
69#ifdef CONFIG_TIPC_DEBUG
70
71/*
72 * TIPC debug support included:
73 * - system messages are printed to TIPC_OUTPUT print buffer
74 * - debug messages are printed to DBG_OUTPUT print buffer
75 */
76
77#define err(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_ERR "TIPC: " fmt, ## arg)
78#define warn(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_WARNING "TIPC: " fmt, ## arg)
79#define info(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_NOTICE "TIPC: " fmt, ## arg)
80
81#define dbg(fmt, arg...) do {if (DBG_OUTPUT) tipc_printf(DBG_OUTPUT, fmt, ## arg);} while(0)
82#define msg_dbg(msg, txt) do {if (DBG_OUTPUT) msg_print(DBG_OUTPUT, msg, txt);} while(0)
83#define dump(fmt, arg...) do {if (DBG_OUTPUT) tipc_dump(DBG_OUTPUT, fmt, ##arg);} while(0)
84
85
86/*
87 * By default, TIPC_OUTPUT is defined to be system console and TIPC log buffer,
88 * while DBG_OUTPUT is the null print buffer. These defaults can be changed
89 * here, or on a per .c file basis, by redefining these symbols. The following
90 * print buffer options are available:
91 *
92 * NULL : Output to null print buffer (i.e. print nowhere)
93 * CONS : Output to system console
94 * LOG : Output to TIPC log buffer
95 * &buf : Output to user-defined buffer (struct print_buf *)
96 * TEE(&buf_a,&buf_b) : Output to two print buffers (eg. TEE(CONS,LOG) )
97 */
98
99#ifndef TIPC_OUTPUT
100#define TIPC_OUTPUT TEE(CONS,LOG)
101#endif
102
103#ifndef DBG_OUTPUT
104#define DBG_OUTPUT NULL
105#endif
106
107#else
108
109#ifndef DBG_OUTPUT
110#define DBG_OUTPUT NULL
111#endif
112
113/*
114 * TIPC debug support not included:
115 * - system messages are printed to system console
116 * - debug messages are not printed
117 */
118
119#define err(fmt, arg...) printk(KERN_ERR "TIPC: " fmt , ## arg)
120#define info(fmt, arg...) printk(KERN_INFO "TIPC: " fmt , ## arg)
121#define warn(fmt, arg...) printk(KERN_WARNING "TIPC: " fmt , ## arg)
122
123#define dbg(fmt, arg...) do {} while (0)
124#define msg_dbg(msg,txt) do {} while (0)
125#define dump(fmt,arg...) do {} while (0)
126
127#endif
128
129
130/*
131 * TIPC-specific error codes
132 */
133
134#define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */
135
136/*
137 * Global configuration variables
138 */
139
140extern u32 tipc_own_addr;
141extern int tipc_max_zones;
142extern int tipc_max_clusters;
143extern int tipc_max_nodes;
144extern int tipc_max_slaves;
145extern int tipc_max_ports;
146extern int tipc_max_subscriptions;
147extern int tipc_max_publications;
148extern int tipc_net_id;
149extern int tipc_remote_management;
150
151/*
152 * Other global variables
153 */
154
155extern int tipc_mode;
156extern int tipc_random;
157extern const char tipc_alphabet[];
158extern atomic_t tipc_user_count;
159
160
161/*
162 * Routines available to privileged subsystems
163 */
164
165extern int start_core(void);
166extern void stop_core(void);
167extern int start_net(void);
168extern void stop_net(void);
169
/* Clamps val to the inclusive range [min, max]. */
static inline int delimit(int val, int min, int max)
{
	if (val < min)
		return min;
	return (val > max) ? max : val;
}
178
179
180/*
181 * TIPC timer and signal code
182 */
183
184typedef void (*Handler) (unsigned long);
185
186u32 k_signal(Handler routine, unsigned long argument);
187
188/**
189 * k_init_timer - initialize a timer
190 * @timer: pointer to timer structure
191 * @routine: pointer to routine to invoke when timer expires
192 * @argument: value to pass to routine when timer expires
193 *
194 * Timer must be initialized before use (and terminated when no longer needed).
195 */
196
static inline void k_init_timer(struct timer_list *timer, Handler routine,
				unsigned long argument)
{
	dbg("initializing timer %p\n", timer);
	init_timer(timer);
	/* expiry routine and its argument are fixed for the timer's lifetime */
	timer->function = routine;
	timer->data = argument;
}
205
206/**
207 * k_start_timer - start a timer
208 * @timer: pointer to timer structure
209 * @msec: time to delay (in ms)
210 *
211 * Schedules a previously initialized timer for later execution.
212 * If timer is already running, the new timeout overrides the previous request.
213 *
214 * To ensure the timer doesn't expire before the specified delay elapses,
215 * the amount of delay is rounded up when converting to the jiffies
216 * then an additional jiffy is added to account for the fact that
217 * the starting time may be in the middle of the current jiffy.
218 */
219
220static inline void k_start_timer(struct timer_list *timer, unsigned long msec)
221{
222 dbg("starting timer %p for %u\n", timer, msec);
223 mod_timer(timer, jiffies + msecs_to_jiffies(msec) + 1);
224}
225
226/**
227 * k_cancel_timer - cancel a timer
228 * @timer: pointer to timer structure
229 *
230 * Cancels a previously initialized timer.
231 * Can be called safely even if the timer is already inactive.
232 *
233 * WARNING: Must not be called when holding locks required by the timer's
234 * timeout routine, otherwise deadlock can occur on SMP systems!
235 */
236
static inline void k_cancel_timer(struct timer_list *timer)
{
	dbg("cancelling timer %p\n", timer);
	/* del_timer_sync() waits for a running timeout routine to complete,
	 * hence the deadlock warning in the kernel-doc comment above */
	del_timer_sync(timer);
}
242
243/**
244 * k_term_timer - terminate a timer
245 * @timer: pointer to timer structure
246 *
247 * Prevents further use of a previously initialized timer.
248 *
249 * WARNING: Caller must ensure timer isn't currently running.
250 *
251 * (Do not "enhance" this routine to automatically cancel an active timer,
252 * otherwise deadlock can arise when a timeout routine calls k_term_timer.)
253 */
254
static inline void k_term_timer(struct timer_list *timer)
{
	/* Currently only logs; no per-timer resources need releasing */
	dbg("terminating timer %p\n", timer);
}
259
260
261/*
262 * TIPC message buffer code
263 *
264 * TIPC message buffer headroom leaves room for 14 byte Ethernet header,
265 * while ensuring TIPC header is word aligned for quicker access
266 */
267
268#define BUF_HEADROOM 16u
269
270struct tipc_skb_cb {
271 void *handle;
272};
273
274#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
275
276
/* Returns the TIPC message header at the start of the buffer's data area */
static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
{
	return (struct tipc_msg *)skb->data;
}
281
282/**
283 * buf_acquire - creates a TIPC message buffer
284 * @size: message size (including TIPC header)
285 *
286 * Returns a new buffer. Space is reserved for a data link header.
287 */
288
static inline struct sk_buff *buf_acquire(u32 size)
{
	struct sk_buff *skb;
	/* total allocation (headroom + payload) rounded up to a multiple of 4 */
	unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;

	/* GFP_ATOMIC: may be called from non-sleeping (e.g. softirq) context */
	skb = alloc_skb(buf_size, GFP_ATOMIC);
	if (skb) {
		skb_reserve(skb, BUF_HEADROOM);
		skb_put(skb, size);
		skb->next = NULL;
	}
	/* returns NULL if the atomic allocation failed */
	return skb;
}
302
303/**
304 * buf_discard - frees a TIPC message buffer
305 * @skb: message buffer
306 *
307 * Frees a new buffer. If passed NULL, just returns.
308 */
309
static inline void buf_discard(struct sk_buff *skb)
{
	/* NULL check lets callers discard unconditionally */
	if (likely(skb != NULL))
		kfree_skb(skb);
}
315
316#endif
diff --git a/net/tipc/dbg.c b/net/tipc/dbg.c
new file mode 100644
index 000000000000..7ed60a1cfbb8
--- /dev/null
+++ b/net/tipc/dbg.c
@@ -0,0 +1,395 @@
1/*
2 * net/tipc/dbg.c: TIPC print buffer routines for debugging
3 *
4 * Copyright (c) 1996-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "config.h"
39#include "dbg.h"
40
41#define MAX_STRING 512
42
43static char print_string[MAX_STRING];
44static spinlock_t print_lock = SPIN_LOCK_UNLOCKED;
45
46static struct print_buf cons_buf = { NULL, 0, NULL, NULL };
47struct print_buf *CONS = &cons_buf;
48
49static struct print_buf log_buf = { NULL, 0, NULL, NULL };
50struct print_buf *LOG = &log_buf;
51
52
53#define FORMAT(PTR,LEN,FMT) \
54{\
55 va_list args;\
56 va_start(args, FMT);\
57 LEN = vsprintf(PTR, FMT, args);\
58 va_end(args);\
59 *(PTR + LEN) = '\0';\
60}
61
62/*
63 * Locking policy when using print buffers.
64 *
65 * 1) Routines of the form printbuf_XXX() rely on the caller to prevent
66 * simultaneous use of the print buffer(s) being manipulated.
67 * 2) tipc_printf() uses 'print_lock' to prevent simultaneous use of
68 * 'print_string' and to protect its print buffer(s).
69 * 3) TEE() uses 'print_lock' to protect its print buffer(s).
70 * 4) Routines of the form log_XXX() uses 'print_lock' to protect LOG.
71 */
72
73/**
74 * printbuf_init - initialize print buffer to empty
75 */
76
77void printbuf_init(struct print_buf *pb, char *raw, u32 sz)
78{
79 if (!pb || !raw || (sz < (MAX_STRING + 1)))
80 return;
81
82 pb->crs = pb->buf = raw;
83 pb->size = sz;
84 pb->next = 0;
85 pb->buf[0] = 0;
86 pb->buf[sz-1] = ~0;
87}
88
89/**
90 * printbuf_reset - reinitialize print buffer to empty state
91 */
92
93void printbuf_reset(struct print_buf *pb)
94{
95 if (pb && pb->buf)
96 printbuf_init(pb, pb->buf, pb->size);
97}
98
99/**
100 * printbuf_empty - test if print buffer is in empty state
101 */
102
103int printbuf_empty(struct print_buf *pb)
104{
105 return (!pb || !pb->buf || (pb->crs == pb->buf));
106}
107
/**
 * printbuf_validate - check for print buffer overflow
 *
 * Verifies that a print buffer has captured all data written to it.
 * If data has been lost, linearize buffer and prepend an error message
 *
 * Returns length of print buffer data string (including trailing NULL)
 */

int printbuf_validate(struct print_buf *pb)
{
	char *err = " *** PRINT BUFFER WRAPPED AROUND ***\n";
	char *cp_buf;
	struct print_buf cb;

	if (!pb || !pb->buf)
		return 0;

	/* printbuf_init() seeds the final byte with ~0; a NUL here means
	 * a write reached the end of the buffer and output wrapped
	 */
	if (pb->buf[pb->size - 1] == '\0') {
		cp_buf = kmalloc(pb->size, GFP_ATOMIC);
		if (cp_buf != NULL){
			/* Round-trip through a temporary buffer to put the
			 * wrapped contents back in chronological order, then
			 * stamp the warning over the (oldest) leading bytes
			 */
			printbuf_init(&cb, cp_buf, pb->size);
			printbuf_move(&cb, pb);
			printbuf_move(pb, &cb);
			kfree(cp_buf);
			memcpy(pb->buf, err, strlen(err));
		} else {
			/* No memory to linearize; discard contents instead */
			printbuf_reset(pb);
			tipc_printf(pb, err);
		}
	}
	return (pb->crs - pb->buf + 1);
}
141
/**
 * printbuf_move - move print buffer contents to another print buffer
 * @pb_to: destination print buffer
 * @pb_from: source print buffer
 *
 * Current contents of destination print buffer (if any) are discarded.
 * Source print buffer becomes empty if a successful move occurs.
 * If the destination is smaller than the source, it ends up holding
 * only an overflow warning.
 */

void printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from)
{
	int len;

	/* Handle the cases where contents can't be moved */

	if (!pb_to || !pb_to->buf)
		return;

	if (!pb_from || !pb_from->buf) {
		printbuf_reset(pb_to);
		return;
	}

	if (pb_to->size < pb_from->size) {
		printbuf_reset(pb_to);
		tipc_printf(pb_to, "*** PRINT BUFFER OVERFLOW ***");
		return;
	}

	/* Copy data from char after cursor to end (if used) */
	/* A zeroed final byte marks a wrapped source, in which case the
	 * region after the cursor holds the oldest data and goes first
	 */
	len = pb_from->buf + pb_from->size - pb_from->crs - 2;
	if ((pb_from->buf[pb_from->size-1] == 0) && (len > 0)) {
		strcpy(pb_to->buf, pb_from->crs + 1);
		pb_to->crs = pb_to->buf + len;
	} else
		pb_to->crs = pb_to->buf;

	/* Copy data from start to cursor (always) */
	len = pb_from->crs - pb_from->buf;
	strcpy(pb_to->crs, pb_from->buf);
	pb_to->crs += len;

	printbuf_reset(pb_from);
}
184
185/**
186 * tipc_printf - append formatted output to print buffer chain
187 */
188
189void tipc_printf(struct print_buf *pb, const char *fmt, ...)
190{
191 int chars_to_add;
192 int chars_left;
193 char save_char;
194 struct print_buf *pb_next;
195
196 spin_lock_bh(&print_lock);
197 FORMAT(print_string, chars_to_add, fmt);
198 if (chars_to_add >= MAX_STRING)
199 strcpy(print_string, "*** STRING TOO LONG ***");
200
201 while (pb) {
202 if (pb == CONS)
203 printk(print_string);
204 else if (pb->buf) {
205 chars_left = pb->buf + pb->size - pb->crs - 1;
206 if (chars_to_add <= chars_left) {
207 strcpy(pb->crs, print_string);
208 pb->crs += chars_to_add;
209 } else {
210 strcpy(pb->buf, print_string + chars_left);
211 save_char = print_string[chars_left];
212 print_string[chars_left] = 0;
213 strcpy(pb->crs, print_string);
214 print_string[chars_left] = save_char;
215 pb->crs = pb->buf + chars_to_add - chars_left;
216 }
217 }
218 pb_next = pb->next;
219 pb->next = 0;
220 pb = pb_next;
221 }
222 spin_unlock_bh(&print_lock);
223}
224
225/**
226 * TEE - perform next output operation on both print buffers
227 */
228
229struct print_buf *TEE(struct print_buf *b0, struct print_buf *b1)
230{
231 struct print_buf *pb = b0;
232
233 if (!b0 || (b0 == b1))
234 return b1;
235 if (!b1)
236 return b0;
237
238 spin_lock_bh(&print_lock);
239 while (pb->next) {
240 if ((pb->next == b1) || (pb->next == b0))
241 pb->next = pb->next->next;
242 else
243 pb = pb->next;
244 }
245 pb->next = b1;
246 spin_unlock_bh(&print_lock);
247 return b0;
248}
249
250/**
251 * print_to_console - write string of bytes to console in multiple chunks
252 */
253
254static void print_to_console(char *crs, int len)
255{
256 int rest = len;
257
258 while (rest > 0) {
259 int sz = rest < MAX_STRING ? rest : MAX_STRING;
260 char c = crs[sz];
261
262 crs[sz] = 0;
263 printk((const char *)crs);
264 crs[sz] = c;
265 rest -= sz;
266 crs += sz;
267 }
268}
269
/**
 * printbuf_dump - write print buffer contents to console
 * @pb: print buffer to dump (must have storage)
 *
 * Emits the contents in chronological order: if the buffer has wrapped
 * (final byte zeroed by tipc_printf()), the region after the cursor
 * holds the oldest data and is written first.
 */

static void printbuf_dump(struct print_buf *pb)
{
	int len;

	/* Dump print buffer from char after cursor to end (if used) */
	len = pb->buf + pb->size - pb->crs - 2;
	if ((pb->buf[pb->size - 1] == 0) && (len > 0))
		print_to_console(pb->crs + 1, len);

	/* Dump print buffer from start to cursor (always) */
	len = pb->crs - pb->buf;
	print_to_console(pb->buf, len);
}
287
288/**
289 * tipc_dump - dump non-console print buffer(s) to console
290 */
291
292void tipc_dump(struct print_buf *pb, const char *fmt, ...)
293{
294 int len;
295
296 spin_lock_bh(&print_lock);
297 FORMAT(CONS->buf, len, fmt);
298 printk(CONS->buf);
299
300 for (; pb; pb = pb->next) {
301 if (pb == CONS)
302 continue;
303 printk("\n---- Start of dump,%s log ----\n\n",
304 (pb == LOG) ? "global" : "local");
305 printbuf_dump(pb);
306 printbuf_reset(pb);
307 printk("\n-------- End of dump --------\n");
308 }
309 spin_unlock_bh(&print_lock);
310}
311
312/**
313 * log_stop - free up TIPC log print buffer
314 */
315
316void log_stop(void)
317{
318 spin_lock_bh(&print_lock);
319 if (LOG->buf) {
320 kfree(LOG->buf);
321 LOG->buf = NULL;
322 }
323 spin_unlock_bh(&print_lock);
324}
325
326/**
327 * log_reinit - set TIPC log print buffer to specified size
328 */
329
330void log_reinit(int log_size)
331{
332 log_stop();
333
334 if (log_size) {
335 if (log_size <= MAX_STRING)
336 log_size = MAX_STRING + 1;
337 spin_lock_bh(&print_lock);
338 printbuf_init(LOG, kmalloc(log_size, GFP_ATOMIC), log_size);
339 spin_unlock_bh(&print_lock);
340 }
341}
342
343/**
344 * log_resize - reconfigure size of TIPC log buffer
345 */
346
347struct sk_buff *log_resize(const void *req_tlv_area, int req_tlv_space)
348{
349 u32 value;
350
351 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
352 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
353
354 value = *(u32 *)TLV_DATA(req_tlv_area);
355 value = ntohl(value);
356 if (value != delimit(value, 0, 32768))
357 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
358 " (log size must be 0-32768)");
359 log_reinit(value);
360 return cfg_reply_none();
361}
362
/**
 * log_dump - capture TIPC log buffer contents in configuration message
 *
 * Returns a configuration reply holding the log contents as an
 * ULTRA_STRING TLV (or an explanatory string if the log is inactive or
 * empty), or NULL if the reply buffer cannot be allocated.  A
 * successful dump empties the log.
 */

struct sk_buff *log_dump(void)
{
	struct sk_buff *reply;

	spin_lock_bh(&print_lock);
	if (!LOG->buf)
		reply = cfg_reply_ultra_string("log not activated\n");
	else if (printbuf_empty(LOG))
		reply = cfg_reply_ultra_string("log is empty\n");
	else {
		struct tlv_desc *rep_tlv;
		struct print_buf pb;
		int str_len;

		/* 32768 matches the maximum size log_resize() allows */
		str_len = min(LOG->size, 32768u);
		reply = cfg_reply_alloc(TLV_SPACE(str_len));
		if (reply) {
			/* Move (and thereby empty) the log into the reply,
			 * then shrink the TLV to the string actually produced
			 */
			rep_tlv = (struct tlv_desc *)reply->data;
			printbuf_init(&pb, TLV_DATA(rep_tlv), str_len);
			printbuf_move(&pb, LOG);
			str_len = strlen(TLV_DATA(rep_tlv)) + 1;
			skb_put(reply, TLV_SPACE(str_len));
			TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
		}
	}
	spin_unlock_bh(&print_lock);
	return reply;
}
395
diff --git a/net/tipc/dbg.h b/net/tipc/dbg.h
new file mode 100644
index 000000000000..c6b2a64c224f
--- /dev/null
+++ b/net/tipc/dbg.h
@@ -0,0 +1,59 @@
1/*
2 * net/tipc/dbg.h: Include file for TIPC print buffer routines
3 *
4 * Copyright (c) 1997-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_DBG_H
38#define _TIPC_DBG_H
39
40struct print_buf {
41 char *buf;
42 u32 size;
43 char *crs;
44 struct print_buf *next;
45};
46
47void printbuf_init(struct print_buf *pb, char *buf, u32 sz);
48void printbuf_reset(struct print_buf *pb);
49int printbuf_empty(struct print_buf *pb);
50int printbuf_validate(struct print_buf *pb);
51void printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from);
52
53void log_reinit(int log_size);
54void log_stop(void);
55
56struct sk_buff *log_resize(const void *req_tlv_area, int req_tlv_space);
57struct sk_buff *log_dump(void);
58
59#endif
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
new file mode 100644
index 000000000000..b106ef1621cc
--- /dev/null
+++ b/net/tipc/discover.c
@@ -0,0 +1,318 @@
1/*
2 * net/tipc/discover.c
3 *
4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "dbg.h"
39#include "link.h"
40#include "zone.h"
41#include "discover.h"
42#include "port.h"
43#include "name_table.h"
44
45#define TIPC_LINK_REQ_INIT 125 /* min delay during bearer start up */
46#define TIPC_LINK_REQ_FAST 2000 /* normal delay if bearer has no links */
47#define TIPC_LINK_REQ_SLOW 600000 /* normal delay if bearer has links */
48
49#if 0
50#define GET_NODE_INFO 300
51#define GET_NODE_INFO_RESULT 301
52#define FORWARD_LINK_PROBE 302
53#define LINK_REQUEST_REJECTED 303
54#define LINK_REQUEST_ACCEPTED 304
55#define DROP_LINK_REQUEST 305
56#define CHECK_LINK_COUNT 306
57#endif
58
59/*
60 * TODO: Most of the inter-cluster setup stuff should be
61 * rewritten, and be made conformant with specification.
62 */
63
64
65/**
66 * struct link_req - information about an ongoing link setup request
67 * @bearer: bearer issuing requests
68 * @dest: destination address for request messages
69 * @buf: request message to be (repeatedly) sent
70 * @timer: timer governing period between requests
71 * @timer_intv: current interval between requests (in ms)
72 */
73struct link_req {
74 struct bearer *bearer;
75 struct tipc_media_addr dest;
76 struct sk_buff *buf;
77 struct timer_list timer;
78 unsigned int timer_intv;
79};
80
81
82#if 0
83int disc_create_link(const struct tipc_link_create *argv)
84{
85 /*
86 * Code for inter cluster link setup here
87 */
88 return TIPC_OK;
89}
90#endif
91
/*
 * disc_link_event - a link to the given node has changed state
 *
 * Only inter-cluster links are of interest here; events for nodes in
 * our own cluster are ignored.  (NOTE: the @name and @up arguments are
 * currently unused -- inter-cluster setup is not yet implemented.)
 */

void disc_link_event(u32 addr, char *name, int up)
{
	if (in_own_cluster(addr))
		return;
	/*
	 * Code for inter cluster link setup here
	 */
}
104
105/**
106 * disc_init_msg - initialize a link setup message
107 * @type: message type (request or response)
108 * @req_links: number of links associated with message
109 * @dest_domain: network domain of node(s) which should respond to message
110 * @b_ptr: ptr to bearer issuing message
111 */
112
113struct sk_buff *disc_init_msg(u32 type,
114 u32 req_links,
115 u32 dest_domain,
116 struct bearer *b_ptr)
117{
118 struct sk_buff *buf = buf_acquire(DSC_H_SIZE);
119 struct tipc_msg *msg;
120
121 if (buf) {
122 msg = buf_msg(buf);
123 msg_init(msg, LINK_CONFIG, type, TIPC_OK, DSC_H_SIZE,
124 dest_domain);
125 msg_set_non_seq(msg);
126 msg_set_req_links(msg, req_links);
127 msg_set_dest_domain(msg, dest_domain);
128 msg_set_bc_netid(msg, tipc_net_id);
129 msg_set_media_addr(msg, &b_ptr->publ.addr);
130 }
131 return buf;
132}
133
/**
 * disc_recv_msg - handle incoming link setup message (request or response)
 * @buf: buffer containing message
 *
 * The message is validated (correct network, valid addresses, within
 * scope), then, for an originator inside our own cluster, a link is
 * created or refreshed; a response is sent back over the same bearer
 * when the message is a request and the link is not yet up.
 */

void disc_recv_msg(struct sk_buff *buf)
{
	struct bearer *b_ptr = (struct bearer *)TIPC_SKB_CB(buf)->handle;
	struct link *link;
	struct tipc_media_addr media_addr;
	struct tipc_msg *msg = buf_msg(buf);
	u32 dest = msg_dest_domain(msg);
	u32 orig = msg_prevnode(msg);
	u32 net_id = msg_bc_netid(msg);
	u32 type = msg_type(msg);

	/* Extract everything needed before releasing the buffer */
	msg_get_media_addr(msg,&media_addr);
	msg_dbg(msg, "RECV:");
	buf_discard(buf);

	/* Ignore anything from the wrong network, malformed, out of
	 * scope, or originating from this node itself
	 */
	if (net_id != tipc_net_id)
		return;
	if (!addr_domain_valid(dest))
		return;
	if (!addr_node_valid(orig))
		return;
	if (orig == tipc_own_addr)
		return;
	if (!in_scope(dest, tipc_own_addr))
		return;
	if (is_slave(tipc_own_addr) && is_slave(orig))
		return;
	if (is_slave(orig) && !in_own_cluster(orig))
		return;
	if (in_own_cluster(orig)) {
		/* Always accept link here */
		struct sk_buff *rbuf;
		struct tipc_media_addr *addr;
		struct node *n_ptr = node_find(orig);
		int link_up;
		dbg(" in own cluster\n");
		if (n_ptr == NULL) {
			n_ptr = node_create(orig);
		}
		if (n_ptr == NULL) {
			warn("Memory squeeze; Failed to create node\n");
			return;
		}
		spin_lock_bh(&n_ptr->lock);
		link = n_ptr->links[b_ptr->identity];
		if (!link) {
			dbg("creating link\n");
			link = link_create(b_ptr, orig, &media_addr);
			if (!link) {
				spin_unlock_bh(&n_ptr->lock);
				return;
			}
		}
		/* If the peer's media address has changed, adopt it and
		 * reset the link so it comes up with the new address
		 */
		addr = &link->media_addr;
		if (memcmp(addr, &media_addr, sizeof(*addr))) {
			char addr_string[16];

			warn("New bearer address for %s\n",
			     addr_string_fill(addr_string, orig));
			memcpy(addr, &media_addr, sizeof(*addr));
			link_reset(link);
		}
		link_up = link_is_up(link);
		spin_unlock_bh(&n_ptr->lock);
		/* Respond only to requests, and only while the link is down */
		if ((type == DSC_RESP_MSG) || link_up)
			return;
		rbuf = disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr);
		if (rbuf != NULL) {
			msg_dbg(buf_msg(rbuf),"SEND:");
			b_ptr->media->send_msg(rbuf, &b_ptr->publ, &media_addr);
			buf_discard(rbuf);
		}
	}
}
213
214/**
215 * disc_stop_link_req - stop sending periodic link setup requests
216 * @req: ptr to link request structure
217 */
218
219void disc_stop_link_req(struct link_req *req)
220{
221 if (!req)
222 return;
223
224 k_cancel_timer(&req->timer);
225 k_term_timer(&req->timer);
226 buf_discard(req->buf);
227 kfree(req);
228}
229
230/**
231 * disc_update_link_req - update frequency of periodic link setup requests
232 * @req: ptr to link request structure
233 */
234
235void disc_update_link_req(struct link_req *req)
236{
237 if (!req)
238 return;
239
240 if (req->timer_intv == TIPC_LINK_REQ_SLOW) {
241 if (!req->bearer->nodes.count) {
242 req->timer_intv = TIPC_LINK_REQ_FAST;
243 k_start_timer(&req->timer, req->timer_intv);
244 }
245 } else if (req->timer_intv == TIPC_LINK_REQ_FAST) {
246 if (req->bearer->nodes.count) {
247 req->timer_intv = TIPC_LINK_REQ_SLOW;
248 k_start_timer(&req->timer, req->timer_intv);
249 }
250 } else {
251 /* leave timer "as is" if haven't yet reached a "normal" rate */
252 }
253}
254
/**
 * disc_timeout - send a periodic link setup request
 * @req: ptr to link request structure
 *
 * Called whenever a link setup request timer associated with a bearer expires.
 */

static void disc_timeout(struct link_req *req)
{
	/* Bearer lock serializes against bearer state changes */
	spin_lock_bh(&req->bearer->publ.lock);

	req->bearer->media->send_msg(req->buf, &req->bearer->publ, &req->dest);

	if ((req->timer_intv == TIPC_LINK_REQ_SLOW) ||
	    (req->timer_intv == TIPC_LINK_REQ_FAST)) {
		/* leave timer interval "as is" if already at a "normal" rate */
	} else {
		/* Still ramping up from TIPC_LINK_REQ_INIT: double the
		 * interval each expiry, capping at the slow rate (and
		 * jumping straight to it once the bearer has nodes)
		 */
		req->timer_intv *= 2;
		if (req->timer_intv > TIPC_LINK_REQ_SLOW)
			req->timer_intv = TIPC_LINK_REQ_SLOW;
		if ((req->timer_intv == TIPC_LINK_REQ_FAST) &&
		    (req->bearer->nodes.count))
			req->timer_intv = TIPC_LINK_REQ_SLOW;
	}
	k_start_timer(&req->timer, req->timer_intv);

	spin_unlock_bh(&req->bearer->publ.lock);
}
283
284/**
285 * disc_init_link_req - start sending periodic link setup requests
286 * @b_ptr: ptr to bearer issuing requests
287 * @dest: destination address for request messages
288 * @dest_domain: network domain of node(s) which should respond to message
289 * @req_links: max number of desired links
290 *
291 * Returns pointer to link request structure, or NULL if unable to create.
292 */
293
294struct link_req *disc_init_link_req(struct bearer *b_ptr,
295 const struct tipc_media_addr *dest,
296 u32 dest_domain,
297 u32 req_links)
298{
299 struct link_req *req;
300
301 req = (struct link_req *)kmalloc(sizeof(*req), GFP_ATOMIC);
302 if (!req)
303 return NULL;
304
305 req->buf = disc_init_msg(DSC_REQ_MSG, req_links, dest_domain, b_ptr);
306 if (!req->buf) {
307 kfree(req);
308 return NULL;
309 }
310
311 memcpy(&req->dest, dest, sizeof(*dest));
312 req->bearer = b_ptr;
313 req->timer_intv = TIPC_LINK_REQ_INIT;
314 k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req);
315 k_start_timer(&req->timer, req->timer_intv);
316 return req;
317}
318
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
new file mode 100644
index 000000000000..2a6114d91626
--- /dev/null
+++ b/net/tipc/discover.h
@@ -0,0 +1,58 @@
1/*
2 * net/tipc/discover.h
3 *
4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_DISCOVER_H
38#define _TIPC_DISCOVER_H
39
40#include <linux/tipc.h>
41
42struct link_req;
43
44struct link_req *disc_init_link_req(struct bearer *b_ptr,
45 const struct tipc_media_addr *dest,
46 u32 dest_domain,
47 u32 req_links);
48void disc_update_link_req(struct link_req *req);
49void disc_stop_link_req(struct link_req *req);
50
51void disc_recv_msg(struct sk_buff *buf);
52
53void disc_link_event(u32 addr, char *name, int up);
54#if 0
55int disc_create_link(const struct tipc_link_create *argv);
56#endif
57
58#endif
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
new file mode 100644
index 000000000000..34d0462db3aa
--- /dev/null
+++ b/net/tipc/eth_media.c
@@ -0,0 +1,299 @@
1/*
2 * net/tipc/eth_media.c: Ethernet bearer support for TIPC
3 *
4 * Copyright (c) 2001-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <net/tipc/tipc.h>
38#include <net/tipc/tipc_bearer.h>
39#include <net/tipc/tipc_msg.h>
40#include <linux/netdevice.h>
41#include <linux/version.h>
42
43#define MAX_ETH_BEARERS 2
44#define TIPC_PROTOCOL 0x88ca
45#define ETH_LINK_PRIORITY 10
46#define ETH_LINK_TOLERANCE TIPC_DEF_LINK_TOL
47
48
49/**
50 * struct eth_bearer - Ethernet bearer data structure
51 * @bearer: ptr to associated "generic" bearer structure
52 * @dev: ptr to associated Ethernet network device
53 * @tipc_packet_type: used in binding TIPC to Ethernet driver
54 */
55
56struct eth_bearer {
57 struct tipc_bearer *bearer;
58 struct net_device *dev;
59 struct packet_type tipc_packet_type;
60};
61
62static struct eth_bearer eth_bearers[MAX_ETH_BEARERS];
63static int eth_started = 0;
64static struct notifier_block notifier;
65
/**
 * send_msg - send a TIPC message out over an Ethernet interface
 * @buf: message buffer (caller keeps ownership; a clone is transmitted)
 * @tb_ptr: generic TIPC bearer to send on
 * @dest: media address holding the Ethernet destination
 *
 * Always returns TIPC_OK: transmission is best-effort, and a failed
 * clone simply drops the message silently.
 */

static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
		    struct tipc_media_addr *dest)
{
	struct sk_buff *clone;
	struct net_device *dev;

	clone = skb_clone(buf, GFP_ATOMIC);
	if (clone) {
		/* Point the network header at the TIPC message, prepend
		 * the Ethernet header, then hand off to the device queue
		 */
		clone->nh.raw = clone->data;
		dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev;
		clone->dev = dev;
		dev->hard_header(clone, dev, TIPC_PROTOCOL,
				 &dest->dev_addr.eth_addr,
				 dev->dev_addr, clone->len);
		dev_queue_xmit(clone);
	}
	return TIPC_OK;
}
88
89/**
90 * recv_msg - handle incoming TIPC message from an Ethernet interface
91 *
92 * Routine truncates any Ethernet padding/CRC appended to the message,
93 * and ensures message size matches actual length
94 */
95
96static int recv_msg(struct sk_buff *buf, struct net_device *dev,
97 struct packet_type *pt, struct net_device *orig_dev)
98{
99 struct eth_bearer *eb_ptr = (struct eth_bearer *)pt->af_packet_priv;
100 u32 size;
101
102 if (likely(eb_ptr->bearer)) {
103 size = msg_size((struct tipc_msg *)buf->data);
104 skb_trim(buf, size);
105 if (likely(buf->len == size)) {
106 buf->next = NULL;
107 tipc_recv_msg(buf, eb_ptr->bearer);
108 } else {
109 kfree_skb(buf);
110 }
111 } else {
112 kfree_skb(buf);
113 }
114 return TIPC_OK;
115}
116
117/**
118 * enable_bearer - attach TIPC bearer to an Ethernet interface
119 */
120
121static int enable_bearer(struct tipc_bearer *tb_ptr)
122{
123 struct net_device *dev = dev_base;
124 struct eth_bearer *eb_ptr = &eth_bearers[0];
125 struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
126 char *driver_name = strchr((const char *)tb_ptr->name, ':') + 1;
127
128 /* Find device with specified name */
129
130 while (dev && dev->name &&
131 (memcmp(dev->name, driver_name, strlen(dev->name)))) {
132 dev = dev->next;
133 }
134 if (!dev)
135 return -ENODEV;
136
137 /* Find Ethernet bearer for device (or create one) */
138
139 for (;(eb_ptr != stop) && eb_ptr->dev && (eb_ptr->dev != dev); eb_ptr++);
140 if (eb_ptr == stop)
141 return -EDQUOT;
142 if (!eb_ptr->dev) {
143 eb_ptr->dev = dev;
144 eb_ptr->tipc_packet_type.type = __constant_htons(TIPC_PROTOCOL);
145 eb_ptr->tipc_packet_type.dev = dev;
146 eb_ptr->tipc_packet_type.func = recv_msg;
147 eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
148 INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
149 dev_hold(dev);
150 dev_add_pack(&eb_ptr->tipc_packet_type);
151 }
152
153 /* Associate TIPC bearer with Ethernet bearer */
154
155 eb_ptr->bearer = tb_ptr;
156 tb_ptr->usr_handle = (void *)eb_ptr;
157 tb_ptr->mtu = dev->mtu;
158 tb_ptr->blocked = 0;
159 tb_ptr->addr.type = htonl(TIPC_MEDIA_TYPE_ETH);
160 memcpy(&tb_ptr->addr.dev_addr, &dev->dev_addr, ETH_ALEN);
161 return 0;
162}
163
164/**
165 * disable_bearer - detach TIPC bearer from an Ethernet interface
166 *
167 * We really should do dev_remove_pack() here, but this function can not be
168 * called at tasklet level. => Use eth_bearer->bearer as a flag to throw away
169 * incoming buffers, & postpone dev_remove_pack() to eth_media_stop() on exit.
170 */
171
172static void disable_bearer(struct tipc_bearer *tb_ptr)
173{
174 ((struct eth_bearer *)tb_ptr->usr_handle)->bearer = 0;
175}
176
/**
 * recv_notification - handle device updates from OS
 * @nb: registered notifier block
 * @evt: netdevice event code
 * @dv: the net_device the event concerns
 *
 * Change the state of the Ethernet bearer (if any) associated with the
 * specified device.
 */

static int recv_notification(struct notifier_block *nb, unsigned long evt,
			     void *dv)
{
	struct net_device *dev = (struct net_device *)dv;
	struct eth_bearer *eb_ptr = &eth_bearers[0];
	struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];

	while ((eb_ptr->dev != dev)) {
		if (++eb_ptr == stop)
			return NOTIFY_DONE;	/* couldn't find device */
	}
	if (!eb_ptr->bearer)
		return NOTIFY_DONE;		/* bearer had been disabled */

	/* Keep bearer MTU in sync with the device on every event */
	eb_ptr->bearer->mtu = dev->mtu;

	switch (evt) {
	case NETDEV_CHANGE:
		/* Carrier transitions block/unblock the bearer */
		if (netif_carrier_ok(dev))
			tipc_continue(eb_ptr->bearer);
		else
			tipc_block_bearer(eb_ptr->bearer->name);
		break;
	case NETDEV_UP:
		tipc_continue(eb_ptr->bearer);
		break;
	case NETDEV_DOWN:
		tipc_block_bearer(eb_ptr->bearer->name);
		break;
	case NETDEV_CHANGEMTU:
	case NETDEV_CHANGEADDR:
		/* Bounce the bearer so links restart with new parameters */
		tipc_block_bearer(eb_ptr->bearer->name);
		tipc_continue(eb_ptr->bearer);
		break;
	case NETDEV_UNREGISTER:
	case NETDEV_CHANGENAME:
		/* Device going away (or renamed): drop the bearer entirely */
		tipc_disable_bearer(eb_ptr->bearer->name);
		break;
	}
	return NOTIFY_OK;
}
225
226/**
227 * eth_addr2str - convert Ethernet address to string
228 */
229
230static char *eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
231{
232 unchar *addr = (unchar *)&a->dev_addr;
233
234 if (str_size < 18)
235 *str_buf = '\0';
236 else
237 sprintf(str_buf, "%02x:%02x:%02x:%02x:%02x:%02x",
238 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
239 return str_buf;
240}
241
242/**
243 * eth_media_start - activate Ethernet bearer support
244 *
245 * Register Ethernet media type with TIPC bearer code. Also register
246 * with OS for notifications about device state changes.
247 */
248
249int eth_media_start(void)
250{
251 struct tipc_media_addr bcast_addr;
252 int res;
253
254 if (eth_started)
255 return -EINVAL;
256
257 memset(&bcast_addr, 0xff, sizeof(bcast_addr));
258 memset(eth_bearers, 0, sizeof(eth_bearers));
259
260 res = tipc_register_media(TIPC_MEDIA_TYPE_ETH, "eth",
261 enable_bearer, disable_bearer, send_msg,
262 eth_addr2str, &bcast_addr, ETH_LINK_PRIORITY,
263 ETH_LINK_TOLERANCE, TIPC_DEF_LINK_WIN);
264 if (res)
265 return res;
266
267 notifier.notifier_call = &recv_notification;
268 notifier.priority = 0;
269 res = register_netdevice_notifier(&notifier);
270 if (!res)
271 eth_started = 1;
272 return res;
273}
274
275/**
276 * eth_media_stop - deactivate Ethernet bearer support
277 */
278
279void eth_media_stop(void)
280{
281 int i;
282
283 if (!eth_started)
284 return;
285
286 unregister_netdevice_notifier(&notifier);
287 for (i = 0; i < MAX_ETH_BEARERS ; i++) {
288 if (eth_bearers[i].bearer) {
289 eth_bearers[i].bearer->blocked = 1;
290 eth_bearers[i].bearer = 0;
291 }
292 if (eth_bearers[i].dev) {
293 dev_remove_pack(&eth_bearers[i].tipc_packet_type);
294 dev_put(eth_bearers[i].dev);
295 }
296 }
297 memset(&eth_bearers, 0, sizeof(eth_bearers));
298 eth_started = 0;
299}
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
new file mode 100644
index 000000000000..f320010f8a65
--- /dev/null
+++ b/net/tipc/handler.c
@@ -0,0 +1,132 @@
1/*
2 * net/tipc/handler.c: TIPC signal handling
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38
/**
 * struct queue_item - one deferred signal request
 * @next_signal: linkage within signal_queue_head
 * @handler: routine to run from tasklet context
 * @data: argument passed to @handler
 */
struct queue_item {
	struct list_head next_signal;
	void (*handler) (unsigned long);
	unsigned long data;
};
44
/* slab cache for struct queue_item allocations */
static kmem_cache_t *tipc_queue_item_cache;
/* pending signal requests; protected by qitem_lock */
static struct list_head signal_queue_head;
static spinlock_t qitem_lock = SPIN_LOCK_UNLOCKED;
/* set by handler_start(), cleared by handler_stop() */
static int handler_enabled = 0;

static void process_signal_queue(unsigned long dummy);

/* created disabled; handler_start() enables it */
static DECLARE_TASKLET_DISABLED(tipc_tasklet, process_signal_queue, 0);
53
54
55unsigned int k_signal(Handler routine, unsigned long argument)
56{
57 struct queue_item *item;
58
59 if (!handler_enabled) {
60 err("Signal request ignored by handler\n");
61 return -ENOPROTOOPT;
62 }
63
64 spin_lock_bh(&qitem_lock);
65 item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC);
66 if (!item) {
67 err("Signal queue out of memory\n");
68 spin_unlock_bh(&qitem_lock);
69 return -ENOMEM;
70 }
71 item->handler = routine;
72 item->data = argument;
73 list_add_tail(&item->next_signal, &signal_queue_head);
74 spin_unlock_bh(&qitem_lock);
75 tasklet_schedule(&tipc_tasklet);
76 return 0;
77}
78
79static void process_signal_queue(unsigned long dummy)
80{
81 struct queue_item *__volatile__ item;
82 struct list_head *l, *n;
83
84 spin_lock_bh(&qitem_lock);
85 list_for_each_safe(l, n, &signal_queue_head) {
86 item = list_entry(l, struct queue_item, next_signal);
87 list_del(&item->next_signal);
88 spin_unlock_bh(&qitem_lock);
89 item->handler(item->data);
90 spin_lock_bh(&qitem_lock);
91 kmem_cache_free(tipc_queue_item_cache, item);
92 }
93 spin_unlock_bh(&qitem_lock);
94}
95
96int handler_start(void)
97{
98 tipc_queue_item_cache =
99 kmem_cache_create("tipc_queue_items", sizeof(struct queue_item),
100 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
101 if (!tipc_queue_item_cache)
102 return -ENOMEM;
103
104 INIT_LIST_HEAD(&signal_queue_head);
105 tasklet_enable(&tipc_tasklet);
106 handler_enabled = 1;
107 return 0;
108}
109
110void handler_stop(void)
111{
112 struct list_head *l, *n;
113 struct queue_item *item;
114
115 if (!handler_enabled)
116 return;
117
118 handler_enabled = 0;
119 tasklet_disable(&tipc_tasklet);
120 tasklet_kill(&tipc_tasklet);
121
122 spin_lock_bh(&qitem_lock);
123 list_for_each_safe(l, n, &signal_queue_head) {
124 item = list_entry(l, struct queue_item, next_signal);
125 list_del(&item->next_signal);
126 kmem_cache_free(tipc_queue_item_cache, item);
127 }
128 spin_unlock_bh(&qitem_lock);
129
130 kmem_cache_destroy(tipc_queue_item_cache);
131}
132
diff --git a/net/tipc/link.c b/net/tipc/link.c
new file mode 100644
index 000000000000..7265f4be4766
--- /dev/null
+++ b/net/tipc/link.c
@@ -0,0 +1,3167 @@
1/*
2 * net/tipc/link.c: TIPC link code
3 *
4 * Copyright (c) 1996-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "dbg.h"
39#include "link.h"
40#include "net.h"
41#include "node.h"
42#include "port.h"
43#include "addr.h"
44#include "node_subscr.h"
45#include "name_distr.h"
46#include "bearer.h"
47#include "name_table.h"
48#include "discover.h"
49#include "config.h"
50#include "bcast.h"
51
52
53/*
54 * Limit for deferred reception queue:
55 */
56
57#define DEF_QUEUE_LIMIT 256u
58
59/*
60 * Link state events:
61 */
62
63#define STARTING_EVT 856384768 /* link processing trigger */
64#define TRAFFIC_MSG_EVT 560815u /* rx'd ??? */
65#define TIMEOUT_EVT 560817u /* link timer expired */
66
67/*
68 * The following two 'message types' is really just implementation
69 * data conveniently stored in the message header.
70 * They must not be considered part of the protocol
71 */
72#define OPEN_MSG 0
73#define CLOSED_MSG 1
74
75/*
76 * State value stored in 'exp_msg_count'
77 */
78
79#define START_CHANGEOVER 100000u
80
81/**
82 * struct link_name - deconstructed link name
83 * @addr_local: network address of node at this end
84 * @if_local: name of interface at this end
85 * @addr_peer: network address of node at far end
86 * @if_peer: name of interface at far end
87 */
88
89struct link_name {
90 u32 addr_local;
91 char if_local[TIPC_MAX_IF_NAME];
92 u32 addr_peer;
93 char if_peer[TIPC_MAX_IF_NAME];
94};
95
96#if 0
97
98/* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
99
100/**
101 * struct link_event - link up/down event notification
102 */
103
104struct link_event {
105 u32 addr;
106 int up;
107 void (*fcn)(u32, char *, int);
108 char name[TIPC_MAX_LINK_NAME];
109};
110
111#endif
112
113static void link_handle_out_of_seq_msg(struct link *l_ptr,
114 struct sk_buff *buf);
115static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf);
116static int link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf);
117static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
118static int link_send_sections_long(struct port *sender,
119 struct iovec const *msg_sect,
120 u32 num_sect, u32 destnode);
121static void link_check_defragm_bufs(struct link *l_ptr);
122static void link_state_event(struct link *l_ptr, u32 event);
123static void link_reset_statistics(struct link *l_ptr);
124static void link_print(struct link *l_ptr, struct print_buf *buf,
125 const char *str);
126
127/*
128 * Debugging code used by link routines only
129 *
130 * When debugging link problems on a system that has multiple links,
131 * the standard TIPC debugging routines may not be useful since they
132 * allow the output from multiple links to be intermixed. For this reason
133 * routines of the form "dbg_link_XXX()" have been created that will capture
134 * debug info into a link's personal print buffer, which can then be dumped
135 * into the TIPC system log (LOG) upon request.
136 *
137 * To enable per-link debugging, use LINK_LOG_BUF_SIZE to specify the size
138 * of the print buffer used by each link. If LINK_LOG_BUF_SIZE is set to 0,
139 * the dbg_link_XXX() routines simply send their output to the standard
140 * debug print buffer (DBG_OUTPUT), if it has been defined; this can be useful
141 * when there is only a single link in the system being debugged.
142 *
143 * Notes:
144 * - When enabled, LINK_LOG_BUF_SIZE should be set to at least 1000 (bytes)
145 * - "l_ptr" must be valid when using dbg_link_XXX() macros
146 */
147
148#define LINK_LOG_BUF_SIZE 0
149
150#define dbg_link(fmt, arg...) do {if (LINK_LOG_BUF_SIZE) tipc_printf(&l_ptr->print_buf, fmt, ## arg); } while(0)
151#define dbg_link_msg(msg, txt) do {if (LINK_LOG_BUF_SIZE) msg_print(&l_ptr->print_buf, msg, txt); } while(0)
152#define dbg_link_state(txt) do {if (LINK_LOG_BUF_SIZE) link_print(l_ptr, &l_ptr->print_buf, txt); } while(0)
153#define dbg_link_dump() do { \
154 if (LINK_LOG_BUF_SIZE) { \
155 tipc_printf(LOG, "\n\nDumping link <%s>:\n", l_ptr->name); \
156 printbuf_move(LOG, &l_ptr->print_buf); \
157 } \
158} while (0)
159
160static inline void dbg_print_link(struct link *l_ptr, const char *str)
161{
162 if (DBG_OUTPUT)
163 link_print(l_ptr, DBG_OUTPUT, str);
164}
165
166static inline void dbg_print_buf_chain(struct sk_buff *root_buf)
167{
168 if (DBG_OUTPUT) {
169 struct sk_buff *buf = root_buf;
170
171 while (buf) {
172 msg_dbg(buf_msg(buf), "In chain: ");
173 buf = buf->next;
174 }
175 }
176}
177
178/*
179 * Simple inlined link routines
180 */
181
/*
 * align - round 'i' up to the next multiple of 4
 * (unsigned wrap-around matches the original mask-based computation)
 */
static inline unsigned int align(unsigned int i)
{
	unsigned int rem = i % 4u;

	return rem ? i + (4u - rem) : i;
}
186
/* link_working_working - true if link FSM is in WORKING_WORKING state */
static inline int link_working_working(struct link *l_ptr)
{
	return (l_ptr->state == WORKING_WORKING);
}

/* link_working_unknown - true if link FSM is in WORKING_UNKNOWN state */
static inline int link_working_unknown(struct link *l_ptr)
{
	return (l_ptr->state == WORKING_UNKNOWN);
}

/* link_reset_unknown - true if link FSM is in RESET_UNKNOWN state */
static inline int link_reset_unknown(struct link *l_ptr)
{
	return (l_ptr->state == RESET_UNKNOWN);
}

/* link_reset_reset - true if link FSM is in RESET_RESET state */
static inline int link_reset_reset(struct link *l_ptr)
{
	return (l_ptr->state == RESET_RESET);
}

/* link_blocked - true if link can't carry traffic (changeover in progress
 * or explicitly blocked) */
static inline int link_blocked(struct link *l_ptr)
{
	return (l_ptr->exp_msg_count || l_ptr->blocked);
}

/* link_congested - true if outbound queue has reached its limit */
static inline int link_congested(struct link *l_ptr)
{
	return (l_ptr->out_queue_size >= l_ptr->queue_limit[0]);
}

/* link_max_pkt - current maximum packet size usable on this link */
static inline u32 link_max_pkt(struct link *l_ptr)
{
	return l_ptr->max_pkt;
}
221
/**
 * link_init_max_pkt - set initial max packet size values for link
 * @l_ptr: pointer to link
 *
 * Derives the negotiation target from the bearer MTU (rounded down to a
 * multiple of 4 and capped at MAX_MSG_SIZE); actual usage starts at no
 * more than MAX_PKT_DEFAULT until probing raises it toward the target.
 */
static inline void link_init_max_pkt(struct link *l_ptr)
{
	u32 max_pkt;

	max_pkt = (l_ptr->b_ptr->publ.mtu & ~3);
	if (max_pkt > MAX_MSG_SIZE)
		max_pkt = MAX_MSG_SIZE;

	l_ptr->max_pkt_target = max_pkt;
	if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
		l_ptr->max_pkt = l_ptr->max_pkt_target;
	else
		l_ptr->max_pkt = MAX_PKT_DEFAULT;

	l_ptr->max_pkt_probes = 0;
}
238
/* link_next_sent - sequence number of next packet to go out on the wire */
static inline u32 link_next_sent(struct link *l_ptr)
{
	if (l_ptr->next_out)
		return msg_seqno(buf_msg(l_ptr->next_out));
	return mod(l_ptr->next_out_no);
}

/* link_last_sent - sequence number of most recently sent packet */
static inline u32 link_last_sent(struct link *l_ptr)
{
	return mod(link_next_sent(l_ptr) - 1);
}
250
251/*
252 * Simple non-inlined link routines (i.e. referenced outside this file)
253 */
254
/* link_is_up - true if link is in one of the two working states */
int link_is_up(struct link *l_ptr)
{
	return l_ptr && (link_working_working(l_ptr) ||
			 link_working_unknown(l_ptr));
}
261
262int link_is_active(struct link *l_ptr)
263{
264 return ((l_ptr->owner->active_links[0] == l_ptr) ||
265 (l_ptr->owner->active_links[1] == l_ptr));
266}
267
268/**
269 * link_name_validate - validate & (optionally) deconstruct link name
270 * @name - ptr to link name string
271 * @name_parts - ptr to area for link name components (or NULL if not needed)
272 *
273 * Returns 1 if link name is valid, otherwise 0.
274 */
275
276static int link_name_validate(const char *name, struct link_name *name_parts)
277{
278 char name_copy[TIPC_MAX_LINK_NAME];
279 char *addr_local;
280 char *if_local;
281 char *addr_peer;
282 char *if_peer;
283 char dummy;
284 u32 z_local, c_local, n_local;
285 u32 z_peer, c_peer, n_peer;
286 u32 if_local_len;
287 u32 if_peer_len;
288
289 /* copy link name & ensure length is OK */
290
291 name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
292 /* need above in case non-Posix strncpy() doesn't pad with nulls */
293 strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
294 if (name_copy[TIPC_MAX_LINK_NAME - 1] != 0)
295 return 0;
296
297 /* ensure all component parts of link name are present */
298
299 addr_local = name_copy;
300 if ((if_local = strchr(addr_local, ':')) == NULL)
301 return 0;
302 *(if_local++) = 0;
303 if ((addr_peer = strchr(if_local, '-')) == NULL)
304 return 0;
305 *(addr_peer++) = 0;
306 if_local_len = addr_peer - if_local;
307 if ((if_peer = strchr(addr_peer, ':')) == NULL)
308 return 0;
309 *(if_peer++) = 0;
310 if_peer_len = strlen(if_peer) + 1;
311
312 /* validate component parts of link name */
313
314 if ((sscanf(addr_local, "%u.%u.%u%c",
315 &z_local, &c_local, &n_local, &dummy) != 3) ||
316 (sscanf(addr_peer, "%u.%u.%u%c",
317 &z_peer, &c_peer, &n_peer, &dummy) != 3) ||
318 (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
319 (z_peer > 255) || (c_peer > 4095) || (n_peer > 4095) ||
320 (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) ||
321 (if_peer_len <= 1) || (if_peer_len > TIPC_MAX_IF_NAME) ||
322 (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) ||
323 (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1)))
324 return 0;
325
326 /* return link name components, if necessary */
327
328 if (name_parts) {
329 name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
330 strcpy(name_parts->if_local, if_local);
331 name_parts->addr_peer = tipc_addr(z_peer, c_peer, n_peer);
332 strcpy(name_parts->if_peer, if_peer);
333 }
334 return 1;
335}
336
337/**
338 * link_timeout - handle expiration of link timer
339 * @l_ptr: pointer to link
340 *
341 * This routine must not grab "net_lock" to avoid a potential deadlock conflict
342 * with link_delete(). (There is no risk that the node will be deleted by
343 * another thread because link_delete() always cancels the link timer before
344 * node_delete() is called.)
345 */
346
347static void link_timeout(struct link *l_ptr)
348{
349 node_lock(l_ptr->owner);
350
351 /* update counters used in statistical profiling of send traffic */
352
353 l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
354 l_ptr->stats.queue_sz_counts++;
355
356 if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
357 l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
358
359 if (l_ptr->first_out) {
360 struct tipc_msg *msg = buf_msg(l_ptr->first_out);
361 u32 length = msg_size(msg);
362
363 if ((msg_user(msg) == MSG_FRAGMENTER)
364 && (msg_type(msg) == FIRST_FRAGMENT)) {
365 length = msg_size(msg_get_wrapped(msg));
366 }
367 if (length) {
368 l_ptr->stats.msg_lengths_total += length;
369 l_ptr->stats.msg_length_counts++;
370 if (length <= 64)
371 l_ptr->stats.msg_length_profile[0]++;
372 else if (length <= 256)
373 l_ptr->stats.msg_length_profile[1]++;
374 else if (length <= 1024)
375 l_ptr->stats.msg_length_profile[2]++;
376 else if (length <= 4096)
377 l_ptr->stats.msg_length_profile[3]++;
378 else if (length <= 16384)
379 l_ptr->stats.msg_length_profile[4]++;
380 else if (length <= 32768)
381 l_ptr->stats.msg_length_profile[5]++;
382 else
383 l_ptr->stats.msg_length_profile[6]++;
384 }
385 }
386
387 /* do all other link processing performed on a periodic basis */
388
389 link_check_defragm_bufs(l_ptr);
390
391 link_state_event(l_ptr, TIMEOUT_EVT);
392
393 if (l_ptr->next_out)
394 link_push_queue(l_ptr);
395
396 node_unlock(l_ptr->owner);
397}
398
/* link_set_timer - (re)arm the link's supervision timer for the given interval */
static inline void link_set_timer(struct link *l_ptr, u32 time)
{
	k_start_timer(&l_ptr->timer, time);
}
403
404/**
405 * link_create - create a new link
406 * @b_ptr: pointer to associated bearer
407 * @peer: network address of node at other end of link
408 * @media_addr: media address to use when sending messages over link
409 *
410 * Returns pointer to link.
411 */
412
413struct link *link_create(struct bearer *b_ptr, const u32 peer,
414 const struct tipc_media_addr *media_addr)
415{
416 struct link *l_ptr;
417 struct tipc_msg *msg;
418 char *if_name;
419
420 l_ptr = (struct link *)kmalloc(sizeof(*l_ptr), GFP_ATOMIC);
421 if (!l_ptr) {
422 warn("Memory squeeze; Failed to create link\n");
423 return NULL;
424 }
425 memset(l_ptr, 0, sizeof(*l_ptr));
426
427 l_ptr->addr = peer;
428 if_name = strchr(b_ptr->publ.name, ':') + 1;
429 sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:",
430 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
431 tipc_node(tipc_own_addr),
432 if_name,
433 tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
434 /* note: peer i/f is appended to link name by reset/activate */
435 memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
436 k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
437 list_add_tail(&l_ptr->link_list, &b_ptr->links);
438 l_ptr->checkpoint = 1;
439 l_ptr->b_ptr = b_ptr;
440 link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
441 l_ptr->state = RESET_UNKNOWN;
442
443 l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
444 msg = l_ptr->pmsg;
445 msg_init(msg, LINK_PROTOCOL, RESET_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
446 msg_set_size(msg, sizeof(l_ptr->proto_msg));
447 msg_set_session(msg, tipc_random);
448 msg_set_bearer_id(msg, b_ptr->identity);
449 strcpy((char *)msg_data(msg), if_name);
450
451 l_ptr->priority = b_ptr->priority;
452 link_set_queue_limits(l_ptr, b_ptr->media->window);
453
454 link_init_max_pkt(l_ptr);
455
456 l_ptr->next_out_no = 1;
457 INIT_LIST_HEAD(&l_ptr->waiting_ports);
458
459 link_reset_statistics(l_ptr);
460
461 l_ptr->owner = node_attach_link(l_ptr);
462 if (!l_ptr->owner) {
463 kfree(l_ptr);
464 return NULL;
465 }
466
467 if (LINK_LOG_BUF_SIZE) {
468 char *pb = kmalloc(LINK_LOG_BUF_SIZE, GFP_ATOMIC);
469
470 if (!pb) {
471 kfree(l_ptr);
472 warn("Memory squeeze; Failed to create link\n");
473 return NULL;
474 }
475 printbuf_init(&l_ptr->print_buf, pb, LINK_LOG_BUF_SIZE);
476 }
477
478 k_signal((Handler)link_start, (unsigned long)l_ptr);
479
480 dbg("link_create(): tolerance = %u,cont intv = %u, abort_limit = %u\n",
481 l_ptr->tolerance, l_ptr->continuity_interval, l_ptr->abort_limit);
482
483 return l_ptr;
484}
485
486/**
487 * link_delete - delete a link
488 * @l_ptr: pointer to link
489 *
490 * Note: 'net_lock' is write_locked, bearer is locked.
491 * This routine must not grab the node lock until after link timer cancellation
492 * to avoid a potential deadlock situation.
493 */
494
495void link_delete(struct link *l_ptr)
496{
497 if (!l_ptr) {
498 err("Attempt to delete non-existent link\n");
499 return;
500 }
501
502 dbg("link_delete()\n");
503
504 k_cancel_timer(&l_ptr->timer);
505
506 node_lock(l_ptr->owner);
507 link_reset(l_ptr);
508 node_detach_link(l_ptr->owner, l_ptr);
509 link_stop(l_ptr);
510 list_del_init(&l_ptr->link_list);
511 if (LINK_LOG_BUF_SIZE)
512 kfree(l_ptr->print_buf.buf);
513 node_unlock(l_ptr->owner);
514 k_term_timer(&l_ptr->timer);
515 kfree(l_ptr);
516}
517
518void link_start(struct link *l_ptr)
519{
520 dbg("link_start %x\n", l_ptr);
521 link_state_event(l_ptr, STARTING_EVT);
522}
523
524/**
525 * link_schedule_port - schedule port for deferred sending
526 * @l_ptr: pointer to link
527 * @origport: reference to sending port
528 * @sz: amount of data to be sent
529 *
530 * Schedules port for renewed sending of messages after link congestion
531 * has abated.
532 */
533
534static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
535{
536 struct port *p_ptr;
537
538 spin_lock_bh(&port_list_lock);
539 p_ptr = port_lock(origport);
540 if (p_ptr) {
541 if (!p_ptr->wakeup)
542 goto exit;
543 if (!list_empty(&p_ptr->wait_list))
544 goto exit;
545 p_ptr->congested_link = l_ptr;
546 p_ptr->publ.congested = 1;
547 p_ptr->waiting_pkts = 1 + ((sz - 1) / link_max_pkt(l_ptr));
548 list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
549 l_ptr->stats.link_congs++;
550exit:
551 port_unlock(p_ptr);
552 }
553 spin_unlock_bh(&port_list_lock);
554 return -ELINKCONG;
555}
556
557void link_wakeup_ports(struct link *l_ptr, int all)
558{
559 struct port *p_ptr;
560 struct port *temp_p_ptr;
561 int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
562
563 if (all)
564 win = 100000;
565 if (win <= 0)
566 return;
567 if (!spin_trylock_bh(&port_list_lock))
568 return;
569 if (link_congested(l_ptr))
570 goto exit;
571 list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
572 wait_list) {
573 if (win <= 0)
574 break;
575 list_del_init(&p_ptr->wait_list);
576 p_ptr->congested_link = 0;
577 assert(p_ptr->wakeup);
578 spin_lock_bh(p_ptr->publ.lock);
579 p_ptr->publ.congested = 0;
580 p_ptr->wakeup(&p_ptr->publ);
581 win -= p_ptr->waiting_pkts;
582 spin_unlock_bh(p_ptr->publ.lock);
583 }
584
585exit:
586 spin_unlock_bh(&port_list_lock);
587}
588
589/**
590 * link_release_outqueue - purge link's outbound message queue
591 * @l_ptr: pointer to link
592 */
593
594static void link_release_outqueue(struct link *l_ptr)
595{
596 struct sk_buff *buf = l_ptr->first_out;
597 struct sk_buff *next;
598
599 while (buf) {
600 next = buf->next;
601 buf_discard(buf);
602 buf = next;
603 }
604 l_ptr->first_out = NULL;
605 l_ptr->out_queue_size = 0;
606}
607
608/**
609 * link_reset_fragments - purge link's inbound message fragments queue
610 * @l_ptr: pointer to link
611 */
612
613void link_reset_fragments(struct link *l_ptr)
614{
615 struct sk_buff *buf = l_ptr->defragm_buf;
616 struct sk_buff *next;
617
618 while (buf) {
619 next = buf->next;
620 buf_discard(buf);
621 buf = next;
622 }
623 l_ptr->defragm_buf = NULL;
624}
625
626/**
627 * link_stop - purge all inbound and outbound messages associated with link
628 * @l_ptr: pointer to link
629 */
630
631void link_stop(struct link *l_ptr)
632{
633 struct sk_buff *buf;
634 struct sk_buff *next;
635
636 buf = l_ptr->oldest_deferred_in;
637 while (buf) {
638 next = buf->next;
639 buf_discard(buf);
640 buf = next;
641 }
642
643 buf = l_ptr->first_out;
644 while (buf) {
645 next = buf->next;
646 buf_discard(buf);
647 buf = next;
648 }
649
650 link_reset_fragments(l_ptr);
651
652 buf_discard(l_ptr->proto_msg_queue);
653 l_ptr->proto_msg_queue = NULL;
654}
655
656#if 0
657
658/* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
659
660static void link_recv_event(struct link_event *ev)
661{
662 ev->fcn(ev->addr, ev->name, ev->up);
663 kfree(ev);
664}
665
666static void link_send_event(void (*fcn)(u32 a, char *n, int up),
667 struct link *l_ptr, int up)
668{
669 struct link_event *ev;
670
671 ev = kmalloc(sizeof(*ev), GFP_ATOMIC);
672 if (!ev) {
673 warn("Link event allocation failure\n");
674 return;
675 }
676 ev->addr = l_ptr->addr;
677 ev->up = up;
678 ev->fcn = fcn;
679 memcpy(ev->name, l_ptr->name, TIPC_MAX_LINK_NAME);
680 k_signal((Handler)link_recv_event, (unsigned long)ev);
681}
682
683#else
684
685#define link_send_event(fcn, l_ptr, up) do { } while (0)
686
687#endif
688
/**
 * link_reset - take link down and return it to RESET_UNKNOWN state
 * @l_ptr: pointer to link
 *
 * Bumps the protocol session number, and -- if the link was previously
 * in a working state -- notifies node/bearer, arranges changeover if a
 * parallel link is still up, and discards all queued traffic.
 */
void link_reset(struct link *l_ptr)
{
	struct sk_buff *buf;
	u32 prev_state = l_ptr->state;
	u32 checkpoint = l_ptr->next_in_no;

	msg_set_session(l_ptr->pmsg, msg_session(l_ptr->pmsg) + 1);

	/* Link is down, accept any session: */
	l_ptr->peer_session = 0;

	/* Prepare for max packet size negotiation */
	link_init_max_pkt(l_ptr);

	l_ptr->state = RESET_UNKNOWN;
	dbg_link_state("Resetting Link\n");

	/* already down: nothing further to tear down */
	if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
		return;

	node_link_down(l_ptr->owner, l_ptr);
	bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
#if 0
	tipc_printf(CONS, "\nReset link <%s>\n", l_ptr->name);
	dbg_link_dump();
#endif
	if (node_has_active_links(l_ptr->owner) &&
	    l_ptr->owner->permit_changeover) {
		/* a parallel link can take over; remember where reception stopped */
		l_ptr->reset_checkpoint = checkpoint;
		l_ptr->exp_msg_count = START_CHANGEOVER;
	}

	/* Clean up all queues: */

	link_release_outqueue(l_ptr);
	buf_discard(l_ptr->proto_msg_queue);
	l_ptr->proto_msg_queue = NULL;
	buf = l_ptr->oldest_deferred_in;
	while (buf) {
		struct sk_buff *next = buf->next;
		buf_discard(buf);
		buf = next;
	}
	if (!list_empty(&l_ptr->waiting_ports))
		link_wakeup_ports(l_ptr, 1);

	l_ptr->retransm_queue_head = 0;
	l_ptr->retransm_queue_size = 0;
	l_ptr->last_out = NULL;
	l_ptr->first_out = NULL;
	l_ptr->next_out = NULL;
	l_ptr->unacked_window = 0;
	l_ptr->checkpoint = 1;
	l_ptr->next_out_no = 1;
	l_ptr->deferred_inqueue_sz = 0;
	l_ptr->oldest_deferred_in = NULL;
	l_ptr->newest_deferred_in = NULL;
	l_ptr->fsm_msg_cnt = 0;
	l_ptr->stale_count = 0;
	link_reset_statistics(l_ptr);

	link_send_event(cfg_link_event, l_ptr, 0);
	if (!in_own_cluster(l_ptr->addr))
		link_send_event(disc_link_event, l_ptr, 0);
}
754
755
/* link_activate - bring link into service once the FSM reaches a working state */
static void link_activate(struct link *l_ptr)
{
	l_ptr->next_in_no = 1;
	node_link_up(l_ptr->owner, l_ptr);
	bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
	link_send_event(cfg_link_event, l_ptr, 1);
	if (!in_own_cluster(l_ptr->addr))
		link_send_event(disc_link_event, l_ptr, 1);
}
765
766/**
767 * link_state_event - link finite state machine
768 * @l_ptr: pointer to link
769 * @event: state machine event to process
770 */
771
772static void link_state_event(struct link *l_ptr, unsigned event)
773{
774 struct link *other;
775 u32 cont_intv = l_ptr->continuity_interval;
776
777 if (!l_ptr->started && (event != STARTING_EVT))
778 return; /* Not yet. */
779
780 if (link_blocked(l_ptr)) {
781 if (event == TIMEOUT_EVT) {
782 link_set_timer(l_ptr, cont_intv);
783 }
784 return; /* Changeover going on */
785 }
786 dbg_link("STATE_EV: <%s> ", l_ptr->name);
787
788 switch (l_ptr->state) {
789 case WORKING_WORKING:
790 dbg_link("WW/");
791 switch (event) {
792 case TRAFFIC_MSG_EVT:
793 dbg_link("TRF-");
794 /* fall through */
795 case ACTIVATE_MSG:
796 dbg_link("ACT\n");
797 break;
798 case TIMEOUT_EVT:
799 dbg_link("TIM ");
800 if (l_ptr->next_in_no != l_ptr->checkpoint) {
801 l_ptr->checkpoint = l_ptr->next_in_no;
802 if (bclink_acks_missing(l_ptr->owner)) {
803 link_send_proto_msg(l_ptr, STATE_MSG,
804 0, 0, 0, 0, 0);
805 l_ptr->fsm_msg_cnt++;
806 } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
807 link_send_proto_msg(l_ptr, STATE_MSG,
808 1, 0, 0, 0, 0);
809 l_ptr->fsm_msg_cnt++;
810 }
811 link_set_timer(l_ptr, cont_intv);
812 break;
813 }
814 dbg_link(" -> WU\n");
815 l_ptr->state = WORKING_UNKNOWN;
816 l_ptr->fsm_msg_cnt = 0;
817 link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
818 l_ptr->fsm_msg_cnt++;
819 link_set_timer(l_ptr, cont_intv / 4);
820 break;
821 case RESET_MSG:
822 dbg_link("RES -> RR\n");
823 link_reset(l_ptr);
824 l_ptr->state = RESET_RESET;
825 l_ptr->fsm_msg_cnt = 0;
826 link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
827 l_ptr->fsm_msg_cnt++;
828 link_set_timer(l_ptr, cont_intv);
829 break;
830 default:
831 err("Unknown link event %u in WW state\n", event);
832 }
833 break;
834 case WORKING_UNKNOWN:
835 dbg_link("WU/");
836 switch (event) {
837 case TRAFFIC_MSG_EVT:
838 dbg_link("TRF-");
839 case ACTIVATE_MSG:
840 dbg_link("ACT -> WW\n");
841 l_ptr->state = WORKING_WORKING;
842 l_ptr->fsm_msg_cnt = 0;
843 link_set_timer(l_ptr, cont_intv);
844 break;
845 case RESET_MSG:
846 dbg_link("RES -> RR\n");
847 link_reset(l_ptr);
848 l_ptr->state = RESET_RESET;
849 l_ptr->fsm_msg_cnt = 0;
850 link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
851 l_ptr->fsm_msg_cnt++;
852 link_set_timer(l_ptr, cont_intv);
853 break;
854 case TIMEOUT_EVT:
855 dbg_link("TIM ");
856 if (l_ptr->next_in_no != l_ptr->checkpoint) {
857 dbg_link("-> WW \n");
858 l_ptr->state = WORKING_WORKING;
859 l_ptr->fsm_msg_cnt = 0;
860 l_ptr->checkpoint = l_ptr->next_in_no;
861 if (bclink_acks_missing(l_ptr->owner)) {
862 link_send_proto_msg(l_ptr, STATE_MSG,
863 0, 0, 0, 0, 0);
864 l_ptr->fsm_msg_cnt++;
865 }
866 link_set_timer(l_ptr, cont_intv);
867 } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
868 dbg_link("Probing %u/%u,timer = %u ms)\n",
869 l_ptr->fsm_msg_cnt, l_ptr->abort_limit,
870 cont_intv / 4);
871 link_send_proto_msg(l_ptr, STATE_MSG,
872 1, 0, 0, 0, 0);
873 l_ptr->fsm_msg_cnt++;
874 link_set_timer(l_ptr, cont_intv / 4);
875 } else { /* Link has failed */
876 dbg_link("-> RU (%u probes unanswered)\n",
877 l_ptr->fsm_msg_cnt);
878 link_reset(l_ptr);
879 l_ptr->state = RESET_UNKNOWN;
880 l_ptr->fsm_msg_cnt = 0;
881 link_send_proto_msg(l_ptr, RESET_MSG,
882 0, 0, 0, 0, 0);
883 l_ptr->fsm_msg_cnt++;
884 link_set_timer(l_ptr, cont_intv);
885 }
886 break;
887 default:
888 err("Unknown link event %u in WU state\n", event);
889 }
890 break;
891 case RESET_UNKNOWN:
892 dbg_link("RU/");
893 switch (event) {
894 case TRAFFIC_MSG_EVT:
895 dbg_link("TRF-\n");
896 break;
897 case ACTIVATE_MSG:
898 other = l_ptr->owner->active_links[0];
899 if (other && link_working_unknown(other)) {
900 dbg_link("ACT\n");
901 break;
902 }
903 dbg_link("ACT -> WW\n");
904 l_ptr->state = WORKING_WORKING;
905 l_ptr->fsm_msg_cnt = 0;
906 link_activate(l_ptr);
907 link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
908 l_ptr->fsm_msg_cnt++;
909 link_set_timer(l_ptr, cont_intv);
910 break;
911 case RESET_MSG:
912 dbg_link("RES \n");
913 dbg_link(" -> RR\n");
914 l_ptr->state = RESET_RESET;
915 l_ptr->fsm_msg_cnt = 0;
916 link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
917 l_ptr->fsm_msg_cnt++;
918 link_set_timer(l_ptr, cont_intv);
919 break;
920 case STARTING_EVT:
921 dbg_link("START-");
922 l_ptr->started = 1;
923 /* fall through */
924 case TIMEOUT_EVT:
925 dbg_link("TIM \n");
926 link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
927 l_ptr->fsm_msg_cnt++;
928 link_set_timer(l_ptr, cont_intv);
929 break;
930 default:
931 err("Unknown link event %u in RU state\n", event);
932 }
933 break;
934 case RESET_RESET:
935 dbg_link("RR/ ");
936 switch (event) {
937 case TRAFFIC_MSG_EVT:
938 dbg_link("TRF-");
939 /* fall through */
940 case ACTIVATE_MSG:
941 other = l_ptr->owner->active_links[0];
942 if (other && link_working_unknown(other)) {
943 dbg_link("ACT\n");
944 break;
945 }
946 dbg_link("ACT -> WW\n");
947 l_ptr->state = WORKING_WORKING;
948 l_ptr->fsm_msg_cnt = 0;
949 link_activate(l_ptr);
950 link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
951 l_ptr->fsm_msg_cnt++;
952 link_set_timer(l_ptr, cont_intv);
953 break;
954 case RESET_MSG:
955 dbg_link("RES\n");
956 break;
957 case TIMEOUT_EVT:
958 dbg_link("TIM\n");
959 link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
960 l_ptr->fsm_msg_cnt++;
961 link_set_timer(l_ptr, cont_intv);
962 dbg_link("fsm_msg_cnt %u\n", l_ptr->fsm_msg_cnt);
963 break;
964 default:
965 err("Unknown link event %u in RR state\n", event);
966 }
967 break;
968 default:
969 err("Unknown link state %u/%u\n", l_ptr->state, event);
970 }
971}
972
973/*
974 * link_bundle_buf(): Append contents of a buffer to
975 * the tail of an existing one.
976 */
977
978static int link_bundle_buf(struct link *l_ptr,
979 struct sk_buff *bundler,
980 struct sk_buff *buf)
981{
982 struct tipc_msg *bundler_msg = buf_msg(bundler);
983 struct tipc_msg *msg = buf_msg(buf);
984 u32 size = msg_size(msg);
985 u32 to_pos = align(msg_size(bundler_msg));
986 u32 rest = link_max_pkt(l_ptr) - to_pos;
987
988 if (msg_user(bundler_msg) != MSG_BUNDLER)
989 return 0;
990 if (msg_type(bundler_msg) != OPEN_MSG)
991 return 0;
992 if (rest < align(size))
993 return 0;
994
995 skb_put(bundler, (to_pos - msg_size(bundler_msg)) + size);
996 memcpy(bundler->data + to_pos, buf->data, size);
997 msg_set_size(bundler_msg, to_pos + size);
998 msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
999 dbg("Packed msg # %u(%u octets) into pos %u in buf(#%u)\n",
1000 msg_msgcnt(bundler_msg), size, to_pos, msg_seqno(bundler_msg));
1001 msg_dbg(msg, "PACKD:");
1002 buf_discard(buf);
1003 l_ptr->stats.sent_bundled++;
1004 return 1;
1005}
1006
1007static inline void link_add_to_outqueue(struct link *l_ptr,
1008 struct sk_buff *buf,
1009 struct tipc_msg *msg)
1010{
1011 u32 ack = mod(l_ptr->next_in_no - 1);
1012 u32 seqno = mod(l_ptr->next_out_no++);
1013
1014 msg_set_word(msg, 2, ((ack << 16) | seqno));
1015 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1016 buf->next = NULL;
1017 if (l_ptr->first_out) {
1018 l_ptr->last_out->next = buf;
1019 l_ptr->last_out = buf;
1020 } else
1021 l_ptr->first_out = l_ptr->last_out = buf;
1022 l_ptr->out_queue_size++;
1023}
1024
1025/*
1026 * link_send_buf() is the 'full path' for messages, called from
1027 * inside TIPC when the 'fast path' in tipc_send_buf
1028 * has failed, and from link_send()
1029 */
1030
int link_send_buf(struct link *l_ptr, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	u32 size = msg_size(msg);
	u32 dsz = msg_data_sz(msg);
	u32 queue_size = l_ptr->out_queue_size;
	u32 imp = msg_tot_importance(msg);
	u32 queue_limit = l_ptr->queue_limit[imp];
	u32 max_packet = link_max_pkt(l_ptr);

	msg_set_prevnode(msg, tipc_own_addr);	/* If routed message */

	/* Match msg importance against queue limits: */

	if (unlikely(queue_size >= queue_limit)) {
		/* Queue full: non-critical senders get scheduled for wakeup;
		 * others have the message dropped (and the link reset for
		 * importance levels above CONN_MANAGER). */
		if (imp <= TIPC_CRITICAL_IMPORTANCE) {
			return link_schedule_port(l_ptr, msg_origport(msg),
						  size);
		}
		msg_dbg(msg, "TIPC: Congestion, throwing away\n");
		buf_discard(buf);
		if (imp > CONN_MANAGER) {
			warn("Resetting <%s>, send queue full", l_ptr->name);
			link_reset(l_ptr);
		}
		return dsz;
	}

	/* Fragmentation needed ? */

	if (size > max_packet)
		return link_send_long_buf(l_ptr, buf);

	/* Packet can be queued or sent: */

	if (queue_size > l_ptr->stats.max_queue_sz)
		l_ptr->stats.max_queue_sz = queue_size;

	if (likely(!bearer_congested(l_ptr->b_ptr, l_ptr) &&
		   !link_congested(l_ptr))) {
		link_add_to_outqueue(l_ptr, buf, msg);

		if (likely(bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
			l_ptr->unacked_window = 0;
		} else {
			/* Bearer refused the packet: defer it and schedule
			 * the link for a retry when congestion abates. */
			bearer_schedule(l_ptr->b_ptr, l_ptr);
			l_ptr->stats.bearer_congs++;
			l_ptr->next_out = buf;
		}
		return dsz;
	}
	/* Congestion: can message be bundled ?: */

	if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
	    (msg_user(msg) != MSG_FRAGMENTER)) {

		/* Try adding message to an existing bundle */

		if (l_ptr->next_out &&
		    link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
			bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
			return dsz;
		}

		/* Try creating a new bundle */

		/* Only worthwhile if message leaves room for at least one
		 * more message of similar size in the bundle buffer. */
		if (size <= max_packet * 2 / 3) {
			struct sk_buff *bundler = buf_acquire(max_packet);
			struct tipc_msg bundler_hdr;

			if (bundler) {
				msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
					 TIPC_OK, INT_H_SIZE, l_ptr->addr);
				memcpy(bundler->data, (unchar *)&bundler_hdr,
				       INT_H_SIZE);
				skb_trim(bundler, INT_H_SIZE);
				link_bundle_buf(l_ptr, bundler, buf);
				buf = bundler;
				msg = buf_msg(buf);
				l_ptr->stats.sent_bundles++;
			}
		}
	}
	/* Queue the (possibly bundled) message for later transmission */
	if (!l_ptr->next_out)
		l_ptr->next_out = buf;
	link_add_to_outqueue(l_ptr, buf, msg);
	bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
	return dsz;
}
1120
1121/*
1122 * link_send(): same as link_send_buf(), but the link to use has
1123 * not been selected yet, and the the owner node is not locked
1124 * Called by TIPC internal users, e.g. the name distributor
1125 */
1126
1127int link_send(struct sk_buff *buf, u32 dest, u32 selector)
1128{
1129 struct link *l_ptr;
1130 struct node *n_ptr;
1131 int res = -ELINKCONG;
1132
1133 read_lock_bh(&net_lock);
1134 n_ptr = node_select(dest, selector);
1135 if (n_ptr) {
1136 node_lock(n_ptr);
1137 l_ptr = n_ptr->active_links[selector & 1];
1138 dbg("link_send: found link %x for dest %x\n", l_ptr, dest);
1139 if (l_ptr) {
1140 res = link_send_buf(l_ptr, buf);
1141 }
1142 node_unlock(n_ptr);
1143 } else {
1144 dbg("Attempt to send msg to unknown node:\n");
1145 msg_dbg(buf_msg(buf),">>>");
1146 buf_discard(buf);
1147 }
1148 read_unlock_bh(&net_lock);
1149 return res;
1150}
1151
1152/*
1153 * link_send_buf_fast: Entry for data messages where the
1154 * destination link is known and the header is complete,
1155 * inclusive total message length. Very time critical.
1156 * Link is locked. Returns user data length.
1157 */
1158
static inline int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
				     u32 *used_max_pkt)
{
	struct tipc_msg *msg = buf_msg(buf);
	int res = msg_data_sz(msg);

	/* Fast path: link uncongested, message fits in one packet,
	 * and the bearer has no congested links queued ahead of us. */
	if (likely(!link_congested(l_ptr))) {
		if (likely(msg_size(msg) <= link_max_pkt(l_ptr))) {
			if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
				link_add_to_outqueue(l_ptr, buf, msg);
				if (likely(bearer_send(l_ptr->b_ptr, buf,
						       &l_ptr->media_addr))) {
					l_ptr->unacked_window = 0;
					msg_dbg(msg,"SENT_FAST:");
					return res;
				}
				/* Bearer congested: defer packet, retry later */
				dbg("failed sent fast...\n");
				bearer_schedule(l_ptr->b_ptr, l_ptr);
				l_ptr->stats.bearer_congs++;
				l_ptr->next_out = buf;
				return res;
			}
		}
		else
			/* Oversized: report the real MTU back to the caller */
			*used_max_pkt = link_max_pkt(l_ptr);
	}
	return link_send_buf(l_ptr, buf);  /* All other cases */
}
1187
1188/*
1189 * tipc_send_buf_fast: Entry for data messages where the
1190 * destination node is known and the header is complete,
1191 * inclusive total message length.
1192 * Returns user data length.
1193 */
int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
{
	struct link *l_ptr;
	struct node *n_ptr;
	int res;
	u32 selector = msg_origport(buf_msg(buf)) & 1;
	u32 dummy;

	/* Local delivery needs no link at all */
	if (destnode == tipc_own_addr)
		return port_recv_msg(buf);

	read_lock_bh(&net_lock);
	n_ptr = node_select(destnode, selector);
	if (likely(n_ptr)) {
		node_lock(n_ptr);
		l_ptr = n_ptr->active_links[selector];
		dbg("send_fast: buf %x selected %x, destnode = %x\n",
		    buf, l_ptr, destnode);
		if (likely(l_ptr)) {
			res = link_send_buf_fast(l_ptr, buf, &dummy);
			node_unlock(n_ptr);
			read_unlock_bh(&net_lock);
			return res;
		}
		node_unlock(n_ptr);
	}
	read_unlock_bh(&net_lock);
	/* No usable link: bounce the message back to the sender */
	res = msg_data_sz(buf_msg(buf));
	tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
	return res;
}
1225
1226
1227/*
1228 * link_send_sections_fast: Entry for messages where the
1229 * destination processor is known and the header is complete,
1230 * except for total message length.
1231 * Returns user data length or errno.
1232 */
int link_send_sections_fast(struct port *sender,
			    struct iovec const *msg_sect,
			    const u32 num_sect,
			    u32 destaddr)
{
	struct tipc_msg *hdr = &sender->publ.phdr;
	struct link *l_ptr;
	struct sk_buff *buf;
	struct node *node;
	int res;
	u32 selector = msg_origport(hdr) & 1;

	assert(destaddr != tipc_own_addr);

again:
	/*
	 * Try building message using port's max_pkt hint.
	 * (Must not hold any locks while building message.)
	 */

	res = msg_build(hdr, msg_sect, num_sect, sender->max_pkt,
			!sender->user_port, &buf);

	read_lock_bh(&net_lock);
	node = node_select(destaddr, selector);
	if (likely(node)) {
		node_lock(node);
		l_ptr = node->active_links[selector];
		if (likely(l_ptr)) {
			if (likely(buf)) {
				/* Message built OK and fits: send it */
				res = link_send_buf_fast(l_ptr, buf,
							 &sender->max_pkt);
				if (unlikely(res < 0))
					buf_discard(buf);
exit:
				node_unlock(node);
				read_unlock_bh(&net_lock);
				return res;
			}

			/* Exit if build request was invalid */

			if (unlikely(res < 0))
				goto exit;

			/* Exit if link (or bearer) is congested */

			if (link_congested(l_ptr) ||
			    !list_empty(&l_ptr->b_ptr->cong_links)) {
				res = link_schedule_port(l_ptr,
							 sender->publ.ref, res);
				goto exit;
			}

			/*
			 * Message size exceeds max_pkt hint; update hint,
			 * then re-try fast path or fragment the message
			 */

			sender->max_pkt = link_max_pkt(l_ptr);
			node_unlock(node);
			read_unlock_bh(&net_lock);


			if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
				goto again;

			return link_send_sections_long(sender, msg_sect,
						       num_sect, destaddr);
		}
		node_unlock(node);
	}
	read_unlock_bh(&net_lock);

	/* Couldn't find a link to the destination node */

	if (buf)
		return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
	if (res >= 0)
		return port_reject_sections(sender, hdr, msg_sect, num_sect,
					    TIPC_ERR_NO_NODE);
	return res;
}
1316
1317/*
1318 * link_send_sections_long(): Entry for long messages where the
1319 * destination node is known and the header is complete,
1320 * inclusive total message length.
1321 * Link and bearer congestion status have been checked to be ok,
1322 * and are ignored if they change.
1323 *
1324 * Note that fragments do not use the full link MTU so that they won't have
1325 * to undergo refragmentation if link changeover causes them to be sent
1326 * over another link with an additional tunnel header added as prefix.
1327 * (Refragmentation will still occur if the other link has a smaller MTU.)
1328 *
1329 * Returns user data length or errno.
1330 */
1331static int link_send_sections_long(struct port *sender,
1332 struct iovec const *msg_sect,
1333 u32 num_sect,
1334 u32 destaddr)
1335{
1336 struct link *l_ptr;
1337 struct node *node;
1338 struct tipc_msg *hdr = &sender->publ.phdr;
1339 u32 dsz = msg_data_sz(hdr);
1340 u32 max_pkt,fragm_sz,rest;
1341 struct tipc_msg fragm_hdr;
1342 struct sk_buff *buf,*buf_chain,*prev;
1343 u32 fragm_crs,fragm_rest,hsz,sect_rest;
1344 const unchar *sect_crs;
1345 int curr_sect;
1346 u32 fragm_no;
1347
1348again:
1349 fragm_no = 1;
1350 max_pkt = sender->max_pkt - INT_H_SIZE;
1351 /* leave room for tunnel header in case of link changeover */
1352 fragm_sz = max_pkt - INT_H_SIZE;
1353 /* leave room for fragmentation header in each fragment */
1354 rest = dsz;
1355 fragm_crs = 0;
1356 fragm_rest = 0;
1357 sect_rest = 0;
1358 sect_crs = 0;
1359 curr_sect = -1;
1360
1361 /* Prepare reusable fragment header: */
1362
1363 msg_dbg(hdr, ">FRAGMENTING>");
1364 msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
1365 TIPC_OK, INT_H_SIZE, msg_destnode(hdr));
1366 msg_set_link_selector(&fragm_hdr, sender->publ.ref);
1367 msg_set_size(&fragm_hdr, max_pkt);
1368 msg_set_fragm_no(&fragm_hdr, 1);
1369
1370 /* Prepare header of first fragment: */
1371
1372 buf_chain = buf = buf_acquire(max_pkt);
1373 if (!buf)
1374 return -ENOMEM;
1375 buf->next = NULL;
1376 memcpy(buf->data, (unchar *)&fragm_hdr, INT_H_SIZE);
1377 hsz = msg_hdr_sz(hdr);
1378 memcpy(buf->data + INT_H_SIZE, (unchar *)hdr, hsz);
1379 msg_dbg(buf_msg(buf), ">BUILD>");
1380
1381 /* Chop up message: */
1382
1383 fragm_crs = INT_H_SIZE + hsz;
1384 fragm_rest = fragm_sz - hsz;
1385
1386 do { /* For all sections */
1387 u32 sz;
1388
1389 if (!sect_rest) {
1390 sect_rest = msg_sect[++curr_sect].iov_len;
1391 sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
1392 }
1393
1394 if (sect_rest < fragm_rest)
1395 sz = sect_rest;
1396 else
1397 sz = fragm_rest;
1398
1399 if (likely(!sender->user_port)) {
1400 if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
1401error:
1402 for (; buf_chain; buf_chain = buf) {
1403 buf = buf_chain->next;
1404 buf_discard(buf_chain);
1405 }
1406 return -EFAULT;
1407 }
1408 } else
1409 memcpy(buf->data + fragm_crs, sect_crs, sz);
1410
1411 sect_crs += sz;
1412 sect_rest -= sz;
1413 fragm_crs += sz;
1414 fragm_rest -= sz;
1415 rest -= sz;
1416
1417 if (!fragm_rest && rest) {
1418
1419 /* Initiate new fragment: */
1420 if (rest <= fragm_sz) {
1421 fragm_sz = rest;
1422 msg_set_type(&fragm_hdr,LAST_FRAGMENT);
1423 } else {
1424 msg_set_type(&fragm_hdr, FRAGMENT);
1425 }
1426 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
1427 msg_set_fragm_no(&fragm_hdr, ++fragm_no);
1428 prev = buf;
1429 buf = buf_acquire(fragm_sz + INT_H_SIZE);
1430 if (!buf)
1431 goto error;
1432
1433 buf->next = NULL;
1434 prev->next = buf;
1435 memcpy(buf->data, (unchar *)&fragm_hdr, INT_H_SIZE);
1436 fragm_crs = INT_H_SIZE;
1437 fragm_rest = fragm_sz;
1438 msg_dbg(buf_msg(buf)," >BUILD>");
1439 }
1440 }
1441 while (rest > 0);
1442
1443 /*
1444 * Now we have a buffer chain. Select a link and check
1445 * that packet size is still OK
1446 */
1447 node = node_select(destaddr, sender->publ.ref & 1);
1448 if (likely(node)) {
1449 node_lock(node);
1450 l_ptr = node->active_links[sender->publ.ref & 1];
1451 if (!l_ptr) {
1452 node_unlock(node);
1453 goto reject;
1454 }
1455 if (link_max_pkt(l_ptr) < max_pkt) {
1456 sender->max_pkt = link_max_pkt(l_ptr);
1457 node_unlock(node);
1458 for (; buf_chain; buf_chain = buf) {
1459 buf = buf_chain->next;
1460 buf_discard(buf_chain);
1461 }
1462 goto again;
1463 }
1464 } else {
1465reject:
1466 for (; buf_chain; buf_chain = buf) {
1467 buf = buf_chain->next;
1468 buf_discard(buf_chain);
1469 }
1470 return port_reject_sections(sender, hdr, msg_sect, num_sect,
1471 TIPC_ERR_NO_NODE);
1472 }
1473
1474 /* Append whole chain to send queue: */
1475
1476 buf = buf_chain;
1477 l_ptr->long_msg_seq_no = mod(l_ptr->long_msg_seq_no + 1);
1478 if (!l_ptr->next_out)
1479 l_ptr->next_out = buf_chain;
1480 l_ptr->stats.sent_fragmented++;
1481 while (buf) {
1482 struct sk_buff *next = buf->next;
1483 struct tipc_msg *msg = buf_msg(buf);
1484
1485 l_ptr->stats.sent_fragments++;
1486 msg_set_long_msgno(msg, l_ptr->long_msg_seq_no);
1487 link_add_to_outqueue(l_ptr, buf, msg);
1488 msg_dbg(msg, ">ADD>");
1489 buf = next;
1490 }
1491
1492 /* Send it, if possible: */
1493
1494 link_push_queue(l_ptr);
1495 node_unlock(node);
1496 return dsz;
1497}
1498
1499/*
1500 * link_push_packet: Push one unsent packet to the media
1501 */
u32 link_push_packet(struct link *l_ptr)
{
	struct sk_buff *buf = l_ptr->first_out;
	u32 r_q_size = l_ptr->retransm_queue_size;
	u32 r_q_head = l_ptr->retransm_queue_head;

	/* Step to position where retransmission failed, if any,    */
	/* consider that buffers may have been released in meantime */

	if (r_q_size && buf) {
		u32 last = lesser(mod(r_q_head + r_q_size),
				  link_last_sent(l_ptr));
		u32 first = msg_seqno(buf_msg(buf));

		/* Skip past buffers already acknowledged and released */
		while (buf && less(first, r_q_head)) {
			first = mod(first + 1);
			buf = buf->next;
		}
		l_ptr->retransm_queue_head = r_q_head = first;
		l_ptr->retransm_queue_size = r_q_size = mod(last - first);
	}

	/* Continue retransmission now, if there is anything: */

	/* skb_cloned() means a retransmission is already in flight */
	if (r_q_size && buf && !skb_cloned(buf)) {
		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
		if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
			msg_dbg(buf_msg(buf), ">DEF-RETR>");
			l_ptr->retransm_queue_head = mod(++r_q_head);
			l_ptr->retransm_queue_size = --r_q_size;
			l_ptr->stats.retransmitted++;
			return TIPC_OK;
		} else {
			l_ptr->stats.bearer_congs++;
			msg_dbg(buf_msg(buf), "|>DEF-RETR>");
			return PUSH_FAILED;
		}
	}

	/* Send deferred protocol message, if any: */

	buf = l_ptr->proto_msg_queue;
	if (buf) {
		msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
		msg_set_bcast_ack(buf_msg(buf),l_ptr->owner->bclink.last_in);
		if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
			msg_dbg(buf_msg(buf), ">DEF-PROT>");
			l_ptr->unacked_window = 0;
			buf_discard(buf);
			l_ptr->proto_msg_queue = 0;
			return TIPC_OK;
		} else {
			msg_dbg(buf_msg(buf), "|>DEF-PROT>");
			l_ptr->stats.bearer_congs++;
			return PUSH_FAILED;
		}
	}

	/* Send one deferred data message, if send window not full: */

	buf = l_ptr->next_out;
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		u32 next = msg_seqno(msg);
		u32 first = msg_seqno(buf_msg(l_ptr->first_out));

		/* queue_limit[0] is the link's send window size */
		if (mod(next - first) < l_ptr->queue_limit[0]) {
			msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
			msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
			if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
				/* No more messages can be added to a sent bundle */
				if (msg_user(msg) == MSG_BUNDLER)
					msg_set_type(msg, CLOSED_MSG);
				msg_dbg(msg, ">PUSH-DATA>");
				l_ptr->next_out = buf->next;
				return TIPC_OK;
			} else {
				msg_dbg(msg, "|PUSH-DATA|");
				l_ptr->stats.bearer_congs++;
				return PUSH_FAILED;
			}
		}
	}
	/* Nothing left to push */
	return PUSH_FINISHED;
}
1587
1588/*
1589 * push_queue(): push out the unsent messages of a link where
1590 * congestion has abated. Node is locked
1591 */
1592void link_push_queue(struct link *l_ptr)
1593{
1594 u32 res;
1595
1596 if (bearer_congested(l_ptr->b_ptr, l_ptr))
1597 return;
1598
1599 do {
1600 res = link_push_packet(l_ptr);
1601 }
1602 while (res == TIPC_OK);
1603 if (res == PUSH_FAILED)
1604 bearer_schedule(l_ptr->b_ptr, l_ptr);
1605}
1606
1607void link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1608 u32 retransmits)
1609{
1610 struct tipc_msg *msg;
1611
1612 dbg("Retransmitting %u in link %x\n", retransmits, l_ptr);
1613
1614 if (bearer_congested(l_ptr->b_ptr, l_ptr) && buf && !skb_cloned(buf)) {
1615 msg_dbg(buf_msg(buf), ">NO_RETR->BCONG>");
1616 dbg_print_link(l_ptr, " ");
1617 l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
1618 l_ptr->retransm_queue_size = retransmits;
1619 return;
1620 }
1621 while (retransmits && (buf != l_ptr->next_out) && buf && !skb_cloned(buf)) {
1622 msg = buf_msg(buf);
1623 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1624 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1625 if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1626 /* Catch if retransmissions fail repeatedly: */
1627 if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1628 if (++l_ptr->stale_count > 100) {
1629 msg_print(CONS, buf_msg(buf), ">RETR>");
1630 info("...Retransmitted %u times\n",
1631 l_ptr->stale_count);
1632 link_print(l_ptr, CONS, "Resetting Link\n");;
1633 link_reset(l_ptr);
1634 break;
1635 }
1636 } else {
1637 l_ptr->stale_count = 0;
1638 }
1639 l_ptr->last_retransmitted = msg_seqno(msg);
1640
1641 msg_dbg(buf_msg(buf), ">RETR>");
1642 buf = buf->next;
1643 retransmits--;
1644 l_ptr->stats.retransmitted++;
1645 } else {
1646 bearer_schedule(l_ptr->b_ptr, l_ptr);
1647 l_ptr->stats.bearer_congs++;
1648 l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
1649 l_ptr->retransm_queue_size = retransmits;
1650 return;
1651 }
1652 }
1653 l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
1654}
1655
1656/*
1657 * link_recv_non_seq: Receive packets which are outside
1658 * the link sequence flow
1659 */
1660
1661static void link_recv_non_seq(struct sk_buff *buf)
1662{
1663 struct tipc_msg *msg = buf_msg(buf);
1664
1665 if (msg_user(msg) == LINK_CONFIG)
1666 disc_recv_msg(buf);
1667 else
1668 bclink_recv_pkt(buf);
1669}
1670
1671/**
1672 * link_insert_deferred_queue - insert deferred messages back into receive chain
1673 */
1674
1675static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr,
1676 struct sk_buff *buf)
1677{
1678 u32 seq_no;
1679
1680 if (l_ptr->oldest_deferred_in == NULL)
1681 return buf;
1682
1683 seq_no = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
1684 if (seq_no == mod(l_ptr->next_in_no)) {
1685 l_ptr->newest_deferred_in->next = buf;
1686 buf = l_ptr->oldest_deferred_in;
1687 l_ptr->oldest_deferred_in = NULL;
1688 l_ptr->deferred_inqueue_sz = 0;
1689 }
1690 return buf;
1691}
1692
/*
 * tipc_recv_msg - process a chain of inbound packets from a bearer
 *
 * Each buffer is validated, matched to its originating node/link,
 * run through the link FSM, and either delivered locally, routed
 * onward, deferred (out of sequence), or discarded.
 */
void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
{
	read_lock_bh(&net_lock);
	while (head) {
		struct bearer *b_ptr;
		struct node *n_ptr;
		struct link *l_ptr;
		struct sk_buff *crs;
		struct sk_buff *buf = head;
		struct tipc_msg *msg = buf_msg(buf);
		u32 seq_no = msg_seqno(msg);
		u32 ackd = msg_ack(msg);
		u32 released = 0;
		int type;

		b_ptr = (struct bearer *)tb_ptr;
		TIPC_SKB_CB(buf)->handle = b_ptr;

		head = head->next;
		/* Discard packets from incompatible protocol versions */
		if (unlikely(msg_version(msg) != TIPC_VERSION))
			goto cont;
#if 0
		if (msg_user(msg) != LINK_PROTOCOL)
#endif
			msg_dbg(msg,"<REC<");

		/* Unnumbered packets (discovery/broadcast) bypass the link */
		if (unlikely(msg_non_seq(msg))) {
			link_recv_non_seq(buf);
			continue;
		}
		n_ptr = node_find(msg_prevnode(msg));
		if (unlikely(!n_ptr))
			goto cont;

		node_lock(n_ptr);
		l_ptr = n_ptr->links[b_ptr->identity];
		if (unlikely(!l_ptr)) {
			node_unlock(n_ptr);
			goto cont;
		}
		/*
		 * Release acked messages
		 */
		if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) {
			if (node_is_up(n_ptr) && n_ptr->bclink.supported)
				bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
		}

		/* Free all sent-and-now-acknowledged buffers */
		crs = l_ptr->first_out;
		while ((crs != l_ptr->next_out) &&
		       less_eq(msg_seqno(buf_msg(crs)), ackd)) {
			struct sk_buff *next = crs->next;

			buf_discard(crs);
			crs = next;
			released++;
		}
		if (released) {
			l_ptr->first_out = crs;
			l_ptr->out_queue_size -= released;
		}
		/* Acks may have freed send-window space: push deferred data */
		if (unlikely(l_ptr->next_out))
			link_push_queue(l_ptr);
		if (unlikely(!list_empty(&l_ptr->waiting_ports)))
			link_wakeup_ports(l_ptr, 0);
		/* Generate an explicit ack if too many packets are unacked */
		if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
			l_ptr->stats.sent_acks++;
			link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
		}

protocol_check:
		if (likely(link_working_working(l_ptr))) {
			if (likely(seq_no == mod(l_ptr->next_in_no))) {
				/* In-sequence packet: accept it and pull in any
				 * now-deliverable deferred packets behind it */
				l_ptr->next_in_no++;
				if (unlikely(l_ptr->oldest_deferred_in))
					head = link_insert_deferred_queue(l_ptr,
									  head);
				if (likely(msg_is_dest(msg, tipc_own_addr))) {
deliver:
					if (likely(msg_isdata(msg))) {
						node_unlock(n_ptr);
						port_recv_msg(buf);
						continue;
					}
					/* Internal message: dispatch by user type.
					 * Node lock is dropped before each handler. */
					switch (msg_user(msg)) {
					case MSG_BUNDLER:
						l_ptr->stats.recv_bundles++;
						l_ptr->stats.recv_bundled +=
							msg_msgcnt(msg);
						node_unlock(n_ptr);
						link_recv_bundle(buf);
						continue;
					case ROUTE_DISTRIBUTOR:
						node_unlock(n_ptr);
						cluster_recv_routing_table(buf);
						continue;
					case NAME_DISTRIBUTOR:
						node_unlock(n_ptr);
						named_recv(buf);
						continue;
					case CONN_MANAGER:
						node_unlock(n_ptr);
						port_recv_proto_msg(buf);
						continue;
					case MSG_FRAGMENTER:
						l_ptr->stats.recv_fragments++;
						/* Deliver once all fragments have arrived */
						if (link_recv_fragment(
							&l_ptr->defragm_buf,
							&buf, &msg)) {
							l_ptr->stats.recv_fragmented++;
							goto deliver;
						}
						break;
					case CHANGEOVER_PROTOCOL:
						type = msg_type(msg);
						/* Unwrap tunneled message; re-check its
						 * own sequence state if duplicated */
						if (link_recv_changeover_msg(
							&l_ptr, &buf)) {
							msg = buf_msg(buf);
							seq_no = msg_seqno(msg);
							TIPC_SKB_CB(buf)->handle
								= b_ptr;
							if (type == ORIGINAL_MSG)
								goto deliver;
							goto protocol_check;
						}
						break;
					}
				}
				node_unlock(n_ptr);
				net_route_msg(buf);
				continue;
			}
			/* Out of sequence: defer (or discard duplicate) */
			link_handle_out_of_seq_msg(l_ptr, buf);
			head = link_insert_deferred_queue(l_ptr, head);
			node_unlock(n_ptr);
			continue;
		}

		/* Link not in WORKING_WORKING state */
		if (msg_user(msg) == LINK_PROTOCOL) {
			link_recv_proto_msg(l_ptr, buf);
			head = link_insert_deferred_queue(l_ptr, head);
			node_unlock(n_ptr);
			continue;
		}
		msg_dbg(msg,"NSEQ<REC<");
		link_state_event(l_ptr, TRAFFIC_MSG_EVT);

		if (link_working_working(l_ptr)) {
			/* Re-insert in front of queue */
			/* Traffic event brought the link up: reprocess packet */
			msg_dbg(msg,"RECV-REINS:");
			buf->next = head;
			head = buf;
			node_unlock(n_ptr);
			continue;
		}
		node_unlock(n_ptr);
cont:
		buf_discard(buf);
	}
	read_unlock_bh(&net_lock);
}
1854
1855/*
1856 * link_defer_buf(): Sort a received out-of-sequence packet
1857 * into the deferred reception queue.
1858 * Returns the increase of the queue length,i.e. 0 or 1
1859 */
1860
u32 link_defer_pkt(struct sk_buff **head,
		   struct sk_buff **tail,
		   struct sk_buff *buf)
{
	struct sk_buff *prev = 0;
	struct sk_buff *crs = *head;
	u32 seq_no = msg_seqno(buf_msg(buf));

	buf->next = NULL;

	/* Empty queue ? */
	if (*head == NULL) {
		*head = *tail = buf;
		return 1;
	}

	/* Last ? */
	/* Common case: packet belongs after current tail */
	if (less(msg_seqno(buf_msg(*tail)), seq_no)) {
		(*tail)->next = buf;
		*tail = buf;
		return 1;
	}

	/* Scan through queue and sort it in */
	do {
		struct tipc_msg *msg = buf_msg(crs);

		if (less(seq_no, msg_seqno(msg))) {
			/* Insert before 'crs' to keep ascending order */
			buf->next = crs;
			if (prev)
				prev->next = buf;
			else
				*head = buf;
			return 1;
		}
		if (seq_no == msg_seqno(msg)) {
			break;
		}
		prev = crs;
		crs = crs->next;
	}
	while (crs);

	/* Message is a duplicate of an existing message */

	buf_discard(buf);
	return 0;
}
1909
1910/**
1911 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1912 */
1913
static void link_handle_out_of_seq_msg(struct link *l_ptr,
				       struct sk_buff *buf)
{
	u32 seq_no = msg_seqno(buf_msg(buf));

	/* Link protocol messages are unnumbered; handle them directly */
	if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
		link_recv_proto_msg(l_ptr, buf);
		return;
	}

	dbg("rx OOS msg: seq_no %u, expecting %u (%u)\n",
	    seq_no, mod(l_ptr->next_in_no), l_ptr->next_in_no);

	/* Record OOS packet arrival (force mismatch on next timeout) */

	l_ptr->checkpoint--;

	/*
	 * Discard packet if a duplicate; otherwise add it to deferred queue
	 * and notify peer of gap as per protocol specification
	 */

	if (less(seq_no, mod(l_ptr->next_in_no))) {
		l_ptr->stats.duplicates++;
		buf_discard(buf);
		return;
	}

	if (link_defer_pkt(&l_ptr->oldest_deferred_in,
			   &l_ptr->newest_deferred_in, buf)) {
		l_ptr->deferred_inqueue_sz++;
		l_ptr->stats.deferred_recv++;
		/* Send a gap notification for every 16th deferred packet */
		if ((l_ptr->deferred_inqueue_sz % 16) == 1)
			link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
	} else
		l_ptr->stats.duplicates++;
}
1951
1952/*
1953 * Send protocol message to the other endpoint.
1954 */
void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
			 u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
{
	struct sk_buff *buf = 0;
	struct tipc_msg *msg = l_ptr->pmsg;
	u32 msg_size = sizeof(l_ptr->proto_msg);

	if (link_blocked(l_ptr))
		return;
	msg_set_type(msg, msg_typ);
	msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
	msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in));
	msg_set_last_bcast(msg, bclink_get_last_sent());

	if (msg_typ == STATE_MSG) {
		u32 next_sent = mod(l_ptr->next_out_no);

		/* State messages are only meaningful on an established link */
		if (!link_is_up(l_ptr))
			return;
		if (l_ptr->next_out)
			next_sent = msg_seqno(buf_msg(l_ptr->next_out));
		msg_set_next_sent(msg, next_sent);
		/* Report any gap up to the first deferred (out-of-seq) packet */
		if (l_ptr->oldest_deferred_in) {
			u32 rec = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
			gap = mod(rec - mod(l_ptr->next_in_no));
		}
		msg_set_seq_gap(msg, gap);
		if (gap)
			l_ptr->stats.sent_nacks++;
		msg_set_link_tolerance(msg, tolerance);
		msg_set_linkprio(msg, priority);
		msg_set_max_pkt(msg, ack_mtu);
		msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
		msg_set_probe(msg, probe_msg != 0);
		if (probe_msg) {
			u32 mtu = l_ptr->max_pkt;

			/* MTU discovery: probe with progressively larger
			 * messages, bisecting toward max_pkt_target; after
			 * 10 failed probes, lower the target instead. */
			if ((mtu < l_ptr->max_pkt_target) &&
			    link_working_working(l_ptr) &&
			    l_ptr->fsm_msg_cnt) {
				msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				if (l_ptr->max_pkt_probes == 10) {
					l_ptr->max_pkt_target = (msg_size - 4);
					l_ptr->max_pkt_probes = 0;
					msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
				}
				l_ptr->max_pkt_probes++;
			}

			l_ptr->stats.sent_probes++;
		}
		l_ptr->stats.sent_states++;
	} else {		/* RESET_MSG or ACTIVATE_MSG */
		msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
		msg_set_seq_gap(msg, 0);
		msg_set_next_sent(msg, 1);
		msg_set_link_tolerance(msg, l_ptr->tolerance);
		msg_set_linkprio(msg, l_ptr->priority);
		msg_set_max_pkt(msg, l_ptr->max_pkt_target);
	}

	if (node_has_redundant_links(l_ptr->owner)) {
		msg_set_redundant_link(msg);
	} else {
		msg_clear_redundant_link(msg);
	}
	msg_set_linkprio(msg, l_ptr->priority);

	/* Give message a sequence number far outside the data window, */
	/* so it can never be mistaken for an in-sequence data packet  */

	msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));

	/* Congestion? */

	/* If the bearer is congested, park the message in the (single-
	 * entry) protocol message queue for link_push_packet() to send. */
	if (bearer_congested(l_ptr->b_ptr, l_ptr)) {
		if (!l_ptr->proto_msg_queue) {
			l_ptr->proto_msg_queue =
				buf_acquire(sizeof(l_ptr->proto_msg));
		}
		buf = l_ptr->proto_msg_queue;
		if (!buf)
			return;
		memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg));
		return;
	}
	msg_set_timestamp(msg, jiffies_to_msecs(jiffies));

	/* Message can be sent */

	msg_dbg(msg, ">>");

	buf = buf_acquire(msg_size);
	if (!buf)
		return;

	memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg));
	msg_set_size(buf_msg(buf), msg_size);

	if (bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
		l_ptr->unacked_window = 0;
		buf_discard(buf);
		return;
	}

	/* New congestion */
	bearer_schedule(l_ptr->b_ptr, l_ptr);
	l_ptr->proto_msg_queue = buf;
	l_ptr->stats.bearer_congs++;
}
2064
2065/*
2066 * Receive protocol message :
2067 * Note that network plane id propagates through the network, and may
2068 * change at any time. The node with lowest address rules
2069 */
2070
static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
{
	u32 rec_gap = 0;
	u32 max_pkt_info;
	u32 max_pkt_ack;
	u32 msg_tol;
	struct tipc_msg *msg = buf_msg(buf);

	dbg("AT(%u):", jiffies_to_msecs(jiffies));
	msg_dbg(msg, "<<");
	if (link_blocked(l_ptr))
		goto exit;

	/* record unnumbered packet arrival (force mismatch on next timeout) */

	l_ptr->checkpoint--;

	/* Network plane arbitration: on disagreement the node with the
	 * lowest address wins, so adopt the peer's plane only if our own
	 * address is higher (see comment above this function). */
	if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
		if (tipc_own_addr > msg_prevnode(msg))
			l_ptr->b_ptr->net_plane = msg_net_plane(msg);

	l_ptr->owner->permit_changeover = msg_redundant_link(msg);

	switch (msg_type(msg)) {

	case RESET_MSG:
		/* A RESET carrying the session id we already know is a
		 * retransmission of one we have acted on; ignore it. */
		if (!link_working_unknown(l_ptr) && l_ptr->peer_session) {
			if (msg_session(msg) == l_ptr->peer_session) {
				dbg("Duplicate RESET: %u<->%u\n",
				    msg_session(msg), l_ptr->peer_session);
				break; /* duplicate: ignore */
			}
		}
		/* fall thru' */
	case ACTIVATE_MSG:
		/* Update link settings according other endpoint's values */

		/* Peer sends its own link name after the final ':' */
		strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));

		/* Tolerance may only grow here; shrinking it is done via
		 * explicit configuration (STATE_MSG path below). */
		if ((msg_tol = msg_link_tolerance(msg)) &&
		    (msg_tol > l_ptr->tolerance))
			link_set_supervision_props(l_ptr, msg_tol);

		if (msg_linkprio(msg) > l_ptr->priority)
			l_ptr->priority = msg_linkprio(msg);

		/* Negotiate MTU: never exceed the smaller endpoint's target.
		 * max_pkt_info == 0 means the peer did not advertise one. */
		max_pkt_info = msg_max_pkt(msg);
		if (max_pkt_info) {
			if (max_pkt_info < l_ptr->max_pkt_target)
				l_ptr->max_pkt_target = max_pkt_info;
			if (l_ptr->max_pkt > l_ptr->max_pkt_target)
				l_ptr->max_pkt = l_ptr->max_pkt_target;
		} else {
			l_ptr->max_pkt = l_ptr->max_pkt_target;
		}
		l_ptr->owner->bclink.supported = (max_pkt_info != 0);

		link_state_event(l_ptr, msg_type(msg));

		l_ptr->peer_session = msg_session(msg);
		l_ptr->peer_bearer_id = msg_bearer_id(msg);

		/* Synchronize broadcast sequence numbers */
		if (!node_has_redundant_links(l_ptr->owner)) {
			l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg));
		}
		break;
	case STATE_MSG:

		if ((msg_tol = msg_link_tolerance(msg)))
			link_set_supervision_props(l_ptr, msg_tol);

		/* A priority change forces a link reset so both endpoints
		 * renegotiate with the new value. */
		if (msg_linkprio(msg) &&
		    (msg_linkprio(msg) != l_ptr->priority)) {
			warn("Changing prio <%s>: %u->%u\n",
			     l_ptr->name, l_ptr->priority, msg_linkprio(msg));
			l_ptr->priority = msg_linkprio(msg);
			link_reset(l_ptr); /* Enforce change to take effect */
			break;
		}
		link_state_event(l_ptr, TRAFFIC_MSG_EVT);
		l_ptr->stats.recv_states++;
		if (link_reset_unknown(l_ptr))
			break;

		/* Gap between what peer has sent and what we have received */
		if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
			rec_gap = mod(msg_next_sent(msg) -
				      mod(l_ptr->next_in_no));
		}

		max_pkt_ack = msg_max_pkt(msg);
		if (max_pkt_ack > l_ptr->max_pkt) {
			dbg("Link <%s> updated MTU %u -> %u\n",
			    l_ptr->name, l_ptr->max_pkt, max_pkt_ack);
			l_ptr->max_pkt = max_pkt_ack;
			l_ptr->max_pkt_probes = 0;
		}

		/* An oversized probe acknowledges the probed packet size */
		max_pkt_ack = 0;
		if (msg_probe(msg)) {
			l_ptr->stats.recv_probes++;
			if (msg_size(msg) > sizeof(l_ptr->proto_msg)) {
				max_pkt_ack = msg_size(msg);
			}
		}

		/* Protocol message before retransmits, reduce loss risk */

		bclink_check_gap(l_ptr->owner, msg_last_bcast(msg));

		if (rec_gap || (msg_probe(msg))) {
			link_send_proto_msg(l_ptr, STATE_MSG,
					    0, rec_gap, 0, 0, max_pkt_ack);
		}
		if (msg_seq_gap(msg)) {
			msg_dbg(msg, "With Gap:");
			l_ptr->stats.recv_nacks++;
			link_retransmit(l_ptr, l_ptr->first_out,
					msg_seq_gap(msg));
		}
		break;
	default:
		msg_dbg(buf_msg(buf), "<DISCARDING UNKNOWN<");
	}
exit:
	buf_discard(buf);
}
2198
2199
2200/*
2201 * link_tunnel(): Send one message via a link belonging to
2202 * another bearer. Owner node is locked.
2203 */
2204void link_tunnel(struct link *l_ptr,
2205 struct tipc_msg *tunnel_hdr,
2206 struct tipc_msg *msg,
2207 u32 selector)
2208{
2209 struct link *tunnel;
2210 struct sk_buff *buf;
2211 u32 length = msg_size(msg);
2212
2213 tunnel = l_ptr->owner->active_links[selector & 1];
2214 if (!link_is_up(tunnel))
2215 return;
2216 msg_set_size(tunnel_hdr, length + INT_H_SIZE);
2217 buf = buf_acquire(length + INT_H_SIZE);
2218 if (!buf)
2219 return;
2220 memcpy(buf->data, (unchar *)tunnel_hdr, INT_H_SIZE);
2221 memcpy(buf->data + INT_H_SIZE, (unchar *)msg, length);
2222 dbg("%c->%c:", l_ptr->b_ptr->net_plane, tunnel->b_ptr->net_plane);
2223 msg_dbg(buf_msg(buf), ">SEND>");
2224 assert(tunnel);
2225 link_send_buf(tunnel, buf);
2226}
2227
2228
2229
2230/*
2231 * changeover(): Send whole message queue via the remaining link
2232 * Owner node is locked.
2233 */
2234
2235void link_changeover(struct link *l_ptr)
2236{
2237 u32 msgcount = l_ptr->out_queue_size;
2238 struct sk_buff *crs = l_ptr->first_out;
2239 struct link *tunnel = l_ptr->owner->active_links[0];
2240 int split_bundles = node_has_redundant_links(l_ptr->owner);
2241 struct tipc_msg tunnel_hdr;
2242
2243 if (!tunnel)
2244 return;
2245
2246 if (!l_ptr->owner->permit_changeover)
2247 return;
2248
2249 msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2250 ORIGINAL_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
2251 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2252 msg_set_msgcnt(&tunnel_hdr, msgcount);
2253 if (!l_ptr->first_out) {
2254 struct sk_buff *buf;
2255
2256 assert(!msgcount);
2257 buf = buf_acquire(INT_H_SIZE);
2258 if (buf) {
2259 memcpy(buf->data, (unchar *)&tunnel_hdr, INT_H_SIZE);
2260 msg_set_size(&tunnel_hdr, INT_H_SIZE);
2261 dbg("%c->%c:", l_ptr->b_ptr->net_plane,
2262 tunnel->b_ptr->net_plane);
2263 msg_dbg(&tunnel_hdr, "EMPTY>SEND>");
2264 link_send_buf(tunnel, buf);
2265 } else {
2266 warn("Memory squeeze; link changeover failed\n");
2267 }
2268 return;
2269 }
2270 while (crs) {
2271 struct tipc_msg *msg = buf_msg(crs);
2272
2273 if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
2274 u32 msgcount = msg_msgcnt(msg);
2275 struct tipc_msg *m = msg_get_wrapped(msg);
2276 unchar* pos = (unchar*)m;
2277
2278 while (msgcount--) {
2279 msg_set_seqno(m,msg_seqno(msg));
2280 link_tunnel(l_ptr, &tunnel_hdr, m,
2281 msg_link_selector(m));
2282 pos += align(msg_size(m));
2283 m = (struct tipc_msg *)pos;
2284 }
2285 } else {
2286 link_tunnel(l_ptr, &tunnel_hdr, msg,
2287 msg_link_selector(msg));
2288 }
2289 crs = crs->next;
2290 }
2291}
2292
2293void link_send_duplicate(struct link *l_ptr, struct link *tunnel)
2294{
2295 struct sk_buff *iter;
2296 struct tipc_msg tunnel_hdr;
2297
2298 msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2299 DUPLICATE_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
2300 msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
2301 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2302 iter = l_ptr->first_out;
2303 while (iter) {
2304 struct sk_buff *outbuf;
2305 struct tipc_msg *msg = buf_msg(iter);
2306 u32 length = msg_size(msg);
2307
2308 if (msg_user(msg) == MSG_BUNDLER)
2309 msg_set_type(msg, CLOSED_MSG);
2310 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */
2311 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
2312 msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
2313 outbuf = buf_acquire(length + INT_H_SIZE);
2314 if (outbuf == NULL) {
2315 warn("Memory squeeze; buffer duplication failed\n");
2316 return;
2317 }
2318 memcpy(outbuf->data, (unchar *)&tunnel_hdr, INT_H_SIZE);
2319 memcpy(outbuf->data + INT_H_SIZE, iter->data, length);
2320 dbg("%c->%c:", l_ptr->b_ptr->net_plane,
2321 tunnel->b_ptr->net_plane);
2322 msg_dbg(buf_msg(outbuf), ">SEND>");
2323 link_send_buf(tunnel, outbuf);
2324 if (!link_is_up(l_ptr))
2325 return;
2326 iter = iter->next;
2327 }
2328}
2329
2330
2331
2332/**
2333 * buf_extract - extracts embedded TIPC message from another message
2334 * @skb: encapsulating message buffer
2335 * @from_pos: offset to extract from
2336 *
2337 * Returns a new message buffer containing an embedded message. The
2338 * encapsulating message itself is left unchanged.
2339 */
2340
2341static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
2342{
2343 struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
2344 u32 size = msg_size(msg);
2345 struct sk_buff *eb;
2346
2347 eb = buf_acquire(size);
2348 if (eb)
2349 memcpy(eb->data, (unchar *)msg, size);
2350 return eb;
2351}
2352
2353/*
2354 * link_recv_changeover_msg(): Receive tunneled packet sent
2355 * via other link. Node is locked. Return extracted buffer.
2356 */
2357
2358static int link_recv_changeover_msg(struct link **l_ptr,
2359 struct sk_buff **buf)
2360{
2361 struct sk_buff *tunnel_buf = *buf;
2362 struct link *dest_link;
2363 struct tipc_msg *msg;
2364 struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
2365 u32 msg_typ = msg_type(tunnel_msg);
2366 u32 msg_count = msg_msgcnt(tunnel_msg);
2367
2368 dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
2369 assert(dest_link != *l_ptr);
2370 if (!dest_link) {
2371 msg_dbg(tunnel_msg, "NOLINK/<REC<");
2372 goto exit;
2373 }
2374 dbg("%c<-%c:", dest_link->b_ptr->net_plane,
2375 (*l_ptr)->b_ptr->net_plane);
2376 *l_ptr = dest_link;
2377 msg = msg_get_wrapped(tunnel_msg);
2378
2379 if (msg_typ == DUPLICATE_MSG) {
2380 if (less(msg_seqno(msg), mod(dest_link->next_in_no))) {
2381 msg_dbg(tunnel_msg, "DROP/<REC<");
2382 goto exit;
2383 }
2384 *buf = buf_extract(tunnel_buf,INT_H_SIZE);
2385 if (*buf == NULL) {
2386 warn("Memory squeeze; failed to extract msg\n");
2387 goto exit;
2388 }
2389 msg_dbg(tunnel_msg, "TNL<REC<");
2390 buf_discard(tunnel_buf);
2391 return 1;
2392 }
2393
2394 /* First original message ?: */
2395
2396 if (link_is_up(dest_link)) {
2397 msg_dbg(tunnel_msg, "UP/FIRST/<REC<");
2398 link_reset(dest_link);
2399 dest_link->exp_msg_count = msg_count;
2400 if (!msg_count)
2401 goto exit;
2402 } else if (dest_link->exp_msg_count == START_CHANGEOVER) {
2403 msg_dbg(tunnel_msg, "BLK/FIRST/<REC<");
2404 dest_link->exp_msg_count = msg_count;
2405 if (!msg_count)
2406 goto exit;
2407 }
2408
2409 /* Receive original message */
2410
2411 if (dest_link->exp_msg_count == 0) {
2412 msg_dbg(tunnel_msg, "OVERDUE/DROP/<REC<");
2413 dbg_print_link(dest_link, "LINK:");
2414 goto exit;
2415 }
2416 dest_link->exp_msg_count--;
2417 if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
2418 msg_dbg(tunnel_msg, "DROP/DUPL/<REC<");
2419 goto exit;
2420 } else {
2421 *buf = buf_extract(tunnel_buf, INT_H_SIZE);
2422 if (*buf != NULL) {
2423 msg_dbg(tunnel_msg, "TNL<REC<");
2424 buf_discard(tunnel_buf);
2425 return 1;
2426 } else {
2427 warn("Memory squeeze; dropped incoming msg\n");
2428 }
2429 }
2430exit:
2431 *buf = 0;
2432 buf_discard(tunnel_buf);
2433 return 0;
2434}
2435
2436/*
2437 * Bundler functionality:
2438 */
2439void link_recv_bundle(struct sk_buff *buf)
2440{
2441 u32 msgcount = msg_msgcnt(buf_msg(buf));
2442 u32 pos = INT_H_SIZE;
2443 struct sk_buff *obuf;
2444
2445 msg_dbg(buf_msg(buf), "<BNDL<: ");
2446 while (msgcount--) {
2447 obuf = buf_extract(buf, pos);
2448 if (obuf == NULL) {
2449 char addr_string[16];
2450
2451 warn("Buffer allocation failure;\n");
2452 warn(" incoming message(s) from %s lost\n",
2453 addr_string_fill(addr_string,
2454 msg_orignode(buf_msg(buf))));
2455 return;
2456 };
2457 pos += align(msg_size(buf_msg(obuf)));
2458 msg_dbg(buf_msg(obuf), " /");
2459 net_route_msg(obuf);
2460 }
2461 buf_discard(buf);
2462}
2463
2464/*
2465 * Fragmentation/defragmentation:
2466 */
2467
2468
2469/*
2470 * link_send_long_buf: Entry for buffers needing fragmentation.
2471 * The buffer is complete, inclusive total message length.
2472 * Returns user data length.
2473 */
int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
{
	struct tipc_msg *inmsg = buf_msg(buf);
	struct tipc_msg fragm_hdr;
	u32 insize = msg_size(inmsg);
	u32 dsz = msg_data_sz(inmsg);
	unchar *crs = buf->data;	/* read cursor into original msg */
	u32 rest = insize;		/* bytes still to be fragmented */
	u32 pack_sz = link_max_pkt(l_ptr);
	u32 fragm_sz = pack_sz - INT_H_SIZE;
	u32 fragm_no = 1;
	u32 destaddr = msg_destnode(inmsg);

	if (msg_short(inmsg))
		destaddr = l_ptr->addr;

	if (msg_routed(inmsg))
		msg_set_prevnode(inmsg, tipc_own_addr);

	/* Prepare reusable fragment header: */

	msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
		 TIPC_OK, INT_H_SIZE, destaddr);
	msg_set_link_selector(&fragm_hdr, msg_link_selector(inmsg));
	msg_set_long_msgno(&fragm_hdr, mod(l_ptr->long_msg_seq_no++));
	msg_set_fragm_no(&fragm_hdr, fragm_no);
	l_ptr->stats.sent_fragmented++;

	/* Chop up message: */

	/* Note: the entire original message including its header is
	 * fragmented; the receiver reassembles header and data alike. */
	while (rest > 0) {
		struct sk_buff *fragm;

		if (rest <= fragm_sz) {
			fragm_sz = rest;
			msg_set_type(&fragm_hdr, LAST_FRAGMENT);
		}
		fragm = buf_acquire(fragm_sz + INT_H_SIZE);
		if (fragm == NULL) {
			warn("Memory squeeze; failed to fragment msg\n");
			/* NOTE(review): dsz is u32 but is assigned -ENOMEM
			 * here and returned through an int — relies on the
			 * implicit conversion; confirm callers treat the
			 * return as signed. */
			dsz = -ENOMEM;
			goto exit;
		}
		msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
		memcpy(fragm->data, (unchar *)&fragm_hdr, INT_H_SIZE);
		memcpy(fragm->data + INT_H_SIZE, crs, fragm_sz);

		/* Send queued messages first, if any: */

		l_ptr->stats.sent_fragments++;
		link_send_buf(l_ptr, fragm);
		if (!link_is_up(l_ptr))
			return dsz;
		/* header is reused: bump fragment number for next round */
		msg_set_fragm_no(&fragm_hdr, ++fragm_no);
		rest -= fragm_sz;
		crs += fragm_sz;
		msg_set_type(&fragm_hdr, FRAGMENT);
	}
exit:
	buf_discard(buf);
	return dsz;
}
2536
2537/*
2538 * A pending message being re-assembled must store certain values
2539 * to handle subsequent fragments correctly. The following functions
2540 * help storing these values in unused, available fields in the
2541 * pending message. This makes dynamic memory allocation unecessary.
2542 */
2543
2544static inline u32 get_long_msg_seqno(struct sk_buff *buf)
2545{
2546 return msg_seqno(buf_msg(buf));
2547}
2548
2549static inline void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
2550{
2551 msg_set_seqno(buf_msg(buf), seqno);
2552}
2553
2554static inline u32 get_fragm_size(struct sk_buff *buf)
2555{
2556 return msg_ack(buf_msg(buf));
2557}
2558
2559static inline void set_fragm_size(struct sk_buff *buf, u32 sz)
2560{
2561 msg_set_ack(buf_msg(buf), sz);
2562}
2563
2564static inline u32 get_expected_frags(struct sk_buff *buf)
2565{
2566 return msg_bcast_ack(buf_msg(buf));
2567}
2568
2569static inline void set_expected_frags(struct sk_buff *buf, u32 exp)
2570{
2571 msg_set_bcast_ack(buf_msg(buf), exp);
2572}
2573
2574static inline u32 get_timer_cnt(struct sk_buff *buf)
2575{
2576 return msg_reroute_cnt(buf_msg(buf));
2577}
2578
/* Bump the staleness counter (see get_timer_cnt). */
static inline void incr_timer_cnt(struct sk_buff *buf)
{
	struct tipc_msg *hdr = buf_msg(buf);

	msg_incr_reroute_cnt(hdr);
}
2583
2584/*
2585 * link_recv_fragment(): Called with node lock on. Returns
2586 * the reassembled buffer if message is complete.
2587 */
2588int link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2589 struct tipc_msg **m)
2590{
2591 struct sk_buff *prev = 0;
2592 struct sk_buff *fbuf = *fb;
2593 struct tipc_msg *fragm = buf_msg(fbuf);
2594 struct sk_buff *pbuf = *pending;
2595 u32 long_msg_seq_no = msg_long_msgno(fragm);
2596
2597 *fb = 0;
2598 msg_dbg(fragm,"FRG<REC<");
2599
2600 /* Is there an incomplete message waiting for this fragment? */
2601
2602 while (pbuf && ((msg_seqno(buf_msg(pbuf)) != long_msg_seq_no)
2603 || (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
2604 prev = pbuf;
2605 pbuf = pbuf->next;
2606 }
2607
2608 if (!pbuf && (msg_type(fragm) == FIRST_FRAGMENT)) {
2609 struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm);
2610 u32 msg_sz = msg_size(imsg);
2611 u32 fragm_sz = msg_data_sz(fragm);
2612 u32 exp_fragm_cnt = msg_sz/fragm_sz + !!(msg_sz % fragm_sz);
2613 u32 max = TIPC_MAX_USER_MSG_SIZE + LONG_H_SIZE;
2614 if (msg_type(imsg) == TIPC_MCAST_MSG)
2615 max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
2616 if (msg_size(imsg) > max) {
2617 msg_dbg(fragm,"<REC<Oversized: ");
2618 buf_discard(fbuf);
2619 return 0;
2620 }
2621 pbuf = buf_acquire(msg_size(imsg));
2622 if (pbuf != NULL) {
2623 pbuf->next = *pending;
2624 *pending = pbuf;
2625 memcpy(pbuf->data, (unchar *)imsg, msg_data_sz(fragm));
2626
2627 /* Prepare buffer for subsequent fragments. */
2628
2629 set_long_msg_seqno(pbuf, long_msg_seq_no);
2630 set_fragm_size(pbuf,fragm_sz);
2631 set_expected_frags(pbuf,exp_fragm_cnt - 1);
2632 } else {
2633 warn("Memory squeeze; got no defragmenting buffer\n");
2634 }
2635 buf_discard(fbuf);
2636 return 0;
2637 } else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) {
2638 u32 dsz = msg_data_sz(fragm);
2639 u32 fsz = get_fragm_size(pbuf);
2640 u32 crs = ((msg_fragm_no(fragm) - 1) * fsz);
2641 u32 exp_frags = get_expected_frags(pbuf) - 1;
2642 memcpy(pbuf->data + crs, msg_data(fragm), dsz);
2643 buf_discard(fbuf);
2644
2645 /* Is message complete? */
2646
2647 if (exp_frags == 0) {
2648 if (prev)
2649 prev->next = pbuf->next;
2650 else
2651 *pending = pbuf->next;
2652 msg_reset_reroute_cnt(buf_msg(pbuf));
2653 *fb = pbuf;
2654 *m = buf_msg(pbuf);
2655 return 1;
2656 }
2657 set_expected_frags(pbuf,exp_frags);
2658 return 0;
2659 }
2660 dbg(" Discarding orphan fragment %x\n",fbuf);
2661 msg_dbg(fragm,"ORPHAN:");
2662 dbg("Pending long buffers:\n");
2663 dbg_print_buf_chain(*pending);
2664 buf_discard(fbuf);
2665 return 0;
2666}
2667
2668/**
2669 * link_check_defragm_bufs - flush stale incoming message fragments
2670 * @l_ptr: pointer to link
2671 */
2672
2673static void link_check_defragm_bufs(struct link *l_ptr)
2674{
2675 struct sk_buff *prev = 0;
2676 struct sk_buff *next = 0;
2677 struct sk_buff *buf = l_ptr->defragm_buf;
2678
2679 if (!buf)
2680 return;
2681 if (!link_working_working(l_ptr))
2682 return;
2683 while (buf) {
2684 u32 cnt = get_timer_cnt(buf);
2685
2686 next = buf->next;
2687 if (cnt < 4) {
2688 incr_timer_cnt(buf);
2689 prev = buf;
2690 } else {
2691 dbg(" Discarding incomplete long buffer\n");
2692 msg_dbg(buf_msg(buf), "LONG:");
2693 dbg_print_link(l_ptr, "curr:");
2694 dbg("Pending long buffers:\n");
2695 dbg_print_buf_chain(l_ptr->defragm_buf);
2696 if (prev)
2697 prev->next = buf->next;
2698 else
2699 l_ptr->defragm_buf = buf->next;
2700 buf_discard(buf);
2701 }
2702 buf = next;
2703 }
2704}
2705
2706
2707
2708static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
2709{
2710 l_ptr->tolerance = tolerance;
2711 l_ptr->continuity_interval =
2712 ((tolerance / 4) > 500) ? 500 : tolerance / 4;
2713 l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
2714}
2715
2716
2717void link_set_queue_limits(struct link *l_ptr, u32 window)
2718{
2719 /* Data messages from this node, inclusive FIRST_FRAGM */
2720 l_ptr->queue_limit[DATA_LOW] = window;
2721 l_ptr->queue_limit[DATA_MEDIUM] = (window / 3) * 4;
2722 l_ptr->queue_limit[DATA_HIGH] = (window / 3) * 5;
2723 l_ptr->queue_limit[DATA_CRITICAL] = (window / 3) * 6;
2724 /* Transiting data messages,inclusive FIRST_FRAGM */
2725 l_ptr->queue_limit[DATA_LOW + 4] = 300;
2726 l_ptr->queue_limit[DATA_MEDIUM + 4] = 600;
2727 l_ptr->queue_limit[DATA_HIGH + 4] = 900;
2728 l_ptr->queue_limit[DATA_CRITICAL + 4] = 1200;
2729 l_ptr->queue_limit[CONN_MANAGER] = 1200;
2730 l_ptr->queue_limit[ROUTE_DISTRIBUTOR] = 1200;
2731 l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
2732 l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
2733 /* FRAGMENT and LAST_FRAGMENT packets */
2734 l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
2735}
2736
2737/**
2738 * link_find_link - locate link by name
2739 * @name - ptr to link name string
2740 * @node - ptr to area to be filled with ptr to associated node
2741 *
2742 * Caller must hold 'net_lock' to ensure node and bearer are not deleted;
2743 * this also prevents link deletion.
2744 *
2745 * Returns pointer to link (or 0 if invalid link name).
2746 */
2747
2748static struct link *link_find_link(const char *name, struct node **node)
2749{
2750 struct link_name link_name_parts;
2751 struct bearer *b_ptr;
2752 struct link *l_ptr;
2753
2754 if (!link_name_validate(name, &link_name_parts))
2755 return 0;
2756
2757 b_ptr = bearer_find_interface(link_name_parts.if_local);
2758 if (!b_ptr)
2759 return 0;
2760
2761 *node = node_find(link_name_parts.addr_peer);
2762 if (!*node)
2763 return 0;
2764
2765 l_ptr = (*node)->links[b_ptr->identity];
2766 if (!l_ptr || strcmp(l_ptr->name, name))
2767 return 0;
2768
2769 return l_ptr;
2770}
2771
/*
 * Handle a TIPC_CMD_SET_LINK_{TOL,PRI,WINDOW} configuration request.
 * Returns a reply buffer (success, error string, or TLV error).
 */
struct sk_buff *link_cmd_config(const void *req_tlv_area, int req_tlv_space,
				u16 cmd)
{
	struct tipc_link_config *args;
	u32 new_value;
	struct link *l_ptr;
	struct node *node;
	int res;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
	new_value = ntohl(args->value);

	/* Broadcast link: only the window setting may be changed */
	if (!strcmp(args->name, bc_link_name)) {
		if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
		    (bclink_set_queue_limits(new_value) == 0))
			return cfg_reply_none();
		return cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
					      " (cannot change setting on broadcast link)");
	}

	/* net_lock (read) keeps node/bearer/link alive; node lock
	 * serializes against link state changes */
	read_lock_bh(&net_lock);
	l_ptr = link_find_link(args->name, &node);
	if (!l_ptr) {
		read_unlock_bh(&net_lock);
		return cfg_reply_error_string("link not found");
	}

	node_lock(node);
	res = -EINVAL;
	switch (cmd) {
	case TIPC_CMD_SET_LINK_TOL:
		if ((new_value >= TIPC_MIN_LINK_TOL) &&
		    (new_value <= TIPC_MAX_LINK_TOL)) {
			link_set_supervision_props(l_ptr, new_value);
			/* propagate the new value to the peer endpoint */
			link_send_proto_msg(l_ptr, STATE_MSG,
					    0, 0, new_value, 0, 0);
			res = TIPC_OK;
		}
		break;
	case TIPC_CMD_SET_LINK_PRI:
		if (new_value < TIPC_NUM_LINK_PRI) {
			l_ptr->priority = new_value;
			link_send_proto_msg(l_ptr, STATE_MSG,
					    0, 0, 0, new_value, 0);
			res = TIPC_OK;
		}
		break;
	case TIPC_CMD_SET_LINK_WINDOW:
		if ((new_value >= TIPC_MIN_LINK_WIN) &&
		    (new_value <= TIPC_MAX_LINK_WIN)) {
			link_set_queue_limits(l_ptr, new_value);
			res = TIPC_OK;
		}
		break;
	}
	node_unlock(node);

	read_unlock_bh(&net_lock);
	if (res)
		return cfg_reply_error_string("cannot change link setting");

	return cfg_reply_none();
}
2838
2839/**
2840 * link_reset_statistics - reset link statistics
2841 * @l_ptr: pointer to link
2842 */
2843
2844static void link_reset_statistics(struct link *l_ptr)
2845{
2846 memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
2847 l_ptr->stats.sent_info = l_ptr->next_out_no;
2848 l_ptr->stats.recv_info = l_ptr->next_in_no;
2849}
2850
2851struct sk_buff *link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
2852{
2853 char *link_name;
2854 struct link *l_ptr;
2855 struct node *node;
2856
2857 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2858 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2859
2860 link_name = (char *)TLV_DATA(req_tlv_area);
2861 if (!strcmp(link_name, bc_link_name)) {
2862 if (bclink_reset_stats())
2863 return cfg_reply_error_string("link not found");
2864 return cfg_reply_none();
2865 }
2866
2867 read_lock_bh(&net_lock);
2868 l_ptr = link_find_link(link_name, &node);
2869 if (!l_ptr) {
2870 read_unlock_bh(&net_lock);
2871 return cfg_reply_error_string("link not found");
2872 }
2873
2874 node_lock(node);
2875 link_reset_statistics(l_ptr);
2876 node_unlock(node);
2877 read_unlock_bh(&net_lock);
2878 return cfg_reply_none();
2879}
2880
2881/**
2882 * percent - convert count to a percentage of total (rounding up or down)
2883 */
2884
2885static u32 percent(u32 count, u32 total)
2886{
2887 return (count * 100 + (total / 2)) / total;
2888}
2889
2890/**
2891 * link_stats - print link statistics
2892 * @name: link name
2893 * @buf: print buffer area
2894 * @buf_size: size of print buffer area
2895 *
2896 * Returns length of print buffer data string (or 0 if error)
2897 */
2898
static int link_stats(const char *name, char *buf, const u32 buf_size)
{
	struct print_buf pb;
	struct link *l_ptr;
	struct node *node;
	char *status;
	u32 profile_total = 0;

	/* Broadcast pseudo-link statistics come from the bclink module */
	if (!strcmp(name, bc_link_name))
		return bclink_stats(buf, buf_size);

	printbuf_init(&pb, buf, buf_size);

	read_lock_bh(&net_lock);
	l_ptr = link_find_link(name, &node);
	if (!l_ptr) {
		read_unlock_bh(&net_lock);
		return 0;
	}
	node_lock(node);

	if (link_is_active(l_ptr))
		status = "ACTIVE";
	else if (link_is_up(l_ptr))
		status = "STANDBY";
	else
		status = "DEFUNCT";
	tipc_printf(&pb, "Link <%s>\n"
		    " %s MTU:%u Priority:%u Tolerance:%u ms"
		    " Window:%u packets\n",
		    l_ptr->name, status, link_max_pkt(l_ptr),
		    l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]);
	tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
		    l_ptr->next_in_no - l_ptr->stats.recv_info,
		    l_ptr->stats.recv_fragments,
		    l_ptr->stats.recv_fragmented,
		    l_ptr->stats.recv_bundles,
		    l_ptr->stats.recv_bundled);
	tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
		    l_ptr->next_out_no - l_ptr->stats.sent_info,
		    l_ptr->stats.sent_fragments,
		    l_ptr->stats.sent_fragmented,
		    l_ptr->stats.sent_bundles,
		    l_ptr->stats.sent_bundled);
	/* avoid divide-by-zero in the percentage calculations below */
	profile_total = l_ptr->stats.msg_length_counts;
	if (!profile_total)
		profile_total = 1;
	/* NOTE(review): "-16354" below is probably a typo for 16384, but
	 * it is user-visible output, so it is left untouched here. */
	tipc_printf(&pb, " TX profile sample:%u packets average:%u octets\n"
		    " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
		    "-16354:%u%% -32768:%u%% -66000:%u%%\n",
		    l_ptr->stats.msg_length_counts,
		    l_ptr->stats.msg_lengths_total / profile_total,
		    percent(l_ptr->stats.msg_length_profile[0], profile_total),
		    percent(l_ptr->stats.msg_length_profile[1], profile_total),
		    percent(l_ptr->stats.msg_length_profile[2], profile_total),
		    percent(l_ptr->stats.msg_length_profile[3], profile_total),
		    percent(l_ptr->stats.msg_length_profile[4], profile_total),
		    percent(l_ptr->stats.msg_length_profile[5], profile_total),
		    percent(l_ptr->stats.msg_length_profile[6], profile_total));
	tipc_printf(&pb, " RX states:%u probes:%u naks:%u defs:%u dups:%u\n",
		    l_ptr->stats.recv_states,
		    l_ptr->stats.recv_probes,
		    l_ptr->stats.recv_nacks,
		    l_ptr->stats.deferred_recv,
		    l_ptr->stats.duplicates);
	tipc_printf(&pb, " TX states:%u probes:%u naks:%u acks:%u dups:%u\n",
		    l_ptr->stats.sent_states,
		    l_ptr->stats.sent_probes,
		    l_ptr->stats.sent_nacks,
		    l_ptr->stats.sent_acks,
		    l_ptr->stats.retransmitted);
	tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n",
		    l_ptr->stats.bearer_congs,
		    l_ptr->stats.link_congs,
		    l_ptr->stats.max_queue_sz,
		    l_ptr->stats.queue_sz_counts
		    ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts)
		    : 0);

	node_unlock(node);
	read_unlock_bh(&net_lock);
	return printbuf_validate(&pb);
}
2982
2983#define MAX_LINK_STATS_INFO 2000
2984
2985struct sk_buff *link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
2986{
2987 struct sk_buff *buf;
2988 struct tlv_desc *rep_tlv;
2989 int str_len;
2990
2991 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2992 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2993
2994 buf = cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO));
2995 if (!buf)
2996 return NULL;
2997
2998 rep_tlv = (struct tlv_desc *)buf->data;
2999
3000 str_len = link_stats((char *)TLV_DATA(req_tlv_area),
3001 (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO);
3002 if (!str_len) {
3003 buf_discard(buf);
3004 return cfg_reply_error_string("link not found");
3005 }
3006
3007 skb_put(buf, TLV_SPACE(str_len));
3008 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
3009
3010 return buf;
3011}
3012
3013#if 0
/*
 * Delete, block, or unblock the named link.
 * NOTE: this function is currently compiled out (enclosed in #if 0).
 */
int link_control(const char *name, u32 op, u32 val)
{
	int res = -EINVAL;
	struct link *l_ptr;
	u32 bearer_id;
	struct node * node;
	u32 a;

	a = link_name2addr(name, &bearer_id);
	read_lock_bh(&net_lock);
	node = node_find(a);
	if (node) {
		node_lock(node);
		l_ptr = node->links[bearer_id];
		if (l_ptr) {
			if (op == TIPC_REMOVE_LINK) {
				/* bearer lock protects the bearer's link
				 * list while the link is being deleted */
				struct bearer *b_ptr = l_ptr->b_ptr;
				spin_lock_bh(&b_ptr->publ.lock);
				link_delete(l_ptr);
				spin_unlock_bh(&b_ptr->publ.lock);
			}
			if (op == TIPC_CMD_BLOCK_LINK) {
				link_reset(l_ptr);
				l_ptr->blocked = 1;
			}
			if (op == TIPC_CMD_UNBLOCK_LINK) {
				l_ptr->blocked = 0;
			}
			res = TIPC_OK;
		}
		node_unlock(node);
	}
	read_unlock_bh(&net_lock);
	return res;
}
3049#endif
3050
3051/**
3052 * link_get_max_pkt - get maximum packet size to use when sending to destination
3053 * @dest: network address of destination node
3054 * @selector: used to select from set of active links
3055 *
3056 * If no active link can be found, uses default maximum packet size.
3057 */
3058
3059u32 link_get_max_pkt(u32 dest, u32 selector)
3060{
3061 struct node *n_ptr;
3062 struct link *l_ptr;
3063 u32 res = MAX_PKT_DEFAULT;
3064
3065 if (dest == tipc_own_addr)
3066 return MAX_MSG_SIZE;
3067
3068 read_lock_bh(&net_lock);
3069 n_ptr = node_select(dest, selector);
3070 if (n_ptr) {
3071 node_lock(n_ptr);
3072 l_ptr = n_ptr->active_links[selector & 1];
3073 if (l_ptr)
3074 res = link_max_pkt(l_ptr);
3075 node_unlock(n_ptr);
3076 }
3077 read_unlock_bh(&net_lock);
3078 return res;
3079}
3080
3081#if 0
/*
 * Dump the deferred-reception queue for debugging.
 * NOTE: this function is currently compiled out (enclosed in #if 0).
 */
static void link_dump_rec_queue(struct link *l_ptr)
{
	struct sk_buff *crs;

	if (!l_ptr->oldest_deferred_in) {
		info("Reception queue empty\n");
		return;
	}
	info("Contents of Reception queue:\n");
	crs = l_ptr->oldest_deferred_in;
	while (crs) {
		/* 0x0000a3a3 appears to be a freed-buffer poison value used
		 * during debugging — TODO confirm where it is written */
		if (crs->data == (void *)0x0000a3a3) {
			info("buffer %x invalid\n", crs);
			return;
		}
		msg_dbg(buf_msg(crs), "In rec queue: \n");
		crs = crs->next;
	}
}
3101#endif
3102
3103static void link_dump_send_queue(struct link *l_ptr)
3104{
3105 if (l_ptr->next_out) {
3106 info("\nContents of unsent queue:\n");
3107 dbg_print_buf_chain(l_ptr->next_out);
3108 }
3109 info("\nContents of send queue:\n");
3110 if (l_ptr->first_out) {
3111 dbg_print_buf_chain(l_ptr->first_out);
3112 }
3113 info("Empty send queue\n");
3114}
3115
/* Print a one-line summary of link state (seqnos, queue spans, FSM
 * state flags) into the given print buffer, prefixed by str. */
static void link_print(struct link *l_ptr, struct print_buf *buf,
		       const char *str)
{
	tipc_printf(buf, str);
	if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr))
		return;
	tipc_printf(buf, "Link %x<%s>:",
		    l_ptr->addr, l_ptr->b_ptr->publ.name);
	tipc_printf(buf, ": NXO(%u):", mod(l_ptr->next_out_no));
	tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no));
	tipc_printf(buf, "SQUE");
	if (l_ptr->first_out) {
		tipc_printf(buf, "[%u..", msg_seqno(buf_msg(l_ptr->first_out)));
		if (l_ptr->next_out)
			tipc_printf(buf, "%u..",
				    msg_seqno(buf_msg(l_ptr->next_out)));
		/* NOTE(review): the format string has one %u but two extra
		 * arguments are passed (out_queue_size is never printed) —
		 * harmless with varargs, but likely unintended */
		tipc_printf(buf, "%u]",
			    msg_seqno(buf_msg
				      (l_ptr->last_out)), l_ptr->out_queue_size);
		/* sanity check: span of seqnos must match queue size and
		 * the last buffer must terminate the chain */
		if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) -
			 msg_seqno(buf_msg(l_ptr->first_out)))
		     != (l_ptr->out_queue_size - 1))
		    || (l_ptr->last_out->next != 0)) {
			tipc_printf(buf, "\nSend queue inconsistency\n");
			tipc_printf(buf, "first_out= %x ", l_ptr->first_out);
			tipc_printf(buf, "next_out= %x ", l_ptr->next_out);
			tipc_printf(buf, "last_out= %x ", l_ptr->last_out);
			link_dump_send_queue(l_ptr);
		}
	} else
		tipc_printf(buf, "[]");
	tipc_printf(buf, "SQSIZ(%u)", l_ptr->out_queue_size);
	if (l_ptr->oldest_deferred_in) {
		u32 o = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
		u32 n = msg_seqno(buf_msg(l_ptr->newest_deferred_in));
		tipc_printf(buf, ":RQUE[%u..%u]", o, n);
		if (l_ptr->deferred_inqueue_sz != mod((n + 1) - o)) {
			tipc_printf(buf, ":RQSIZ(%u)",
				    l_ptr->deferred_inqueue_sz);
		}
	}
	/* link FSM state flags: Working/Reset x Working/Unknown/Reset */
	if (link_working_unknown(l_ptr))
		tipc_printf(buf, ":WU");
	if (link_reset_reset(l_ptr))
		tipc_printf(buf, ":RR");
	if (link_reset_unknown(l_ptr))
		tipc_printf(buf, ":RU");
	if (link_working_working(l_ptr))
		tipc_printf(buf, ":WW");
	tipc_printf(buf, "\n");
}
3167
diff --git a/net/tipc/link.h b/net/tipc/link.h
new file mode 100644
index 000000000000..c2553f073757
--- /dev/null
+++ b/net/tipc/link.h
@@ -0,0 +1,296 @@
1/*
2 * net/tipc/link.h: Include file for TIPC link code
3 *
4 * Copyright (c) 1995-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_LINK_H
38#define _TIPC_LINK_H
39
40#include "dbg.h"
41#include "msg.h"
42#include "bearer.h"
43#include "node.h"
44
45#define PUSH_FAILED 1
46#define PUSH_FINISHED 2
47
48/*
49 * Link states
50 */
51
52#define WORKING_WORKING 560810u
53#define WORKING_UNKNOWN 560811u
54#define RESET_UNKNOWN 560812u
55#define RESET_RESET 560813u
56
57/*
58 * Starting value for maximum packet size negotiation on unicast links
59 * (unless bearer MTU is less)
60 */
61
62#define MAX_PKT_DEFAULT 1500
63
64/**
65 * struct link - TIPC link data structure
66 * @addr: network address of link's peer node
67 * @name: link name character string
68 * @media_addr: media address to use when sending messages over link
69 * @timer: link timer
70 * @owner: pointer to peer node
71 * @link_list: adjacent links in bearer's list of links
72 * @started: indicates if link has been started
73 * @checkpoint: reference point for triggering link continuity checking
74 * @peer_session: link session # being used by peer end of link
75 * @peer_bearer_id: bearer id used by link's peer endpoint
76 * @b_ptr: pointer to bearer used by link
77 * @tolerance: minimum link continuity loss needed to reset link [in ms]
78 * @continuity_interval: link continuity testing interval [in ms]
79 * @abort_limit: # of unacknowledged continuity probes needed to reset link
80 * @state: current state of link FSM
81 * @blocked: indicates if link has been administratively blocked
82 * @fsm_msg_cnt: # of protocol messages link FSM has sent in current state
83 * @proto_msg: template for control messages generated by link
84 * @pmsg: convenience pointer to "proto_msg" field
85 * @priority: current link priority
86 * @queue_limit: outbound message queue congestion thresholds (indexed by user)
87 * @exp_msg_count: # of tunnelled messages expected during link changeover
88 * @reset_checkpoint: seq # of last acknowledged message at time of link reset
89 * @max_pkt: current maximum packet size for this link
90 * @max_pkt_target: desired maximum packet size for this link
91 * @max_pkt_probes: # of probes based on current (max_pkt, max_pkt_target)
92 * @out_queue_size: # of messages in outbound message queue
93 * @first_out: ptr to first outbound message in queue
94 * @last_out: ptr to last outbound message in queue
95 * @next_out_no: next sequence number to use for outbound messages
96 * @last_retransmitted: sequence number of most recently retransmitted message
97 * @stale_count: # of identical retransmit requests made by peer
98 * @next_in_no: next sequence number to expect for inbound messages
99 * @deferred_inqueue_sz: # of messages in inbound message queue
100 * @oldest_deferred_in: ptr to first inbound message in queue
101 * @newest_deferred_in: ptr to last inbound message in queue
102 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
103 * @proto_msg_queue: ptr to (single) outbound control message
104 * @retransm_queue_size: number of messages to retransmit
105 * @retransm_queue_head: sequence number of first message to retransmit
106 * @next_out: ptr to first unsent outbound message in queue
107 * @waiting_ports: linked list of ports waiting for link congestion to abate
108 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
109 * @defragm_buf: list of partially reassembled inbound message fragments
110 * @stats: collects statistics regarding link activity
111 * @print_buf: print buffer used to log link activity
112 */
113
/* Per-endpoint link instance; each field is described by the kernel-doc
 * comment immediately above this definition.
 */
struct link {
	u32 addr;
	char name[TIPC_MAX_LINK_NAME];
	struct tipc_media_addr media_addr;
	struct timer_list timer;
	struct node *owner;
	struct list_head link_list;

	/* Management and link supervision data */
	int started;
	u32 checkpoint;
	u32 peer_session;
	u32 peer_bearer_id;
	struct bearer *b_ptr;
	u32 tolerance;
	u32 continuity_interval;
	u32 abort_limit;
	int state;		/* one of WORKING_*/RESET_* FSM states */
	int blocked;
	u32 fsm_msg_cnt;
	/* Pre-built template for outgoing protocol messages; pmsg aliases it */
	struct {
		unchar hdr[INT_H_SIZE];
		unchar body[TIPC_MAX_IF_NAME];
	} proto_msg;
	struct tipc_msg *pmsg;
	u32 priority;
	u32 queue_limit[15];	/* queue_limit[0]==window limit */

	/* Changeover */
	u32 exp_msg_count;
	u32 reset_checkpoint;

	/* Max packet negotiation */
	u32 max_pkt;
	u32 max_pkt_target;
	u32 max_pkt_probes;

	/* Sending */
	u32 out_queue_size;
	struct sk_buff *first_out;
	struct sk_buff *last_out;
	u32 next_out_no;
	u32 last_retransmitted;
	u32 stale_count;

	/* Reception */
	u32 next_in_no;
	u32 deferred_inqueue_sz;
	struct sk_buff *oldest_deferred_in;
	struct sk_buff *newest_deferred_in;
	u32 unacked_window;

	/* Congestion handling */
	struct sk_buff *proto_msg_queue;
	u32 retransm_queue_size;
	u32 retransm_queue_head;
	struct sk_buff *next_out;
	struct list_head waiting_ports;

	/* Fragmentation/defragmentation */
	u32 long_msg_seq_no;
	struct sk_buff *defragm_buf;

	/* Statistics */
	struct {
		u32 sent_info;		/* used in counting # sent packets */
		u32 recv_info;		/* used in counting # recv'd packets */
		u32 sent_states;
		u32 recv_states;
		u32 sent_probes;
		u32 recv_probes;
		u32 sent_nacks;
		u32 recv_nacks;
		u32 sent_acks;
		u32 sent_bundled;
		u32 sent_bundles;
		u32 recv_bundled;
		u32 recv_bundles;
		u32 retransmitted;
		u32 sent_fragmented;
		u32 sent_fragments;
		u32 recv_fragmented;
		u32 recv_fragments;
		u32 link_congs;		/* # port sends blocked by congestion */
		u32 bearer_congs;
		u32 deferred_recv;
		u32 duplicates;

		/* for statistical profiling of send queue size */

		u32 max_queue_sz;
		u32 accu_queue_sz;
		u32 queue_sz_counts;

		/* for statistical profiling of message lengths */

		u32 msg_length_counts;
		u32 msg_lengths_total;
		u32 msg_length_profile[7];
#if 0
		/* tunnelling counters compiled out in this version */
		u32 sent_tunneled;
		u32 recv_tunneled;
#endif
	} stats;

	struct print_buf print_buf;
};
221
222struct port;
223
224struct link *link_create(struct bearer *b_ptr, const u32 peer,
225 const struct tipc_media_addr *media_addr);
226void link_delete(struct link *l_ptr);
227void link_changeover(struct link *l_ptr);
228void link_send_duplicate(struct link *l_ptr, struct link *dest);
229void link_reset_fragments(struct link *l_ptr);
230int link_is_up(struct link *l_ptr);
231int link_is_active(struct link *l_ptr);
232void link_start(struct link *l_ptr);
233u32 link_push_packet(struct link *l_ptr);
234void link_stop(struct link *l_ptr);
235struct sk_buff *link_cmd_config(const void *req_tlv_area, int req_tlv_space, u16 cmd);
236struct sk_buff *link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space);
237struct sk_buff *link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space);
238void link_reset(struct link *l_ptr);
239int link_send(struct sk_buff *buf, u32 dest, u32 selector);
240int link_send_buf(struct link *l_ptr, struct sk_buff *buf);
241u32 link_get_max_pkt(u32 dest,u32 selector);
242int link_send_sections_fast(struct port* sender,
243 struct iovec const *msg_sect,
244 const u32 num_sect,
245 u32 destnode);
246
247int link_send_long_buf(struct link *l_ptr, struct sk_buff *buf);
248void link_tunnel(struct link *l_ptr, struct tipc_msg *tnl_hdr,
249 struct tipc_msg *msg, u32 selector);
250void link_recv_bundle(struct sk_buff *buf);
251int link_recv_fragment(struct sk_buff **pending,
252 struct sk_buff **fb,
253 struct tipc_msg **msg);
254void link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int prob, u32 gap,
255 u32 tolerance, u32 priority, u32 acked_mtu);
256void link_push_queue(struct link *l_ptr);
257u32 link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
258 struct sk_buff *buf);
259void link_wakeup_ports(struct link *l_ptr, int all);
260void link_set_queue_limits(struct link *l_ptr, u32 window);
261void link_retransmit(struct link *l_ptr, struct sk_buff *start, u32 retransmits);
262
263/*
264 * Link sequence number manipulation routines (uses modulo 2**16 arithmetic)
265 */
266
267static inline u32 mod(u32 x)
268{
269 return x & 0xffffu;
270}
271
272static inline int between(u32 lower, u32 upper, u32 n)
273{
274 if ((lower < n) && (n < upper))
275 return 1;
276 if ((upper < lower) && ((n > lower) || (n < upper)))
277 return 1;
278 return 0;
279}
280
281static inline int less_eq(u32 left, u32 right)
282{
283 return (mod(right - left) < 32768u);
284}
285
286static inline int less(u32 left, u32 right)
287{
288 return (less_eq(left, right) && (mod(right) != mod(left)));
289}
290
291static inline u32 lesser(u32 left, u32 right)
292{
293 return less_eq(left, right) ? left : right;
294}
295
296#endif
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
new file mode 100644
index 000000000000..03dbc55cb04c
--- /dev/null
+++ b/net/tipc/msg.c
@@ -0,0 +1,334 @@
1/*
2 * net/tipc/msg.c: TIPC message header routines
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "addr.h"
39#include "dbg.h"
40#include "msg.h"
41#include "bearer.h"
42
43
44void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
45{
46 memcpy(&((int *)m)[5], a, sizeof(*a));
47}
48
49void msg_get_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
50{
51 memcpy(a, &((int*)m)[5], sizeof(*a));
52}
53
54
/**
 * msg_print - append a human-readable decode of a message header to a buffer
 * @buf: print buffer receiving the output
 * @msg: message whose header is decoded
 * @str: caller-supplied tag emitted first
 *
 * NOTE(review): @str is used as the tipc_printf() format argument, so it
 * must not contain '%' — confirm all callers pass plain literals.
 * Recurses (one level per wrapping) for tunnelled and first-fragment
 * messages to decode the encapsulated header.
 */
void msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str)
{
	u32 usr = msg_user(msg);
	tipc_printf(buf, str);

	/* Stage 1: user-specific decode of the type field and counters */
	switch (usr) {
	case MSG_BUNDLER:
		tipc_printf(buf, "BNDL::");
		tipc_printf(buf, "MSGS(%u):", msg_msgcnt(msg));
		break;
	case BCAST_PROTOCOL:
		tipc_printf(buf, "BCASTP::");
		break;
	case MSG_FRAGMENTER:
		tipc_printf(buf, "FRAGM::");
		switch (msg_type(msg)) {
		case FIRST_FRAGMENT:
			tipc_printf(buf, "FIRST:");
			break;
		case FRAGMENT:
			tipc_printf(buf, "BODY:");
			break;
		case LAST_FRAGMENT:
			tipc_printf(buf, "LAST:");
			break;
		default:
			tipc_printf(buf, "UNKNOWN:%x",msg_type(msg));

		}
		tipc_printf(buf, "NO(%u/%u):",msg_long_msgno(msg),
			    msg_fragm_no(msg));
		break;
	case DATA_LOW:
	case DATA_MEDIUM:
	case DATA_HIGH:
	case DATA_CRITICAL:
		tipc_printf(buf, "DAT%u:", msg_user(msg));
		/* Short-header data messages are connection messages by
		 * definition and carry no type field worth decoding. */
		if (msg_short(msg)) {
			tipc_printf(buf, "CON:");
			break;
		}
		switch (msg_type(msg)) {
		case TIPC_CONN_MSG:
			tipc_printf(buf, "CON:");
			break;
		case TIPC_MCAST_MSG:
			tipc_printf(buf, "MCST:");
			break;
		case TIPC_NAMED_MSG:
			tipc_printf(buf, "NAM:");
			break;
		case TIPC_DIRECT_MSG:
			tipc_printf(buf, "DIR:");
			break;
		default:
			tipc_printf(buf, "UNKNOWN TYPE %u",msg_type(msg));
		}
		if (msg_routed(msg) && !msg_non_seq(msg))
			tipc_printf(buf, "ROUT:");
		if (msg_reroute_cnt(msg))
			tipc_printf(buf, "REROUTED(%u):",
				    msg_reroute_cnt(msg));
		break;
	case NAME_DISTRIBUTOR:
		tipc_printf(buf, "NMD::");
		switch (msg_type(msg)) {
		case PUBLICATION:
			tipc_printf(buf, "PUBL(%u):", (msg_size(msg) - msg_hdr_sz(msg)) / 20);	/* Items */
			break;
		case WITHDRAWAL:
			tipc_printf(buf, "WDRW:");
			break;
		default:
			tipc_printf(buf, "UNKNOWN:%x",msg_type(msg));
		}
		if (msg_routed(msg))
			tipc_printf(buf, "ROUT:");
		if (msg_reroute_cnt(msg))
			tipc_printf(buf, "REROUTED(%u):",
				    msg_reroute_cnt(msg));
		break;
	case CONN_MANAGER:
		tipc_printf(buf, "CONN_MNG:");
		switch (msg_type(msg)) {
		case CONN_PROBE:
			tipc_printf(buf, "PROBE:");
			break;
		case CONN_PROBE_REPLY:
			tipc_printf(buf, "PROBE_REPLY:");
			break;
		case CONN_ACK:
			tipc_printf(buf, "CONN_ACK:");
			tipc_printf(buf, "ACK(%u):",msg_msgcnt(msg));
			break;
		default:
			tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
		}
		if (msg_routed(msg))
			tipc_printf(buf, "ROUT:");
		if (msg_reroute_cnt(msg))
			tipc_printf(buf, "REROUTED(%u):",msg_reroute_cnt(msg));
		break;
	case LINK_PROTOCOL:
		tipc_printf(buf, "PROT:TIM(%u):",msg_timestamp(msg));
		switch (msg_type(msg)) {
		case STATE_MSG:
			tipc_printf(buf, "STATE:");
			tipc_printf(buf, "%s:",msg_probe(msg) ? "PRB" :"");
			tipc_printf(buf, "NXS(%u):",msg_next_sent(msg));
			tipc_printf(buf, "GAP(%u):",msg_seq_gap(msg));
			tipc_printf(buf, "LSTBC(%u):",msg_last_bcast(msg));
			break;
		case RESET_MSG:
			tipc_printf(buf, "RESET:");
			/* RESET messages may carry the bearer name as data */
			if (msg_size(msg) != msg_hdr_sz(msg))
				tipc_printf(buf, "BEAR:%s:",msg_data(msg));
			break;
		case ACTIVATE_MSG:
			tipc_printf(buf, "ACTIVATE:");
			break;
		default:
			tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
		}
		tipc_printf(buf, "PLANE(%c):",msg_net_plane(msg));
		tipc_printf(buf, "SESS(%u):",msg_session(msg));
		break;
	case CHANGEOVER_PROTOCOL:
		tipc_printf(buf, "TUNL:");
		switch (msg_type(msg)) {
		case DUPLICATE_MSG:
			tipc_printf(buf, "DUPL:");
			break;
		case ORIGINAL_MSG:
			tipc_printf(buf, "ORIG:");
			tipc_printf(buf, "EXP(%u)",msg_msgcnt(msg));
			break;
		default:
			tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
		}
		break;
	case ROUTE_DISTRIBUTOR:
		tipc_printf(buf, "ROUTING_MNG:");
		switch (msg_type(msg)) {
		case EXT_ROUTING_TABLE:
			tipc_printf(buf, "EXT_TBL:");
			tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
			break;
		case LOCAL_ROUTING_TABLE:
			tipc_printf(buf, "LOCAL_TBL:");
			tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
			break;
		case SLAVE_ROUTING_TABLE:
			tipc_printf(buf, "DP_TBL:");
			tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
			break;
		case ROUTE_ADDITION:
			tipc_printf(buf, "ADD:");
			tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
			break;
		case ROUTE_REMOVAL:
			tipc_printf(buf, "REMOVE:");
			tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
			break;
		default:
			tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
		}
		break;
	case LINK_CONFIG:
		tipc_printf(buf, "CFG:");
		switch (msg_type(msg)) {
		case DSC_REQ_MSG:
			tipc_printf(buf, "DSC_REQ:");
			break;
		case DSC_RESP_MSG:
			tipc_printf(buf, "DSC_RESP:");
			break;
		default:
			tipc_printf(buf, "UNKNOWN TYPE:%x:",msg_type(msg));
			break;
		}
		break;
	default:
		tipc_printf(buf, "UNKNOWN USER:");
	}

	/* Stage 2: error code decode for users whose header carries one */
	switch (usr) {
	case CONN_MANAGER:
	case NAME_DISTRIBUTOR:
	case DATA_LOW:
	case DATA_MEDIUM:
	case DATA_HIGH:
	case DATA_CRITICAL:
		if (msg_short(msg))
			break;	/* No error */
		switch (msg_errcode(msg)) {
		case TIPC_OK:
			break;
		case TIPC_ERR_NO_NAME:
			tipc_printf(buf, "NO_NAME:");
			break;
		case TIPC_ERR_NO_PORT:
			tipc_printf(buf, "NO_PORT:");
			break;
		case TIPC_ERR_NO_NODE:
			tipc_printf(buf, "NO_PROC:");
			break;
		case TIPC_ERR_OVERLOAD:
			tipc_printf(buf, "OVERLOAD:");
			break;
		case TIPC_CONN_SHUTDOWN:
			tipc_printf(buf, "SHUTDOWN:");
			break;
		default:
			tipc_printf(buf, "UNKNOWN ERROR(%x):",
				    msg_errcode(msg));
		}
	default:{}	/* deliberate fallthrough above; no action for others */
	}

	/* Stage 3: fields common to (nearly) all headers */
	tipc_printf(buf, "HZ(%u):", msg_hdr_sz(msg));
	tipc_printf(buf, "SZ(%u):", msg_size(msg));
	tipc_printf(buf, "SQNO(%u):", msg_seqno(msg));

	if (msg_non_seq(msg))
		tipc_printf(buf, "NOSEQ:");
	else {
		tipc_printf(buf, "ACK(%u):", msg_ack(msg));
	}
	tipc_printf(buf, "BACK(%u):", msg_bcast_ack(msg));
	tipc_printf(buf, "PRND(%x)", msg_prevnode(msg));

	if (msg_isdata(msg)) {
		if (msg_named(msg)) {
			tipc_printf(buf, "NTYP(%u):", msg_nametype(msg));
			tipc_printf(buf, "NINST(%u)", msg_nameinst(msg));
		}
	}

	/* Origin/destination endpoints (not meaningful for link-level users) */
	if ((usr != LINK_PROTOCOL) && (usr != LINK_CONFIG) &&
	    (usr != MSG_BUNDLER)) {
		if (!msg_short(msg)) {
			tipc_printf(buf, ":ORIG(%x:%u):",
				    msg_orignode(msg), msg_origport(msg));
			tipc_printf(buf, ":DEST(%x:%u):",
				    msg_destnode(msg), msg_destport(msg));
		} else {
			tipc_printf(buf, ":OPRT(%u):", msg_origport(msg));
			tipc_printf(buf, ":DPRT(%u):", msg_destport(msg));
		}
		if (msg_routed(msg) && !msg_non_seq(msg))
			tipc_printf(buf, ":TSEQN(%u)", msg_transp_seqno(msg));
	}
	if (msg_user(msg) == NAME_DISTRIBUTOR) {
		tipc_printf(buf, ":ONOD(%x):", msg_orignode(msg));
		tipc_printf(buf, ":DNOD(%x):", msg_destnode(msg));
		if (msg_routed(msg)) {
			tipc_printf(buf, ":CSEQN(%u)", msg_transp_seqno(msg));
		}
	}

	if (msg_user(msg) == LINK_CONFIG) {
		/* Discovery messages embed the originating media address at
		 * header word 5 (see msg_set_media_addr()). */
		u32* raw = (u32*)msg;
		struct tipc_media_addr* orig = (struct tipc_media_addr*)&raw[5];
		tipc_printf(buf, ":REQL(%u):", msg_req_links(msg));
		tipc_printf(buf, ":DDOM(%x):", msg_dest_domain(msg));
		tipc_printf(buf, ":NETID(%u):", msg_bc_netid(msg));
		media_addr_printf(buf, orig);
	}
	if (msg_user(msg) == BCAST_PROTOCOL) {
		tipc_printf(buf, "BCNACK:AFTER(%u):", msg_bcgap_after(msg));
		tipc_printf(buf, "TO(%u):", msg_bcgap_to(msg));
	}
	tipc_printf(buf, "\n");
	/* Recursively decode the encapsulated header, if any */
	if ((usr == CHANGEOVER_PROTOCOL) && (msg_msgcnt(msg))) {
		msg_print(buf,msg_get_wrapped(msg)," /");
	}
	if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT)) {
		msg_print(buf,msg_get_wrapped(msg)," /");
	}
}
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
new file mode 100644
index 000000000000..662c81862a0c
--- /dev/null
+++ b/net/tipc/msg.h
@@ -0,0 +1,818 @@
1/*
2 * net/tipc/msg.h: Include file for TIPC message header routines
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_MSG_H
38#define _TIPC_MSG_H
39
40#include <net/tipc/tipc_msg.h>
41
42#define TIPC_VERSION 2
43#define DATA_LOW TIPC_LOW_IMPORTANCE
44#define DATA_MEDIUM TIPC_MEDIUM_IMPORTANCE
45#define DATA_HIGH TIPC_HIGH_IMPORTANCE
46#define DATA_CRITICAL TIPC_CRITICAL_IMPORTANCE
47#define SHORT_H_SIZE 24 /* Connected,in cluster */
48#define DIR_MSG_H_SIZE 32 /* Directly addressed messages */
49#define CONN_MSG_H_SIZE 36 /* Routed connected msgs*/
50#define LONG_H_SIZE 40 /* Named Messages */
51#define MCAST_H_SIZE 44 /* Multicast messages */
52#define MAX_H_SIZE 60 /* Inclusive full options */
53#define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE)
54#define LINK_CONFIG 13
55
56
57/*
58 TIPC user data message header format, version 2
59
60 - Fundamental definitions available to privileged TIPC users
61 are located in tipc_msg.h.
62 - Remaining definitions available to TIPC internal users appear below.
63*/
64
65
/* Store header word w in network byte order. */
static inline void msg_set_word(struct tipc_msg *m, u32 w, u32 val)
{
	m->hdr[w] = htonl(val);
}
70
71static inline void msg_set_bits(struct tipc_msg *m, u32 w,
72 u32 pos, u32 mask, u32 val)
73{
74 u32 word = msg_word(m,w) & ~(mask << pos);
75 msg_set_word(m, w, (word |= (val << pos)));
76}
77
78/*
79 * Word 0
80 */
81
82static inline u32 msg_version(struct tipc_msg *m)
83{
84 return msg_bits(m, 0, 29, 7);
85}
86
87static inline void msg_set_version(struct tipc_msg *m)
88{
89 msg_set_bits(m, 0, 29, 0xf, TIPC_VERSION);
90}
91
92static inline u32 msg_user(struct tipc_msg *m)
93{
94 return msg_bits(m, 0, 25, 0xf);
95}
96
97static inline u32 msg_isdata(struct tipc_msg *m)
98{
99 return (msg_user(m) <= DATA_CRITICAL);
100}
101
102static inline void msg_set_user(struct tipc_msg *m, u32 n)
103{
104 msg_set_bits(m, 0, 25, 0xf, n);
105}
106
107static inline void msg_set_importance(struct tipc_msg *m, u32 i)
108{
109 msg_set_user(m, i);
110}
111
112static inline void msg_set_hdr_sz(struct tipc_msg *m,u32 n)
113{
114 msg_set_bits(m, 0, 21, 0xf, n>>2);
115}
116
117static inline int msg_non_seq(struct tipc_msg *m)
118{
119 return msg_bits(m, 0, 20, 1);
120}
121
122static inline void msg_set_non_seq(struct tipc_msg *m)
123{
124 msg_set_bits(m, 0, 20, 1, 1);
125}
126
127static inline int msg_dest_droppable(struct tipc_msg *m)
128{
129 return msg_bits(m, 0, 19, 1);
130}
131
132static inline void msg_set_dest_droppable(struct tipc_msg *m, u32 d)
133{
134 msg_set_bits(m, 0, 19, 1, d);
135}
136
137static inline int msg_src_droppable(struct tipc_msg *m)
138{
139 return msg_bits(m, 0, 18, 1);
140}
141
142static inline void msg_set_src_droppable(struct tipc_msg *m, u32 d)
143{
144 msg_set_bits(m, 0, 18, 1, d);
145}
146
147static inline void msg_set_size(struct tipc_msg *m, u32 sz)
148{
149 m->hdr[0] = htonl((msg_word(m, 0) & ~0x1ffff) | sz);
150}
151
152
153/*
154 * Word 1
155 */
156
157static inline void msg_set_type(struct tipc_msg *m, u32 n)
158{
159 msg_set_bits(m, 1, 29, 0x7, n);
160}
161
162static inline void msg_set_errcode(struct tipc_msg *m, u32 err)
163{
164 msg_set_bits(m, 1, 25, 0xf, err);
165}
166
167static inline u32 msg_reroute_cnt(struct tipc_msg *m)
168{
169 return msg_bits(m, 1, 21, 0xf);
170}
171
172static inline void msg_incr_reroute_cnt(struct tipc_msg *m)
173{
174 msg_set_bits(m, 1, 21, 0xf, msg_reroute_cnt(m) + 1);
175}
176
177static inline void msg_reset_reroute_cnt(struct tipc_msg *m)
178{
179 msg_set_bits(m, 1, 21, 0xf, 0);
180}
181
182static inline u32 msg_lookup_scope(struct tipc_msg *m)
183{
184 return msg_bits(m, 1, 19, 0x3);
185}
186
187static inline void msg_set_lookup_scope(struct tipc_msg *m, u32 n)
188{
189 msg_set_bits(m, 1, 19, 0x3, n);
190}
191
/* Append sz bytes of option data immediately after the current header and
 * grow the header size to cover it.  Silently does nothing if the header is
 * too small to take options (< DIR_MSG_H_SIZE) or would exceed MAX_H_SIZE.
 * NOTE(review): "to" must be computed from the OLD header size before
 * msg_set_hdr_sz() runs — the statement order below is load-bearing.
 */
static inline void msg_set_options(struct tipc_msg *m, const char *opt, u32 sz)
{
	u32 hsz = msg_hdr_sz(m);
	char *to = (char *)&m->hdr[hsz/4];

	if ((hsz < DIR_MSG_H_SIZE) || ((hsz + sz) > MAX_H_SIZE))
		return;
	/* options-position field: offset past the 28-byte base, in words */
	msg_set_bits(m, 1, 16, 0x7, (hsz - 28)/4);
	msg_set_hdr_sz(m, hsz + sz);
	memcpy(to, opt, sz);
}
203
204static inline u32 msg_bcast_ack(struct tipc_msg *m)
205{
206 return msg_bits(m, 1, 0, 0xffff);
207}
208
209static inline void msg_set_bcast_ack(struct tipc_msg *m, u32 n)
210{
211 msg_set_bits(m, 1, 0, 0xffff, n);
212}
213
214
215/*
216 * Word 2
217 */
218
219static inline u32 msg_ack(struct tipc_msg *m)
220{
221 return msg_bits(m, 2, 16, 0xffff);
222}
223
224static inline void msg_set_ack(struct tipc_msg *m, u32 n)
225{
226 msg_set_bits(m, 2, 16, 0xffff, n);
227}
228
229static inline u32 msg_seqno(struct tipc_msg *m)
230{
231 return msg_bits(m, 2, 0, 0xffff);
232}
233
234static inline void msg_set_seqno(struct tipc_msg *m, u32 n)
235{
236 msg_set_bits(m, 2, 0, 0xffff, n);
237}
238
239
240/*
241 * Words 3-10
242 */
243
244
245static inline void msg_set_prevnode(struct tipc_msg *m, u32 a)
246{
247 msg_set_word(m, 3, a);
248}
249
250static inline void msg_set_origport(struct tipc_msg *m, u32 p)
251{
252 msg_set_word(m, 4, p);
253}
254
255static inline void msg_set_destport(struct tipc_msg *m, u32 p)
256{
257 msg_set_word(m, 5, p);
258}
259
260static inline void msg_set_mc_netid(struct tipc_msg *m, u32 p)
261{
262 msg_set_word(m, 5, p);
263}
264
265static inline void msg_set_orignode(struct tipc_msg *m, u32 a)
266{
267 msg_set_word(m, 6, a);
268}
269
270static inline void msg_set_destnode(struct tipc_msg *m, u32 a)
271{
272 msg_set_word(m, 7, a);
273}
274
275static inline int msg_is_dest(struct tipc_msg *m, u32 d)
276{
277 return(msg_short(m) || (msg_destnode(m) == d));
278}
279
/* Non-zero when the message must be routed, i.e. origin and destination
 * differ above bit 11 of the network address.
 * NOTE(review): the >>11 presumably strips the node part so only
 * zone/cluster differences count — confirm against the addr.h layout.
 */
static inline u32 msg_routed(struct tipc_msg *m)
{
	if (likely(msg_short(m)))
		return 0;
	return(msg_destnode(m) ^ msg_orignode(m)) >> 11;
}
286
287static inline void msg_set_nametype(struct tipc_msg *m, u32 n)
288{
289 msg_set_word(m, 8, n);
290}
291
292static inline u32 msg_transp_seqno(struct tipc_msg *m)
293{
294 return msg_word(m, 8);
295}
296
297static inline void msg_set_timestamp(struct tipc_msg *m, u32 n)
298{
299 msg_set_word(m, 8, n);
300}
301
302static inline u32 msg_timestamp(struct tipc_msg *m)
303{
304 return msg_word(m, 8);
305}
306
307static inline void msg_set_transp_seqno(struct tipc_msg *m, u32 n)
308{
309 msg_set_word(m, 8, n);
310}
311
312static inline void msg_set_namelower(struct tipc_msg *m, u32 n)
313{
314 msg_set_word(m, 9, n);
315}
316
317static inline void msg_set_nameinst(struct tipc_msg *m, u32 n)
318{
319 msg_set_namelower(m, n);
320}
321
322static inline void msg_set_nameupper(struct tipc_msg *m, u32 n)
323{
324 msg_set_word(m, 10, n);
325}
326
327static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
328{
329 return (struct tipc_msg *)msg_data(m);
330}
331
/* Convert a short-header message into long (LONG_H_SIZE) form in place:
 * the previous-node field becomes the origin node, "destnode" is filled in,
 * and words 8-10 (12 bytes) are zeroed.  No-op if the header is already long.
 * NOTE(review): assumes the buffer has headroom for the larger header —
 * confirm at the call sites.
 */
static inline void msg_expand(struct tipc_msg *m, u32 destnode)
{
	if (!msg_short(m))
		return;
	msg_set_hdr_sz(m, LONG_H_SIZE);
	msg_set_orignode(m, msg_prevnode(m));
	msg_set_destnode(m, destnode);
	memset(&m->hdr[8], 0, 12);
}
341
342
343
344/*
345 TIPC internal message header format, version 2
346
347 1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0
348 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
349 w0:|vers |msg usr|hdr sz |n|resrv| packet size |
350 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
351 w1:|m typ|rsv=0| sequence gap | broadcast ack no |
352 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
353 w2:| link level ack no/bc_gap_from | seq no / bcast_gap_to |
354 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
355 w3:| previous node |
356 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
357 w4:| next sent broadcast/fragm no | next sent pkt/ fragm msg no |
358 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
359 w5:| session no |rsv=0|r|berid|link prio|netpl|p|
360 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
361 w6:| originating node |
362 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
363 w7:| destination node |
364 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
365 w8:| transport sequence number |
366 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
367 w9:| msg count / bcast tag | link tolerance |
368 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
369 \ \
370 / User Specific Data /
371 \ \
372 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
373
374 NB: CONN_MANAGER use data message format. LINK_CONFIG has own format.
375*/
376
377/*
378 * Internal users
379 */
380
381#define BCAST_PROTOCOL 5
382#define MSG_BUNDLER 6
383#define LINK_PROTOCOL 7
384#define CONN_MANAGER 8
385#define ROUTE_DISTRIBUTOR 9
386#define CHANGEOVER_PROTOCOL 10
387#define NAME_DISTRIBUTOR 11
388#define MSG_FRAGMENTER 12
389#define LINK_CONFIG 13
390#define INT_H_SIZE 40
391#define DSC_H_SIZE 40
392
393/*
394 * Connection management protocol messages
395 */
396
397#define CONN_PROBE 0
398#define CONN_PROBE_REPLY 1
399#define CONN_ACK 2
400
401/*
402 * Name distributor messages
403 */
404
405#define PUBLICATION 0
406#define WITHDRAWAL 1
407
408
409/*
410 * Word 1
411 */
412
/* Sequence gap reported by a link protocol message.
 * NOTE(review): mask is 0xff (8 bits) although the header diagram gives
 * "sequence gap" the remainder of the upper half-word after the 3-bit
 * message type; confirm the intended field width (setter matches). */
static inline u32 msg_seq_gap(struct tipc_msg *m)
{
	return msg_bits(m, 1, 16, 0xff);
}

static inline void msg_set_seq_gap(struct tipc_msg *m, u32 n)
{
	msg_set_bits(m, 1, 16, 0xff, n);
}

/* Requested number of links (12 bits at the same offset as seq gap;
 * presumably used by a different message type -- confirm) */
static inline u32 msg_req_links(struct tipc_msg *m)
{
	return msg_bits(m, 1, 16, 0xfff);
}

static inline void msg_set_req_links(struct tipc_msg *m, u32 n)
{
	msg_set_bits(m, 1, 16, 0xfff, n);
}
432
433
434/*
435 * Word 2
436 */
437
/* Destination domain occupies the whole of word 2; it overlays the
 * broadcast-gap fields below, so interpretation depends on message type */
static inline u32 msg_dest_domain(struct tipc_msg *m)
{
	return msg_word(m, 2);
}

static inline void msg_set_dest_domain(struct tipc_msg *m, u32 n)
{
	msg_set_word(m, 2, n);
}

/* Start of broadcast gap ("bc_gap_from" in the header diagram);
 * upper half of word 2 */
static inline u32 msg_bcgap_after(struct tipc_msg *m)
{
	return msg_bits(m, 2, 16, 0xffff);
}

static inline void msg_set_bcgap_after(struct tipc_msg *m, u32 n)
{
	msg_set_bits(m, 2, 16, 0xffff, n);
}

/* End of broadcast gap ("bcast_gap_to" in the header diagram);
 * lower half of word 2 */
static inline u32 msg_bcgap_to(struct tipc_msg *m)
{
	return msg_bits(m, 2, 0, 0xffff);
}

static inline void msg_set_bcgap_to(struct tipc_msg *m, u32 n)
{
	msg_set_bits(m, 2, 0, 0xffff, n);
}
467
468
469/*
470 * Word 4
471 */
472
/* "next sent broadcast" per the header diagram; upper half of word 4 */
static inline u32 msg_last_bcast(struct tipc_msg *m)
{
	return msg_bits(m, 4, 16, 0xffff);
}

static inline void msg_set_last_bcast(struct tipc_msg *m, u32 n)
{
	msg_set_bits(m, 4, 16, 0xffff, n);
}


/* Fragment number; same bits as msg_last_bcast (overlaid field,
 * used by MSG_FRAGMENTER traffic per the diagram's "fragm no") */
static inline u32 msg_fragm_no(struct tipc_msg *m)
{
	return msg_bits(m, 4, 16, 0xffff);
}

static inline void msg_set_fragm_no(struct tipc_msg *m, u32 n)
{
	msg_set_bits(m, 4, 16, 0xffff, n);
}


/* "next sent pkt" per the header diagram; lower half of word 4 */
static inline u32 msg_next_sent(struct tipc_msg *m)
{
	return msg_bits(m, 4, 0, 0xffff);
}

static inline void msg_set_next_sent(struct tipc_msg *m, u32 n)
{
	msg_set_bits(m, 4, 0, 0xffff, n);
}


/* Fragmented message number; overlays msg_next_sent (diagram's
 * "fragm msg no") */
static inline u32 msg_long_msgno(struct tipc_msg *m)
{
	return msg_bits(m, 4, 0, 0xffff);
}

static inline void msg_set_long_msgno(struct tipc_msg *m, u32 n)
{
	msg_set_bits(m, 4, 0, 0xffff, n);
}

/* Network id carried in broadcast-related messages; whole of word 4
 * (overlays all of the fields above -- message-type dependent) */
static inline u32 msg_bc_netid(struct tipc_msg *m)
{
	return msg_word(m, 4);
}

static inline void msg_set_bc_netid(struct tipc_msg *m, u32 id)
{
	msg_set_word(m, 4, id);
}

/* Single-bit link selector (bit 0 of word 4) */
static inline u32 msg_link_selector(struct tipc_msg *m)
{
	return msg_bits(m, 4, 0, 1);
}

static inline void msg_set_link_selector(struct tipc_msg *m, u32 n)
{
	/* only the low bit of n is significant */
	msg_set_bits(m, 4, 0, 1, (n & 1));
}
535
536/*
537 * Word 5
538 */
539
/* Link session number; upper half of word 5 ("session no" in diagram) */
static inline u32 msg_session(struct tipc_msg *m)
{
	return msg_bits(m, 5, 16, 0xffff);
}

static inline void msg_set_session(struct tipc_msg *m, u32 n)
{
	msg_set_bits(m, 5, 16, 0xffff, n);
}

/* Probe flag; bit 0 of word 5 ("p" in the header diagram) */
static inline u32 msg_probe(struct tipc_msg *m)
{
	return msg_bits(m, 5, 0, 1);
}

static inline void msg_set_probe(struct tipc_msg *m, u32 val)
{
	msg_set_bits(m, 5, 0, 1, (val & 1));
}

/* Network plane; 3 bits at bit 1 of word 5 ("netpl"), stored as an
 * offset from 'A' so it reads back as a letter */
static inline char msg_net_plane(struct tipc_msg *m)
{
	return msg_bits(m, 5, 1, 7) + 'A';
}

static inline void msg_set_net_plane(struct tipc_msg *m, char n)
{
	msg_set_bits(m, 5, 1, 7, (n - 'A'));
}

/* Link priority; 5 bits at bit 4 of word 5 ("link prio") */
static inline u32 msg_linkprio(struct tipc_msg *m)
{
	return msg_bits(m, 5, 4, 0x1f);
}

static inline void msg_set_linkprio(struct tipc_msg *m, u32 n)
{
	msg_set_bits(m, 5, 4, 0x1f, n);
}

/* Bearer identity; 3 bits at bit 9 of word 5 ("berid") */
static inline u32 msg_bearer_id(struct tipc_msg *m)
{
	return msg_bits(m, 5, 9, 0x7);
}

static inline void msg_set_bearer_id(struct tipc_msg *m, u32 n)
{
	msg_set_bits(m, 5, 9, 0x7, n);
}

/* Redundant-link flag; bit 12 of word 5 ("r" in the header diagram) */
static inline u32 msg_redundant_link(struct tipc_msg *m)
{
	return msg_bits(m, 5, 12, 0x1);
}

static inline void msg_set_redundant_link(struct tipc_msg *m)
{
	msg_set_bits(m, 5, 12, 0x1, 1);
}

static inline void msg_clear_redundant_link(struct tipc_msg *m)
{
	msg_set_bits(m, 5, 12, 0x1, 0);
}
604
605
606/*
607 * Word 9
608 */
609
/* Bundled-message count; upper half of word 9 ("msg count") */
static inline u32 msg_msgcnt(struct tipc_msg *m)
{
	return msg_bits(m, 9, 16, 0xffff);
}

static inline void msg_set_msgcnt(struct tipc_msg *m, u32 n)
{
	msg_set_bits(m, 9, 16, 0xffff, n);
}

/* Broadcast tag; overlays msg_msgcnt (diagram's "bcast tag") --
 * message-type dependent */
static inline u32 msg_bcast_tag(struct tipc_msg *m)
{
	return msg_bits(m, 9, 16, 0xffff);
}

static inline void msg_set_bcast_tag(struct tipc_msg *m, u32 n)
{
	msg_set_bits(m, 9, 16, 0xffff, n);
}

/* Maximum packet size; also overlays the upper half of word 9 and is
 * stored in units of 4 bytes (not shown in the header diagram) */
static inline u32 msg_max_pkt(struct tipc_msg *m)
{
	return (msg_bits(m, 9, 16, 0xffff) * 4);
}

static inline void msg_set_max_pkt(struct tipc_msg *m, u32 n)
{
	/* NB: truncates n to a multiple of 4 */
	msg_set_bits(m, 9, 16, 0xffff, (n / 4));
}

/* Link tolerance; lower half of word 9 */
static inline u32 msg_link_tolerance(struct tipc_msg *m)
{
	return msg_bits(m, 9, 0, 0xffff);
}

static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n)
{
	msg_set_bits(m, 9, 0, 0xffff, n);
}
649
650/*
651 * Routing table message data
652 */
653
654
/* Remote node address, stored in the first 32-bit word immediately
 * after the header (msg_hdr_sz is in bytes, words are 4 bytes) */
static inline u32 msg_remote_node(struct tipc_msg *m)
{
	return msg_word(m, msg_hdr_sz(m)/4);
}

static inline void msg_set_remote_node(struct tipc_msg *m, u32 a)
{
	msg_set_word(m, msg_hdr_sz(m)/4, a);
}

/* Per-position boolean flags in the data area, one byte each, starting
 * 4 bytes in (the first 4 data bytes apparently hold something else --
 * TODO confirm against routing-table senders) */
static inline int msg_dataoctet(struct tipc_msg *m, u32 pos)
{
	return(msg_data(m)[pos + 4] != 0);
}

static inline void msg_set_dataoctet(struct tipc_msg *m, u32 pos)
{
	msg_data(m)[pos + 4] = 1;
}
674
675/*
676 * Segmentation message types
677 */
678
/*
 * Segmentation message types (used with user MSG_FRAGMENTER;
 * see msg_tot_importance(), which special-cases FIRST_FRAGMENT)
 */

#define FIRST_FRAGMENT 0
#define FRAGMENT 1
#define LAST_FRAGMENT 2

/*
 * Link management protocol message types (presumably user LINK_PROTOCOL)
 */

#define STATE_MSG 0
#define RESET_MSG 1
#define ACTIVATE_MSG 2

/*
 * Changeover tunnel message types (presumably user CHANGEOVER_PROTOCOL)
 */
#define DUPLICATE_MSG 0
#define ORIGINAL_MSG 1

/*
 * Routing table message types (presumably user ROUTE_DISTRIBUTOR)
 */
#define EXT_ROUTING_TABLE 0
#define LOCAL_ROUTING_TABLE 1
#define SLAVE_ROUTING_TABLE 2
#define ROUTE_ADDITION 3
#define ROUTE_REMOVAL 4

/*
 * Config protocol message types (presumably user LINK_CONFIG; the DSC_
 * prefix matches DSC_H_SIZE above)
 */

#define DSC_REQ_MSG 0
#define DSC_RESP_MSG 1
712
/* Effective importance of a message: payload messages from other nodes
 * get a +4 boost over locally originated ones; the first fragment of a
 * fragmented message uses the importance of the wrapped original header */
static inline u32 msg_tot_importance(struct tipc_msg *m)
{
	if (likely(msg_isdata(m))) {
		if (likely(msg_orignode(m) == tipc_own_addr))
			return msg_importance(m);
		return msg_importance(m) + 4;
	}
	if ((msg_user(m) == MSG_FRAGMENTER) &&
	    (msg_type(m) == FIRST_FRAGMENT))
		return msg_importance(msg_get_wrapped(m));
	return msg_importance(m);
}
725
726
/**
 * msg_init - initialize a message header in place
 * @m: header buffer (zeroed to @hsize bytes first)
 * @user: message user (e.g. NAME_DISTRIBUTOR)
 * @type: user-specific message type
 * @err: error code
 * @hsize: header size in bytes; also becomes the initial total size
 * @destnode: destination node address
 *
 * Origin/destination node fields only exist in the longer header
 * formats, hence the !msg_short() guard.
 */
static inline void msg_init(struct tipc_msg *m, u32 user, u32 type,
			    u32 err, u32 hsize, u32 destnode)
{
	memset(m, 0, hsize);
	msg_set_version(m);
	msg_set_user(m, user);
	msg_set_hdr_sz(m, hsize);
	msg_set_size(m, hsize);
	msg_set_prevnode(m, tipc_own_addr);
	msg_set_type(m, type);
	msg_set_errcode(m, err);
	if (!msg_short(m)) {
		msg_set_orignode(m, tipc_own_addr);
		msg_set_destnode(m, destnode);
	}
}
743
744/**
745 * msg_calc_data_size - determine total data size for message
746 */
747
748static inline int msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect)
749{
750 int dsz = 0;
751 int i;
752
753 for (i = 0; i < num_sect; i++)
754 dsz += msg_sect[i].iov_len;
755 return dsz;
756}
757
758/**
759 * msg_build - create message using specified header and data
760 *
761 * Note: Caller must not hold any locks in case copy_from_user() is interrupted!
762 *
763 * Returns message data size or errno
764 */
765
static inline int msg_build(struct tipc_msg *hdr,
			    struct iovec const *msg_sect, u32 num_sect,
			    int max_size, int usrmem, struct sk_buff** buf)
{
	int dsz, sz, hsz, pos, res, cnt;

	/* Reject messages whose payload exceeds the absolute limit */
	dsz = msg_calc_data_size(msg_sect, num_sect);
	if (unlikely(dsz > TIPC_MAX_USER_MSG_SIZE)) {
		*buf = NULL;
		return -EINVAL;
	}

	pos = hsz = msg_hdr_sz(hdr);
	sz = hsz + dsz;
	msg_set_size(hdr, sz);
	if (unlikely(sz > max_size)) {
		/* Too big for one buffer: return the data size with
		 * *buf == NULL; caller presumably falls back to
		 * fragmentation -- TODO confirm against callers */
		*buf = NULL;
		return dsz;
	}

	*buf = buf_acquire(sz);
	if (!(*buf))
		return -ENOMEM;
	/* Header first, then each data section appended in order */
	memcpy((*buf)->data, (unchar *)hdr, hsz);
	/* res starts at 1 ("ok") so a message with zero sections succeeds;
	 * copy_from_user() returns bytes NOT copied, hence the negation */
	for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) {
		if (likely(usrmem))
			res = !copy_from_user((*buf)->data + pos,
					      msg_sect[cnt].iov_base,
					      msg_sect[cnt].iov_len);
		else
			memcpy((*buf)->data + pos, msg_sect[cnt].iov_base,
			       msg_sect[cnt].iov_len);
		pos += msg_sect[cnt].iov_len;
	}
	if (likely(res))
		return dsz;

	/* A user-space copy failed: discard the partial buffer */
	buf_discard(*buf);
	*buf = NULL;
	return -EFAULT;
}
807
808
809struct tipc_media_addr;
810
811extern void msg_set_media_addr(struct tipc_msg *m,
812 struct tipc_media_addr *a);
813
814extern void msg_get_media_addr(struct tipc_msg *m,
815 struct tipc_media_addr *a);
816
817
818#endif
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
new file mode 100644
index 000000000000..41cbaf1a4a73
--- /dev/null
+++ b/net/tipc/name_distr.c
@@ -0,0 +1,309 @@
1/*
2 * net/tipc/name_distr.c: TIPC name distribution code
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "cluster.h"
39#include "dbg.h"
40#include "link.h"
41#include "msg.h"
42#include "name_distr.h"
43
44#undef DBG_OUTPUT
45#define DBG_OUTPUT NULL
46
47#define ITEM_SIZE sizeof(struct distr_item)
48
49/**
50 * struct distr_item - publication info distributed to other nodes
51 * @type: name sequence type
52 * @lower: name sequence lower bound
53 * @upper: name sequence upper bound
54 * @ref: publishing port reference
55 * @key: publication key
56 *
57 * ===> All fields are stored in network byte order. <===
58 *
59 * First 3 fields identify (name or) name sequence being published.
60 * Reference field uniquely identifies port that published name sequence.
61 * Key field uniquely identifies publication, in the event a port has
62 * multiple publications of the same name sequence.
63 *
64 * Note: There is no field that identifies the publishing node because it is
65 * the same for all items contained within a publication message.
66 */
67
struct distr_item {
	u32 type;	/* name sequence type (network byte order) */
	u32 lower;	/* name sequence lower bound (network byte order) */
	u32 upper;	/* name sequence upper bound (network byte order) */
	u32 ref;	/* publishing port reference (network byte order) */
	u32 key;	/* publication key (network byte order) */
};
75
76/**
77 * List of externally visible publications by this node --
78 * that is, all publications having scope > TIPC_NODE_SCOPE.
79 */
80
81static LIST_HEAD(publ_root);
82static u32 publ_cnt = 0;
83
84/**
85 * publ_to_item - add publication info to a publication message
86 */
87
static void publ_to_item(struct distr_item *i, struct publication *p)
{
	/* all distr_item fields are carried in network byte order */
	i->type = htonl(p->type);
	i->lower = htonl(p->lower);
	i->upper = htonl(p->upper);
	i->ref = htonl(p->ref);
	i->key = htonl(p->key);
	dbg("publ_to_item: %u, %u, %u\n", p->type, p->lower, p->upper);
}
97
98/**
99 * named_prepare_buf - allocate & initialize a publication message
100 */
101
/*
 * @type: PUBLICATION or WITHDRAWAL
 * @size: payload size in bytes (multiple of ITEM_SIZE)
 * @dest: destination node; callers pass 0 when the message will be
 *        cluster-broadcast instead of sent to one node
 *
 * Returns buffer with an initialized long-form header, or NULL on
 * allocation failure.
 */
static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
{
	struct sk_buff *buf = buf_acquire(LONG_H_SIZE + size);
	struct tipc_msg *msg;

	if (buf != NULL) {
		msg = buf_msg(buf);
		msg_init(msg, NAME_DISTRIBUTOR, type, TIPC_OK,
			 LONG_H_SIZE, dest);
		msg_set_size(msg, LONG_H_SIZE + size);
	}
	return buf;
}
115
116/**
117 * named_publish - tell other nodes about a new publication by this node
118 */
119
120void named_publish(struct publication *publ)
121{
122 struct sk_buff *buf;
123 struct distr_item *item;
124
125 list_add(&publ->local_list, &publ_root);
126 publ_cnt++;
127
128 buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
129 if (!buf) {
130 warn("Memory squeeze; failed to distribute publication\n");
131 return;
132 }
133
134 item = (struct distr_item *)msg_data(buf_msg(buf));
135 publ_to_item(item, publ);
136 dbg("named_withdraw: broadcasting publish msg\n");
137 cluster_broadcast(buf);
138}
139
140/**
141 * named_withdraw - tell other nodes about a withdrawn publication by this node
142 */
143
void named_withdraw(struct publication *publ)
{
	struct sk_buff *buf;
	struct distr_item *item;

	/* Remove from local distribution list first; the broadcast below
	 * is best-effort and may be skipped under memory pressure */
	list_del(&publ->local_list);
	publ_cnt--;

	buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
	if (!buf) {
		warn("Memory squeeze; failed to distribute withdrawal\n");
		return;
	}

	item = (struct distr_item *)msg_data(buf_msg(buf));
	publ_to_item(item, publ);
	dbg("named_withdraw: broadcasting withdraw msg\n");
	cluster_broadcast(buf);
}
163
164/**
165 * named_node_up - tell specified node about all publications by this node
166 */
167
168void named_node_up(unsigned long node)
169{
170 struct publication *publ;
171 struct distr_item *item = 0;
172 struct sk_buff *buf = 0;
173 u32 left = 0;
174 u32 rest;
175 u32 max_item_buf;
176
177 assert(in_own_cluster(node));
178 read_lock_bh(&nametbl_lock);
179 max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE;
180 max_item_buf *= ITEM_SIZE;
181 rest = publ_cnt * ITEM_SIZE;
182
183 list_for_each_entry(publ, &publ_root, local_list) {
184 if (!buf) {
185 left = (rest <= max_item_buf) ? rest : max_item_buf;
186 rest -= left;
187 buf = named_prepare_buf(PUBLICATION, left, node);
188 if (buf == NULL) {
189 warn("Memory Squeeze; could not send publication\n");
190 goto exit;
191 }
192 item = (struct distr_item *)msg_data(buf_msg(buf));
193 }
194 publ_to_item(item, publ);
195 item++;
196 left -= ITEM_SIZE;
197 if (!left) {
198 msg_set_link_selector(buf_msg(buf), node);
199 dbg("named_node_up: sending publish msg to "
200 "<%u.%u.%u>\n", tipc_zone(node),
201 tipc_cluster(node), tipc_node(node));
202 link_send(buf, node, node);
203 buf = 0;
204 }
205 }
206exit:
207 read_unlock_bh(&nametbl_lock);
208}
209
210/**
211 * node_is_down - remove publication associated with a failed node
212 *
213 * Invoked for each publication issued by a newly failed node.
214 * Removes publication structure from name table & deletes it.
215 * In rare cases the link may have come back up again when this
216 * function is called, and we have two items representing the same
217 * publication. Nudge this item's key to distinguish it from the other.
218 * (Note: Publication's node subscription is already unsubscribed.)
219 */
220
221static void node_is_down(struct publication *publ)
222{
223 struct publication *p;
224 write_lock_bh(&nametbl_lock);
225 dbg("node_is_down: withdrawing %u, %u, %u\n",
226 publ->type, publ->lower, publ->upper);
227 publ->key += 1222345;
228 p = nametbl_remove_publ(publ->type, publ->lower,
229 publ->node, publ->ref, publ->key);
230 assert(p == publ);
231 write_unlock_bh(&nametbl_lock);
232 if (publ)
233 kfree(publ);
234}
235
236/**
237 * named_recv - process name table update message sent by another node
238 */
239
240void named_recv(struct sk_buff *buf)
241{
242 struct publication *publ;
243 struct tipc_msg *msg = buf_msg(buf);
244 struct distr_item *item = (struct distr_item *)msg_data(msg);
245 u32 count = msg_data_sz(msg) / ITEM_SIZE;
246
247 write_lock_bh(&nametbl_lock);
248 while (count--) {
249 if (msg_type(msg) == PUBLICATION) {
250 dbg("named_recv: got publication for %u, %u, %u\n",
251 ntohl(item->type), ntohl(item->lower),
252 ntohl(item->upper));
253 publ = nametbl_insert_publ(ntohl(item->type),
254 ntohl(item->lower),
255 ntohl(item->upper),
256 TIPC_CLUSTER_SCOPE,
257 msg_orignode(msg),
258 ntohl(item->ref),
259 ntohl(item->key));
260 if (publ) {
261 nodesub_subscribe(&publ->subscr,
262 msg_orignode(msg),
263 publ,
264 (net_ev_handler)node_is_down);
265 }
266 } else if (msg_type(msg) == WITHDRAWAL) {
267 dbg("named_recv: got withdrawl for %u, %u, %u\n",
268 ntohl(item->type), ntohl(item->lower),
269 ntohl(item->upper));
270 publ = nametbl_remove_publ(ntohl(item->type),
271 ntohl(item->lower),
272 msg_orignode(msg),
273 ntohl(item->ref),
274 ntohl(item->key));
275
276 if (publ) {
277 nodesub_unsubscribe(&publ->subscr);
278 kfree(publ);
279 }
280 } else {
281 warn("named_recv: unknown msg\n");
282 }
283 item++;
284 }
285 write_unlock_bh(&nametbl_lock);
286 buf_discard(buf);
287}
288
289/**
290 * named_reinit - re-initialize local publication list
291 *
292 * This routine is called whenever TIPC networking is (re)enabled.
293 * All existing publications by this node that have "cluster" or "zone" scope
294 * are updated to reflect the node's current network address.
295 * (If the node's address is unchanged, the update loop terminates immediately.)
296 */
297
void named_reinit(void)
{
	struct publication *publ;

	write_lock_bh(&nametbl_lock);
	list_for_each_entry(publ, &publ_root, local_list) {
		/* Entries are added at the list head (see named_publish),
		 * so once one entry already carries the current address
		 * the remainder presumably do too -- hence the early break;
		 * verify that entries cannot be reordered elsewhere */
		if (publ->node == tipc_own_addr)
			break;
		publ->node = tipc_own_addr;
	}
	write_unlock_bh(&nametbl_lock);
}
diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h
new file mode 100644
index 000000000000..a04bdeac84ea
--- /dev/null
+++ b/net/tipc/name_distr.h
@@ -0,0 +1,48 @@
1/*
2 * net/tipc/name_distr.h: Include file for TIPC name distribution code
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_NAME_DISTR_H
38#define _TIPC_NAME_DISTR_H
39
40#include "name_table.h"
41
42void named_publish(struct publication *publ);
43void named_withdraw(struct publication *publ);
44void named_node_up(unsigned long node);
45void named_recv(struct sk_buff *buf);
46void named_reinit(void);
47
48#endif
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
new file mode 100644
index 000000000000..972c83eb83b4
--- /dev/null
+++ b/net/tipc/name_table.c
@@ -0,0 +1,1079 @@
1/*
2 * net/tipc/name_table.c: TIPC name table code
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "config.h"
39#include "dbg.h"
40#include "name_table.h"
41#include "name_distr.h"
42#include "addr.h"
43#include "node_subscr.h"
44#include "subscr.h"
45#include "port.h"
46#include "cluster.h"
47#include "bcast.h"
48
49int tipc_nametbl_size = 1024; /* must be a power of 2 */
50
51/**
52 * struct sub_seq - container for all published instances of a name sequence
53 * @lower: name sequence lower bound
54 * @upper: name sequence upper bound
55 * @node_list: circular list of matching publications with >= node scope
56 * @cluster_list: circular list of matching publications with >= cluster scope
57 * @zone_list: circular list of matching publications with >= zone scope
58 */
59
struct sub_seq {
	u32 lower;				/* name sequence lower bound */
	u32 upper;				/* name sequence upper bound */
	struct publication *node_list;		/* circular list, >= node scope */
	struct publication *cluster_list;	/* circular list, >= cluster scope */
	struct publication *zone_list;		/* circular list, >= zone scope */
};
67
68/**
69 * struct name_seq - container for all published instances of a name type
70 * @type: 32 bit 'type' value for name sequence
71 * @sseq: pointer to dynamically-sized array of sub-sequences of this 'type';
72 * sub-sequences are sorted in ascending order
73 * @alloc: number of sub-sequences currently in array
74 * @first_free: upper bound of highest sub-sequence + 1
75 * @ns_list: links to adjacent name sequences in hash chain
76 * @subscriptions: list of subscriptions for this 'type'
77 * @lock: spinlock controlling access to name sequence structure
78 */
79
struct name_seq {
	u32 type;			/* 32-bit 'type' value for the sequence */
	struct sub_seq *sseqs;		/* sorted (ascending) dynamic array */
	u32 alloc;			/* sub-sequences currently allocated */
	u32 first_free;			/* index one past highest used entry */
	struct hlist_node ns_list;	/* link in hash-chain of name sequences */
	struct list_head subscriptions;	/* subscriptions for this 'type' */
	spinlock_t lock;		/* protects this name sequence */
};
89
90/**
91 * struct name_table - table containing all existing port name publications
92 * @types: pointer to fixed-sized array of name sequence lists,
93 * accessed via hashing on 'type'; name sequence lists are *not* sorted
94 * @local_publ_count: number of publications issued by this node
95 */
96
struct name_table {
	struct hlist_head *types;	/* hashed on 'type'; chains unsorted */
	u32 local_publ_count;		/* publications issued by this node */
};

struct name_table table = { NULL };
static atomic_t rsv_publ_ok = ATOMIC_INIT(0);	/* permit reserved-type publication? -- TODO confirm */
rwlock_t nametbl_lock = RW_LOCK_UNLOCKED;	/* guards table and publication lists */
105
106
107static inline int hash(int x)
108{
109 return(x & (tipc_nametbl_size - 1));
110}
111
112/**
113 * publ_create - create a publication structure
114 */
115
116static struct publication *publ_create(u32 type, u32 lower, u32 upper,
117 u32 scope, u32 node, u32 port_ref,
118 u32 key)
119{
120 struct publication *publ =
121 (struct publication *)kmalloc(sizeof(*publ), GFP_ATOMIC);
122 if (publ == NULL) {
123 warn("Memory squeeze; failed to create publication\n");
124 return 0;
125 }
126
127 memset(publ, 0, sizeof(*publ));
128 publ->type = type;
129 publ->lower = lower;
130 publ->upper = upper;
131 publ->scope = scope;
132 publ->node = node;
133 publ->ref = port_ref;
134 publ->key = key;
135 INIT_LIST_HEAD(&publ->local_list);
136 INIT_LIST_HEAD(&publ->pport_list);
137 INIT_LIST_HEAD(&publ->subscr.nodesub_list);
138 return publ;
139}
140
141/**
142 * subseq_alloc - allocate a specified number of sub-sequence structures
143 */
144
145struct sub_seq *subseq_alloc(u32 cnt)
146{
147 u32 sz = cnt * sizeof(struct sub_seq);
148 struct sub_seq *sseq = (struct sub_seq *)kmalloc(sz, GFP_ATOMIC);
149
150 if (sseq)
151 memset(sseq, 0, sz);
152 return sseq;
153}
154
155/**
156 * nameseq_create - create a name sequence structure for the specified 'type'
157 *
158 * Allocates a single sub-sequence structure and sets it to all 0's.
159 */
160
161struct name_seq *nameseq_create(u32 type, struct hlist_head *seq_head)
162{
163 struct name_seq *nseq =
164 (struct name_seq *)kmalloc(sizeof(*nseq), GFP_ATOMIC);
165 struct sub_seq *sseq = subseq_alloc(1);
166
167 if (!nseq || !sseq) {
168 warn("Memory squeeze; failed to create name sequence\n");
169 kfree(nseq);
170 kfree(sseq);
171 return 0;
172 }
173
174 memset(nseq, 0, sizeof(*nseq));
175 nseq->lock = SPIN_LOCK_UNLOCKED;
176 nseq->type = type;
177 nseq->sseqs = sseq;
178 dbg("nameseq_create() nseq = %x type %u, ssseqs %x, ff: %u\n",
179 nseq, type, nseq->sseqs, nseq->first_free);
180 nseq->alloc = 1;
181 INIT_HLIST_NODE(&nseq->ns_list);
182 INIT_LIST_HEAD(&nseq->subscriptions);
183 hlist_add_head(&nseq->ns_list, seq_head);
184 return nseq;
185}
186
187/**
188 * nameseq_find_subseq - find sub-sequence (if any) matching a name instance
189 *
190 * Very time-critical, so binary searches through sub-sequence array.
191 */
192
193static inline struct sub_seq *nameseq_find_subseq(struct name_seq *nseq,
194 u32 instance)
195{
196 struct sub_seq *sseqs = nseq->sseqs;
197 int low = 0;
198 int high = nseq->first_free - 1;
199 int mid;
200
201 while (low <= high) {
202 mid = (low + high) / 2;
203 if (instance < sseqs[mid].lower)
204 high = mid - 1;
205 else if (instance > sseqs[mid].upper)
206 low = mid + 1;
207 else
208 return &sseqs[mid];
209 }
210 return 0;
211}
212
213/**
214 * nameseq_locate_subseq - determine position of name instance in sub-sequence
215 *
216 * Returns index in sub-sequence array of the entry that contains the specified
217 * instance value; if no entry contains that value, returns the position
218 * where a new entry for it would be inserted in the array.
219 *
220 * Note: Similar to binary search code for locating a sub-sequence.
221 */
222
223static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
224{
225 struct sub_seq *sseqs = nseq->sseqs;
226 int low = 0;
227 int high = nseq->first_free - 1;
228 int mid;
229
230 while (low <= high) {
231 mid = (low + high) / 2;
232 if (instance < sseqs[mid].lower)
233 high = mid - 1;
234 else if (instance > sseqs[mid].upper)
235 low = mid + 1;
236 else
237 return mid;
238 }
239 return low;
240}
241
242/**
243 * nameseq_insert_publ -
244 */
245
246struct publication *nameseq_insert_publ(struct name_seq *nseq,
247 u32 type, u32 lower, u32 upper,
248 u32 scope, u32 node, u32 port, u32 key)
249{
250 struct subscription *s;
251 struct subscription *st;
252 struct publication *publ;
253 struct sub_seq *sseq;
254 int created_subseq = 0;
255
256 assert(nseq->first_free <= nseq->alloc);
257 sseq = nameseq_find_subseq(nseq, lower);
258 dbg("nameseq_ins: for seq %x,<%u,%u>, found sseq %x\n",
259 nseq, type, lower, sseq);
260 if (sseq) {
261
262 /* Lower end overlaps existing entry => need an exact match */
263
264 if ((sseq->lower != lower) || (sseq->upper != upper)) {
265 warn("Overlapping publ <%u,%u,%u>\n", type, lower, upper);
266 return 0;
267 }
268 } else {
269 u32 inspos;
270 struct sub_seq *freesseq;
271
272 /* Find where lower end should be inserted */
273
274 inspos = nameseq_locate_subseq(nseq, lower);
275
276 /* Fail if upper end overlaps into an existing entry */
277
278 if ((inspos < nseq->first_free) &&
279 (upper >= nseq->sseqs[inspos].lower)) {
280 warn("Overlapping publ <%u,%u,%u>\n", type, lower, upper);
281 return 0;
282 }
283
284 /* Ensure there is space for new sub-sequence */
285
286 if (nseq->first_free == nseq->alloc) {
287 struct sub_seq *sseqs = nseq->sseqs;
288 nseq->sseqs = subseq_alloc(nseq->alloc * 2);
289 if (nseq->sseqs != NULL) {
290 memcpy(nseq->sseqs, sseqs,
291 nseq->alloc * sizeof (struct sub_seq));
292 kfree(sseqs);
293 dbg("Allocated %u sseqs\n", nseq->alloc);
294 nseq->alloc *= 2;
295 } else {
296 warn("Memory squeeze; failed to create sub-sequence\n");
297 return 0;
298 }
299 }
300 dbg("Have %u sseqs for type %u\n", nseq->alloc, type);
301
302 /* Insert new sub-sequence */
303
304 dbg("ins in pos %u, ff = %u\n", inspos, nseq->first_free);
305 sseq = &nseq->sseqs[inspos];
306 freesseq = &nseq->sseqs[nseq->first_free];
307 memmove(sseq + 1, sseq, (freesseq - sseq) * sizeof (*sseq));
308 memset(sseq, 0, sizeof (*sseq));
309 nseq->first_free++;
310 sseq->lower = lower;
311 sseq->upper = upper;
312 created_subseq = 1;
313 }
314 dbg("inserting (%u %u %u) from %x:%u into sseq %x(%u,%u) of seq %x\n",
315 type, lower, upper, node, port, sseq,
316 sseq->lower, sseq->upper, nseq);
317
318 /* Insert a publication: */
319
320 publ = publ_create(type, lower, upper, scope, node, port, key);
321 if (!publ)
322 return 0;
323 dbg("inserting publ %x, node=%x publ->node=%x, subscr->node=%x\n",
324 publ, node, publ->node, publ->subscr.node);
325
326 if (!sseq->zone_list)
327 sseq->zone_list = publ->zone_list_next = publ;
328 else {
329 publ->zone_list_next = sseq->zone_list->zone_list_next;
330 sseq->zone_list->zone_list_next = publ;
331 }
332
333 if (in_own_cluster(node)) {
334 if (!sseq->cluster_list)
335 sseq->cluster_list = publ->cluster_list_next = publ;
336 else {
337 publ->cluster_list_next =
338 sseq->cluster_list->cluster_list_next;
339 sseq->cluster_list->cluster_list_next = publ;
340 }
341 }
342
343 if (node == tipc_own_addr) {
344 if (!sseq->node_list)
345 sseq->node_list = publ->node_list_next = publ;
346 else {
347 publ->node_list_next = sseq->node_list->node_list_next;
348 sseq->node_list->node_list_next = publ;
349 }
350 }
351
352 /*
353 * Any subscriptions waiting for notification?
354 */
355 list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
356 dbg("calling report_overlap()\n");
357 subscr_report_overlap(s,
358 publ->lower,
359 publ->upper,
360 TIPC_PUBLISHED,
361 publ->ref,
362 publ->node,
363 created_subseq);
364 }
365 return publ;
366}
367
/**
 * nameseq_remove_publ - remove a publication from a name sequence
 * @nseq: name sequence to remove from
 * @inst: instance value identifying the target sub-sequence
 * @node: network address of the withdrawing node
 * @ref: port reference of the withdrawing port
 * @key: publication key
 *
 * Unlinks the publication matching <key,ref,node> from the sub-sequence's
 * zone list and, where applicable, from its cluster and node lists (all
 * three are circular singly-linked lists).  If the sub-sequence ends up
 * holding no publications at all it is removed from the sub-sequence
 * array, and any overlapping subscriptions are notified of the withdrawal.
 *
 * Returns the unlinked publication (caller is responsible for freeing it),
 * or 0 if no matching sub-sequence exists.  NOTE(review): the search loops
 * assert() rather than fail gracefully if the publication itself is absent
 * from a list -- confirm callers guarantee its presence.
 */

struct publication *nameseq_remove_publ(struct name_seq *nseq, u32 inst,
					u32 node, u32 ref, u32 key)
{
	struct publication *publ;
	struct publication *prev;
	struct sub_seq *sseq = nameseq_find_subseq(nseq, inst);
	struct sub_seq *free;
	struct subscription *s, *st;
	int removed_subseq = 0;

	assert(nseq);

	if (!sseq) {
		int i;

		/* No sub-sequence covers 'inst'; dump table state for debug */
		warn("Withdraw unknown <%u,%u>?\n", nseq->type, inst);
		assert(nseq->sseqs);
		dbg("Dumping subseqs %x for %x, alloc = %u,ff=%u\n",
		    nseq->sseqs, nseq, nseq->alloc,
		    nseq->first_free);
		for (i = 0; i < nseq->first_free; i++) {
			dbg("Subseq %u(%x): lower = %u,upper = %u\n",
			    i, &nseq->sseqs[i], nseq->sseqs[i].lower,
			    nseq->sseqs[i].upper);
		}
		return 0;
	}
	dbg("nameseq_remove: seq: %x, sseq %x, <%u,%u> key %u\n",
	    nseq, sseq, nseq->type, inst, key);

	/* Locate the publication on the circular zone list.  The walk starts
	 * one element past the head; the assert fires if a full lap is made
	 * without finding a match. */
	prev = sseq->zone_list;
	publ = sseq->zone_list->zone_list_next;
	while ((publ->key != key) || (publ->ref != ref) ||
	       (publ->node && (publ->node != node))) {
		prev = publ;
		publ = publ->zone_list_next;
		assert(prev != sseq->zone_list);
	}
	/* Unlink from zone list; move (or clear) the head if 'publ' is it */
	if (publ != sseq->zone_list)
		prev->zone_list_next = publ->zone_list_next;
	else if (publ->zone_list_next != publ) {
		prev->zone_list_next = publ->zone_list_next;
		sseq->zone_list = publ->zone_list_next;
	} else {
		sseq->zone_list = 0;
	}

	/* Repeat the unlink on the cluster list for intra-cluster nodes */
	if (in_own_cluster(node)) {
		prev = sseq->cluster_list;
		publ = sseq->cluster_list->cluster_list_next;
		while ((publ->key != key) || (publ->ref != ref) ||
		       (publ->node && (publ->node != node))) {
			prev = publ;
			publ = publ->cluster_list_next;
			assert(prev != sseq->cluster_list);
		}
		if (publ != sseq->cluster_list)
			prev->cluster_list_next = publ->cluster_list_next;
		else if (publ->cluster_list_next != publ) {
			prev->cluster_list_next = publ->cluster_list_next;
			sseq->cluster_list = publ->cluster_list_next;
		} else {
			sseq->cluster_list = 0;
		}
	}

	/* Repeat the unlink on the node list for own-node publications */
	if (node == tipc_own_addr) {
		prev = sseq->node_list;
		publ = sseq->node_list->node_list_next;
		while ((publ->key != key) || (publ->ref != ref) ||
		       (publ->node && (publ->node != node))) {
			prev = publ;
			publ = publ->node_list_next;
			assert(prev != sseq->node_list);
		}
		if (publ != sseq->node_list)
			prev->node_list_next = publ->node_list_next;
		else if (publ->node_list_next != publ) {
			prev->node_list_next = publ->node_list_next;
			sseq->node_list = publ->node_list_next;
		} else {
			sseq->node_list = 0;
		}
	}
	assert(!publ->node || (publ->node == node));
	assert(publ->ref == ref);
	assert(publ->key == key);

	/*
	 * Contract subseq list if no more publications:
	 */
	if (!sseq->node_list && !sseq->cluster_list && !sseq->zone_list) {
		free = &nseq->sseqs[nseq->first_free--];
		memmove(sseq, sseq + 1, (free - (sseq + 1)) * sizeof (*sseq));
		removed_subseq = 1;
	}

	/*
	 * Any subscriptions waiting ?
	 */
	list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
		subscr_report_overlap(s,
				      publ->lower,
				      publ->upper,
				      TIPC_WITHDRAWN,
				      publ->ref,
				      publ->node,
				      removed_subseq);
	}
	return publ;
}
483
484/**
485 * nameseq_subscribe: attach a subscription, and issue
486 * the prescribed number of events if there is any sub-
487 * sequence overlapping with the requested sequence
488 */
489
490void nameseq_subscribe(struct name_seq *nseq, struct subscription *s)
491{
492 struct sub_seq *sseq = nseq->sseqs;
493
494 list_add(&s->nameseq_list, &nseq->subscriptions);
495
496 if (!sseq)
497 return;
498
499 while (sseq != &nseq->sseqs[nseq->first_free]) {
500 struct publication *zl = sseq->zone_list;
501 if (zl && subscr_overlap(s,sseq->lower,sseq->upper)) {
502 struct publication *crs = zl;
503 int must_report = 1;
504
505 do {
506 subscr_report_overlap(s,
507 sseq->lower,
508 sseq->upper,
509 TIPC_PUBLISHED,
510 crs->ref,
511 crs->node,
512 must_report);
513 must_report = 0;
514 crs = crs->zone_list_next;
515 } while (crs != zl);
516 }
517 sseq++;
518 }
519}
520
521static struct name_seq *nametbl_find_seq(u32 type)
522{
523 struct hlist_head *seq_head;
524 struct hlist_node *seq_node;
525 struct name_seq *ns;
526
527 dbg("find_seq %u,(%u,0x%x) table = %p, hash[type] = %u\n",
528 type, ntohl(type), type, table.types, hash(type));
529
530 seq_head = &table.types[hash(type)];
531 hlist_for_each_entry(ns, seq_node, seq_head, ns_list) {
532 if (ns->type == type) {
533 dbg("found %x\n", ns);
534 return ns;
535 }
536 }
537
538 return 0;
539};
540
541struct publication *nametbl_insert_publ(u32 type, u32 lower, u32 upper,
542 u32 scope, u32 node, u32 port, u32 key)
543{
544 struct name_seq *seq = nametbl_find_seq(type);
545
546 dbg("ins_publ: <%u,%x,%x> found %x\n", type, lower, upper, seq);
547 if (lower > upper) {
548 warn("Failed to publish illegal <%u,%u,%u>\n",
549 type, lower, upper);
550 return 0;
551 }
552
553 dbg("Publishing <%u,%u,%u> from %x\n", type, lower, upper, node);
554 if (!seq) {
555 seq = nameseq_create(type, &table.types[hash(type)]);
556 dbg("nametbl_insert_publ: created %x\n", seq);
557 }
558 if (!seq)
559 return 0;
560
561 assert(seq->type == type);
562 return nameseq_insert_publ(seq, type, lower, upper,
563 scope, node, port, key);
564}
565
566struct publication *nametbl_remove_publ(u32 type, u32 lower,
567 u32 node, u32 ref, u32 key)
568{
569 struct publication *publ;
570 struct name_seq *seq = nametbl_find_seq(type);
571
572 if (!seq)
573 return 0;
574
575 dbg("Withdrawing <%u,%u> from %x\n", type, lower, node);
576 publ = nameseq_remove_publ(seq, lower, node, ref, key);
577
578 if (!seq->first_free && list_empty(&seq->subscriptions)) {
579 hlist_del_init(&seq->ns_list);
580 kfree(seq->sseqs);
581 kfree(seq);
582 }
583 return publ;
584}
585
/*
 * nametbl_translate(): Translate tipc_name -> tipc_portid.
 * Very time-critical.
 *
 * Note: on entry 'destnode' is the search domain used during translation;
 * on exit it passes back the node address of the matching port (if any)
 *
 * A hit advances the matched list's head pointer, so repeated lookups
 * round-robin among publications of equal proximity.
 */

u32 nametbl_translate(u32 type, u32 instance, u32 *destnode)
{
	struct sub_seq *sseq;
	struct publication *publ = 0;
	struct name_seq *seq;
	u32 ref;

	/* Only destinations within this node's scope can be resolved here */
	if (!in_scope(*destnode, tipc_own_addr))
		return 0;

	read_lock_bh(&nametbl_lock);
	seq = nametbl_find_seq(type);
	if (unlikely(!seq))
		goto not_found;
	sseq = nameseq_find_subseq(seq, instance);
	if (unlikely(!sseq))
		goto not_found;
	/* seq->lock serializes list-head rotation against other lookups */
	spin_lock_bh(&seq->lock);

	/* Closest-First Algorithm: */
	if (likely(!*destnode)) {
		/* Prefer a port on this node, then this cluster, then zone */
		publ = sseq->node_list;
		if (publ) {
			sseq->node_list = publ->node_list_next;
found:
			ref = publ->ref;
			*destnode = publ->node;
			spin_unlock_bh(&seq->lock);
			read_unlock_bh(&nametbl_lock);
			return ref;
		}
		publ = sseq->cluster_list;
		if (publ) {
			sseq->cluster_list = publ->cluster_list_next;
			goto found;
		}
		publ = sseq->zone_list;
		if (publ) {
			sseq->zone_list = publ->zone_list_next;
			goto found;
		}
	}

	/* Round-Robin Algorithm: */
	else if (*destnode == tipc_own_addr) {
		publ = sseq->node_list;
		if (publ) {
			sseq->node_list = publ->node_list_next;
			goto found;
		}
	} else if (in_own_cluster(*destnode)) {
		publ = sseq->cluster_list;
		if (publ) {
			sseq->cluster_list = publ->cluster_list_next;
			goto found;
		}
	} else {
		publ = sseq->zone_list;
		if (publ) {
			sseq->zone_list = publ->zone_list_next;
			goto found;
		}
	}
	spin_unlock_bh(&seq->lock);
not_found:
	*destnode = 0;
	read_unlock_bh(&nametbl_lock);
	return 0;
}
663
/**
 * nametbl_mc_translate - find multicast destinations
 * @type: name sequence type
 * @lower: name sequence lower bound
 * @upper: name sequence upper bound
 * @limit: narrowest publication scope that may receive this message
 * @dports: list to be filled with references of matching local ports
 *
 * Creates list of all local ports that overlap the given multicast address;
 * also determines if any off-node ports overlap.
 *
 * Note: Publications with a scope narrower than 'limit' are ignored.
 * (i.e. local node-scope publications mustn't receive messages arriving
 * from another node, even if the multicast link brought it here)
 *
 * Returns non-zero if any off-node ports overlap
 */

int nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
			 struct port_list *dports)
{
	struct name_seq *seq;
	struct sub_seq *sseq;
	struct sub_seq *sseq_stop;
	int res = 0;

	read_lock_bh(&nametbl_lock);
	seq = nametbl_find_seq(type);
	if (!seq)
		goto exit;

	spin_lock_bh(&seq->lock);

	/* Scan from the first sub-sequence that may overlap 'lower' until
	 * one starts beyond 'upper' */
	sseq = seq->sseqs + nameseq_locate_subseq(seq, lower);
	sseq_stop = seq->sseqs + seq->first_free;
	for (; sseq != sseq_stop; sseq++) {
		struct publication *publ;

		if (sseq->lower > upper)
			break;
		/* Circular cluster list walk; the scope check inspects only
		 * the first entry -- NOTE(review): presumably representative
		 * of the whole list; confirm. */
		publ = sseq->cluster_list;
		if (publ && (publ->scope <= limit))
			do {
				if (publ->node == tipc_own_addr)
					port_list_add(dports, publ->ref);
				else
					res = 1;
				publ = publ->cluster_list_next;
			} while (publ != sseq->cluster_list);
	}

	spin_unlock_bh(&seq->lock);
exit:
	read_unlock_bh(&nametbl_lock);
	return res;
}
715
716/**
717 * nametbl_publish_rsv - publish port name using a reserved name type
718 */
719
720int nametbl_publish_rsv(u32 ref, unsigned int scope,
721 struct tipc_name_seq const *seq)
722{
723 int res;
724
725 atomic_inc(&rsv_publ_ok);
726 res = tipc_publish(ref, scope, seq);
727 atomic_dec(&rsv_publ_ok);
728 return res;
729}
730
731/**
732 * nametbl_publish - add name publication to network name tables
733 */
734
735struct publication *nametbl_publish(u32 type, u32 lower, u32 upper,
736 u32 scope, u32 port_ref, u32 key)
737{
738 struct publication *publ;
739
740 if (table.local_publ_count >= tipc_max_publications) {
741 warn("Failed publish: max %u local publication\n",
742 tipc_max_publications);
743 return 0;
744 }
745 if ((type < TIPC_RESERVED_TYPES) && !atomic_read(&rsv_publ_ok)) {
746 warn("Failed to publish reserved name <%u,%u,%u>\n",
747 type, lower, upper);
748 return 0;
749 }
750
751 write_lock_bh(&nametbl_lock);
752 table.local_publ_count++;
753 publ = nametbl_insert_publ(type, lower, upper, scope,
754 tipc_own_addr, port_ref, key);
755 if (publ && (scope != TIPC_NODE_SCOPE)) {
756 named_publish(publ);
757 }
758 write_unlock_bh(&nametbl_lock);
759 return publ;
760}
761
/**
 * nametbl_withdraw - withdraw name publication from network name tables
 *
 * Removes this node's publication identified by <type,lower,ref,key>.
 * If found, the publication is unlinked from the table (and, for
 * non-node-scope names, the withdrawal is distributed to the network
 * via named_withdraw()), removed from its port's publication list,
 * and freed.
 *
 * Returns 1 if a publication was withdrawn, 0 otherwise.
 */

int nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
{
	struct publication *publ;

	dbg("nametbl_withdraw:<%d,%d,%d>\n", type, lower, key);
	write_lock_bh(&nametbl_lock);
	publ = nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
	if (publ) {
		table.local_publ_count--;
		if (publ->scope != TIPC_NODE_SCOPE)
			named_withdraw(publ);
		write_unlock_bh(&nametbl_lock);
		/* NOTE(review): pport_list is unlinked after nametbl_lock is
		 * released -- presumably protected by the caller's port lock;
		 * confirm. */
		list_del_init(&publ->pport_list);
		kfree(publ);
		return 1;
	}
	write_unlock_bh(&nametbl_lock);
	return 0;
}
785
786/**
787 * nametbl_subscribe - add a subscription object to the name table
788 */
789
790void
791nametbl_subscribe(struct subscription *s)
792{
793 u32 type = s->seq.type;
794 struct name_seq *seq;
795
796 write_lock_bh(&nametbl_lock);
797 seq = nametbl_find_seq(type);
798 if (!seq) {
799 seq = nameseq_create(type, &table.types[hash(type)]);
800 }
801 if (seq){
802 spin_lock_bh(&seq->lock);
803 dbg("nametbl_subscribe:found %x for <%u,%u,%u>\n",
804 seq, type, s->seq.lower, s->seq.upper);
805 assert(seq->type == type);
806 nameseq_subscribe(seq, s);
807 spin_unlock_bh(&seq->lock);
808 }
809 write_unlock_bh(&nametbl_lock);
810}
811
812/**
813 * nametbl_unsubscribe - remove a subscription object from name table
814 */
815
816void
817nametbl_unsubscribe(struct subscription *s)
818{
819 struct name_seq *seq;
820
821 write_lock_bh(&nametbl_lock);
822 seq = nametbl_find_seq(s->seq.type);
823 if (seq != NULL){
824 spin_lock_bh(&seq->lock);
825 list_del_init(&s->nameseq_list);
826 spin_unlock_bh(&seq->lock);
827 if ((seq->first_free == 0) && list_empty(&seq->subscriptions)) {
828 hlist_del_init(&seq->ns_list);
829 kfree(seq->sseqs);
830 kfree(seq);
831 }
832 }
833 write_unlock_bh(&nametbl_lock);
834}
835
836
837/**
838 * subseq_list: print specified sub-sequence contents into the given buffer
839 */
840
841static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
842 u32 index)
843{
844 char portIdStr[27];
845 char *scopeStr;
846 struct publication *publ = sseq->zone_list;
847
848 tipc_printf(buf, "%-10u %-10u ", sseq->lower, sseq->upper);
849
850 if (depth == 2 || !publ) {
851 tipc_printf(buf, "\n");
852 return;
853 }
854
855 do {
856 sprintf (portIdStr, "<%u.%u.%u:%u>",
857 tipc_zone(publ->node), tipc_cluster(publ->node),
858 tipc_node(publ->node), publ->ref);
859 tipc_printf(buf, "%-26s ", portIdStr);
860 if (depth > 3) {
861 if (publ->node != tipc_own_addr)
862 scopeStr = "";
863 else if (publ->scope == TIPC_NODE_SCOPE)
864 scopeStr = "node";
865 else if (publ->scope == TIPC_CLUSTER_SCOPE)
866 scopeStr = "cluster";
867 else
868 scopeStr = "zone";
869 tipc_printf(buf, "%-10u %s", publ->key, scopeStr);
870 }
871
872 publ = publ->zone_list_next;
873 if (publ == sseq->zone_list)
874 break;
875
876 tipc_printf(buf, "\n%33s", " ");
877 } while (1);
878
879 tipc_printf(buf, "\n");
880}
881
882/**
883 * nameseq_list: print specified name sequence contents into the given buffer
884 */
885
886static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth,
887 u32 type, u32 lowbound, u32 upbound, u32 index)
888{
889 struct sub_seq *sseq;
890 char typearea[11];
891
892 sprintf(typearea, "%-10u", seq->type);
893
894 if (depth == 1) {
895 tipc_printf(buf, "%s\n", typearea);
896 return;
897 }
898
899 for (sseq = seq->sseqs; sseq != &seq->sseqs[seq->first_free]; sseq++) {
900 if ((lowbound <= sseq->upper) && (upbound >= sseq->lower)) {
901 tipc_printf(buf, "%s ", typearea);
902 subseq_list(sseq, buf, depth, index);
903 sprintf(typearea, "%10s", " ");
904 }
905 }
906}
907
/**
 * nametbl_header - print name table header into the given buffer
 *
 * Column headings (and their underlines) are added progressively as
 * 'depth' increases, matching the columns nameseq_list()/subseq_list()
 * will emit at that depth.
 */

static void nametbl_header(struct print_buf *buf, u32 depth)
{
	tipc_printf(buf, "Type ");

	if (depth > 1)
		tipc_printf(buf, "Lower Upper ");
	if (depth > 2)
		tipc_printf(buf, "Port Identity ");
	if (depth > 3)
		tipc_printf(buf, "Publication");

	tipc_printf(buf, "\n-----------");

	if (depth > 1)
		tipc_printf(buf, "--------------------- ");
	if (depth > 2)
		tipc_printf(buf, "-------------------------- ");
	if (depth > 3)
		tipc_printf(buf, "------------------");

	tipc_printf(buf, "\n");
}
934
935/**
936 * nametbl_list - print specified name table contents into the given buffer
937 */
938
939static void nametbl_list(struct print_buf *buf, u32 depth_info,
940 u32 type, u32 lowbound, u32 upbound)
941{
942 struct hlist_head *seq_head;
943 struct hlist_node *seq_node;
944 struct name_seq *seq;
945 int all_types;
946 u32 depth;
947 u32 i;
948
949 all_types = (depth_info & TIPC_NTQ_ALLTYPES);
950 depth = (depth_info & ~TIPC_NTQ_ALLTYPES);
951
952 if (depth == 0)
953 return;
954
955 if (all_types) {
956 /* display all entries in name table to specified depth */
957 nametbl_header(buf, depth);
958 lowbound = 0;
959 upbound = ~0;
960 for (i = 0; i < tipc_nametbl_size; i++) {
961 seq_head = &table.types[i];
962 hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
963 nameseq_list(seq, buf, depth, seq->type,
964 lowbound, upbound, i);
965 }
966 }
967 } else {
968 /* display only the sequence that matches the specified type */
969 if (upbound < lowbound) {
970 tipc_printf(buf, "invalid name sequence specified\n");
971 return;
972 }
973 nametbl_header(buf, depth);
974 i = hash(type);
975 seq_head = &table.types[i];
976 hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
977 if (seq->type == type) {
978 nameseq_list(seq, buf, depth, type,
979 lowbound, upbound, i);
980 break;
981 }
982 }
983 }
984}
985
986void nametbl_print(struct print_buf *buf, const char *str)
987{
988 tipc_printf(buf, str);
989 read_lock_bh(&nametbl_lock);
990 nametbl_list(buf, 0, 0, 0, 0);
991 read_unlock_bh(&nametbl_lock);
992}
993
994#define MAX_NAME_TBL_QUERY 32768
995
/**
 * nametbl_get - handle a TIPC_TLV_NAME_TBL_QUERY configuration request
 *
 * Validates the request TLV, then prints the requested section of the
 * name table directly into a newly allocated reply buffer, finally
 * trimming the reply to the text actually generated.
 *
 * Returns the reply buffer, an error-string reply for a malformed
 * request, or NULL if reply allocation fails.
 */

struct sk_buff *nametbl_get(const void *req_tlv_area, int req_tlv_space)
{
	struct sk_buff *buf;
	struct tipc_name_table_query *argv;
	struct tlv_desc *rep_tlv;
	struct print_buf b;
	int str_len;

	/* Validate the incoming TLV before trusting its contents */
	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NAME_TBL_QUERY))
		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	buf = cfg_reply_alloc(TLV_SPACE(MAX_NAME_TBL_QUERY));
	if (!buf)
		return NULL;

	/* Print straight into the reply TLV's data area */
	rep_tlv = (struct tlv_desc *)buf->data;
	printbuf_init(&b, TLV_DATA(rep_tlv), MAX_NAME_TBL_QUERY);
	argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area);
	read_lock_bh(&nametbl_lock);
	nametbl_list(&b, ntohl(argv->depth), ntohl(argv->type),
		     ntohl(argv->lowbound), ntohl(argv->upbound));
	read_unlock_bh(&nametbl_lock);
	str_len = printbuf_validate(&b);

	/* Size the reply to the generated text */
	skb_put(buf, TLV_SPACE(str_len));
	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);

	return buf;
}
1025
/**
 * nametbl_dump - dump name table contents to the console
 *
 * NOTE(review): a depth_info of 0 makes nametbl_list() return without
 * printing anything -- confirm the intended default depth.
 */

void nametbl_dump(void)
{
	nametbl_list(CONS, 0, 0, 0, 0);
}
1030
1031int nametbl_init(void)
1032{
1033 int array_size = sizeof(struct hlist_head) * tipc_nametbl_size;
1034
1035 table.types = (struct hlist_head *)kmalloc(array_size, GFP_ATOMIC);
1036 if (!table.types)
1037 return -ENOMEM;
1038
1039 write_lock_bh(&nametbl_lock);
1040 memset(table.types, 0, array_size);
1041 table.local_publ_count = 0;
1042 write_unlock_bh(&nametbl_lock);
1043 return 0;
1044}
1045
1046void nametbl_stop(void)
1047{
1048 struct hlist_head *seq_head;
1049 struct hlist_node *seq_node;
1050 struct hlist_node *tmp;
1051 struct name_seq *seq;
1052 u32 i;
1053
1054 if (!table.types)
1055 return;
1056
1057 write_lock_bh(&nametbl_lock);
1058 for (i = 0; i < tipc_nametbl_size; i++) {
1059 seq_head = &table.types[i];
1060 hlist_for_each_entry_safe(seq, seq_node, tmp, seq_head, ns_list) {
1061 struct sub_seq *sseq = seq->sseqs;
1062
1063 for (; sseq != &seq->sseqs[seq->first_free]; sseq++) {
1064 struct publication *publ = sseq->zone_list;
1065 assert(publ);
1066 do {
1067 struct publication *next =
1068 publ->zone_list_next;
1069 kfree(publ);
1070 publ = next;
1071 }
1072 while (publ != sseq->zone_list);
1073 }
1074 }
1075 }
1076 kfree(table.types);
1077 table.types = NULL;
1078 write_unlock_bh(&nametbl_lock);
1079}
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
new file mode 100644
index 000000000000..f82693384f60
--- /dev/null
+++ b/net/tipc/name_table.h
@@ -0,0 +1,108 @@
1/*
2 * net/tipc/name_table.h: Include file for TIPC name table code
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_NAME_TABLE_H
38#define _TIPC_NAME_TABLE_H
39
40#include "node_subscr.h"
41
42struct subscription;
43struct port_list;
44
45/*
46 * TIPC name types reserved for internal TIPC use (both current and planned)
47 */
48
49#define TIPC_ZM_SRV 3 /* zone master service name type */
50
51
/**
 * struct publication - info about a published name sequence (or name)
 * @type: name sequence type
 * @lower: name sequence lower bound
 * @upper: name sequence upper bound
 * @scope: scope of publication (node/cluster/zone)
 * @node: network address of publishing port's node
 * @ref: publishing port
 * @key: publication key (identifies the publication, together with
 *       @ref and @node, when it is withdrawn)
 * @subscr: subscription to "node down" event (for off-node publications only)
 * @local_list: adjacent entries in list of publications made by this node
 * @pport_list: adjacent entries in list of publications made by this port
 * @node_list_next: next matching name seq publication with >= node scope
 * @cluster_list_next: next matching name seq publication with >= cluster scope
 * @zone_list_next: next matching name seq publication with >= zone scope
 *
 * Note that the node list, cluster list, and zone list are circular lists.
 */

struct publication {
	u32 type;
	u32 lower;
	u32 upper;
	u32 scope;
	u32 node;
	u32 ref;
	u32 key;
	struct node_subscr subscr;
	struct list_head local_list;
	struct list_head pport_list;
	struct publication *node_list_next;
	struct publication *cluster_list_next;
	struct publication *zone_list_next;
};
86
87
88extern rwlock_t nametbl_lock;
89
90struct sk_buff *nametbl_get(const void *req_tlv_area, int req_tlv_space);
91u32 nametbl_translate(u32 type, u32 instance, u32 *node);
92int nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
93 struct port_list *dports);
94int nametbl_publish_rsv(u32 ref, unsigned int scope,
95 struct tipc_name_seq const *seq);
96struct publication *nametbl_publish(u32 type, u32 lower, u32 upper,
97 u32 scope, u32 port_ref, u32 key);
98int nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key);
99struct publication *nametbl_insert_publ(u32 type, u32 lower, u32 upper,
100 u32 scope, u32 node, u32 ref, u32 key);
101struct publication *nametbl_remove_publ(u32 type, u32 lower,
102 u32 node, u32 ref, u32 key);
103void nametbl_subscribe(struct subscription *s);
104void nametbl_unsubscribe(struct subscription *s);
105int nametbl_init(void);
106void nametbl_stop(void);
107
108#endif
diff --git a/net/tipc/net.c b/net/tipc/net.c
new file mode 100644
index 000000000000..6826b493c1d6
--- /dev/null
+++ b/net/tipc/net.c
@@ -0,0 +1,311 @@
1/*
2 * net/tipc/net.c: TIPC network routing code
3 *
4 * Copyright (c) 1995-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "bearer.h"
39#include "net.h"
40#include "zone.h"
41#include "addr.h"
42#include "name_table.h"
43#include "name_distr.h"
44#include "subscr.h"
45#include "link.h"
46#include "msg.h"
47#include "port.h"
48#include "bcast.h"
49#include "discover.h"
50#include "config.h"
51
52/*
53 * The TIPC locking policy is designed to ensure a very fine locking
54 * granularity, permitting complete parallel access to individual
55 * port and node/link instances. The code consists of three major
56 * locking domains, each protected with their own disjunct set of locks.
57 *
58 * 1: The routing hierarchy.
59 * Comprises the structures 'zone', 'cluster', 'node', 'link'
60 * and 'bearer'. The whole hierarchy is protected by a big
 * read/write lock, net_lock, to ensure that nothing is added
62 * or removed while code is accessing any of these structures.
63 * This layer must not be called from the two others while they
64 * hold any of their own locks.
65 * Neither must it itself do any upcalls to the other two before
66 * it has released net_lock and other protective locks.
67 *
68 * Within the net_lock domain there are two sub-domains;'node' and
69 * 'bearer', where local write operations are permitted,
70 * provided that those are protected by individual spin_locks
71 * per instance. Code holding net_lock(read) and a node spin_lock
72 * is permitted to poke around in both the node itself and its
73 * subordinate links. I.e, it can update link counters and queues,
74 * change link state, send protocol messages, and alter the
75 * "active_links" array in the node; but it can _not_ remove a link
76 * or a node from the overall structure.
77 * Correspondingly, individual bearers may change status within a
 * net_lock(read), protected by an individual spin_lock per bearer
79 * instance, but it needs net_lock(write) to remove/add any bearers.
80 *
81 *
82 * 2: The transport level of the protocol.
83 * This consists of the structures port, (and its user level
84 * representations, such as user_port and tipc_sock), reference and
85 * tipc_user (port.c, reg.c, socket.c).
86 *
87 * This layer has four different locks:
88 * - The tipc_port spin_lock. This is protecting each port instance
89 * from parallel data access and removal. Since we can not place
90 * this lock in the port itself, it has been placed in the
91 * corresponding reference table entry, which has the same life
92 * cycle as the module. This entry is difficult to access from
93 * outside the TIPC core, however, so a pointer to the lock has
94 * been added in the port instance, -to be used for unlocking
95 * only.
 * - A read/write lock to protect the reference table itself (ref.c).
97 * (Nobody is using read-only access to this, so it can just as
98 * well be changed to a spin_lock)
99 * - A spin lock to protect the registry of kernel/driver users (reg.c)
 * - A global spin_lock (port_lock), whose only task is to ensure
 * consistency where more than one port is involved in an operation,
 * i.e., when a port is part of a linked list of ports.
103 * There are two such lists; 'port_list', which is used for management,
104 * and 'wait_list', which is used to queue ports during congestion.
105 *
106 * 3: The name table (name_table.c, name_distr.c, subscription.c)
107 * - There is one big read/write-lock (nametbl_lock) protecting the
108 * overall name table structure. Nothing must be added/removed to
109 * this structure without holding write access to it.
110 * - There is one local spin_lock per sub_sequence, which can be seen
111 * as a sub-domain to the nametbl_lock domain. It is used only
112 * for translation operations, and is needed because a translation
113 * steps the root of the 'publication' linked list between each lookup.
114 * This is always used within the scope of a nametbl_lock(read).
115 * - A local spin_lock protecting the queue of subscriber events.
116*/
117
118rwlock_t net_lock = RW_LOCK_UNLOCKED;
119struct network net = { 0 };
120
/*
 * net_select_remote_node - choose a node through which 'addr' can be
 * reached, delegating to the zone that owns 'addr'; 'ref' is passed
 * through as the selection value.
 */
struct node *net_select_remote_node(u32 addr, u32 ref)
{
	return zone_select_remote_node(net.zones[tipc_zone(addr)], addr, ref);
}
125
/*
 * net_select_router - choose a router toward 'addr', delegating to the
 * zone that owns 'addr'; 'ref' is passed through as the selection value.
 */
u32 net_select_router(u32 addr, u32 ref)
{
	return zone_select_router(net.zones[tipc_zone(addr)], addr, ref);
}
130
131
132u32 net_next_node(u32 a)
133{
134 if (net.zones[tipc_zone(a)])
135 return zone_next_node(a);
136 return 0;
137}
138
139void net_remove_as_router(u32 router)
140{
141 u32 z_num;
142
143 for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
144 if (!net.zones[z_num])
145 continue;
146 zone_remove_as_router(net.zones[z_num], router);
147 }
148}
149
150void net_send_external_routes(u32 dest)
151{
152 u32 z_num;
153
154 for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
155 if (net.zones[z_num])
156 zone_send_external_routes(net.zones[z_num], dest);
157 }
158}
159
160int net_init(void)
161{
162 u32 sz = sizeof(struct _zone *) * (tipc_max_zones + 1);
163
164 memset(&net, 0, sizeof(net));
165 net.zones = (struct _zone **)kmalloc(sz, GFP_ATOMIC);
166 if (!net.zones) {
167 return -ENOMEM;
168 }
169 memset(net.zones, 0, sz);
170 return TIPC_OK;
171}
172
173void net_stop(void)
174{
175 u32 z_num;
176
177 if (!net.zones)
178 return;
179
180 for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
181 zone_delete(net.zones[z_num]);
182 }
183 kfree(net.zones);
184 net.zones = 0;
185}
186
/*
 * net_route_named_msg - resolve a named message's destination and
 * re-route it; unresolvable names are rejected with TIPC_ERR_NO_NAME,
 * non-named messages are discarded.
 */
static void net_route_named_msg(struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	u32 dnode;
	u32 dport;

	/* Only named messages can be resolved here */
	if (!msg_named(msg)) {
		msg_dbg(msg, "net->drop_nam:");
		buf_discard(buf);
		return;
	}

	/* Look up <type,instance> within the message's lookup domain;
	 * dnode is updated in place with the matching node (if any) */
	dnode = addr_domain(msg_lookup_scope(msg));
	dport = nametbl_translate(msg_nametype(msg), msg_nameinst(msg), &dnode);
	dbg("net->lookup<%u,%u>-><%u,%x>\n",
	    msg_nametype(msg), msg_nameinst(msg), dport, dnode);
	if (dport) {
		/* Resolved: fill in the destination and route again */
		msg_set_destnode(msg, dnode);
		msg_set_destport(msg, dport);
		net_route_msg(buf);
		return;
	}
	msg_dbg(msg, "net->rej:NO NAME: ");
	tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
}
212
/*
 * net_route_msg - route a message toward its destination
 *
 * Discards or rejects messages that have been rerouted too many times
 * (loop protection), delivers messages addressed to this node to the
 * appropriate subsystem, and forwards everything else onto a link.
 */
void net_route_msg(struct sk_buff *buf)
{
	struct tipc_msg *msg;
	u32 dnode;

	if (!buf)
		return;
	msg = buf_msg(buf);

	/* Loop protection: give up after 6 reroutes */
	msg_incr_reroute_cnt(msg);
	if (msg_reroute_cnt(msg) > 6) {
		if (msg_errcode(msg)) {
			/* Already an error message: don't bounce it again */
			msg_dbg(msg, "NET>DISC>:");
			buf_discard(buf);
		} else {
			msg_dbg(msg, "NET>REJ>:");
			tipc_reject_msg(buf, msg_destport(msg) ?
					TIPC_ERR_NO_PORT : TIPC_ERR_NO_NAME);
		}
		return;
	}

	msg_dbg(msg, "net->rout: ");

	/* Handle message for this node */
	dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
	if (in_scope(dnode, tipc_own_addr)) {
		if (msg_isdata(msg)) {
			if (msg_mcast(msg))
				port_recv_mcast(buf, NULL);
			else if (msg_destport(msg))
				port_recv_msg(buf);
			else
				net_route_named_msg(buf);
			return;
		}
		/* Non-data messages: dispatch by internal user */
		switch (msg_user(msg)) {
		case ROUTE_DISTRIBUTOR:
			cluster_recv_routing_table(buf);
			break;
		case NAME_DISTRIBUTOR:
			named_recv(buf);
			break;
		case CONN_MANAGER:
			port_recv_proto_msg(buf);
			break;
		default:
			msg_dbg(msg,"DROP/NET/<REC<");
			buf_discard(buf);
		}
		return;
	}

	/* Handle message for another node */
	msg_dbg(msg, "NET>SEND>: ");
	link_send(buf, dnode, msg_link_selector(msg));
}
270
/*
 * tipc_start_net - switch TIPC from "node" mode to "network" mode
 *
 * Re-initializes naming and port state, then brings up the bearer,
 * network, cluster and broadcast-link subsystems in order.  Subscriber
 * and configuration services are restarted asynchronously via
 * k_signal().
 *
 * Returns TIPC_OK on success, -ENOPROTOOPT if not currently in node
 * mode, or the error of the first subsystem that fails to initialize.
 */
int tipc_start_net(void)
{
	char addr_string[16];
	int res;

	if (tipc_mode != TIPC_NODE_MODE)
		return -ENOPROTOOPT;

	tipc_mode = TIPC_NET_MODE;
	named_reinit();
	port_reinit();

	/* Short-circuits on the first subsystem that fails */
	if ((res = bearer_init()) ||
	    (res = net_init()) ||
	    (res = cluster_init()) ||
	    (res = bclink_init())) {
		return res;
	}
	/* Restart subscriber/config services from signal-handler context */
	subscr_stop();
	cfg_stop();
	k_signal((Handler)subscr_start, 0);
	k_signal((Handler)cfg_init, 0);
	info("Started in network mode\n");
	info("Own node address %s, network identity %u\n",
	     addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
	return TIPC_OK;
}
298
/*
 * tipc_stop_net - switch TIPC from "network" mode back to "node" mode
 *
 * Tears down bearers, the broadcast link, and the routing hierarchy
 * under net_lock(write).  No-op if not currently in network mode.
 */
void tipc_stop_net(void)
{
	if (tipc_mode != TIPC_NET_MODE)
		return;
	write_lock_bh(&net_lock);
	bearer_stop();
	tipc_mode = TIPC_NODE_MODE;
	bclink_stop();
	net_stop();
	write_unlock_bh(&net_lock);
	info("Left network mode \n");
}
311
diff --git a/net/tipc/net.h b/net/tipc/net.h
new file mode 100644
index 000000000000..948c6d42102c
--- /dev/null
+++ b/net/tipc/net.h
@@ -0,0 +1,66 @@
1/*
2 * net/tipc/net.h: Include file for TIPC network routing code
3 *
4 * Copyright (c) 1995-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_NET_H
38#define _TIPC_NET_H
39
40struct _zone;
41
42/**
43 * struct network - TIPC network structure
44 * @zones: array of pointers to all zones within network
45 */
46
struct network {
	/* Array of zone pointers, indexed by zone number */
	struct _zone **zones;
};
50
51
52extern struct network net;
53extern rwlock_t net_lock;
54
55int net_init(void);
56void net_stop(void);
57void net_remove_as_router(u32 router);
58void net_send_external_routes(u32 dest);
59void net_route_msg(struct sk_buff *buf);
60struct node *net_select_remote_node(u32 addr, u32 ref);
61u32 net_select_router(u32 addr, u32 ref);
62
63int tipc_start_net(void);
64void tipc_stop_net(void);
65
66#endif
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
new file mode 100644
index 000000000000..19b3f4022532
--- /dev/null
+++ b/net/tipc/netlink.c
@@ -0,0 +1,112 @@
1/*
2 * net/tipc/netlink.c: TIPC configuration handling
3 *
4 * Copyright (c) 2005-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "config.h"
39#include <net/genetlink.h>
40
/**
 * handle_cmd - process a TIPC configuration request received via netlink
 * @skb: received socket buffer (unused; headers come via @info)
 * @info: generic netlink request information
 *
 * Executes the embedded configuration command and, if a reply buffer is
 * produced, sends it back to the requesting process.  Always returns 0.
 */

static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *rep_buf;
	struct nlmsghdr *rep_nlh;
	struct nlmsghdr *req_nlh = info->nlhdr;
	struct tipc_genlmsghdr *req_userhdr = info->userhdr;
	/* Room for the netlink + genetlink + TIPC headers in the reply */
	int hdr_space = NLMSG_SPACE(GENL_HDRLEN + TIPC_GENL_HDRLEN);

	/* Commands with either of the top two bits set require net admin
	 * capability -- NOTE(review): confirm 0xC000 matches the command
	 * numbering scheme */
	if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN)))
		rep_buf = cfg_reply_error_string(TIPC_CFG_NOT_NET_ADMIN);
	else
		rep_buf = cfg_do_cmd(req_userhdr->dest,
				     req_userhdr->cmd,
				     NLMSG_DATA(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN,
				     NLMSG_PAYLOAD(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN),
				     hdr_space);

	if (rep_buf) {
		/* Prepend a copy of the request's headers to the reply,
		 * patch in the real length, and unicast it back */
		skb_push(rep_buf, hdr_space);
		rep_nlh = (struct nlmsghdr *)rep_buf->data;
		memcpy(rep_nlh, req_nlh, hdr_space);
		rep_nlh->nlmsg_len = rep_buf->len;
		genlmsg_unicast(rep_buf, req_nlh->nlmsg_pid);
	}

	return 0;
}
68
/* Generic netlink family used for TIPC configuration requests */
static struct genl_family family = {
	.id = GENL_ID_GENERATE,
	.name = TIPC_GENL_NAME,
	.version = TIPC_GENL_VERSION,
	.hdrsize = TIPC_GENL_HDRLEN,
	.maxattr = 0,
};

/* Single operation: all config commands are multiplexed through it,
 * dispatched on the command code in the TIPC user header */
static struct genl_ops ops = {
	.cmd = TIPC_GENL_CMD,
	.doit = handle_cmd,
};

/* Non-zero once 'family' has been successfully registered */
static int family_registered = 0;
83
84int netlink_start(void)
85{
86
87
88 if (genl_register_family(&family))
89 goto err;
90
91 family_registered = 1;
92
93 if (genl_register_ops(&family, &ops))
94 goto err_unregister;
95
96 return 0;
97
98 err_unregister:
99 genl_unregister_family(&family);
100 family_registered = 0;
101 err:
102 err("Failed to register netlink interface\n");
103 return -EFAULT;
104}
105
106void netlink_stop(void)
107{
108 if (family_registered) {
109 genl_unregister_family(&family);
110 family_registered = 0;
111 }
112}
diff --git a/net/tipc/node.c b/net/tipc/node.c
new file mode 100644
index 000000000000..05688d01138b
--- /dev/null
+++ b/net/tipc/node.c
@@ -0,0 +1,679 @@
1/*
2 * net/tipc/node.c: TIPC node management routines
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "config.h"
39#include "node.h"
40#include "cluster.h"
41#include "net.h"
42#include "addr.h"
43#include "node_subscr.h"
44#include "link.h"
45#include "port.h"
46#include "bearer.h"
47#include "name_distr.h"
48#include "net.h"
49
void node_print(struct print_buf *buf, struct node *n_ptr, char *str);
static void node_lost_contact(struct node *n_ptr);
static void node_established_contact(struct node *n_ptr);

/* Linked list of all nodes within own cluster, sorted by ascending address */
struct node *nodes = NULL;

/* Broadcast tag of this node; incremented/decremented as broadcast-capable
 * peers with lower addresses come and go (see node_established_contact()) */
u32 tipc_own_tag = 0;
57
58struct node *node_create(u32 addr)
59{
60 struct cluster *c_ptr;
61 struct node *n_ptr;
62 struct node **curr_node;
63
64 n_ptr = kmalloc(sizeof(*n_ptr),GFP_ATOMIC);
65 if (n_ptr != NULL) {
66 memset(n_ptr, 0, sizeof(*n_ptr));
67 n_ptr->addr = addr;
68 n_ptr->lock = SPIN_LOCK_UNLOCKED;
69 INIT_LIST_HEAD(&n_ptr->nsub);
70
71 c_ptr = cluster_find(addr);
72 if (c_ptr == NULL)
73 c_ptr = cluster_create(addr);
74 if (c_ptr != NULL) {
75 n_ptr->owner = c_ptr;
76 cluster_attach_node(c_ptr, n_ptr);
77 n_ptr->last_router = -1;
78
79 /* Insert node into ordered list */
80 for (curr_node = &nodes; *curr_node;
81 curr_node = &(*curr_node)->next) {
82 if (addr < (*curr_node)->addr) {
83 n_ptr->next = *curr_node;
84 break;
85 }
86 }
87 (*curr_node) = n_ptr;
88 } else {
89 kfree(n_ptr);
90 n_ptr = NULL;
91 }
92 }
93 return n_ptr;
94}
95
96void node_delete(struct node *n_ptr)
97{
98 if (!n_ptr)
99 return;
100
101#if 0
102 /* Not needed because links are already deleted via bearer_stop() */
103
104 u32 l_num;
105
106 for (l_num = 0; l_num < MAX_BEARERS; l_num++) {
107 link_delete(n_ptr->links[l_num]);
108 }
109#endif
110
111 dbg("node %x deleted\n", n_ptr->addr);
112 kfree(n_ptr);
113}
114
115
116/**
117 * node_link_up - handle addition of link
118 *
119 * Link becomes active (alone or shared) or standby, depending on its priority.
120 */
121
122void node_link_up(struct node *n_ptr, struct link *l_ptr)
123{
124 struct link **active = &n_ptr->active_links[0];
125
126 info("Established link <%s> on network plane %c\n",
127 l_ptr->name, l_ptr->b_ptr->net_plane);
128
129 if (!active[0]) {
130 dbg(" link %x into %x/%x\n", l_ptr, &active[0], &active[1]);
131 active[0] = active[1] = l_ptr;
132 node_established_contact(n_ptr);
133 return;
134 }
135 if (l_ptr->priority < active[0]->priority) {
136 info("Link is standby\n");
137 return;
138 }
139 link_send_duplicate(active[0], l_ptr);
140 if (l_ptr->priority == active[0]->priority) {
141 active[0] = l_ptr;
142 return;
143 }
144 info("Link <%s> on network plane %c becomes standby\n",
145 active[0]->name, active[0]->b_ptr->net_plane);
146 active[0] = active[1] = l_ptr;
147}
148
149/**
150 * node_select_active_links - select active link
151 */
152
153static void node_select_active_links(struct node *n_ptr)
154{
155 struct link **active = &n_ptr->active_links[0];
156 u32 i;
157 u32 highest_prio = 0;
158
159 active[0] = active[1] = 0;
160
161 for (i = 0; i < MAX_BEARERS; i++) {
162 struct link *l_ptr = n_ptr->links[i];
163
164 if (!l_ptr || !link_is_up(l_ptr) ||
165 (l_ptr->priority < highest_prio))
166 continue;
167
168 if (l_ptr->priority > highest_prio) {
169 highest_prio = l_ptr->priority;
170 active[0] = active[1] = l_ptr;
171 } else {
172 active[1] = l_ptr;
173 }
174 }
175}
176
/**
 * node_link_down - handle loss of link
 * @n_ptr: node the link led to
 * @l_ptr: link that has gone down
 */

void node_link_down(struct node *n_ptr, struct link *l_ptr)
{
	struct link **active;

	/* A standby link going down needs no failover handling */
	if (!link_is_active(l_ptr)) {
		info("Lost standby link <%s> on network plane %c\n",
		     l_ptr->name, l_ptr->b_ptr->net_plane);
		return;
	}
	info("Lost link <%s> on network plane %c\n",
	     l_ptr->name, l_ptr->b_ptr->net_plane);

	/* Replace the lost link in each active slot with its partner;
	 * if both slots held the lost link, rescan all links for a
	 * replacement */
	active = &n_ptr->active_links[0];
	if (active[0] == l_ptr)
		active[0] = active[1];
	if (active[1] == l_ptr)
		active[1] = active[0];
	if (active[0] == l_ptr)
		node_select_active_links(n_ptr);
	if (node_is_up(n_ptr))
		link_changeover(l_ptr);	/* node still reachable: move traffic */
	else
		node_lost_contact(n_ptr);
}
205
206int node_has_active_links(struct node *n_ptr)
207{
208 return (n_ptr &&
209 ((n_ptr->active_links[0]) || (n_ptr->active_links[1])));
210}
211
212int node_has_redundant_links(struct node *n_ptr)
213{
214 return (node_has_active_links(n_ptr) &&
215 (n_ptr->active_links[0] != n_ptr->active_links[1]));
216}
217
218int node_has_active_routes(struct node *n_ptr)
219{
220 return (n_ptr && (n_ptr->last_router >= 0));
221}
222
/* Returns non-zero if the node is reachable via a link or a route */
int node_is_up(struct node *n_ptr)
{
	if (node_has_active_links(n_ptr))
		return 1;
	return node_has_active_routes(n_ptr);
}
227
228struct node *node_attach_link(struct link *l_ptr)
229{
230 struct node *n_ptr = node_find(l_ptr->addr);
231
232 if (!n_ptr)
233 n_ptr = node_create(l_ptr->addr);
234 if (n_ptr) {
235 u32 bearer_id = l_ptr->b_ptr->identity;
236 char addr_string[16];
237
238 assert(bearer_id < MAX_BEARERS);
239 if (n_ptr->link_cnt >= 2) {
240 char addr_string[16];
241
242 err("Attempt to create third link to %s\n",
243 addr_string_fill(addr_string, n_ptr->addr));
244 return 0;
245 }
246
247 if (!n_ptr->links[bearer_id]) {
248 n_ptr->links[bearer_id] = l_ptr;
249 net.zones[tipc_zone(l_ptr->addr)]->links++;
250 n_ptr->link_cnt++;
251 return n_ptr;
252 }
253 err("Attempt to establish second link on <%s> to <%s> \n",
254 l_ptr->b_ptr->publ.name,
255 addr_string_fill(addr_string, l_ptr->addr));
256 }
257 return 0;
258}
259
260void node_detach_link(struct node *n_ptr, struct link *l_ptr)
261{
262 n_ptr->links[l_ptr->b_ptr->identity] = 0;
263 net.zones[tipc_zone(l_ptr->addr)]->links--;
264 n_ptr->link_cnt--;
265}
266
267/*
268 * Routing table management - five cases to handle:
269 *
270 * 1: A link towards a zone/cluster external node comes up.
271 * => Send a multicast message updating routing tables of all
272 * system nodes within own cluster that the new destination
273 * can be reached via this node.
274 * (node.establishedContact()=>cluster.multicastNewRoute())
275 *
276 * 2: A link towards a slave node comes up.
277 * => Send a multicast message updating routing tables of all
278 * system nodes within own cluster that the new destination
279 * can be reached via this node.
280 * (node.establishedContact()=>cluster.multicastNewRoute())
281 * => Send a message to the slave node about existence
282 * of all system nodes within cluster:
283 * (node.establishedContact()=>cluster.sendLocalRoutes())
284 *
285 * 3: A new cluster local system node becomes available.
286 * => Send message(s) to this particular node containing
287 * information about all cluster external and slave
288 * nodes which can be reached via this node.
289 * (node.establishedContact()==>network.sendExternalRoutes())
290 * (node.establishedContact()==>network.sendSlaveRoutes())
291 * => Send messages to all directly connected slave nodes
292 * containing information about the existence of the new node
293 * (node.establishedContact()=>cluster.multicastNewRoute())
294 *
295 * 4: The link towards a zone/cluster external node or slave
296 * node goes down.
 * => Send a multicast message updating routing tables of all
298 * nodes within cluster that the new destination can not any
299 * longer be reached via this node.
300 * (node.lostAllLinks()=>cluster.bcastLostRoute())
301 *
302 * 5: A cluster local system node becomes unavailable.
303 * => Remove all references to this node from the local
304 * routing tables. Note: This is a completely node
305 * local operation.
306 * (node.lostAllLinks()=>network.removeAsRouter())
307 * => Send messages to all directly connected slave nodes
308 * containing information about loss of the node
309 * (node.establishedContact()=>cluster.multicastLostRoute())
310 *
311 */
312
/**
 * node_established_contact - handle first working link/route to a node
 * @n_ptr: node that has become reachable
 *
 * Publishes the name table to the new node (unless routes to it already
 * existed) and updates routing tables according to the usage cases
 * documented above.
 */

static void node_established_contact(struct node *n_ptr)
{
	struct cluster *c_ptr;

	dbg("node_established_contact:-> %x\n", n_ptr->addr);
	if (!node_has_active_routes(n_ptr)) {
		k_signal((Handler)named_node_up, n_ptr->addr);
	}

	/* Synchronize broadcast acks */
	n_ptr->bclink.acked = bclink_get_last_sent();

	/* Nothing more to do if this node is a slave */
	if (is_slave(tipc_own_addr))
		return;
	if (!in_own_cluster(n_ptr->addr)) {
		/* Usage case 1 (see above) */
		c_ptr = cluster_find(tipc_own_addr);
		if (!c_ptr)
			c_ptr = cluster_create(tipc_own_addr);
		if (c_ptr)
			cluster_bcast_new_route(c_ptr, n_ptr->addr, 1,
						tipc_max_nodes);
		return;
	}

	c_ptr = n_ptr->owner;
	if (is_slave(n_ptr->addr)) {
		/* Usage case 2 (see above) */
		cluster_bcast_new_route(c_ptr, n_ptr->addr, 1, tipc_max_nodes);
		cluster_send_local_routes(c_ptr, n_ptr->addr);
		return;
	}

	/* Track the peer for broadcast purposes; a lower-addressed peer
	 * shifts this node's broadcast tag */
	if (n_ptr->bclink.supported) {
		nmap_add(&cluster_bcast_nodes, n_ptr->addr);
		if (n_ptr->addr < tipc_own_addr)
			tipc_own_tag++;
	}

	/* Case 3 (see above) */
	net_send_external_routes(n_ptr->addr);
	cluster_send_slave_routes(c_ptr, n_ptr->addr);
	cluster_bcast_new_route(c_ptr, n_ptr->addr, LOWEST_SLAVE,
				highest_allowed_slave);
}
358
/**
 * node_lost_contact - handle loss of last active link to a node
 * @n_ptr: node that may no longer be reachable
 *
 * Discards pending broadcast state for the node and updates routing
 * tables (usage cases 4 and 5 above).  If the node is still reachable
 * via routers nothing more is done; otherwise link changeover is aborted
 * and "node down" subscribers are notified.
 */

static void node_lost_contact(struct node *n_ptr)
{
	struct cluster *c_ptr;
	struct node_subscr *ns, *tns;
	char addr_string[16];
	u32 i;

	/* Clean up broadcast reception remains */
	n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
	while (n_ptr->bclink.deferred_head) {
		struct sk_buff* buf = n_ptr->bclink.deferred_head;
		n_ptr->bclink.deferred_head = buf->next;
		buf_discard(buf);
	}
	if (n_ptr->bclink.defragm) {
		buf_discard(n_ptr->bclink.defragm);
		n_ptr->bclink.defragm = NULL;
	}
	if (in_own_cluster(n_ptr->addr) && n_ptr->bclink.supported) {
		/* NOTE(review): '+ 10000' appears to force acknowledgement
		 * of all outstanding broadcast messages -- confirm */
		bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000));
	}

	/* Update routing tables */
	if (is_slave(tipc_own_addr)) {
		net_remove_as_router(n_ptr->addr);
	} else {
		if (!in_own_cluster(n_ptr->addr)) {
			/* Case 4 (see above) */
			c_ptr = cluster_find(tipc_own_addr);
			cluster_bcast_lost_route(c_ptr, n_ptr->addr, 1,
						 tipc_max_nodes);
		} else {
			/* Case 5 (see above) */
			c_ptr = cluster_find(n_ptr->addr);
			if (is_slave(n_ptr->addr)) {
				cluster_bcast_lost_route(c_ptr, n_ptr->addr, 1,
							 tipc_max_nodes);
			} else {
				if (n_ptr->bclink.supported) {
					nmap_remove(&cluster_bcast_nodes,
						    n_ptr->addr);
					if (n_ptr->addr < tipc_own_addr)
						tipc_own_tag--;
				}
				net_remove_as_router(n_ptr->addr);
				cluster_bcast_lost_route(c_ptr, n_ptr->addr,
							 LOWEST_SLAVE,
							 highest_allowed_slave);
			}
		}
	}
	/* Still reachable via a router: keep the node alive */
	if (node_has_active_routes(n_ptr))
		return;

	info("Lost contact with %s\n",
	     addr_string_fill(addr_string, n_ptr->addr));

	/* Abort link changeover */
	for (i = 0; i < MAX_BEARERS; i++) {
		struct link *l_ptr = n_ptr->links[i];
		if (!l_ptr)
			continue;
		l_ptr->reset_checkpoint = l_ptr->next_in_no;
		l_ptr->exp_msg_count = 0;
		link_reset_fragments(l_ptr);
	}

	/* Notify subscribers */
	list_for_each_entry_safe(ns, tns, &n_ptr->nsub, nodesub_list) {
		ns->node = 0;
		list_del_init(&ns->nodesub_list);
		k_signal((Handler)ns->handle_node_down,
			 (unsigned long)ns->usr_handle);
	}
}
434
/**
 * node_select_next_hop - find the next-hop node for a message
 * @addr: destination network address
 * @selector: value used to spread traffic over equivalent routes
 *
 * Called when cluster local lookup has failed.
 *
 * Returns pointer to next-hop node, or 0 if no usable node/router exists.
 */

struct node *node_select_next_hop(u32 addr, u32 selector)
{
	struct node *n_ptr;
	u32 router_addr;

	if (!addr_domain_valid(addr))
		return 0;

	/* Look for direct link to destination processor */
	n_ptr = node_find(addr);
	if (n_ptr && node_has_active_links(n_ptr))
		return n_ptr;

	/* Cluster local system nodes *must* have direct links */
	if (!is_slave(addr) && in_own_cluster(addr))
		return 0;

	/* Look for cluster local router with direct link to node */
	router_addr = node_select_router(n_ptr, selector);
	if (router_addr)
		return node_select(router_addr, selector);

	/* Slave nodes can only be accessed within own cluster via a
	   known router with direct link -- if no router was found, give up */
	if (is_slave(addr))
		return 0;

	/* Inter zone/cluster -- find any direct link to remote cluster */
	addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
	n_ptr = net_select_remote_node(addr, selector);
	if (n_ptr && node_has_active_links(n_ptr))
		return n_ptr;

	/* Last resort -- look for any router to anywhere in remote zone */
	router_addr = net_select_router(addr, selector);
	if (router_addr)
		return node_select(router_addr, selector);

	return 0;
}
481
/**
 * node_select_router - select router to reach specified node
 * @n_ptr: destination node (may be 0)
 * @ref: value used to randomize the starting point of the search
 *
 * Uses a deterministic and fair algorithm for selecting router node:
 * scans the node's router bitmap upward from a @ref-derived start entry,
 * wrapping around to the lowest entries if nothing is found above it.
 *
 * Returns address of the selected router, or 0 if none is known.
 */

u32 node_select_router(struct node *n_ptr, u32 ref)
{
	u32 ulim;
	u32 mask;
	u32 start;
	u32 r;

	if (!n_ptr)
		return 0;

	/* last_router < 0 means no routes are known */
	if (n_ptr->last_router < 0)
		return 0;
	/* Highest bit index that can possibly be set in the bitmap */
	ulim = ((n_ptr->last_router + 1) * 32) - 1;

	/* Start entry must be random */
	mask = tipc_max_nodes;
	while (mask > ulim)
		mask >>= 1;
	start = ref & mask;
	r = start;

	/* Lookup upwards with wrap-around */
	do {
		if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1)
			break;
	} while (++r <= ulim);
	if (r > ulim) {
		r = 1;
		do {
			if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1)
				break;
		} while (++r < start);
		assert(r != start);
	}
	assert(r && (r <= ulim));
	return tipc_addr(own_zone(), own_cluster(), r);
}
525
526void node_add_router(struct node *n_ptr, u32 router)
527{
528 u32 r_num = tipc_node(router);
529
530 n_ptr->routers[r_num / 32] =
531 ((1 << (r_num % 32)) | n_ptr->routers[r_num / 32]);
532 n_ptr->last_router = tipc_max_nodes / 32;
533 while ((--n_ptr->last_router >= 0) &&
534 !n_ptr->routers[n_ptr->last_router]);
535}
536
537void node_remove_router(struct node *n_ptr, u32 router)
538{
539 u32 r_num = tipc_node(router);
540
541 if (n_ptr->last_router < 0)
542 return; /* No routes */
543
544 n_ptr->routers[r_num / 32] =
545 ((~(1 << (r_num % 32))) & (n_ptr->routers[r_num / 32]));
546 n_ptr->last_router = tipc_max_nodes / 32;
547 while ((--n_ptr->last_router >= 0) &&
548 !n_ptr->routers[n_ptr->last_router]);
549
550 if (!node_is_up(n_ptr))
551 node_lost_contact(n_ptr);
552}
553
#if 0
/* Debug helper (currently compiled out): dump a node's link table */
void node_print(struct print_buf *buf, struct node *n_ptr, char *str)
{
	u32 i;

	tipc_printf(buf, "\n\n%s", str);
	for (i = 0; i < MAX_BEARERS; i++) {
		if (!n_ptr->links[i])
			continue;
		tipc_printf(buf, "Links[%u]: %x, ", i, n_ptr->links[i]);
	}
	tipc_printf(buf, "Active links: [%x,%x]\n",
		    n_ptr->active_links[0], n_ptr->active_links[1]);
}
#endif
569
570u32 tipc_available_nodes(const u32 domain)
571{
572 struct node *n_ptr;
573 u32 cnt = 0;
574
575 for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) {
576 if (!in_scope(domain, n_ptr->addr))
577 continue;
578 if (node_is_up(n_ptr))
579 cnt++;
580 }
581 return cnt;
582}
583
/**
 * node_get_nodes - build reply listing all nodes within a domain
 * @req_tlv_area: request TLV (a network address giving the domain)
 * @req_tlv_space: size of request TLV area
 *
 * Returns a reply buffer with one TIPC_TLV_NODE_INFO per node in scope,
 * an error reply for a bad request, or NULL on allocation failure.
 */

struct sk_buff *node_get_nodes(const void *req_tlv_area, int req_tlv_space)
{
	u32 domain;
	struct sk_buff *buf;
	struct node *n_ptr;
	struct tipc_node_info node_info;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	domain = *(u32 *)TLV_DATA(req_tlv_area);
	domain = ntohl(domain);
	if (!addr_domain_valid(domain))
		return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
					      " (network address)");

	if (!nodes)
		return cfg_reply_none();

	/* For now, get space for all other nodes
	   (will need to modify this when slave nodes are supported */

	buf = cfg_reply_alloc(TLV_SPACE(sizeof(node_info)) *
			      (tipc_max_nodes - 1));
	if (!buf)
		return NULL;

	/* Add TLVs for all nodes in scope */

	for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) {
		if (!in_scope(domain, n_ptr->addr))
			continue;
		node_info.addr = htonl(n_ptr->addr);
		node_info.up = htonl(node_is_up(n_ptr));
		cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
			       &node_info, sizeof(node_info));
	}

	return buf;
}
624
625struct sk_buff *node_get_links(const void *req_tlv_area, int req_tlv_space)
626{
627 u32 domain;
628 struct sk_buff *buf;
629 struct node *n_ptr;
630 struct tipc_link_info link_info;
631
632 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
633 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
634
635 domain = *(u32 *)TLV_DATA(req_tlv_area);
636 domain = ntohl(domain);
637 if (!addr_domain_valid(domain))
638 return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
639 " (network address)");
640
641 if (!nodes)
642 return cfg_reply_none();
643
644 /* For now, get space for 2 links to all other nodes + bcast link
645 (will need to modify this when slave nodes are supported */
646
647 buf = cfg_reply_alloc(TLV_SPACE(sizeof(link_info)) *
648 (2 * (tipc_max_nodes - 1) + 1));
649 if (!buf)
650 return NULL;
651
652 /* Add TLV for broadcast link */
653
654 link_info.dest = tipc_own_addr & 0xfffff00;
655 link_info.dest = htonl(link_info.dest);
656 link_info.up = htonl(1);
657 sprintf(link_info.str, bc_link_name);
658 cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
659
660 /* Add TLVs for any other links in scope */
661
662 for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) {
663 u32 i;
664
665 if (!in_scope(domain, n_ptr->addr))
666 continue;
667 for (i = 0; i < MAX_BEARERS; i++) {
668 if (!n_ptr->links[i])
669 continue;
670 link_info.dest = htonl(n_ptr->addr);
671 link_info.up = htonl(link_is_up(n_ptr->links[i]));
672 strcpy(link_info.str, n_ptr->links[i]->name);
673 cfg_append_tlv(buf, TIPC_TLV_LINK_INFO,
674 &link_info, sizeof(link_info));
675 }
676 }
677
678 return buf;
679}
diff --git a/net/tipc/node.h b/net/tipc/node.h
new file mode 100644
index 000000000000..b39442badccf
--- /dev/null
+++ b/net/tipc/node.h
@@ -0,0 +1,144 @@
1/*
2 * net/tipc/node.h: Include file for TIPC node management routines
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_NODE_H
38#define _TIPC_NODE_H
39
40#include "node_subscr.h"
41#include "addr.h"
42#include "cluster.h"
43#include "bearer.h"
44
/**
 * struct node - TIPC node structure
 * @addr: network address of node
 * @lock: spinlock governing access to structure
 * @owner: pointer to cluster that node belongs to
 * @next: pointer to next node in sorted list of cluster's nodes
 * @nsub: list of "node down" subscriptions monitoring node
 * @active_links: pointers to active links to node
 * @links: pointers to all links to node
 * @link_cnt: number of links to node
 * @permit_changeover: non-zero if node has redundant links to this system
 * @routers: bitmap of routers usable to reach node (one bit per router)
 * @last_router: index of highest non-empty word in @routers, or -1 if none
 * @bclink: broadcast-related info
 * @supported: non-zero if node supports TIPC b'cast capability
 * @acked: sequence # of last outbound b'cast message acknowledged by node
 * @last_in: sequence # of last in-sequence b'cast message received from node
 * @gap_after: sequence # of last message not requiring a NAK request
 * @gap_to: sequence # of last message requiring a NAK request
 * @nack_sync: counter that determines when NAK requests should be sent
 * @deferred_head: oldest OOS b'cast message received from node
 * @deferred_tail: newest OOS b'cast message received from node
 * @defragm: list of partially reassembled b'cast message fragments from node
 */

struct node {
	u32 addr;
	spinlock_t lock;
	struct cluster *owner;
	struct node *next;
	struct list_head nsub;
	struct link *active_links[2];
	struct link *links[MAX_BEARERS];
	int link_cnt;
	int permit_changeover;
	u32 routers[512/32];	/* sized for up to 512 potential routers */
	int last_router;
	struct {
		int supported;
		u32 acked;
		u32 last_in;
		u32 gap_after;
		u32 gap_to;
		u32 nack_sync;
		struct sk_buff *deferred_head;
		struct sk_buff *deferred_tail;
		struct sk_buff *defragm;
	} bclink;
};
94
95extern struct node *nodes;
96extern u32 tipc_own_tag;
97
98struct node *node_create(u32 addr);
99void node_delete(struct node *n_ptr);
100struct node *node_attach_link(struct link *l_ptr);
101void node_detach_link(struct node *n_ptr, struct link *l_ptr);
102void node_link_down(struct node *n_ptr, struct link *l_ptr);
103void node_link_up(struct node *n_ptr, struct link *l_ptr);
104int node_has_active_links(struct node *n_ptr);
105int node_has_redundant_links(struct node *n_ptr);
106u32 node_select_router(struct node *n_ptr, u32 ref);
107struct node *node_select_next_hop(u32 addr, u32 selector);
108int node_is_up(struct node *n_ptr);
109void node_add_router(struct node *n_ptr, u32 router);
110void node_remove_router(struct node *n_ptr, u32 router);
111struct sk_buff *node_get_links(const void *req_tlv_area, int req_tlv_space);
112struct sk_buff *node_get_nodes(const void *req_tlv_area, int req_tlv_space);
113
114static inline struct node *node_find(u32 addr)
115{
116 if (likely(in_own_cluster(addr)))
117 return local_nodes[tipc_node(addr)];
118 else if (addr_domain_valid(addr)) {
119 struct cluster *c_ptr = cluster_find(addr);
120
121 if (c_ptr)
122 return c_ptr->nodes[tipc_node(addr)];
123 }
124 return 0;
125}
126
127static inline struct node *node_select(u32 addr, u32 selector)
128{
129 if (likely(in_own_cluster(addr)))
130 return local_nodes[tipc_node(addr)];
131 return node_select_next_hop(addr, selector);
132}
133
/* Acquire the node's lock (bottom halves disabled) */
static inline void node_lock(struct node *n_ptr)
{
	spin_lock_bh(&n_ptr->lock);
}
138
/* Release the node's lock (re-enabling bottom halves) */
static inline void node_unlock(struct node *n_ptr)
{
	spin_unlock_bh(&n_ptr->lock);
}
143
144#endif
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
new file mode 100644
index 000000000000..79375927916f
--- /dev/null
+++ b/net/tipc/node_subscr.c
@@ -0,0 +1,79 @@
1/*
2 * net/tipc/node_subscr.c: TIPC "node down" subscription handling
3 *
4 * Copyright (c) 1995-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "dbg.h"
39#include "node_subscr.h"
40#include "node.h"
41#include "addr.h"
42
43/**
44 * nodesub_subscribe - create "node down" subscription for specified node
45 */
46
47void nodesub_subscribe(struct node_subscr *node_sub, u32 addr,
48 void *usr_handle, net_ev_handler handle_down)
49{
50 node_sub->node = 0;
51 if (addr == tipc_own_addr)
52 return;
53 if (!addr_node_valid(addr)) {
54 warn("node_subscr with illegal %x\n", addr);
55 return;
56 }
57
58 node_sub->handle_node_down = handle_down;
59 node_sub->usr_handle = usr_handle;
60 node_sub->node = node_find(addr);
61 assert(node_sub->node);
62 node_lock(node_sub->node);
63 list_add_tail(&node_sub->nodesub_list, &node_sub->node->nsub);
64 node_unlock(node_sub->node);
65}
66
67/**
68 * nodesub_unsubscribe - cancel "node down" subscription (if any)
69 */
70
71void nodesub_unsubscribe(struct node_subscr *node_sub)
72{
73 if (!node_sub->node)
74 return;
75
76 node_lock(node_sub->node);
77 list_del_init(&node_sub->nodesub_list);
78 node_unlock(node_sub->node);
79}
diff --git a/net/tipc/node_subscr.h b/net/tipc/node_subscr.h
new file mode 100644
index 000000000000..a3b87ac4859b
--- /dev/null
+++ b/net/tipc/node_subscr.h
@@ -0,0 +1,63 @@
1/*
2 * net/tipc/node_subscr.h: Include file for TIPC "node down" subscription handling
3 *
4 * Copyright (c) 1995-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_NODE_SUBSCR_H
38#define _TIPC_NODE_SUBSCR_H
39
40#include "addr.h"
41
/* Callback invoked when the subscribed node is declared down;
 * receives the usr_handle stored in the subscription entry. */
typedef void (*net_ev_handler) (void *usr_handle);

/**
 * struct node_subscr - "node down" subscription entry
 * @node: ptr to node structure of interest (or NULL, if none)
 * @handle_node_down: routine to invoke when node fails
 * @usr_handle: argument to pass to routine when node fails
 * @nodesub_list: adjacent entries in list of subscriptions for the node
 */

struct node_subscr {
	struct node *node;
	net_ev_handler handle_node_down;
	void *usr_handle;
	struct list_head nodesub_list;
};
58
59void nodesub_subscribe(struct node_subscr *node_sub, u32 addr,
60 void *usr_handle, net_ev_handler handle_down);
61void nodesub_unsubscribe(struct node_subscr *node_sub);
62
63#endif
diff --git a/net/tipc/port.c b/net/tipc/port.c
new file mode 100644
index 000000000000..66caca7abe92
--- /dev/null
+++ b/net/tipc/port.c
@@ -0,0 +1,1708 @@
1/*
2 * net/tipc/port.c: TIPC port code
3 *
4 * Copyright (c) 1992-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "config.h"
39#include "dbg.h"
40#include "port.h"
41#include "addr.h"
42#include "link.h"
43#include "node.h"
44#include "port.h"
45#include "name_table.h"
46#include "user_reg.h"
47#include "msg.h"
48#include "bcast.h"
49
50/* Connection management: */
51#define PROBING_INTERVAL 3600000 /* [ms] => 1 h */
52#define CONFIRMED 0
53#define PROBING 1
54
55#define MAX_REJECT_SIZE 1024
56
57static struct sk_buff *msg_queue_head = 0;
58static struct sk_buff *msg_queue_tail = 0;
59
60spinlock_t port_list_lock = SPIN_LOCK_UNLOCKED;
61static spinlock_t queue_lock = SPIN_LOCK_UNLOCKED;
62
63LIST_HEAD(ports);
64static void port_handle_node_down(unsigned long ref);
65static struct sk_buff* port_build_self_abort_msg(struct port *,u32 err);
66static struct sk_buff* port_build_peer_abort_msg(struct port *,u32 err);
67static void port_timeout(unsigned long ref);
68
69
70static inline u32 port_peernode(struct port *p_ptr)
71{
72 return msg_destnode(&p_ptr->publ.phdr);
73}
74
75static inline u32 port_peerport(struct port *p_ptr)
76{
77 return msg_destport(&p_ptr->publ.phdr);
78}
79
80static inline u32 port_out_seqno(struct port *p_ptr)
81{
82 return msg_transp_seqno(&p_ptr->publ.phdr);
83}
84
85static inline void port_set_out_seqno(struct port *p_ptr, u32 seqno)
86{
87 msg_set_transp_seqno(&p_ptr->publ.phdr,seqno);
88}
89
90static inline void port_incr_out_seqno(struct port *p_ptr)
91{
92 struct tipc_msg *m = &p_ptr->publ.phdr;
93
94 if (likely(!msg_routed(m)))
95 return;
96 msg_set_transp_seqno(m, (msg_transp_seqno(m) + 1));
97}
98
/**
 * tipc_multicast - send a multicast message to local and remote destinations
 * @ref: reference of sending port
 * @seq: name sequence (type, lower, upper) identifying destination ports
 * @domain: lookup domain (currently unused by this routine)
 * @num_sect: number of iovec payload sections
 * @msg_sect: array of payload sections
 *
 * Builds the message once; sends the original buffer over the broadcast
 * link if any off-node destinations exist, and delivers a copy locally.
 * The buffer is duplicated only when both kinds of destination exist.
 *
 * Returns the result of msg_build()/bclink_send_msg(); negative on error.
 */

int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
		   u32 num_sect, struct iovec const *msg_sect)
{
	struct tipc_msg *hdr;
	struct sk_buff *buf;
	struct sk_buff *ibuf = NULL;		/* buffer for local delivery */
	struct port_list dports = {0, NULL, };
	struct port *oport = port_deref(ref);
	int ext_targets;			/* non-zero: off-node targets exist */
	int res;

	if (unlikely(!oport))
		return -EINVAL;

	/* Create multicast message */

	hdr = &oport->publ.phdr;
	msg_set_type(hdr, TIPC_MCAST_MSG);
	msg_set_nametype(hdr, seq->type);
	msg_set_namelower(hdr, seq->lower);
	msg_set_nameupper(hdr, seq->upper);
	msg_set_hdr_sz(hdr, MCAST_H_SIZE);
	res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE,
			!oport->user_port, &buf);
	if (unlikely(!buf))
		return res;

	/* Figure out where to send multicast message */

	ext_targets = nametbl_mc_translate(seq->type, seq->lower, seq->upper,
					   TIPC_NODE_SCOPE, &dports);

	/* Send message to destinations (duplicate it only if necessary) */

	if (ext_targets) {
		if (dports.count != 0) {
			/* both local and remote targets: local gets a copy */
			ibuf = skb_copy(buf, GFP_ATOMIC);
			if (ibuf == NULL) {
				port_list_free(&dports);
				buf_discard(buf);
				return -ENOMEM;
			}
		}
		/* bclink_send_msg() takes ownership of 'buf' */
		res = bclink_send_msg(buf);
		if ((res < 0) && (dports.count != 0)) {
			/* broadcast failed: drop the undelivered local copy */
			buf_discard(ibuf);
		}
	} else {
		/* local targets only: deliver the original buffer */
		ibuf = buf;
	}

	if (res >= 0) {
		if (ibuf)
			port_recv_mcast(ibuf, &dports);
	} else {
		port_list_free(&dports);
	}
	return res;
}
162
/**
 * port_recv_mcast - deliver multicast message to all destination ports
 * @buf: multicast message buffer (consumed by this call)
 * @dp: destination port list, or NULL to look one up from the name table
 *
 * If there is no port list, perform a lookup to create one.
 * The last destination gets the original buffer; all earlier ones get
 * clones.  The port list is stored in chunks of PLSIZE entries chained
 * through 'next', hence the index arithmetic in the delivery loop.
 */

void port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
{
	struct tipc_msg* msg;
	struct port_list dports = {0, NULL, };
	struct port_list *item = dp;	/* current chunk of the port list */
	int cnt = 0;

	assert(buf);
	msg = buf_msg(buf);

	/* Create destination port list, if one wasn't supplied */

	if (dp == NULL) {
		nametbl_mc_translate(msg_nametype(msg),
				     msg_namelower(msg),
				     msg_nameupper(msg),
				     TIPC_CLUSTER_SCOPE,
				     &dports);
		item = dp = &dports;
	}

	/* Deliver a copy of message to each destination port */

	if (dp->count != 0) {
		if (dp->count == 1) {
			/* single destination: no cloning needed */
			msg_set_destport(msg, dp->ports[0]);
			port_recv_msg(buf);
			port_list_free(dp);
			return;
		}
		for (; cnt < dp->count; cnt++) {
			int index = cnt % PLSIZE;
			struct sk_buff *b = skb_clone(buf, GFP_ATOMIC);

			if (b == NULL) {
				warn("Buffer allocation failure\n");
				msg_dbg(msg, "LOST:");
				goto exit;
			}
			/* step to the next chunk on each PLSIZE boundary */
			if ((index == 0) && (cnt != 0)) {
				item = item->next;
			}
			msg_set_destport(buf_msg(b),item->ports[index]);
			port_recv_msg(b);
		}
	}
exit:
	buf_discard(buf);
	port_list_free(dp);
}
219
/**
 * tipc_createport_raw - create a native TIPC port
 * @usr_handle: opaque user handle stored in the port
 * @dispatcher: callback used to deliver incoming messages to the owner
 * @wakeup: callback invoked when the port leaves congestion
 * @importance: importance level written into the port's message header
 *
 * Allocates and initializes a port structure, registers it in the
 * reference table, and links it onto the global port list.
 *
 * Returns local port reference, or 0 on allocation/reference failure.
 */

u32 tipc_createport_raw(void *usr_handle,
			u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
			void (*wakeup)(struct tipc_port *),
			const u32 importance)
{
	struct port *p_ptr;
	struct tipc_msg *msg;
	u32 ref;

	p_ptr = kmalloc(sizeof(*p_ptr), GFP_ATOMIC);
	if (p_ptr == NULL) {
		warn("Memory squeeze; failed to create port\n");
		return 0;
	}
	memset(p_ptr, 0, sizeof(*p_ptr));
	/* ref_acquire() also supplies the lock protecting this port */
	ref = ref_acquire(p_ptr, &p_ptr->publ.lock);
	if (!ref) {
		warn("Reference Table Exhausted\n");
		kfree(p_ptr);
		return 0;
	}

	/* hold the port lock while the entry is initialized */
	port_lock(ref);
	p_ptr->publ.ref = ref;
	msg = &p_ptr->publ.phdr;
	msg_init(msg, DATA_LOW, TIPC_NAMED_MSG, TIPC_OK, LONG_H_SIZE, 0);
	msg_set_orignode(msg, tipc_own_addr);
	msg_set_prevnode(msg, tipc_own_addr);
	msg_set_origport(msg, ref);
	msg_set_importance(msg,importance);
	p_ptr->last_in_seqno = 41;	/* NOTE(review): magic initial seqno; sent
					   counter starts at 1 below — confirm the
					   protocol rationale before changing */
	p_ptr->sent = 1;
	p_ptr->publ.usr_handle = usr_handle;
	INIT_LIST_HEAD(&p_ptr->wait_list);
	INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
	p_ptr->congested_link = 0;
	p_ptr->max_pkt = MAX_PKT_DEFAULT;
	p_ptr->dispatcher = dispatcher;
	p_ptr->wakeup = wakeup;
	p_ptr->user_port = 0;
	k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref);
	spin_lock_bh(&port_list_lock);
	INIT_LIST_HEAD(&p_ptr->publications);
	INIT_LIST_HEAD(&p_ptr->port_list);
	list_add_tail(&p_ptr->port_list, &ports);
	spin_unlock_bh(&port_list_lock);
	port_unlock(p_ptr);
	return ref;
}
275
/**
 * tipc_deleteport - destroy the port identified by 'ref'
 *
 * Withdraws all name publications, removes the port from the reference
 * table and global list, cancels its timer, and frees it.  A peer-abort
 * message is routed afterwards if the port was connected.
 *
 * NOTE(review): p_ptr is still used after ref_discard()/port_unlock();
 * presumably safe because discarding the reference prevents any new
 * lookup from reaching this port — confirm against ref.c semantics.
 *
 * Returns TIPC_OK, or -EINVAL if no such port exists.
 */

int tipc_deleteport(u32 ref)
{
	struct port *p_ptr;
	struct sk_buff *buf = 0;

	/* drop any name publications first (args 0,0 = withdraw all) */
	tipc_withdraw(ref, 0, 0);
	p_ptr = port_lock(ref);
	if (!p_ptr)
		return -EINVAL;

	ref_discard(ref);
	port_unlock(p_ptr);

	k_cancel_timer(&p_ptr->timer);
	if (p_ptr->publ.connected) {
		buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
		nodesub_unsubscribe(&p_ptr->subscription);
	}
	if (p_ptr->user_port) {
		reg_remove_port(p_ptr->user_port);
		kfree(p_ptr->user_port);
	}

	spin_lock_bh(&port_list_lock);
	list_del(&p_ptr->port_list);
	list_del(&p_ptr->wait_list);
	spin_unlock_bh(&port_list_lock);
	k_term_timer(&p_ptr->timer);
	kfree(p_ptr);
	dbg("Deleted port %u\n", ref);
	net_route_msg(buf);
	return TIPC_OK;
}
309
310/**
311 * tipc_get_port() - return port associated with 'ref'
312 *
313 * Note: Port is not locked.
314 */
315
316struct tipc_port *tipc_get_port(const u32 ref)
317{
318 return (struct tipc_port *)ref_deref(ref);
319}
320
321/**
322 * tipc_get_handle - return user handle associated to port 'ref'
323 */
324
325void *tipc_get_handle(const u32 ref)
326{
327 struct port *p_ptr;
328 void * handle;
329
330 p_ptr = port_lock(ref);
331 if (!p_ptr)
332 return 0;
333 handle = p_ptr->publ.usr_handle;
334 port_unlock(p_ptr);
335 return handle;
336}
337
338static inline int port_unreliable(struct port *p_ptr)
339{
340 return msg_src_droppable(&p_ptr->publ.phdr);
341}
342
343int tipc_portunreliable(u32 ref, unsigned int *isunreliable)
344{
345 struct port *p_ptr;
346
347 p_ptr = port_lock(ref);
348 if (!p_ptr)
349 return -EINVAL;
350 *isunreliable = port_unreliable(p_ptr);
351 spin_unlock_bh(p_ptr->publ.lock);
352 return TIPC_OK;
353}
354
355int tipc_set_portunreliable(u32 ref, unsigned int isunreliable)
356{
357 struct port *p_ptr;
358
359 p_ptr = port_lock(ref);
360 if (!p_ptr)
361 return -EINVAL;
362 msg_set_src_droppable(&p_ptr->publ.phdr, (isunreliable != 0));
363 port_unlock(p_ptr);
364 return TIPC_OK;
365}
366
367static inline int port_unreturnable(struct port *p_ptr)
368{
369 return msg_dest_droppable(&p_ptr->publ.phdr);
370}
371
372int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable)
373{
374 struct port *p_ptr;
375
376 p_ptr = port_lock(ref);
377 if (!p_ptr)
378 return -EINVAL;
379 *isunrejectable = port_unreturnable(p_ptr);
380 spin_unlock_bh(p_ptr->publ.lock);
381 return TIPC_OK;
382}
383
384int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
385{
386 struct port *p_ptr;
387
388 p_ptr = port_lock(ref);
389 if (!p_ptr)
390 return -EINVAL;
391 msg_set_dest_droppable(&p_ptr->publ.phdr, (isunrejectable != 0));
392 port_unlock(p_ptr);
393 return TIPC_OK;
394}
395
396/*
397 * port_build_proto_msg(): build a port level protocol
398 * or a connection abortion message. Called with
399 * tipc_port lock on.
400 */
401static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode,
402 u32 origport, u32 orignode,
403 u32 usr, u32 type, u32 err,
404 u32 seqno, u32 ack)
405{
406 struct sk_buff *buf;
407 struct tipc_msg *msg;
408
409 buf = buf_acquire(LONG_H_SIZE);
410 if (buf) {
411 msg = buf_msg(buf);
412 msg_init(msg, usr, type, err, LONG_H_SIZE, destnode);
413 msg_set_destport(msg, destport);
414 msg_set_origport(msg, origport);
415 msg_set_destnode(msg, destnode);
416 msg_set_orignode(msg, orignode);
417 msg_set_transp_seqno(msg, seqno);
418 msg_set_msgcnt(msg, ack);
419 msg_dbg(msg, "PORT>SEND>:");
420 }
421 return buf;
422}
423
/**
 * tipc_set_msg_option - attach option data to a port's message header
 * @tp_ptr: port whose header template is modified
 * @opt: option data to store
 * @sz: size of option data
 *
 * NOTE(review): msg_expand() is passed the header's current destination
 * node — presumably to grow the header so the options fit; confirm
 * against msg.h before relying on this.
 */

int tipc_set_msg_option(struct tipc_port *tp_ptr, const char *opt, const u32 sz)
{
	msg_expand(&tp_ptr->phdr, msg_destnode(&tp_ptr->phdr));
	msg_set_options(&tp_ptr->phdr, opt, sz);
	return TIPC_OK;
}
430
/**
 * tipc_reject_msg - return a received message to its sender with an error
 * @buf: rejected message buffer (always consumed by this call)
 * @err: error code to place in the returned message
 *
 * Builds a reply carrying up to MAX_REJECT_SIZE bytes of the original
 * payload back to the originator, unless the message already carries an
 * error code or its sender marked it destination-droppable.
 *
 * Returns the (possibly capped) payload size of the original message.
 */

int tipc_reject_msg(struct sk_buff *buf, u32 err)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct sk_buff *rbuf;
	struct tipc_msg *rmsg;
	int hdr_sz;
	u32 imp = msg_importance(msg);
	u32 data_sz = msg_data_sz(msg);

	if (data_sz > MAX_REJECT_SIZE)
		data_sz = MAX_REJECT_SIZE;
	/* connection traffic is returned at elevated importance (capped) */
	if (msg_connected(msg) && (imp < TIPC_CRITICAL_IMPORTANCE))
		imp++;
	msg_dbg(msg, "port->rej: ");

	/* discard rejected message if it shouldn't be returned to sender */
	if (msg_errcode(msg) || msg_dest_droppable(msg)) {
		buf_discard(buf);
		return data_sz;
	}

	/* construct rejected message */
	if (msg_mcast(msg))
		hdr_sz = MCAST_H_SIZE;
	else
		hdr_sz = LONG_H_SIZE;
	rbuf = buf_acquire(data_sz + hdr_sz);
	if (rbuf == NULL) {
		buf_discard(buf);
		return data_sz;
	}
	rmsg = buf_msg(rbuf);
	msg_init(rmsg, imp, msg_type(msg), err, hdr_sz, msg_orignode(msg));
	msg_set_destport(rmsg, msg_origport(msg));
	msg_set_prevnode(rmsg, tipc_own_addr);
	msg_set_origport(rmsg, msg_destport(msg));
	/* short-form headers carry no destination node; use our own address */
	if (msg_short(msg))
		msg_set_orignode(rmsg, tipc_own_addr);
	else
		msg_set_orignode(rmsg, msg_destnode(msg));
	msg_set_size(rmsg, data_sz + hdr_sz);
	msg_set_nametype(rmsg, msg_nametype(msg));
	msg_set_nameinst(rmsg, msg_nameinst(msg));
	memcpy(rbuf->data + hdr_sz, msg_data(msg), data_sz);

	/* send self-abort message when rejecting on a connected port */
	if (msg_connected(msg)) {
		struct sk_buff *abuf = 0;
		struct port *p_ptr = port_lock(msg_destport(msg));

		if (p_ptr) {
			if (p_ptr->publ.connected)
				abuf = port_build_self_abort_msg(p_ptr, err);
			port_unlock(p_ptr);
		}
		net_route_msg(abuf);
	}

	/* send rejected message */
	buf_discard(buf);
	net_route_msg(rbuf);
	return data_sz;
}
494
495int port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
496 struct iovec const *msg_sect, u32 num_sect,
497 int err)
498{
499 struct sk_buff *buf;
500 int res;
501
502 res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE,
503 !p_ptr->user_port, &buf);
504 if (!buf)
505 return res;
506
507 return tipc_reject_msg(buf, err);
508}
509
510static void port_timeout(unsigned long ref)
511{
512 struct port *p_ptr = port_lock(ref);
513 struct sk_buff *buf = 0;
514
515 if (!p_ptr || !p_ptr->publ.connected)
516 return;
517
518 /* Last probe answered ? */
519 if (p_ptr->probing_state == PROBING) {
520 buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
521 } else {
522 buf = port_build_proto_msg(port_peerport(p_ptr),
523 port_peernode(p_ptr),
524 p_ptr->publ.ref,
525 tipc_own_addr,
526 CONN_MANAGER,
527 CONN_PROBE,
528 TIPC_OK,
529 port_out_seqno(p_ptr),
530 0);
531 port_incr_out_seqno(p_ptr);
532 p_ptr->probing_state = PROBING;
533 k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
534 }
535 port_unlock(p_ptr);
536 net_route_msg(buf);
537}
538
539
540static void port_handle_node_down(unsigned long ref)
541{
542 struct port *p_ptr = port_lock(ref);
543 struct sk_buff* buf = 0;
544
545 if (!p_ptr)
546 return;
547 buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_NODE);
548 port_unlock(p_ptr);
549 net_route_msg(buf);
550}
551
552
553static struct sk_buff *port_build_self_abort_msg(struct port *p_ptr, u32 err)
554{
555 u32 imp = msg_importance(&p_ptr->publ.phdr);
556
557 if (!p_ptr->publ.connected)
558 return 0;
559 if (imp < TIPC_CRITICAL_IMPORTANCE)
560 imp++;
561 return port_build_proto_msg(p_ptr->publ.ref,
562 tipc_own_addr,
563 port_peerport(p_ptr),
564 port_peernode(p_ptr),
565 imp,
566 TIPC_CONN_MSG,
567 err,
568 p_ptr->last_in_seqno + 1,
569 0);
570}
571
572
573static struct sk_buff *port_build_peer_abort_msg(struct port *p_ptr, u32 err)
574{
575 u32 imp = msg_importance(&p_ptr->publ.phdr);
576
577 if (!p_ptr->publ.connected)
578 return 0;
579 if (imp < TIPC_CRITICAL_IMPORTANCE)
580 imp++;
581 return port_build_proto_msg(port_peerport(p_ptr),
582 port_peernode(p_ptr),
583 p_ptr->publ.ref,
584 tipc_own_addr,
585 imp,
586 TIPC_CONN_MSG,
587 err,
588 port_out_seqno(p_ptr),
589 0);
590}
591
/**
 * port_recv_proto_msg - handle an incoming port-level protocol message
 * @buf: protocol message buffer (always consumed by this call)
 *
 * Validates the message against the destination port's connection state,
 * processes CONN_ACK flow-control credit, answers CONN_PROBE, and sends
 * an error reply (and possibly a self-abort) when validation fails.
 */

void port_recv_proto_msg(struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct port *p_ptr = port_lock(msg_destport(msg));
	u32 err = TIPC_OK;
	struct sk_buff *r_buf = 0;		/* reply to route on exit */
	struct sk_buff *abort_buf = 0;		/* self-abort to route on exit */

	msg_dbg(msg, "PORT<RECV<:");

	if (!p_ptr) {
		err = TIPC_ERR_NO_PORT;
	} else if (p_ptr->publ.connected) {
		/* message must come from the connection peer */
		if (port_peernode(p_ptr) != msg_orignode(msg))
			err = TIPC_ERR_NO_PORT;
		if (port_peerport(p_ptr) != msg_origport(msg))
			err = TIPC_ERR_NO_PORT;
		if (!err && msg_routed(msg)) {
			/* routed messages must arrive in sequence */
			u32 seqno = msg_transp_seqno(msg);
			u32 myno = ++p_ptr->last_in_seqno;
			if (seqno != myno) {
				err = TIPC_ERR_NO_PORT;
				abort_buf = port_build_self_abort_msg(p_ptr, err);
			}
		}
		if (msg_type(msg) == CONN_ACK) {
			/* decide on wakeup before consuming the credit */
			int wakeup = port_congested(p_ptr) &&
				     p_ptr->publ.congested &&
				     p_ptr->wakeup;
			p_ptr->acked += msg_msgcnt(msg);
			if (port_congested(p_ptr))
				goto exit;
			p_ptr->publ.congested = 0;
			if (!wakeup)
				goto exit;
			p_ptr->wakeup(&p_ptr->publ);
			goto exit;
		}
	} else if (p_ptr->publ.published) {
		/* published but unconnected ports take no protocol messages */
		err = TIPC_ERR_NO_PORT;
	}
	if (err) {
		r_buf = port_build_proto_msg(msg_origport(msg),
					     msg_orignode(msg),
					     msg_destport(msg),
					     tipc_own_addr,
					     DATA_HIGH,
					     TIPC_CONN_MSG,
					     err,
					     0,
					     0);
		goto exit;
	}

	/* All is fine */
	if (msg_type(msg) == CONN_PROBE) {
		r_buf = port_build_proto_msg(msg_origport(msg),
					     msg_orignode(msg),
					     msg_destport(msg),
					     tipc_own_addr,
					     CONN_MANAGER,
					     CONN_PROBE_REPLY,
					     TIPC_OK,
					     port_out_seqno(p_ptr),
					     0);
	}
	p_ptr->probing_state = CONFIRMED;
	port_incr_out_seqno(p_ptr);
exit:
	if (p_ptr)
		port_unlock(p_ptr);
	net_route_msg(r_buf);
	net_route_msg(abort_buf);
	buf_discard(buf);
}
667
/**
 * port_print - append a human-readable description of a port to a print buffer
 * @p_ptr: port to describe (caller holds the port lock)
 * @buf: print buffer receiving the text
 * @full_id: non-zero to print the full <zone.cluster.node:ref> identity,
 *           zero for just the local reference
 */

static void port_print(struct port *p_ptr, struct print_buf *buf, int full_id)
{
	struct publication *publ;

	if (full_id)
		tipc_printf(buf, "<%u.%u.%u:%u>:",
			    tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
			    tipc_node(tipc_own_addr), p_ptr->publ.ref);
	else
		tipc_printf(buf, "%-10u:", p_ptr->publ.ref);

	if (p_ptr->publ.connected) {
		u32 dport = port_peerport(p_ptr);
		u32 destnode = port_peernode(p_ptr);

		tipc_printf(buf, " connected to <%u.%u.%u:%u>",
			    tipc_zone(destnode), tipc_cluster(destnode),
			    tipc_node(destnode), dport);
		if (p_ptr->publ.conn_type != 0)
			tipc_printf(buf, " via {%u,%u}",
				    p_ptr->publ.conn_type,
				    p_ptr->publ.conn_instance);
	}
	else if (p_ptr->publ.published) {
		/* list each published name (range collapsed when lower==upper) */
		tipc_printf(buf, " bound to");
		list_for_each_entry(publ, &p_ptr->publications, pport_list) {
			if (publ->lower == publ->upper)
				tipc_printf(buf, " {%u,%u}", publ->type,
					    publ->lower);
			else
				tipc_printf(buf, " {%u,%u,%u}", publ->type,
					    publ->lower, publ->upper);
		}
	}
	tipc_printf(buf, "\n");
}
704
705#define MAX_PORT_QUERY 32768
706
/**
 * port_get_ports - build a TLV reply describing every port on this node
 *
 * Walks the global port list under port_list_lock, printing each port
 * (locked individually) into a single TIPC_TLV_ULTRA_STRING reply.
 *
 * Returns the reply buffer, or NULL on allocation failure.
 */

struct sk_buff *port_get_ports(void)
{
	struct sk_buff *buf;
	struct tlv_desc *rep_tlv;
	struct print_buf pb;
	struct port *p_ptr;
	int str_len;

	buf = cfg_reply_alloc(TLV_SPACE(MAX_PORT_QUERY));
	if (!buf)
		return NULL;
	rep_tlv = (struct tlv_desc *)buf->data;

	printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_QUERY);
	spin_lock_bh(&port_list_lock);
	list_for_each_entry(p_ptr, &ports, port_list) {
		spin_lock_bh(p_ptr->publ.lock);
		port_print(p_ptr, &pb, 0);
		spin_unlock_bh(p_ptr->publ.lock);
	}
	spin_unlock_bh(&port_list_lock);
	str_len = printbuf_validate(&pb);

	skb_put(buf, TLV_SPACE(str_len));
	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);

	return buf;
}
735
736#if 0
737
738#define MAX_PORT_STATS 2000
739
/**
 * port_show_stats - build a TLV reply describing a single port
 * @req_tlv_area: request TLV containing the port reference
 * @req_tlv_space: size of request TLV
 *
 * Currently compiled out (enclosed in #if 0); statistics collection is
 * still marked as TODO below.
 *
 * Returns the reply buffer, an error-string reply for bad input, or
 * NULL on allocation failure.
 */

struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space)
{
	u32 ref;
	struct port *p_ptr;
	struct sk_buff *buf;
	struct tlv_desc *rep_tlv;
	struct print_buf pb;
	int str_len;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_PORT_REF))
		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	/* port reference arrives in network byte order */
	ref = *(u32 *)TLV_DATA(req_tlv_area);
	ref = ntohl(ref);

	p_ptr = port_lock(ref);
	if (!p_ptr)
		return cfg_reply_error_string("port not found");

	buf = cfg_reply_alloc(TLV_SPACE(MAX_PORT_STATS));
	if (!buf) {
		port_unlock(p_ptr);
		return NULL;
	}
	rep_tlv = (struct tlv_desc *)buf->data;

	printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_STATS);
	port_print(p_ptr, &pb, 1);
	/* NEED TO FILL IN ADDITIONAL PORT STATISTICS HERE */
	port_unlock(p_ptr);
	str_len = printbuf_validate(&pb);

	skb_put(buf, TLV_SPACE(str_len));
	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);

	return buf;
}
777
778#endif
779
/**
 * port_reinit - rewrite the originating node address in all port headers
 *
 * NOTE(review): the loop stops at the first port whose header already
 * carries tipc_own_addr — presumably either all ports need updating or
 * none do (e.g. after the node first learns its network address);
 * confirm this invariant against the callers before relying on it.
 */

void port_reinit(void)
{
	struct port *p_ptr;
	struct tipc_msg *msg;

	spin_lock_bh(&port_list_lock);
	list_for_each_entry(p_ptr, &ports, port_list) {
		msg = &p_ptr->publ.phdr;
		if (msg_orignode(msg) == tipc_own_addr)
			break;
		msg_set_orignode(msg, tipc_own_addr);
	}
	spin_unlock_bh(&port_list_lock);
}
794
795
/*
 * port_dispatcher_sigh(): Signal handler for messages destinated
 * to the tipc_port interface.
 *
 * Drains the shared message queue built by port_dispatcher() and hands
 * each message to the appropriate user callback, depending on message
 * type and on whether the message carries an error code.  Unaccepted
 * messages are rejected back to the sender.
 */

static void port_dispatcher_sigh(void *dummy)
{
	struct sk_buff *buf;

	/* detach the whole pending queue in one shot */
	spin_lock_bh(&queue_lock);
	buf = msg_queue_head;
	msg_queue_head = 0;
	spin_unlock_bh(&queue_lock);

	while (buf) {
		struct port *p_ptr;
		struct user_port *up_ptr;
		struct tipc_portid orig;
		struct tipc_name_seq dseq;
		void *usr_handle;
		int connected;
		int published;

		struct sk_buff *next = buf->next;
		struct tipc_msg *msg = buf_msg(buf);
		u32 dref = msg_destport(msg);

		p_ptr = port_lock(dref);
		if (!p_ptr) {
			/* Port deleted while msg in queue */
			tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
			buf = next;
			continue;
		}
		/* snapshot port state under the port lock; the lock is
		   released inside each case before invoking callbacks */
		orig.ref = msg_origport(msg);
		orig.node = msg_orignode(msg);
		up_ptr = p_ptr->user_port;
		usr_handle = up_ptr->usr_handle;
		connected = p_ptr->publ.connected;
		published = p_ptr->publ.published;

		if (unlikely(msg_errcode(msg)))
			goto err;

		switch (msg_type(msg)) {

		case TIPC_CONN_MSG:{
				tipc_conn_msg_event cb = up_ptr->conn_msg_cb;
				u32 peer_port = port_peerport(p_ptr);
				u32 peer_node = port_peernode(p_ptr);

				spin_unlock_bh(p_ptr->publ.lock);
				if (unlikely(!connected)) {
					if (unlikely(published))
						goto reject;
					/* implicit connect on first message */
					tipc_connect2port(dref,&orig);
				}
				if (unlikely(msg_origport(msg) != peer_port))
					goto reject;
				if (unlikely(msg_orignode(msg) != peer_node))
					goto reject;
				if (unlikely(!cb))
					goto reject;
				/* acknowledge once a window's worth is unacked */
				if (unlikely(++p_ptr->publ.conn_unacked >=
					     TIPC_FLOW_CONTROL_WIN))
					tipc_acknowledge(dref,
							 p_ptr->publ.conn_unacked);
				skb_pull(buf, msg_hdr_sz(msg));
				cb(usr_handle, dref, &buf, msg_data(msg),
				   msg_data_sz(msg));
				break;
			}
		case TIPC_DIRECT_MSG:{
				tipc_msg_event cb = up_ptr->msg_cb;

				spin_unlock_bh(p_ptr->publ.lock);
				if (unlikely(connected))
					goto reject;
				if (unlikely(!cb))
					goto reject;
				skb_pull(buf, msg_hdr_sz(msg));
				cb(usr_handle, dref, &buf, msg_data(msg),
				   msg_data_sz(msg), msg_importance(msg),
				   &orig);
				break;
			}
		case TIPC_NAMED_MSG:{
				tipc_named_msg_event cb = up_ptr->named_msg_cb;

				spin_unlock_bh(p_ptr->publ.lock);
				if (unlikely(connected))
					goto reject;
				if (unlikely(!cb))
					goto reject;
				if (unlikely(!published))
					goto reject;
				dseq.type = msg_nametype(msg);
				dseq.lower = msg_nameinst(msg);
				dseq.upper = dseq.lower;
				skb_pull(buf, msg_hdr_sz(msg));
				cb(usr_handle, dref, &buf, msg_data(msg),
				   msg_data_sz(msg), msg_importance(msg),
				   &orig, &dseq);
				break;
			}
		}
		/* callback may have taken ownership of 'buf' (set it NULL) */
		if (buf)
			buf_discard(buf);
		buf = next;
		continue;
err:
		/* message carries an error code: route to the error callbacks */
		switch (msg_type(msg)) {

		case TIPC_CONN_MSG:{
				tipc_conn_shutdown_event cb =
					up_ptr->conn_err_cb;
				u32 peer_port = port_peerport(p_ptr);
				u32 peer_node = port_peernode(p_ptr);

				spin_unlock_bh(p_ptr->publ.lock);
				if (!connected || !cb)
					break;
				if (msg_origport(msg) != peer_port)
					break;
				if (msg_orignode(msg) != peer_node)
					break;
				tipc_disconnect(dref);
				skb_pull(buf, msg_hdr_sz(msg));
				cb(usr_handle, dref, &buf, msg_data(msg),
				   msg_data_sz(msg), msg_errcode(msg));
				break;
			}
		case TIPC_DIRECT_MSG:{
				tipc_msg_err_event cb = up_ptr->err_cb;

				spin_unlock_bh(p_ptr->publ.lock);
				if (connected || !cb)
					break;
				skb_pull(buf, msg_hdr_sz(msg));
				cb(usr_handle, dref, &buf, msg_data(msg),
				   msg_data_sz(msg), msg_errcode(msg), &orig);
				break;
			}
		case TIPC_NAMED_MSG:{
				tipc_named_msg_err_event cb =
					up_ptr->named_err_cb;

				spin_unlock_bh(p_ptr->publ.lock);
				if (connected || !cb)
					break;
				dseq.type = msg_nametype(msg);
				dseq.lower = msg_nameinst(msg);
				dseq.upper = dseq.lower;
				skb_pull(buf, msg_hdr_sz(msg));
				cb(usr_handle, dref, &buf, msg_data(msg),
				   msg_data_sz(msg), msg_errcode(msg), &dseq);
				break;
			}
		}
		if (buf)
			buf_discard(buf);
		buf = next;
		continue;
reject:
		tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
		buf = next;
	}
}
964
965/*
966 * port_dispatcher(): Dispatcher for messages destinated
967 * to the tipc_port interface. Called with port locked.
968 */
969
970static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
971{
972 buf->next = NULL;
973 spin_lock_bh(&queue_lock);
974 if (msg_queue_head) {
975 msg_queue_tail->next = buf;
976 msg_queue_tail = buf;
977 } else {
978 msg_queue_tail = msg_queue_head = buf;
979 k_signal((Handler)port_dispatcher_sigh, 0);
980 }
981 spin_unlock_bh(&queue_lock);
982 return TIPC_OK;
983}
984
985/*
986 * Wake up port after congestion: Called with port locked,
987 *
988 */
989
990static void port_wakeup_sh(unsigned long ref)
991{
992 struct port *p_ptr;
993 struct user_port *up_ptr;
994 tipc_continue_event cb = 0;
995 void *uh = 0;
996
997 p_ptr = port_lock(ref);
998 if (p_ptr) {
999 up_ptr = p_ptr->user_port;
1000 if (up_ptr) {
1001 cb = up_ptr->continue_event_cb;
1002 uh = up_ptr->usr_handle;
1003 }
1004 port_unlock(p_ptr);
1005 }
1006 if (cb)
1007 cb(uh, ref);
1008}
1009
1010
/* Defer the user's "no longer congested" callback to signal-handler
 * context; presumably called with the port locked (see comment above) —
 * it only reads p_ptr->ref here, port_wakeup_sh() re-takes the lock. */
static void port_wakeup(struct tipc_port *p_ptr)
{
	k_signal((Handler)port_wakeup_sh, p_ptr->ref);
}
1015
1016void tipc_acknowledge(u32 ref, u32 ack)
1017{
1018 struct port *p_ptr;
1019 struct sk_buff *buf = 0;
1020
1021 p_ptr = port_lock(ref);
1022 if (!p_ptr)
1023 return;
1024 if (p_ptr->publ.connected) {
1025 p_ptr->publ.conn_unacked -= ack;
1026 buf = port_build_proto_msg(port_peerport(p_ptr),
1027 port_peernode(p_ptr),
1028 ref,
1029 tipc_own_addr,
1030 CONN_MANAGER,
1031 CONN_ACK,
1032 TIPC_OK,
1033 port_out_seqno(p_ptr),
1034 ack);
1035 }
1036 port_unlock(p_ptr);
1037 net_route_msg(buf);
1038}
1039
1040/*
1041 * tipc_createport(): user level call. Will add port to
1042 * registry if non-zero user_ref.
1043 */
1044
1045int tipc_createport(u32 user_ref,
1046 void *usr_handle,
1047 unsigned int importance,
1048 tipc_msg_err_event error_cb,
1049 tipc_named_msg_err_event named_error_cb,
1050 tipc_conn_shutdown_event conn_error_cb,
1051 tipc_msg_event msg_cb,
1052 tipc_named_msg_event named_msg_cb,
1053 tipc_conn_msg_event conn_msg_cb,
1054 tipc_continue_event continue_event_cb,/* May be zero */
1055 u32 *portref)
1056{
1057 struct user_port *up_ptr;
1058 struct port *p_ptr;
1059 u32 ref;
1060
1061 up_ptr = (struct user_port *)kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
1062 if (up_ptr == NULL) {
1063 return -ENOMEM;
1064 }
1065 ref = tipc_createport_raw(0, port_dispatcher, port_wakeup, importance);
1066 p_ptr = port_lock(ref);
1067 if (!p_ptr) {
1068 kfree(up_ptr);
1069 return -ENOMEM;
1070 }
1071
1072 p_ptr->user_port = up_ptr;
1073 up_ptr->user_ref = user_ref;
1074 up_ptr->usr_handle = usr_handle;
1075 up_ptr->ref = p_ptr->publ.ref;
1076 up_ptr->err_cb = error_cb;
1077 up_ptr->named_err_cb = named_error_cb;
1078 up_ptr->conn_err_cb = conn_error_cb;
1079 up_ptr->msg_cb = msg_cb;
1080 up_ptr->named_msg_cb = named_msg_cb;
1081 up_ptr->conn_msg_cb = conn_msg_cb;
1082 up_ptr->continue_event_cb = continue_event_cb;
1083 INIT_LIST_HEAD(&up_ptr->uport_list);
1084 reg_add_port(up_ptr);
1085 *portref = p_ptr->publ.ref;
1086 dbg(" tipc_createport: %x with ref %u\n", p_ptr, p_ptr->publ.ref);
1087 port_unlock(p_ptr);
1088 return TIPC_OK;
1089}
1090
1091int tipc_ownidentity(u32 ref, struct tipc_portid *id)
1092{
1093 id->ref = ref;
1094 id->node = tipc_own_addr;
1095 return TIPC_OK;
1096}
1097
1098int tipc_portimportance(u32 ref, unsigned int *importance)
1099{
1100 struct port *p_ptr;
1101
1102 p_ptr = port_lock(ref);
1103 if (!p_ptr)
1104 return -EINVAL;
1105 *importance = (unsigned int)msg_importance(&p_ptr->publ.phdr);
1106 spin_unlock_bh(p_ptr->publ.lock);
1107 return TIPC_OK;
1108}
1109
1110int tipc_set_portimportance(u32 ref, unsigned int imp)
1111{
1112 struct port *p_ptr;
1113
1114 if (imp > TIPC_CRITICAL_IMPORTANCE)
1115 return -EINVAL;
1116
1117 p_ptr = port_lock(ref);
1118 if (!p_ptr)
1119 return -EINVAL;
1120 msg_set_importance(&p_ptr->publ.phdr, (u32)imp);
1121 spin_unlock_bh(p_ptr->publ.lock);
1122 return TIPC_OK;
1123}
1124
1125
1126int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1127{
1128 struct port *p_ptr;
1129 struct publication *publ;
1130 u32 key;
1131 int res = -EINVAL;
1132
1133 p_ptr = port_lock(ref);
1134 dbg("tipc_publ %u, p_ptr = %x, conn = %x, scope = %x, "
1135 "lower = %u, upper = %u\n",
1136 ref, p_ptr, p_ptr->publ.connected, scope, seq->lower, seq->upper);
1137 if (!p_ptr)
1138 return -EINVAL;
1139 if (p_ptr->publ.connected)
1140 goto exit;
1141 if (seq->lower > seq->upper)
1142 goto exit;
1143 if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE))
1144 goto exit;
1145 key = ref + p_ptr->pub_count + 1;
1146 if (key == ref) {
1147 res = -EADDRINUSE;
1148 goto exit;
1149 }
1150 publ = nametbl_publish(seq->type, seq->lower, seq->upper,
1151 scope, p_ptr->publ.ref, key);
1152 if (publ) {
1153 list_add(&publ->pport_list, &p_ptr->publications);
1154 p_ptr->pub_count++;
1155 p_ptr->publ.published = 1;
1156 res = TIPC_OK;
1157 }
1158exit:
1159 port_unlock(p_ptr);
1160 return res;
1161}
1162
/*
 * tipc_withdraw(): Withdraw port name publication(s).
 * If 'seq' is NULL all of the port's publications are withdrawn;
 * otherwise only the one matching 'scope' and 'seq' is removed.
 */
int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
{
	struct port *p_ptr;
	struct publication *publ;
	struct publication *tpubl;
	int res = -EINVAL;

	p_ptr = port_lock(ref);
	if (!p_ptr)
		return -EINVAL;
	if (!p_ptr->publ.published)
		goto exit;
	if (!seq) {
		/* withdraw everything; _safe variant because nametbl_withdraw
		 * presumably unlinks entries from this list — confirm */
		list_for_each_entry_safe(publ, tpubl,
					 &p_ptr->publications, pport_list) {
			nametbl_withdraw(publ->type, publ->lower,
					 publ->ref, publ->key);
		}
		res = TIPC_OK;
	} else {
		list_for_each_entry_safe(publ, tpubl,
					 &p_ptr->publications, pport_list) {
			if (publ->scope != scope)
				continue;
			if (publ->type != seq->type)
				continue;
			if (publ->lower != seq->lower)
				continue;
			/* NOTE(review): 'break' (not 'continue') on an upper
			 * mismatch — looks intentional (scope/type/lower already
			 * matched, so no later entry can match) but confirm */
			if (publ->upper != seq->upper)
				break;
			nametbl_withdraw(publ->type, publ->lower,
					 publ->ref, publ->key);
			res = TIPC_OK;
			break;
		}
	}
	/* clear 'published' flag once the last publication is gone */
	if (list_empty(&p_ptr->publications))
		p_ptr->publ.published = 0;
exit:
	port_unlock(p_ptr);
	return res;
}
1205
/*
 * tipc_connect2port(): Connect an unbound, unconnected port to a peer.
 * Prepares the port's prebuilt header for connection-oriented traffic,
 * starts the probing timer, and subscribes to "peer node down" events.
 */
int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
{
	struct port *p_ptr;
	struct tipc_msg *msg;
	int res = -EINVAL;

	p_ptr = port_lock(ref);
	if (!p_ptr)
		return -EINVAL;
	/* a port that is published or already connected cannot connect */
	if (p_ptr->publ.published || p_ptr->publ.connected)
		goto exit;
	if (!peer->ref)
		goto exit;

	msg = &p_ptr->publ.phdr;
	msg_set_destnode(msg, peer->node);
	msg_set_destport(msg, peer->ref);
	msg_set_orignode(msg, tipc_own_addr);
	msg_set_origport(msg, p_ptr->publ.ref);
	msg_set_transp_seqno(msg, 42);
	msg_set_type(msg, TIPC_CONN_MSG);
	/* shorter header when routing is not needed to reach the peer */
	if (!may_route(peer->node))
		msg_set_hdr_sz(msg, SHORT_H_SIZE);
	else
		msg_set_hdr_sz(msg, LONG_H_SIZE);

	p_ptr->probing_interval = PROBING_INTERVAL;
	p_ptr->probing_state = CONFIRMED;
	p_ptr->publ.connected = 1;
	k_start_timer(&p_ptr->timer, p_ptr->probing_interval);

	nodesub_subscribe(&p_ptr->subscription,peer->node,
			  (void *)(unsigned long)ref,
			  (net_ev_handler)port_handle_node_down);
	res = TIPC_OK;
exit:
	port_unlock(p_ptr);
	/* NOTE(review): p_ptr is written AFTER the port lock is dropped, and
	 * even on the early-exit/error paths — possibly deliberate to avoid
	 * holding the port lock across link code, but confirm it is safe */
	p_ptr->max_pkt = link_get_max_pkt(peer->node, ref);
	return res;
}
1246
1247/*
1248 * tipc_disconnect(): Disconnect port form peer.
1249 * This is a node local operation.
1250 */
1251
1252int tipc_disconnect(u32 ref)
1253{
1254 struct port *p_ptr;
1255 int res = -ENOTCONN;
1256
1257 p_ptr = port_lock(ref);
1258 if (!p_ptr)
1259 return -EINVAL;
1260 if (p_ptr->publ.connected) {
1261 p_ptr->publ.connected = 0;
1262 /* let timer expire on it's own to avoid deadlock! */
1263 nodesub_unsubscribe(&p_ptr->subscription);
1264 res = TIPC_OK;
1265 }
1266 port_unlock(p_ptr);
1267 return res;
1268}
1269
1270/*
1271 * tipc_shutdown(): Send a SHUTDOWN msg to peer and disconnect
1272 */
1273int tipc_shutdown(u32 ref)
1274{
1275 struct port *p_ptr;
1276 struct sk_buff *buf = 0;
1277
1278 p_ptr = port_lock(ref);
1279 if (!p_ptr)
1280 return -EINVAL;
1281
1282 if (p_ptr->publ.connected) {
1283 u32 imp = msg_importance(&p_ptr->publ.phdr);
1284 if (imp < TIPC_CRITICAL_IMPORTANCE)
1285 imp++;
1286 buf = port_build_proto_msg(port_peerport(p_ptr),
1287 port_peernode(p_ptr),
1288 ref,
1289 tipc_own_addr,
1290 imp,
1291 TIPC_CONN_MSG,
1292 TIPC_CONN_SHUTDOWN,
1293 port_out_seqno(p_ptr),
1294 0);
1295 }
1296 port_unlock(p_ptr);
1297 net_route_msg(buf);
1298 return tipc_disconnect(ref);
1299}
1300
1301int tipc_isconnected(u32 ref, int *isconnected)
1302{
1303 struct port *p_ptr;
1304
1305 p_ptr = port_lock(ref);
1306 if (!p_ptr)
1307 return -EINVAL;
1308 *isconnected = p_ptr->publ.connected;
1309 port_unlock(p_ptr);
1310 return TIPC_OK;
1311}
1312
1313int tipc_peer(u32 ref, struct tipc_portid *peer)
1314{
1315 struct port *p_ptr;
1316 int res;
1317
1318 p_ptr = port_lock(ref);
1319 if (!p_ptr)
1320 return -EINVAL;
1321 if (p_ptr->publ.connected) {
1322 peer->ref = port_peerport(p_ptr);
1323 peer->node = port_peernode(p_ptr);
1324 res = TIPC_OK;
1325 } else
1326 res = -ENOTCONN;
1327 port_unlock(p_ptr);
1328 return res;
1329}
1330
1331int tipc_ref_valid(u32 ref)
1332{
1333 /* Works irrespective of type */
1334 return !!ref_deref(ref);
1335}
1336
1337
1338/*
1339 * port_recv_sections(): Concatenate and deliver sectioned
1340 * message for this node.
1341 */
1342
1343int port_recv_sections(struct port *sender, unsigned int num_sect,
1344 struct iovec const *msg_sect)
1345{
1346 struct sk_buff *buf;
1347 int res;
1348
1349 res = msg_build(&sender->publ.phdr, msg_sect, num_sect,
1350 MAX_MSG_SIZE, !sender->user_port, &buf);
1351 if (likely(buf))
1352 port_recv_msg(buf);
1353 return res;
1354}
1355
/**
 * tipc_send - send message sections on connection
 *
 * Returns the number of bytes accepted on success, -EINVAL if the port
 * is invalid or not connected, -ELINKCONG on link congestion (unless
 * the port is unreliable, in which case the message is silently dropped
 * and its would-be length is returned).
 */

int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
{
	struct port *p_ptr;
	u32 destnode;
	int res;

	p_ptr = port_deref(ref);
	if (!p_ptr || !p_ptr->publ.connected)
		return -EINVAL;

	/* mark congested up front; cleared again if the send succeeds */
	p_ptr->publ.congested = 1;
	if (!port_congested(p_ptr)) {
		destnode = port_peernode(p_ptr);
		/* local destinations bypass the link layer entirely */
		if (likely(destnode != tipc_own_addr))
			res = link_send_sections_fast(p_ptr, msg_sect, num_sect,
						      destnode);
		else
			res = port_recv_sections(p_ptr, num_sect, msg_sect);

		if (likely(res != -ELINKCONG)) {
			port_incr_out_seqno(p_ptr);
			p_ptr->publ.congested = 0;
			p_ptr->sent++;
			return res;
		}
	}
	/* unreliable ports drop on congestion instead of reporting it */
	if (port_unreliable(p_ptr)) {
		p_ptr->publ.congested = 0;
		/* Just calculate msg length and return */
		return msg_calc_data_size(msg_sect, num_sect);
	}
	return -ELINKCONG;
}
1393
1394/**
1395 * tipc_send_buf - send message buffer on connection
1396 */
1397
1398int tipc_send_buf(u32 ref, struct sk_buff *buf, unsigned int dsz)
1399{
1400 struct port *p_ptr;
1401 struct tipc_msg *msg;
1402 u32 destnode;
1403 u32 hsz;
1404 u32 sz;
1405 u32 res;
1406
1407 p_ptr = port_deref(ref);
1408 if (!p_ptr || !p_ptr->publ.connected)
1409 return -EINVAL;
1410
1411 msg = &p_ptr->publ.phdr;
1412 hsz = msg_hdr_sz(msg);
1413 sz = hsz + dsz;
1414 msg_set_size(msg, sz);
1415 if (skb_cow(buf, hsz))
1416 return -ENOMEM;
1417
1418 skb_push(buf, hsz);
1419 memcpy(buf->data, (unchar *)msg, hsz);
1420 destnode = msg_destnode(msg);
1421 p_ptr->publ.congested = 1;
1422 if (!port_congested(p_ptr)) {
1423 if (likely(destnode != tipc_own_addr))
1424 res = tipc_send_buf_fast(buf, destnode);
1425 else {
1426 port_recv_msg(buf);
1427 res = sz;
1428 }
1429 if (likely(res != -ELINKCONG)) {
1430 port_incr_out_seqno(p_ptr);
1431 p_ptr->sent++;
1432 p_ptr->publ.congested = 0;
1433 return res;
1434 }
1435 }
1436 if (port_unreliable(p_ptr)) {
1437 p_ptr->publ.congested = 0;
1438 return dsz;
1439 }
1440 return -ELINKCONG;
1441}
1442
/**
 * tipc_forward2name - forward message sections to port name
 *
 * Looks the name up in the name table, then either delivers locally,
 * sends via the link layer, or rejects with TIPC_ERR_NO_NAME if the
 * name cannot be translated.
 */

int tipc_forward2name(u32 ref,
		      struct tipc_name const *name,
		      u32 domain,
		      u32 num_sect,
		      struct iovec const *msg_sect,
		      struct tipc_portid const *orig,
		      unsigned int importance)
{
	struct port *p_ptr;
	struct tipc_msg *msg;
	u32 destnode = domain;
	u32 destport = 0;
	int res;

	/* connected ports may not send name-addressed messages */
	p_ptr = port_deref(ref);
	if (!p_ptr || p_ptr->publ.connected)
		return -EINVAL;

	msg = &p_ptr->publ.phdr;
	msg_set_type(msg, TIPC_NAMED_MSG);
	msg_set_orignode(msg, orig->node);
	msg_set_origport(msg, orig->ref);
	msg_set_hdr_sz(msg, LONG_H_SIZE);
	msg_set_nametype(msg, name->type);
	msg_set_nameinst(msg, name->instance);
	msg_set_lookup_scope(msg, addr_scope(domain));
	/* values above TIPC_CRITICAL_IMPORTANCE leave importance unchanged */
	if (importance <= TIPC_CRITICAL_IMPORTANCE)
		msg_set_importance(msg,importance);
	/* destnode is in/out: seeded with the lookup domain */
	destport = nametbl_translate(name->type, name->instance, &destnode);
	msg_set_destnode(msg, destnode);
	msg_set_destport(msg, destport);

	if (likely(destport || destnode)) {
		p_ptr->sent++;
		if (likely(destnode == tipc_own_addr))
			return port_recv_sections(p_ptr, num_sect, msg_sect);
		res = link_send_sections_fast(p_ptr, msg_sect, num_sect,
					      destnode);
		if (likely(res != -ELINKCONG))
			return res;
		if (port_unreliable(p_ptr)) {
			/* Just calculate msg length and return */
			return msg_calc_data_size(msg_sect, num_sect);
		}
		return -ELINKCONG;
	}
	/* name lookup failed: bounce the message back to the sender */
	return port_reject_sections(p_ptr, msg, msg_sect, num_sect,
				    TIPC_ERR_NO_NAME);
}
1496
1497/**
1498 * tipc_send2name - send message sections to port name
1499 */
1500
1501int tipc_send2name(u32 ref,
1502 struct tipc_name const *name,
1503 unsigned int domain,
1504 unsigned int num_sect,
1505 struct iovec const *msg_sect)
1506{
1507 struct tipc_portid orig;
1508
1509 orig.ref = ref;
1510 orig.node = tipc_own_addr;
1511 return tipc_forward2name(ref, name, domain, num_sect, msg_sect, &orig,
1512 TIPC_PORT_IMPORTANCE);
1513}
1514
1515/**
1516 * tipc_forward_buf2name - forward message buffer to port name
1517 */
1518
1519int tipc_forward_buf2name(u32 ref,
1520 struct tipc_name const *name,
1521 u32 domain,
1522 struct sk_buff *buf,
1523 unsigned int dsz,
1524 struct tipc_portid const *orig,
1525 unsigned int importance)
1526{
1527 struct port *p_ptr;
1528 struct tipc_msg *msg;
1529 u32 destnode = domain;
1530 u32 destport = 0;
1531 int res;
1532
1533 p_ptr = (struct port *)ref_deref(ref);
1534 if (!p_ptr || p_ptr->publ.connected)
1535 return -EINVAL;
1536
1537 msg = &p_ptr->publ.phdr;
1538 if (importance <= TIPC_CRITICAL_IMPORTANCE)
1539 msg_set_importance(msg, importance);
1540 msg_set_type(msg, TIPC_NAMED_MSG);
1541 msg_set_orignode(msg, orig->node);
1542 msg_set_origport(msg, orig->ref);
1543 msg_set_nametype(msg, name->type);
1544 msg_set_nameinst(msg, name->instance);
1545 msg_set_lookup_scope(msg, addr_scope(domain));
1546 msg_set_hdr_sz(msg, LONG_H_SIZE);
1547 msg_set_size(msg, LONG_H_SIZE + dsz);
1548 destport = nametbl_translate(name->type, name->instance, &destnode);
1549 msg_set_destnode(msg, destnode);
1550 msg_set_destport(msg, destport);
1551 msg_dbg(msg, "forw2name ==> ");
1552 if (skb_cow(buf, LONG_H_SIZE))
1553 return -ENOMEM;
1554 skb_push(buf, LONG_H_SIZE);
1555 memcpy(buf->data, (unchar *)msg, LONG_H_SIZE);
1556 msg_dbg(buf_msg(buf),"PREP:");
1557 if (likely(destport || destnode)) {
1558 p_ptr->sent++;
1559 if (destnode == tipc_own_addr)
1560 return port_recv_msg(buf);
1561 res = tipc_send_buf_fast(buf, destnode);
1562 if (likely(res != -ELINKCONG))
1563 return res;
1564 if (port_unreliable(p_ptr))
1565 return dsz;
1566 return -ELINKCONG;
1567 }
1568 return tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
1569}
1570
1571/**
1572 * tipc_send_buf2name - send message buffer to port name
1573 */
1574
1575int tipc_send_buf2name(u32 ref,
1576 struct tipc_name const *dest,
1577 u32 domain,
1578 struct sk_buff *buf,
1579 unsigned int dsz)
1580{
1581 struct tipc_portid orig;
1582
1583 orig.ref = ref;
1584 orig.node = tipc_own_addr;
1585 return tipc_forward_buf2name(ref, dest, domain, buf, dsz, &orig,
1586 TIPC_PORT_IMPORTANCE);
1587}
1588
1589/**
1590 * tipc_forward2port - forward message sections to port identity
1591 */
1592
1593int tipc_forward2port(u32 ref,
1594 struct tipc_portid const *dest,
1595 unsigned int num_sect,
1596 struct iovec const *msg_sect,
1597 struct tipc_portid const *orig,
1598 unsigned int importance)
1599{
1600 struct port *p_ptr;
1601 struct tipc_msg *msg;
1602 int res;
1603
1604 p_ptr = port_deref(ref);
1605 if (!p_ptr || p_ptr->publ.connected)
1606 return -EINVAL;
1607
1608 msg = &p_ptr->publ.phdr;
1609 msg_set_type(msg, TIPC_DIRECT_MSG);
1610 msg_set_orignode(msg, orig->node);
1611 msg_set_origport(msg, orig->ref);
1612 msg_set_destnode(msg, dest->node);
1613 msg_set_destport(msg, dest->ref);
1614 msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
1615 if (importance <= TIPC_CRITICAL_IMPORTANCE)
1616 msg_set_importance(msg, importance);
1617 p_ptr->sent++;
1618 if (dest->node == tipc_own_addr)
1619 return port_recv_sections(p_ptr, num_sect, msg_sect);
1620 res = link_send_sections_fast(p_ptr, msg_sect, num_sect, dest->node);
1621 if (likely(res != -ELINKCONG))
1622 return res;
1623 if (port_unreliable(p_ptr)) {
1624 /* Just calculate msg length and return */
1625 return msg_calc_data_size(msg_sect, num_sect);
1626 }
1627 return -ELINKCONG;
1628}
1629
1630/**
1631 * tipc_send2port - send message sections to port identity
1632 */
1633
1634int tipc_send2port(u32 ref,
1635 struct tipc_portid const *dest,
1636 unsigned int num_sect,
1637 struct iovec const *msg_sect)
1638{
1639 struct tipc_portid orig;
1640
1641 orig.ref = ref;
1642 orig.node = tipc_own_addr;
1643 return tipc_forward2port(ref, dest, num_sect, msg_sect, &orig,
1644 TIPC_PORT_IMPORTANCE);
1645}
1646
1647/**
1648 * tipc_forward_buf2port - forward message buffer to port identity
1649 */
1650int tipc_forward_buf2port(u32 ref,
1651 struct tipc_portid const *dest,
1652 struct sk_buff *buf,
1653 unsigned int dsz,
1654 struct tipc_portid const *orig,
1655 unsigned int importance)
1656{
1657 struct port *p_ptr;
1658 struct tipc_msg *msg;
1659 int res;
1660
1661 p_ptr = (struct port *)ref_deref(ref);
1662 if (!p_ptr || p_ptr->publ.connected)
1663 return -EINVAL;
1664
1665 msg = &p_ptr->publ.phdr;
1666 msg_set_type(msg, TIPC_DIRECT_MSG);
1667 msg_set_orignode(msg, orig->node);
1668 msg_set_origport(msg, orig->ref);
1669 msg_set_destnode(msg, dest->node);
1670 msg_set_destport(msg, dest->ref);
1671 msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
1672 if (importance <= TIPC_CRITICAL_IMPORTANCE)
1673 msg_set_importance(msg, importance);
1674 msg_set_size(msg, DIR_MSG_H_SIZE + dsz);
1675 if (skb_cow(buf, DIR_MSG_H_SIZE))
1676 return -ENOMEM;
1677
1678 skb_push(buf, DIR_MSG_H_SIZE);
1679 memcpy(buf->data, (unchar *)msg, DIR_MSG_H_SIZE);
1680 msg_dbg(msg, "buf2port: ");
1681 p_ptr->sent++;
1682 if (dest->node == tipc_own_addr)
1683 return port_recv_msg(buf);
1684 res = tipc_send_buf_fast(buf, dest->node);
1685 if (likely(res != -ELINKCONG))
1686 return res;
1687 if (port_unreliable(p_ptr))
1688 return dsz;
1689 return -ELINKCONG;
1690}
1691
1692/**
1693 * tipc_send_buf2port - send message buffer to port identity
1694 */
1695
1696int tipc_send_buf2port(u32 ref,
1697 struct tipc_portid const *dest,
1698 struct sk_buff *buf,
1699 unsigned int dsz)
1700{
1701 struct tipc_portid orig;
1702
1703 orig.ref = ref;
1704 orig.node = tipc_own_addr;
1705 return tipc_forward_buf2port(ref, dest, buf, dsz, &orig,
1706 TIPC_PORT_IMPORTANCE);
1707}
1708
diff --git a/net/tipc/port.h b/net/tipc/port.h
new file mode 100644
index 000000000000..e829a99d3b7f
--- /dev/null
+++ b/net/tipc/port.h
@@ -0,0 +1,209 @@
1/*
2 * net/tipc/port.h: Include file for TIPC port code
3 *
4 * Copyright (c) 1994-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_PORT_H
38#define _TIPC_PORT_H
39
40#include <net/tipc/tipc_port.h>
41#include "ref.h"
42#include "net.h"
43#include "msg.h"
44#include "dbg.h"
45#include "node_subscr.h"
46
/**
 * struct user_port - TIPC user port (used with native API)
 * @user_ref: id of user who created user port
 * @usr_handle: user-specified field (passed back as first callback argument)
 * @ref: object reference to associated TIPC port
 * @err_cb: error callback for direct (TIPC_DIRECT_MSG) messages
 * @named_err_cb: error callback for name-addressed (TIPC_NAMED_MSG) messages
 * @conn_err_cb: callback for connection shutdown events
 * @msg_cb: delivery callback for direct messages (set by tipc_createport)
 * @named_msg_cb: delivery callback for name-addressed messages
 * @conn_msg_cb: delivery callback for connection-oriented messages
 * @continue_event_cb: called when port congestion abates (may be NULL)
 * @uport_list: adjacent user ports in list of ports held by user
 */

struct user_port {
	u32 user_ref;
	void *usr_handle;
	u32 ref;
	tipc_msg_err_event err_cb;
	tipc_named_msg_err_event named_err_cb;
	tipc_conn_shutdown_event conn_err_cb;
	tipc_msg_event msg_cb;
	tipc_named_msg_event named_msg_cb;
	tipc_conn_msg_event conn_msg_cb;
	tipc_continue_event continue_event_cb;
	struct list_head uport_list;
};
69
/**
 * struct port - TIPC port structure
 * @publ: TIPC port info available to privileged users
 * @port_list: adjacent ports in TIPC's global list of ports
 * @dispatcher: ptr to routine which handles received messages
 * @wakeup: ptr to routine to call when port is no longer congested
 * @user_port: ptr to user port associated with port (if any)
 * @wait_list: adjacent ports in list of ports waiting on link congestion
 * @congested_link: ptr to congested link port is waiting on
 * @waiting_pkts: # of packets awaiting send (presumably — confirm usage)
 * @sent: # of messages sent by port (paired with @acked for flow control)
 * @acked: # of sent messages acknowledged (see port_congested())
 * @publications: list of publications for port
 * @pub_count: total # of publications port has made during its lifetime
 * @max_pkt: maximum packet size "hint" used when building messages sent by port
 * @probing_state: connection probing state (set to CONFIRMED on connect)
 * @probing_interval: period for connection probing timer (PROBING_INTERVAL)
 * @last_in_seqno: sequence number of last inbound message (presumably — confirm)
 * @timer: connection probing timer (kernel-doc previously said @timer_ref,
 *         but the field is named 'timer')
 * @subscription: "node down" subscription used to terminate failed connections
 */

struct port {
	struct tipc_port publ;
	struct list_head port_list;
	u32 (*dispatcher)(struct tipc_port *, struct sk_buff *);
	void (*wakeup)(struct tipc_port *);
	struct user_port *user_port;
	struct list_head wait_list;
	struct link *congested_link;
	u32 waiting_pkts;
	u32 sent;
	u32 acked;
	struct list_head publications;
	u32 pub_count;
	u32 max_pkt;
	u32 probing_state;
	u32 probing_interval;
	u32 last_in_seqno;
	struct timer_list timer;
	struct node_subscr subscription;
};
112
113extern spinlock_t port_list_lock;
114struct port_list;
115
116int port_recv_sections(struct port *p_ptr, u32 num_sect,
117 struct iovec const *msg_sect);
118int port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
119 struct iovec const *msg_sect, u32 num_sect,
120 int err);
121struct sk_buff *port_get_ports(void);
122struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space);
123void port_recv_proto_msg(struct sk_buff *buf);
124void port_recv_mcast(struct sk_buff *buf, struct port_list *dp);
125void port_reinit(void);
126
/**
 * port_lock - lock port instance referred to and return its pointer
 *
 * Returns NULL if the reference is not (or no longer) valid; every
 * caller in port.c checks for this before touching the port.
 */

static inline struct port *port_lock(u32 ref)
{
	return (struct port *)ref_lock(ref);
}
135
/**
 * port_unlock - unlock a port instance
 *
 * Can use pointer instead of ref_unlock() since port is already locked.
 * Releases the same entry lock that port_lock()/ref_lock() acquired.
 */

static inline void port_unlock(struct port *p_ptr)
{
	spin_unlock_bh(p_ptr->publ.lock);
}
146
/* Translate a port reference to its pointer WITHOUT taking the port
 * lock (may return NULL for an invalid reference). */
static inline struct port* port_deref(u32 ref)
{
	return (struct port *)ref_deref(ref);
}
151
/* Peer's port reference, as cached in the destport field of the port's
 * prebuilt message header (valid for connected ports). */
static inline u32 peer_port(struct port *p_ptr)
{
	return msg_destport(&p_ptr->publ.phdr);
}
156
/* Peer's node address, as cached in the destnode field of the port's
 * prebuilt message header (valid for connected ports). */
static inline u32 peer_node(struct port *p_ptr)
{
	return msg_destnode(&p_ptr->publ.phdr);
}
161
162static inline int port_congested(struct port *p_ptr)
163{
164 return((p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2));
165}
166
/**
 * port_recv_msg - receive message from lower layer and deliver to port user
 *
 * Returns the message's data size on successful delivery, otherwise the
 * result of rejecting the message back to its sender.
 */

static inline int port_recv_msg(struct sk_buff *buf)
{
	struct port *p_ptr;
	struct tipc_msg *msg = buf_msg(buf);
	u32 destport = msg_destport(msg);
	u32 dsz = msg_data_sz(msg);
	u32 err;

	/* forward unresolved named message */
	if (unlikely(!destport)) {
		net_route_msg(buf);
		return dsz;
	}

	/* validate destination & pass to port, otherwise reject message */
	p_ptr = port_lock(destport);
	if (likely(p_ptr)) {
		if (likely(p_ptr->publ.connected)) {
			/* connected port: message must come from the peer
			 * and be connection-oriented */
			if ((unlikely(msg_origport(msg) != peer_port(p_ptr))) ||
			    (unlikely(msg_orignode(msg) != peer_node(p_ptr))) ||
			    (unlikely(!msg_connected(msg)))) {
				err = TIPC_ERR_NO_PORT;
				port_unlock(p_ptr);
				goto reject;
			}
		}
		/* dispatcher returns TIPC_OK (0) on successful delivery */
		err = p_ptr->dispatcher(&p_ptr->publ, buf);
		port_unlock(p_ptr);
		if (likely(!err))
			return dsz;
	} else {
		err = TIPC_ERR_NO_PORT;
	}
reject:
	dbg("port->rejecting, err = %x..\n",err);
	return tipc_reject_msg(buf, err);
}
208
209#endif
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
new file mode 100644
index 000000000000..944093fe246f
--- /dev/null
+++ b/net/tipc/ref.c
@@ -0,0 +1,189 @@
1/*
2 * net/tipc/ref.c: TIPC object registry code
3 *
4 * Copyright (c) 1991-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "ref.h"
39#include "port.h"
40#include "subscr.h"
41#include "name_distr.h"
42#include "name_table.h"
43#include "config.h"
44#include "discover.h"
45#include "bearer.h"
46#include "node.h"
47#include "bcast.h"
48
49/*
50 * Object reference table consists of 2**N entries.
51 *
52 * A used entry has object ptr != 0, reference == XXXX|own index
53 * (XXXX changes each time entry is acquired)
54 * A free entry has object ptr == 0, reference == YYYY|next free index
55 * (YYYY is one more than last used XXXX)
56 *
57 * Free list is initially chained from entry (2**N)-1 to entry 1.
58 * Entry 0 is not used to allow index 0 to indicate the end of the free list.
59 *
60 * Note: Any accidental reference of the form XXXX|0--0 won't match entry 0
61 * because entry 0's reference field has the form XXXX|1--1.
62 */
63
64struct ref_table ref_table = { 0 };
65
66rwlock_t reftbl_lock = RW_LOCK_UNLOCKED;
67
68/**
69 * ref_table_init - create reference table for objects
70 */
71
72int ref_table_init(u32 requested_size, u32 start)
73{
74 struct reference *table;
75 u32 sz = 1 << 4;
76 u32 index_mask;
77 int i;
78
79 while (sz < requested_size) {
80 sz <<= 1;
81 }
82 table = (struct reference *)vmalloc(sz * sizeof(struct reference));
83 if (table == NULL)
84 return -ENOMEM;
85
86 write_lock_bh(&reftbl_lock);
87 index_mask = sz - 1;
88 for (i = sz - 1; i >= 0; i--) {
89 table[i].object = 0;
90 table[i].lock = SPIN_LOCK_UNLOCKED;
91 table[i].data.next_plus_upper = (start & ~index_mask) + i - 1;
92 }
93 ref_table.entries = table;
94 ref_table.index_mask = index_mask;
95 ref_table.first_free = sz - 1;
96 ref_table.last_free = 1;
97 write_unlock_bh(&reftbl_lock);
98 return TIPC_OK;
99}
100
101/**
102 * ref_table_stop - destroy reference table for objects
103 */
104
105void ref_table_stop(void)
106{
107 if (!ref_table.entries)
108 return;
109
110 vfree(ref_table.entries);
111 ref_table.entries = 0;
112}
113
/**
 * ref_acquire - create reference to an object
 *
 * Return a unique reference value which can be translated back to the pointer
 * 'object' at a later time. Also, pass back a pointer to the lock protecting
 * the object, but without locking it.
 *
 * Returns 0 if the free list is exhausted.
 */

u32 ref_acquire(void *object, spinlock_t **lock)
{
	struct reference *entry;
	u32 index;
	u32 index_mask;
	u32 next_plus_upper;
	u32 reference = 0;

	assert(ref_table.entries && object);

	write_lock_bh(&reftbl_lock);
	if (ref_table.first_free) {
		index = ref_table.first_free;
		entry = &(ref_table.entries[index]);
		index_mask = ref_table.index_mask;
		/* take lock in case a previous user of entry still holds it */
		spin_lock_bh(&entry->lock);
		/* free entries store next-free-index in the low bits and the
		 * (incremented) generation in the high bits; combining the
		 * high bits with our own index yields the new reference */
		next_plus_upper = entry->data.next_plus_upper;
		ref_table.first_free = next_plus_upper & index_mask;
		reference = (next_plus_upper & ~index_mask) + index;
		entry->data.reference = reference;
		entry->object = object;
		if (lock != 0)
			*lock = &entry->lock;
		spin_unlock_bh(&entry->lock);
	}
	write_unlock_bh(&reftbl_lock);
	return reference;
}
151
/**
 * ref_discard - invalidate references to an object
 *
 * Disallow future references to an object and free up the entry for re-use.
 * Note: The entry's spin_lock may still be busy after discard
 */

void ref_discard(u32 ref)
{
	struct reference *entry;
	u32 index;
	u32 index_mask;

	assert(ref_table.entries);
	assert(ref != 0);

	write_lock_bh(&reftbl_lock);
	index_mask = ref_table.index_mask;
	/* low bits of a reference are the table index */
	index = ref & index_mask;
	entry = &(ref_table.entries[index]);
	assert(entry->object != 0);
	assert(entry->data.reference == ref);

	/* mark entry as unused */
	entry->object = 0;
	if (ref_table.first_free == 0)
		ref_table.first_free = index;
	else
		/* next_plus_upper is always XXXX|0--0 for last free entry */
		ref_table.entries[ref_table.last_free].data.next_plus_upper
			|= index;
	ref_table.last_free = index;

	/* increment upper bits of entry to invalidate subsequent references */
	entry->data.next_plus_upper = (ref & ~index_mask) + (index_mask + 1);
	write_unlock_bh(&reftbl_lock);
}
189
diff --git a/net/tipc/ref.h b/net/tipc/ref.h
new file mode 100644
index 000000000000..429cde57228a
--- /dev/null
+++ b/net/tipc/ref.h
@@ -0,0 +1,131 @@
1/*
2 * net/tipc/ref.h: Include file for TIPC object registry code
3 *
4 * Copyright (c) 1991-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_REF_H
38#define _TIPC_REF_H
39
/**
 * struct reference - TIPC object reference entry
 * @object: pointer to object associated with reference entry
 *          (NULL/0 while the entry is on the free list)
 * @lock: spinlock controlling access to object
 * @data: reference value associated with object (or link to next unused entry);
 *        the two union members share storage: an in-use entry holds the full
 *        reference value, a free entry holds the next free index in its low
 *        bits plus the generation count in its upper bits
 */

struct reference {
	void *object;
	spinlock_t lock;
	union {
		u32 next_plus_upper;
		u32 reference;
	} data;
};
55
/**
 * struct ref_table - table of TIPC object reference entries
 * @entries: pointer to array of reference entries
 * @index_mask: bitmask for array index portion of reference values
 * @first_free: array index of first unused object reference entry
 *              (0 means the free list is empty)
 * @last_free: array index of last unused object reference entry
 */

struct ref_table {
	struct reference *entries;
	u32 index_mask;
	u32 first_free;
	u32 last_free;
};
70
71extern struct ref_table ref_table;
72
73int ref_table_init(u32 requested_size, u32 start);
74void ref_table_stop(void);
75
76u32 ref_acquire(void *object, spinlock_t **lock);
77void ref_discard(u32 ref);
78
79
80/**
81 * ref_lock - lock referenced object and return pointer to it
82 */
83
84static inline void *ref_lock(u32 ref)
85{
86 if (likely(ref_table.entries)) {
87 struct reference *r =
88 &ref_table.entries[ref & ref_table.index_mask];
89
90 spin_lock_bh(&r->lock);
91 if (likely(r->data.reference == ref))
92 return r->object;
93 spin_unlock_bh(&r->lock);
94 }
95 return 0;
96}
97
98/**
99 * ref_unlock - unlock referenced object
100 */
101
102static inline void ref_unlock(u32 ref)
103{
104 if (likely(ref_table.entries)) {
105 struct reference *r =
106 &ref_table.entries[ref & ref_table.index_mask];
107
108 if (likely(r->data.reference == ref))
109 spin_unlock_bh(&r->lock);
110 else
111 err("ref_unlock() invoked using obsolete reference\n");
112 }
113}
114
115/**
116 * ref_deref - return pointer referenced object (without locking it)
117 */
118
119static inline void *ref_deref(u32 ref)
120{
121 if (likely(ref_table.entries)) {
122 struct reference *r =
123 &ref_table.entries[ref & ref_table.index_mask];
124
125 if (likely(r->data.reference == ref))
126 return r->object;
127 }
128 return 0;
129}
130
131#endif
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
new file mode 100644
index 000000000000..d21f8c0cd25a
--- /dev/null
+++ b/net/tipc/socket.c
@@ -0,0 +1,1726 @@
1/*
2 * net/tipc/socket.c: TIPC socket API
3 *
4 * Copyright (c) 2001-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <linux/module.h>
38#include <linux/types.h>
39#include <linux/net.h>
40#include <linux/socket.h>
41#include <linux/errno.h>
42#include <linux/mm.h>
43#include <linux/slab.h>
44#include <linux/poll.h>
45#include <linux/version.h>
46#include <linux/fcntl.h>
47#include <linux/version.h>
48#include <asm/semaphore.h>
49#include <asm/string.h>
50#include <asm/atomic.h>
51#include <net/sock.h>
52
53#include <linux/tipc.h>
54#include <linux/tipc_config.h>
55#include <net/tipc/tipc_msg.h>
56#include <net/tipc/tipc_port.h>
57
58#include "core.h"
59
60#define SS_LISTENING -1 /* socket is listening */
61#define SS_READY -2 /* socket is connectionless */
62
63#define OVERLOAD_LIMIT_BASE 5000
64
struct tipc_sock {
	struct sock sk;		/* must remain first: tipc_sk() casts sock* to tipc_sock* */
	struct tipc_port *p;	/* underlying TIPC port (port->usr_handle points back here) */
	struct semaphore sem;	/* serializes socket-level operations on this socket */
};
70
71#define tipc_sk(sk) ((struct tipc_sock*)sk)
72
73static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf);
74static void wakeupdispatch(struct tipc_port *tport);
75
76static struct proto_ops packet_ops;
77static struct proto_ops stream_ops;
78static struct proto_ops msg_ops;
79
80static struct proto tipc_proto;
81
82static int sockets_enabled = 0;
83
84static atomic_t tipc_queue_size = ATOMIC_INIT(0);
85
86
87/*
88 * sock_lock(): Lock a port/socket pair. lock_sock() can
89 * not be used here, since the same lock must protect ports
90 * with non-socket interfaces.
91 * See net.c for description of locking policy.
92 */
93static inline void sock_lock(struct tipc_sock* tsock)
94{
95 spin_lock_bh(tsock->p->lock);
96}
97
98/*
99 * sock_unlock(): Unlock a port/socket pair
100 */
101static inline void sock_unlock(struct tipc_sock* tsock)
102{
103 spin_unlock_bh(tsock->p->lock);
104}
105
106/**
107 * pollmask - determine the current set of poll() events for a socket
108 * @sock: socket structure
109 *
110 * TIPC sets the returned events as follows:
111 * a) POLLRDNORM and POLLIN are set if the socket's receive queue is non-empty
112 * or if a connection-oriented socket is does not have an active connection
113 * (i.e. a read operation will not block).
114 * b) POLLOUT is set except when a socket's connection has been terminated
115 * (i.e. a write operation will not block).
116 * c) POLLHUP is set when a socket's connection has been terminated.
117 *
118 * IMPORTANT: The fact that a read or write operation will not block does NOT
119 * imply that the operation will succeed!
120 *
121 * Returns pollmask value
122 */
123
124static inline u32 pollmask(struct socket *sock)
125{
126 u32 mask;
127
128 if ((skb_queue_len(&sock->sk->sk_receive_queue) != 0) ||
129 (sock->state == SS_UNCONNECTED) ||
130 (sock->state == SS_DISCONNECTING))
131 mask = (POLLRDNORM | POLLIN);
132 else
133 mask = 0;
134
135 if (sock->state == SS_DISCONNECTING)
136 mask |= POLLHUP;
137 else
138 mask |= POLLOUT;
139
140 return mask;
141}
142
143
144/**
145 * advance_queue - discard first buffer in queue
146 * @tsock: TIPC socket
147 */
148
149static inline void advance_queue(struct tipc_sock *tsock)
150{
151 sock_lock(tsock);
152 buf_discard(skb_dequeue(&tsock->sk.sk_receive_queue));
153 sock_unlock(tsock);
154 atomic_dec(&tipc_queue_size);
155}
156
/**
 * tipc_create - create a TIPC socket
 * @sock: pre-allocated socket structure
 * @protocol: protocol indicator (must be 0)
 *
 * This routine creates and attaches a 'struct sock' to the 'struct socket',
 * then create and attaches a TIPC port to the 'struct sock' part.
 * The TIPC port is created first; if socket allocation then fails the port
 * is deleted again so nothing leaks.
 *
 * Returns 0 on success, errno otherwise
 */
static int tipc_create(struct socket *sock, int protocol)
{
	struct tipc_sock *tsock;
	struct tipc_port *port;
	struct sock *sk;
	u32 ref;

	/* Only the four TIPC-supported socket types are accepted */
	if ((sock->type != SOCK_STREAM) &&
	    (sock->type != SOCK_SEQPACKET) &&
	    (sock->type != SOCK_DGRAM) &&
	    (sock->type != SOCK_RDM))
		return -EPROTOTYPE;

	if (unlikely(protocol != 0))
		return -EPROTONOSUPPORT;

	ref = tipc_createport_raw(0, &dispatch, &wakeupdispatch, TIPC_LOW_IMPORTANCE);
	if (unlikely(!ref))
		return -ENOMEM;

	sock->state = SS_UNCONNECTED;

	/* Select ops table and port attributes per socket type;
	 * connectionless types (DGRAM/RDM) start in SS_READY */
	switch (sock->type) {
	case SOCK_STREAM:
		sock->ops = &stream_ops;
		break;
	case SOCK_SEQPACKET:
		sock->ops = &packet_ops;
		break;
	case SOCK_DGRAM:
		tipc_set_portunreliable(ref, 1);
		/* fall through */
	case SOCK_RDM:
		tipc_set_portunreturnable(ref, 1);
		sock->ops = &msg_ops;
		sock->state = SS_READY;
		break;
	}

	sk = sk_alloc(AF_TIPC, GFP_KERNEL, &tipc_proto, 1);
	if (!sk) {
		/* undo port creation on failure */
		tipc_deleteport(ref);
		return -ENOMEM;
	}

	sock_init_data(sock, sk);
	init_waitqueue_head(sk->sk_sleep);
	sk->sk_rcvtimeo = 8 * HZ; /* default connect timeout = 8s */

	/* Cross-link socket and port so each can find the other */
	tsock = tipc_sk(sk);
	port = tipc_get_port(ref);

	tsock->p = port;
	port->usr_handle = tsock;

	init_MUTEX(&tsock->sem);

	dbg("sock_create: %x\n",tsock);

	atomic_inc(&tipc_user_count);

	return 0;
}
230
/**
 * release - destroy a TIPC socket
 * @sock: socket to destroy
 *
 * This routine cleans up any messages that are still queued on the socket.
 * For DGRAM and RDM socket types, all queued messages are rejected.
 * For SEQPACKET and STREAM socket types, the first message is rejected
 * and any others are discarded. (If the first message on a STREAM socket
 * is partially-read, it is discarded and the next one is rejected instead.)
 *
 * NOTE: Rejected messages are not necessarily returned to the sender! They
 * are returned or discarded according to the "destination droppable" setting
 * specified for the message by the sender.
 *
 * Returns 0 on success, errno otherwise
 */

static int release(struct socket *sock)
{
	struct tipc_sock *tsock = tipc_sk(sock->sk);
	struct sock *sk = sock->sk;
	int res = TIPC_OK;
	struct sk_buff *buf;

	dbg("sock_delete: %x\n",tsock);
	if (!tsock)
		return 0;
	/* NOTE(review): return value ignored — if interrupted by a signal the
	 * code proceeds without holding the semaphore; confirm intended */
	down_interruptible(&tsock->sem);
	if (!sock->sk) {
		up(&tsock->sem);
		return 0;
	}

	/* Reject unreceived messages, unless no longer connected */

	while (sock->state != SS_DISCONNECTING) {
		sock_lock(tsock);
		buf = skb_dequeue(&sk->sk_receive_queue);
		if (!buf)
			tsock->p->usr_handle = 0;
		sock_unlock(tsock);
		if (!buf)
			break;
		/* a partially-read STREAM message has handle != data start */
		if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf)))
			buf_discard(buf);
		else
			tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
		atomic_dec(&tipc_queue_size);
	}

	/* Delete TIPC port */

	res = tipc_deleteport(tsock->p->ref);
	sock->sk = NULL;

	/* Discard any remaining messages */

	while ((buf = skb_dequeue(&sk->sk_receive_queue))) {
		buf_discard(buf);
		atomic_dec(&tipc_queue_size);
	}

	up(&tsock->sem);

	sock_put(sk);

	atomic_dec(&tipc_user_count);
	return res;
}
300
/**
 * bind - associate or disassocate TIPC name(s) with a socket
 * @sock: socket structure
 * @uaddr: socket address describing name(s) and desired operation
 * @uaddr_len: size of socket address data structure
 *
 * Name and name sequence binding is indicated using a positive scope value;
 * a negative scope value unbinds the specified name. Specifying no name
 * (i.e. a socket address length of 0) unbinds all names from the socket.
 *
 * Returns 0 on success, errno otherwise
 */

static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
{
	struct tipc_sock *tsock = tipc_sk(sock->sk);
	struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
	int res;

	if (down_interruptible(&tsock->sem))
		return -ERESTARTSYS;

	/* Zero-length address => withdraw all published names */
	if (unlikely(!uaddr_len)) {
		res = tipc_withdraw(tsock->p->ref, 0, 0);
		goto exit;
	}

	if (uaddr_len < sizeof(struct sockaddr_tipc)) {
		res = -EINVAL;
		goto exit;
	}

	if (addr->family != AF_TIPC) {
		res = -EAFNOSUPPORT;
		goto exit;
	}
	/* A single name is handled as a one-element name sequence */
	if (addr->addrtype == TIPC_ADDR_NAME)
		addr->addr.nameseq.upper = addr->addr.nameseq.lower;
	else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
		res = -EAFNOSUPPORT;
		goto exit;
	}

	/* Positive scope publishes; negative scope withdraws (scope = -scope) */
	if (addr->scope > 0)
		res = tipc_publish(tsock->p->ref, addr->scope,
				   &addr->addr.nameseq);
	else
		res = tipc_withdraw(tsock->p->ref, -addr->scope,
				    &addr->addr.nameseq);
exit:
	up(&tsock->sem);
	return res;
}
354
355/**
356 * get_name - get port ID of socket or peer socket
357 * @sock: socket structure
358 * @uaddr: area for returned socket address
359 * @uaddr_len: area for returned length of socket address
360 * @peer: 0 to obtain socket name, 1 to obtain peer socket name
361 *
362 * Returns 0 on success, errno otherwise
363 */
364
365static int get_name(struct socket *sock, struct sockaddr *uaddr,
366 int *uaddr_len, int peer)
367{
368 struct tipc_sock *tsock = tipc_sk(sock->sk);
369 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
370 u32 res;
371
372 if (down_interruptible(&tsock->sem))
373 return -ERESTARTSYS;
374
375 *uaddr_len = sizeof(*addr);
376 addr->addrtype = TIPC_ADDR_ID;
377 addr->family = AF_TIPC;
378 addr->scope = 0;
379 if (peer)
380 res = tipc_peer(tsock->p->ref, &addr->addr.id);
381 else
382 res = tipc_ownidentity(tsock->p->ref, &addr->addr.id);
383 addr->addr.name.domain = 0;
384
385 up(&tsock->sem);
386 return res;
387}
388
/**
 * poll - read and possibly block on pollmask
 * @file: file structure associated with the socket
 * @sock: socket for which to calculate the poll bits
 * @wait: poll table passed through to poll_wait() for wait-queue registration
 *
 * Returns the pollmask
 */

static unsigned int poll(struct file *file, struct socket *sock,
			 poll_table *wait)
{
	/* register on the socket's wait queue, then report current events */
	poll_wait(file, sock->sk->sk_sleep, wait);
	/* NEED LOCK HERE? */
	return pollmask(sock);
}
405
406/**
407 * dest_name_check - verify user is permitted to send to specified port name
408 * @dest: destination address
409 * @m: descriptor for message to be sent
410 *
411 * Prevents restricted configuration commands from being issued by
412 * unauthorized users.
413 *
414 * Returns 0 if permission is granted, otherwise errno
415 */
416
417static inline int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
418{
419 struct tipc_cfg_msg_hdr hdr;
420
421 if (likely(dest->addr.name.name.type >= TIPC_RESERVED_TYPES))
422 return 0;
423 if (likely(dest->addr.name.name.type == TIPC_TOP_SRV))
424 return 0;
425
426 if (likely(dest->addr.name.name.type != TIPC_CFG_SRV))
427 return -EACCES;
428
429 if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
430 return -EFAULT;
431 if ((ntohs(hdr.tcm_type) & 0xC000) & (!capable(CAP_NET_ADMIN)))
432 return -EACCES;
433
434 return 0;
435}
436
/**
 * send_msg - send message in connectionless manner
 * @iocb: (unused)
 * @sock: socket structure
 * @m: message to send
 * @total_len: (unused)
 *
 * Message must have an destination specified explicitly.
 * Used for SOCK_RDM and SOCK_DGRAM messages,
 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
 *
 * On link congestion (-ELINKCONG) the send is retried after the port's
 * congestion clears, unless MSG_DONTWAIT is set.
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */

static int send_msg(struct kiocb *iocb, struct socket *sock,
		    struct msghdr *m, size_t total_len)
{
	struct tipc_sock *tsock = tipc_sk(sock->sk);
	struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
	struct sk_buff *buf;
	int needs_conn;
	int res = -EINVAL;

	if (unlikely(!dest))
		return -EDESTADDRREQ;
	if (unlikely(dest->family != AF_TIPC))
		return -EINVAL;

	/* Connection-oriented socket sending its implicit-connect 'SYN' */
	needs_conn = (sock->state != SS_READY);
	if (unlikely(needs_conn)) {
		if (sock->state == SS_LISTENING)
			return -EPIPE;
		if (sock->state != SS_UNCONNECTED)
			return -EISCONN;
		if ((tsock->p->published) ||
		    ((sock->type == SOCK_STREAM) && (total_len != 0)))
			return -EOPNOTSUPP;
	}

	if (down_interruptible(&tsock->sem))
		return -ERESTARTSYS;

	if (needs_conn) {

		/* Abort any pending connection attempts (very unlikely) */

		while ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) {
			tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
			atomic_dec(&tipc_queue_size);
		}

		sock->state = SS_CONNECTING;
	}

	do {
		if (dest->addrtype == TIPC_ADDR_NAME) {
			if ((res = dest_name_check(dest, m)))
				goto exit;
			res = tipc_send2name(tsock->p->ref,
					     &dest->addr.name.name,
					     dest->addr.name.domain,
					     m->msg_iovlen,
					     m->msg_iov);
		}
		else if (dest->addrtype == TIPC_ADDR_ID) {
			res = tipc_send2port(tsock->p->ref,
					     &dest->addr.id,
					     m->msg_iovlen,
					     m->msg_iov);
		}
		else if (dest->addrtype == TIPC_ADDR_MCAST) {
			if (needs_conn) {
				res = -EOPNOTSUPP;
				goto exit;
			}
			if ((res = dest_name_check(dest, m)))
				goto exit;
			res = tipc_multicast(tsock->p->ref,
					     &dest->addr.nameseq,
					     0,
					     m->msg_iovlen,
					     m->msg_iov);
		}
		/* Note: the 'exit' label lives inside this branch, so every
		 * "goto exit" also releases the semaphore and returns res */
		if (likely(res != -ELINKCONG)) {
exit:
			up(&tsock->sem);
			return res;
		}
		if (m->msg_flags & MSG_DONTWAIT) {
			res = -EWOULDBLOCK;
			goto exit;
		}
		/* congested: sleep until the port drains, then retry */
		if (wait_event_interruptible(*sock->sk->sk_sleep,
					     !tsock->p->congested)) {
			res = -ERESTARTSYS;
			goto exit;
		}
	} while (1);
}
537
/**
 * send_packet - send a connection-oriented message
 * @iocb: (unused)
 * @sock: socket structure
 * @m: message to send
 * @total_len: (unused)
 *
 * Used for SOCK_SEQPACKET messages and SOCK_STREAM data.
 * Retries on link congestion unless MSG_DONTWAIT is set.
 *
 * Returns the number of bytes sent on success, or errno otherwise
 */

static int send_packet(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t total_len)
{
	struct tipc_sock *tsock = tipc_sk(sock->sk);
	struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
	int res;

	/* Handle implied connection establishment */

	if (unlikely(dest))
		return send_msg(iocb, sock, m, total_len);

	if (down_interruptible(&tsock->sem)) {
		return -ERESTARTSYS;
	}

	if (unlikely(sock->state != SS_CONNECTED)) {
		if (sock->state == SS_DISCONNECTING)
			res = -EPIPE;
		else
			res = -ENOTCONN;
		goto exit;
	}

	do {
		res = tipc_send(tsock->p->ref, m->msg_iovlen, m->msg_iov);
		/* 'exit' label inside the branch: every "goto exit" also
		 * releases the semaphore and returns res */
		if (likely(res != -ELINKCONG)) {
exit:
			up(&tsock->sem);
			return res;
		}
		if (m->msg_flags & MSG_DONTWAIT) {
			res = -EWOULDBLOCK;
			goto exit;
		}
		/* congested: wait for the port to drain, then retry */
		if (wait_event_interruptible(*sock->sk->sk_sleep,
					     !tsock->p->congested)) {
			res = -ERESTARTSYS;
			goto exit;
		}
	} while (1);
}
592
/**
 * send_stream - send stream-oriented data
 * @iocb: (unused)
 * @sock: socket structure
 * @m: data to send
 * @total_len: total length of data to be sent
 *
 * Used for SOCK_STREAM data.  Payloads up to TIPC_MAX_USER_MSG_SIZE are
 * sent as a single packet; larger payloads are chopped into maximum-size
 * fragments, each sent via send_packet().
 *
 * Returns the number of bytes sent on success, or errno otherwise
 *
 * NOTE(review): if send_packet() fails after some fragments were already
 * delivered, the error is returned and the partial byte count is lost —
 * confirm whether callers rely on this.
 */


static int send_stream(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t total_len)
{
	struct msghdr my_msg;
	struct iovec my_iov;
	struct iovec *curr_iov;
	int curr_iovlen;
	char __user *curr_start;
	int curr_left;
	int bytes_to_send;
	int res;

	/* Small enough for one message: no fragmentation needed */
	if (likely(total_len <= TIPC_MAX_USER_MSG_SIZE))
		return send_packet(iocb, sock, m, total_len);

	/* Can only send large data streams if already connected */

	if (unlikely(sock->state != SS_CONNECTED)) {
		if (sock->state == SS_DISCONNECTING)
			return -EPIPE;
		else
			return -ENOTCONN;
	}

	/*
	 * Send each iovec entry using one or more messages
	 *
	 * Note: This algorithm is good for the most likely case
	 * (i.e. one large iovec entry), but could be improved to pass sets
	 * of small iovec entries into send_packet().
	 */

	/* local copy of the msghdr redirected at a single-entry iovec */
	my_msg = *m;
	curr_iov = my_msg.msg_iov;
	curr_iovlen = my_msg.msg_iovlen;
	my_msg.msg_iov = &my_iov;
	my_msg.msg_iovlen = 1;

	while (curr_iovlen--) {
		curr_start = curr_iov->iov_base;
		curr_left = curr_iov->iov_len;

		while (curr_left) {
			bytes_to_send = (curr_left < TIPC_MAX_USER_MSG_SIZE)
				? curr_left : TIPC_MAX_USER_MSG_SIZE;
			my_iov.iov_base = curr_start;
			my_iov.iov_len = bytes_to_send;
			/* total_len of 0 is fine: send_packet() ignores it */
			if ((res = send_packet(iocb, sock, &my_msg, 0)) < 0)
				return res;
			curr_left -= bytes_to_send;
			curr_start += bytes_to_send;
		}

		curr_iov++;
	}

	return total_len;
}
664
/**
 * auto_connect - complete connection setup to a remote port
 * @sock: socket structure
 * @tsock: TIPC-specific socket structure
 * @msg: peer's response message
 *
 * An errored response means the peer refused: the socket is marked
 * disconnecting.  Otherwise the local port is connected to the peer and
 * inherits the peer message's importance level.
 *
 * Returns 0 on success, errno otherwise
 */

static int auto_connect(struct socket *sock, struct tipc_sock *tsock,
			struct tipc_msg *msg)
{
	struct tipc_portid remote;
	u32 pref = tsock->p->ref;

	if (msg_errcode(msg)) {
		sock->state = SS_DISCONNECTING;
		return -ECONNREFUSED;
	}

	remote.ref = msg_origport(msg);
	remote.node = msg_orignode(msg);
	tipc_connect2port(pref, &remote);
	tipc_set_portimportance(pref, msg_importance(msg));
	sock->state = SS_CONNECTED;
	return 0;
}
691
692/**
693 * set_orig_addr - capture sender's address for received message
694 * @m: descriptor for message info
695 * @msg: received message header
696 *
697 * Note: Address is not captured if not requested by receiver.
698 */
699
700static inline void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
701{
702 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)m->msg_name;
703
704 if (addr) {
705 addr->family = AF_TIPC;
706 addr->addrtype = TIPC_ADDR_ID;
707 addr->addr.id.ref = msg_origport(msg);
708 addr->addr.id.node = msg_orignode(msg);
709 addr->addr.name.domain = 0; /* could leave uninitialized */
710 addr->scope = 0; /* could leave uninitialized */
711 m->msg_namelen = sizeof(struct sockaddr_tipc);
712 }
713}
714
/**
 * anc_data_recv - optionally capture ancillary data for received message
 * @m: descriptor for message info
 * @msg: received message header
 * @tport: TIPC port associated with message
 *
 * Note: Ancillary data is not captured if not requested by receiver
 * (i.e. m->msg_controllen == 0).
 *
 * Returns 0 if successful, otherwise errno
 */

static inline int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
				struct tipc_port *tport)
{
	u32 anc_data[3];
	u32 err;
	u32 dest_type;
	int res;

	if (likely(m->msg_controllen == 0))
		return 0;

	/* Optionally capture errored message object(s) */

	err = msg ? msg_errcode(msg) : 0;
	if (unlikely(err)) {
		/* TIPC_ERRINFO: error code + size of any returned data */
		anc_data[0] = err;
		anc_data[1] = msg_data_sz(msg);
		if ((res = put_cmsg(m, SOL_SOCKET, TIPC_ERRINFO, 8, anc_data)))
			return res;
		if (anc_data[1] &&
		    (res = put_cmsg(m, SOL_SOCKET, TIPC_RETDATA, anc_data[1],
				    msg_data(msg))))
			return res;
	}

	/* Optionally capture message destination object */

	dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
	switch (dest_type) {
	case TIPC_NAMED_MSG:
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		/* presumably a named message has lower == upper, so the
		 * lower bound doubles as the upper bound here — confirm */
		anc_data[2] = msg_namelower(msg);
		break;
	case TIPC_MCAST_MSG:
		anc_data[0] = msg_nametype(msg);
		anc_data[1] = msg_namelower(msg);
		anc_data[2] = msg_nameupper(msg);
		break;
	case TIPC_CONN_MSG:
		anc_data[0] = tport->conn_type;
		anc_data[1] = tport->conn_instance;
		anc_data[2] = tport->conn_instance;
		break;
	default:
		/* direct messages carry no destination-name info */
		anc_data[0] = 0;
	}
	if (anc_data[0] &&
	    (res = put_cmsg(m, SOL_SOCKET, TIPC_DESTNAME, 12, anc_data)))
		return res;

	return 0;
}
779
/**
 * recv_msg - receive packet-oriented message
 * @iocb: (unused)
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
 * If the complete message doesn't fit in user area, truncate it.
 *
 * Returns size of returned message data, errno otherwise
 */

static int recv_msg(struct kiocb *iocb, struct socket *sock,
		    struct msghdr *m, size_t buf_len, int flags)
{
	struct tipc_sock *tsock = tipc_sk(sock->sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	unsigned int q_len;
	unsigned int sz;
	u32 err;
	int res;

	/* Currently doesn't support receiving into multiple iovec entries */

	if (m->msg_iovlen != 1)
		return -EOPNOTSUPP;

	/* Catch invalid receive attempts */

	if (unlikely(!buf_len))
		return -EINVAL;

	if (sock->type == SOCK_SEQPACKET) {
		if (unlikely(sock->state == SS_UNCONNECTED))
			return -ENOTCONN;
		/* disconnecting with an empty queue: nothing left to read */
		if (unlikely((sock->state == SS_DISCONNECTING) &&
			     (skb_queue_len(&sock->sk->sk_receive_queue) == 0)))
			return -ENOTCONN;
	}

	/* Look for a message in receive queue; wait if necessary */

	if (unlikely(down_interruptible(&tsock->sem)))
		return -ERESTARTSYS;

restart:
	if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
		     (flags & MSG_DONTWAIT))) {
		res = -EWOULDBLOCK;
		goto exit;
	}

	/* sleep until a message arrives or the connection goes down */
	if ((res = wait_event_interruptible(
		*sock->sk->sk_sleep,
		((q_len = skb_queue_len(&sock->sk->sk_receive_queue)) ||
		 (sock->state == SS_DISCONNECTING))) )) {
		goto exit;
	}

	/* Catch attempt to receive on an already terminated connection */
	/* [THIS CHECK MAY OVERLAP WITH AN EARLIER CHECK] */

	if (!q_len) {
		res = -ENOTCONN;
		goto exit;
	}

	/* Get access to first message in receive queue */

	buf = skb_peek(&sock->sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Complete connection setup for an implied connect */

	if (unlikely(sock->state == SS_CONNECTING)) {
		if ((res = auto_connect(sock, tsock, msg)))
			goto exit;
	}

	/* Discard an empty non-errored message & try again */

	if ((!sz) && (!err)) {
		advance_queue(tsock);
		goto restart;
	}

	/* Capture sender's address (optional) */

	set_orig_addr(m, msg);

	/* Capture ancillary data (optional) */

	if ((res = anc_data_recv(m, msg, tsock->p)))
		goto exit;

	/* Capture message data (if valid) & compute return value (always) */

	if (!err) {
		if (unlikely(buf_len < sz)) {
			sz = buf_len;
			m->msg_flags |= MSG_TRUNC;
		}
		if (unlikely(copy_to_user(m->msg_iov->iov_base, msg_data(msg),
					  sz))) {
			res = -EFAULT;
			goto exit;
		}
		res = sz;
	} else {
		/* errored message: orderly shutdown (or an explicit request
		 * for error info via msg_control) reads as 0, else reset */
		if ((sock->state == SS_READY) ||
		    ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */

	if (likely(!(flags & MSG_PEEK))) {
		/* acknowledge in batches to keep connection flow control open */
		if (unlikely(++tsock->p->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
			tipc_acknowledge(tsock->p->ref, tsock->p->conn_unacked);
		advance_queue(tsock);
	}
exit:
	up(&tsock->sem);
	return res;
}
911
/**
 * recv_stream - receive stream-oriented data
 * @iocb: (unused)
 * @m: descriptor for message info
 * @buf_len: total size of user buffer area
 * @flags: receive flags
 *
 * Used for SOCK_STREAM messages only. If not enough data is available
 * will optionally wait for more; never truncates data.
 * A partially-consumed buffer's read position is remembered in
 * TIPC_SKB_CB(buf)->handle so the next call resumes where this one stopped.
 *
 * Returns size of returned message data, errno otherwise
 */

static int recv_stream(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t buf_len, int flags)
{
	struct tipc_sock *tsock = tipc_sk(sock->sk);
	struct sk_buff *buf;
	struct tipc_msg *msg;
	unsigned int q_len;
	unsigned int sz;
	int sz_to_copy;
	int sz_copied = 0;
	int needed;
	char *crs = m->msg_iov->iov_base;
	unsigned char *buf_crs;
	u32 err;
	int res;

	/* Currently doesn't support receiving into multiple iovec entries */

	if (m->msg_iovlen != 1)
		return -EOPNOTSUPP;

	/* Catch invalid receive attempts */

	if (unlikely(!buf_len))
		return -EINVAL;

	if (unlikely(sock->state == SS_DISCONNECTING)) {
		/* disconnecting is OK as long as data remains queued */
		if (skb_queue_len(&sock->sk->sk_receive_queue) == 0)
			return -ENOTCONN;
	} else if (unlikely(sock->state != SS_CONNECTED))
		return -ENOTCONN;

	/* Look for a message in receive queue; wait if necessary */

	if (unlikely(down_interruptible(&tsock->sem)))
		return -ERESTARTSYS;

restart:
	if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
		     (flags & MSG_DONTWAIT))) {
		/* report data already copied on earlier loop iterations */
		res = (sz_copied == 0) ? -EWOULDBLOCK : 0;
		goto exit;
	}

	/* sleep until data arrives or the connection goes down */
	if ((res = wait_event_interruptible(
		*sock->sk->sk_sleep,
		((q_len = skb_queue_len(&sock->sk->sk_receive_queue)) ||
		 (sock->state == SS_DISCONNECTING))) )) {
		goto exit;
	}

	/* Catch attempt to receive on an already terminated connection */
	/* [THIS CHECK MAY OVERLAP WITH AN EARLIER CHECK] */

	if (!q_len) {
		res = -ENOTCONN;
		goto exit;
	}

	/* Get access to first message in receive queue */

	buf = skb_peek(&sock->sk->sk_receive_queue);
	msg = buf_msg(buf);
	sz = msg_data_sz(msg);
	err = msg_errcode(msg);

	/* Discard an empty non-errored message & try again */

	if ((!sz) && (!err)) {
		advance_queue(tsock);
		goto restart;
	}

	/* Optionally capture sender's address & ancillary data of first msg */

	if (sz_copied == 0) {
		set_orig_addr(m, msg);
		if ((res = anc_data_recv(m, msg, tsock->p)))
			goto exit;
	}

	/* Capture message data (if valid) & compute return value (always) */

	if (!err) {
		/* handle marks how far into this buffer we have already read */
		buf_crs = (unsigned char *)(TIPC_SKB_CB(buf)->handle);
		sz = buf->tail - buf_crs;

		needed = (buf_len - sz_copied);
		sz_to_copy = (sz <= needed) ? sz : needed;
		if (unlikely(copy_to_user(crs, buf_crs, sz_to_copy))) {
			res = -EFAULT;
			goto exit;
		}
		sz_copied += sz_to_copy;

		/* buffer only partially consumed: remember position, stop */
		if (sz_to_copy < sz) {
			if (!(flags & MSG_PEEK))
				TIPC_SKB_CB(buf)->handle = buf_crs + sz_to_copy;
			goto exit;
		}

		crs += sz_to_copy;
	} else {
		if (sz_copied != 0)
			goto exit; /* can't add error msg to valid data */

		if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
			res = 0;
		else
			res = -ECONNRESET;
	}

	/* Consume received message (optional) */

	if (likely(!(flags & MSG_PEEK))) {
		/* acknowledge in batches to keep connection flow control open */
		if (unlikely(++tsock->p->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
			tipc_acknowledge(tsock->p->ref, tsock->p->conn_unacked);
		advance_queue(tsock);
	}

	/* Loop around if more data is required */

	if ((sz_copied < buf_len)	/* didn't get all requested data */
	    && (flags & MSG_WAITALL)	/* ... and need to wait for more */
	    && (!(flags & MSG_PEEK))	/* ... and aren't just peeking at data */
	    && (!err)			/* ... and haven't reached a FIN */
	    )
		goto restart;

exit:
	up(&tsock->sem);
	return res ? res : sz_copied;
}
1058
1059/**
1060 * queue_overloaded - test if queue overload condition exists
1061 * @queue_size: current size of queue
1062 * @base: nominal maximum size of queue
1063 * @msg: message to be added to queue
1064 *
1065 * Returns 1 if queue is currently overloaded, 0 otherwise
1066 */
1067
1068static int queue_overloaded(u32 queue_size, u32 base, struct tipc_msg *msg)
1069{
1070 u32 threshold;
1071 u32 imp = msg_importance(msg);
1072
1073 if (imp == TIPC_LOW_IMPORTANCE)
1074 threshold = base;
1075 else if (imp == TIPC_MEDIUM_IMPORTANCE)
1076 threshold = base * 2;
1077 else if (imp == TIPC_HIGH_IMPORTANCE)
1078 threshold = base * 100;
1079 else
1080 return 0;
1081
1082 if (msg_connected(msg))
1083 threshold *= 4;
1084
1085 return (queue_size > threshold);
1086}
1087
1088/**
1089 * async_disconnect - wrapper function used to disconnect port
1090 * @portref: TIPC port reference (passed as pointer-sized value)
1091 */
1092
1093static void async_disconnect(unsigned long portref)
1094{
1095 tipc_disconnect((u32)portref);
1096}
1097
/**
 * dispatch - handle arriving message
 * @tport: TIPC port that received message
 * @buf: message
 *
 * Called with port locked. Must not take socket lock to avoid deadlock risk.
 *
 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
 */

static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
{
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_sock *tsock = (struct tipc_sock *)tport->usr_handle;
	struct socket *sock;
	u32 recv_q_len;

	/* Reject message if socket is closing */

	if (!tsock)
		return TIPC_ERR_NO_PORT;

	/* Reject message if it is wrong sort of message for socket */

	/*
	 * WOULD IT BE BETTER TO JUST DISCARD THESE MESSAGES INSTEAD?
	 * "NO PORT" ISN'T REALLY THE RIGHT ERROR CODE, AND THERE MAY
	 * BE SECURITY IMPLICATIONS INHERENT IN REJECTING INVALID TRAFFIC
	 */
	sock = tsock->sk.sk_socket;
	if (sock->state == SS_READY) {
		/* SS_READY socket: connection-oriented msgs are not valid */
		if (msg_connected(msg)) {
			msg_dbg(msg, "dispatch filter 1\n");
			return TIPC_ERR_NO_PORT;
		}
	} else {
		/* Connection-oriented socket: multicast msgs never valid */
		if (msg_mcast(msg)) {
			msg_dbg(msg, "dispatch filter 2\n");
			return TIPC_ERR_NO_PORT;
		}
		if (sock->state == SS_CONNECTED) {
			/* Established connection accepts only connection msgs */
			if (!msg_connected(msg)) {
				msg_dbg(msg, "dispatch filter 3\n");
				return TIPC_ERR_NO_PORT;
			}
		}
		else if (sock->state == SS_CONNECTING) {
			/* Awaiting 'ACK': accept connection msgs or errors */
			if (!msg_connected(msg) && (msg_errcode(msg) == 0)) {
				msg_dbg(msg, "dispatch filter 4\n");
				return TIPC_ERR_NO_PORT;
			}
		}
		else if (sock->state == SS_LISTENING) {
			/* Listener accepts only connection setup requests */
			if (msg_connected(msg) || msg_errcode(msg)) {
				msg_dbg(msg, "dispatch filter 5\n");
				return TIPC_ERR_NO_PORT;
			}
		}
		else if (sock->state == SS_DISCONNECTING) {
			/* Connection is going down; accept nothing */
			msg_dbg(msg, "dispatch filter 6\n");
			return TIPC_ERR_NO_PORT;
		}
		else /* (sock->state == SS_UNCONNECTED) */ {
			if (msg_connected(msg) || msg_errcode(msg)) {
				msg_dbg(msg, "dispatch filter 7\n");
				return TIPC_ERR_NO_PORT;
			}
		}
	}

	/* Reject message if there isn't room to queue it */

	/* First gate: global count of queued messages across all sockets */
	if (unlikely((u32)atomic_read(&tipc_queue_size) >
		     OVERLOAD_LIMIT_BASE)) {
		if (queue_overloaded(atomic_read(&tipc_queue_size),
				     OVERLOAD_LIMIT_BASE, msg))
			return TIPC_ERR_OVERLOAD;
	}
	/* Second gate: this socket's own receive queue length */
	recv_q_len = skb_queue_len(&tsock->sk.sk_receive_queue);
	if (unlikely(recv_q_len > (OVERLOAD_LIMIT_BASE / 2))) {
		if (queue_overloaded(recv_q_len,
				     OVERLOAD_LIMIT_BASE / 2, msg))
			return TIPC_ERR_OVERLOAD;
	}

	/* Initiate connection termination for an incoming 'FIN' */

	if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) {
		sock->state = SS_DISCONNECTING;
		/* Note: Use signal since port lock is already taken! */
		k_signal((Handler)async_disconnect, tport->ref);
	}

	/* Enqueue message (finally!) */

	/* handle marks the start of unread data; recv_stream advances it */
	msg_dbg(msg,"<DISP<: ");
	TIPC_SKB_CB(buf)->handle = msg_data(msg);
	atomic_inc(&tipc_queue_size);
	skb_queue_tail(&sock->sk->sk_receive_queue, buf);

	wake_up_interruptible(sock->sk->sk_sleep);
	return TIPC_OK;
}
1201
1202/**
1203 * wakeupdispatch - wake up port after congestion
1204 * @tport: port to wakeup
1205 *
1206 * Called with port lock on.
1207 */
1208
1209static void wakeupdispatch(struct tipc_port *tport)
1210{
1211 struct tipc_sock *tsock = (struct tipc_sock *)tport->usr_handle;
1212
1213 wake_up_interruptible(tsock->sk.sk_sleep);
1214}
1215
/**
 * connect - establish a connection to another TIPC port
 * @sock: socket structure
 * @dest: socket address for destination port
 * @destlen: size of socket address data structure
 * @flags: (unused)
 *
 * Returns 0 on success, errno otherwise
 */

static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
		   int flags)
{
	struct tipc_sock *tsock = tipc_sk(sock->sk);
	struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
	struct msghdr m = {0,};
	struct sk_buff *buf;
	struct tipc_msg *msg;
	int res;

	/* For now, TIPC does not allow use of connect() with DGRAM or RDM types */

	if (sock->state == SS_READY)
		return -EOPNOTSUPP;

	/* MOVE THE REST OF THIS ERROR CHECKING TO send_msg()? */
	if (sock->state == SS_LISTENING)
		return -EOPNOTSUPP;
	if (sock->state == SS_CONNECTING)
		return -EALREADY;
	if (sock->state != SS_UNCONNECTED)
		return -EISCONN;

	/* Destination must be a TIPC name or a TIPC port identifier */

	if ((dst->family != AF_TIPC) ||
	    ((dst->addrtype != TIPC_ADDR_NAME) && (dst->addrtype != TIPC_ADDR_ID)))
		return -EINVAL;

	/* Send a 'SYN-' to destination */

	/* (a dataless send_msg() to the destination acts as the request) */
	m.msg_name = dest;
	if ((res = send_msg(0, sock, &m, 0)) < 0) {
		sock->state = SS_DISCONNECTING;
		return res;
	}

	if (down_interruptible(&tsock->sem))
		return -ERESTARTSYS;

	/* Wait for destination's 'ACK' response */

	/* res > 0: message arrived; res == 0: timed out; res < 0: signal */
	res = wait_event_interruptible_timeout(*sock->sk->sk_sleep,
			skb_queue_len(&sock->sk->sk_receive_queue),
			sock->sk->sk_rcvtimeo);
	buf = skb_peek(&sock->sk->sk_receive_queue);
	if (res > 0) {
		msg = buf_msg(buf);
		res = auto_connect(sock, tsock, msg);
		if (!res) {
			/* Record peer name for connection bookkeeping */
			if (dst->addrtype == TIPC_ADDR_NAME) {
				tsock->p->conn_type = dst->addr.name.name.type;
				tsock->p->conn_instance = dst->addr.name.name.instance;
			}
			/* Discard a dataless 'ACK'; a data-bearing reply is
			   left queued for the first receive on the socket */
			if (!msg_data_sz(msg))
				advance_queue(tsock);
		}
	} else {
		if (res == 0) {
			res = -ETIMEDOUT;
		} else
			{ /* leave "res" unchanged */ }
		sock->state = SS_DISCONNECTING;
	}

	up(&tsock->sem);
	return res;
}
1292
1293/**
1294 * listen - allow socket to listen for incoming connections
1295 * @sock: socket structure
1296 * @len: (unused)
1297 *
1298 * Returns 0 on success, errno otherwise
1299 */
1300
1301static int listen(struct socket *sock, int len)
1302{
1303 /* REQUIRES SOCKET LOCKING OF SOME SORT? */
1304
1305 if (sock->state == SS_READY)
1306 return -EOPNOTSUPP;
1307 if (sock->state != SS_UNCONNECTED)
1308 return -EINVAL;
1309 sock->state = SS_LISTENING;
1310 return 0;
1311}
1312
/**
 * accept - wait for connection request
 * @sock: listening socket
 * @newsock: new socket that is to be connected
 * @flags: file-related flags associated with socket
 *
 * Returns 0 on success, errno otherwise
 */

static int accept(struct socket *sock, struct socket *newsock, int flags)
{
	struct tipc_sock *tsock = tipc_sk(sock->sk);
	struct sk_buff *buf;
	int res = -EFAULT;

	if (sock->state == SS_READY)
		return -EOPNOTSUPP;
	if (sock->state != SS_LISTENING)
		return -EINVAL;

	/* Non-blocking accept fails immediately if no 'SYN' is queued */

	if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
		     (flags & O_NONBLOCK)))
		return -EWOULDBLOCK;

	if (down_interruptible(&tsock->sem))
		return -ERESTARTSYS;

	/* Block until a connection request arrives on the listener */

	if (wait_event_interruptible(*sock->sk->sk_sleep,
				     skb_queue_len(&sock->sk->sk_receive_queue))) {
		res = -ERESTARTSYS;
		goto exit;
	}
	buf = skb_peek(&sock->sk->sk_receive_queue);

	res = tipc_create(newsock, 0);
	if (!res) {
		struct tipc_sock *new_tsock = tipc_sk(newsock->sk);
		struct tipc_portid id;
		struct tipc_msg *msg = buf_msg(buf);
		u32 new_ref = new_tsock->p->ref;

		/* Connect the new socket to the peer named in the 'SYN' */

		id.ref = msg_origport(msg);
		id.node = msg_orignode(msg);
		tipc_connect2port(new_ref, &id);
		newsock->state = SS_CONNECTED;

		/* New connection inherits importance & name info from 'SYN' */

		tipc_set_portimportance(new_ref, msg_importance(msg));
		if (msg_named(msg)) {
			new_tsock->p->conn_type = msg_nametype(msg);
			new_tsock->p->conn_instance = msg_nameinst(msg);
		}

		/*
		 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
		 * Respond to 'SYN+' by queuing it on new socket.
		 */

		msg_dbg(msg,"<ACC<: ");
		if (!msg_data_sz(msg)) {
			struct msghdr m = {0,};

			send_packet(0, newsock, &m, 0);
			advance_queue(tsock);
		} else {
			/* Move the data-bearing 'SYN+' from the listener's
			   queue to the new socket's queue under lock */
			sock_lock(tsock);
			skb_dequeue(&sock->sk->sk_receive_queue);
			sock_unlock(tsock);
			skb_queue_head(&newsock->sk->sk_receive_queue, buf);
		}
	}
exit:
	up(&tsock->sem);
	return res;
}
1387
/**
 * shutdown - shutdown socket connection
 * @sock: socket structure
 * @how: direction to close (always treated as read + write)
 *
 * Terminates connection (if necessary), then purges socket's receive queue.
 *
 * Returns 0 on success, errno otherwise
 */

static int shutdown(struct socket *sock, int how)
{
	struct tipc_sock* tsock = tipc_sk(sock->sk);
	struct sk_buff *buf;
	int res;

	/* Could return -EINVAL for an invalid "how", but why bother? */

	if (down_interruptible(&tsock->sem))
		return -ERESTARTSYS;

	sock_lock(tsock);

	switch (sock->state) {
	case SS_CONNECTED:

		/* Send 'FIN+' or 'FIN-' message to peer */

		sock_unlock(tsock);
restart:
		if ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) {
			atomic_dec(&tipc_queue_size);
			/* A partially-read message (handle advanced past the
			   start of the data by recv_stream) is just dropped;
			   keep looking for an intact one to reject */
			if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf))) {
				buf_discard(buf);
				goto restart;
			}
			/* Rejecting an unread message notifies the peer */
			tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
		}
		else {
			/* Queue empty: shut the port down explicitly */
			tipc_shutdown(tsock->p->ref);
		}
		sock_lock(tsock);

		/* fall through */

	case SS_DISCONNECTING:

		/* Discard any unreceived messages */

		while ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) {
			atomic_dec(&tipc_queue_size);
			buf_discard(buf);
		}
		tsock->p->conn_unacked = 0;

		/* fall through */

	case SS_CONNECTING:
		sock->state = SS_DISCONNECTING;
		res = 0;
		break;

	default:
		res = -ENOTCONN;
	}

	sock_unlock(tsock);

	up(&tsock->sem);
	return res;
}
1459
1460/**
1461 * setsockopt - set socket option
1462 * @sock: socket structure
1463 * @lvl: option level
1464 * @opt: option identifier
1465 * @ov: pointer to new option value
1466 * @ol: length of option value
1467 *
1468 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
1469 * (to ease compatibility).
1470 *
1471 * Returns 0 on success, errno otherwise
1472 */
1473
1474static int setsockopt(struct socket *sock, int lvl, int opt, char *ov, int ol)
1475{
1476 struct tipc_sock *tsock = tipc_sk(sock->sk);
1477 u32 value;
1478 int res;
1479
1480 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
1481 return 0;
1482 if (lvl != SOL_TIPC)
1483 return -ENOPROTOOPT;
1484 if (ol < sizeof(value))
1485 return -EINVAL;
1486 if ((res = get_user(value, (u32 *)ov)))
1487 return res;
1488
1489 if (down_interruptible(&tsock->sem))
1490 return -ERESTARTSYS;
1491
1492 switch (opt) {
1493 case TIPC_IMPORTANCE:
1494 res = tipc_set_portimportance(tsock->p->ref, value);
1495 break;
1496 case TIPC_SRC_DROPPABLE:
1497 if (sock->type != SOCK_STREAM)
1498 res = tipc_set_portunreliable(tsock->p->ref, value);
1499 else
1500 res = -ENOPROTOOPT;
1501 break;
1502 case TIPC_DEST_DROPPABLE:
1503 res = tipc_set_portunreturnable(tsock->p->ref, value);
1504 break;
1505 case TIPC_CONN_TIMEOUT:
1506 sock->sk->sk_rcvtimeo = (value * HZ / 1000);
1507 break;
1508 default:
1509 res = -EINVAL;
1510 }
1511
1512 up(&tsock->sem);
1513 return res;
1514}
1515
1516/**
1517 * getsockopt - get socket option
1518 * @sock: socket structure
1519 * @lvl: option level
1520 * @opt: option identifier
1521 * @ov: receptacle for option value
1522 * @ol: receptacle for length of option value
1523 *
1524 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
1525 * (to ease compatibility).
1526 *
1527 * Returns 0 on success, errno otherwise
1528 */
1529
1530static int getsockopt(struct socket *sock, int lvl, int opt, char *ov, int *ol)
1531{
1532 struct tipc_sock *tsock = tipc_sk(sock->sk);
1533 int len;
1534 u32 value;
1535 int res;
1536
1537 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
1538 return put_user(0, ol);
1539 if (lvl != SOL_TIPC)
1540 return -ENOPROTOOPT;
1541 if ((res = get_user(len, ol)))
1542 return res;
1543
1544 if (down_interruptible(&tsock->sem))
1545 return -ERESTARTSYS;
1546
1547 switch (opt) {
1548 case TIPC_IMPORTANCE:
1549 res = tipc_portimportance(tsock->p->ref, &value);
1550 break;
1551 case TIPC_SRC_DROPPABLE:
1552 res = tipc_portunreliable(tsock->p->ref, &value);
1553 break;
1554 case TIPC_DEST_DROPPABLE:
1555 res = tipc_portunreturnable(tsock->p->ref, &value);
1556 break;
1557 case TIPC_CONN_TIMEOUT:
1558 value = (sock->sk->sk_rcvtimeo * 1000) / HZ;
1559 break;
1560 default:
1561 res = -EINVAL;
1562 }
1563
1564 if (res) {
1565 /* "get" failed */
1566 }
1567 else if (len < sizeof(value)) {
1568 res = -EINVAL;
1569 }
1570 else if ((res = copy_to_user(ov, &value, sizeof(value)))) {
1571 /* couldn't return value */
1572 }
1573 else {
1574 res = put_user(sizeof(value), ol);
1575 }
1576
1577 up(&tsock->sem);
1578 return res;
1579}
1580
/**
 * Placeholders for non-implemented functionality
 *
 * Returns error code (POSIX-compliant where defined)
 */

/* No socket ioctls are implemented */
static int ioctl(struct socket *s, u32 cmd, unsigned long arg)
{
	return -EINVAL;
}
1591
/* Memory-mapping of sockets is not supported */
static int no_mmap(struct file *file, struct socket *sock,
		   struct vm_area_struct *vma)
{
	return -EINVAL;
}
/* sendpage() is not supported */
static ssize_t no_sendpage(struct socket *sock, struct page *page,
			   int offset, size_t size, int flags)
{
	return -EINVAL;
}
1602
/* socketpair() is not supported */
static int no_skpair(struct socket *s1, struct socket *s2)
{
	return -EOPNOTSUPP;
}
1607
/**
 * Protocol switches for the various types of TIPC sockets
 */

/* Ops table routing sendmsg/recvmsg to send_msg()/recv_msg() */
static struct proto_ops msg_ops = {
	.owner = THIS_MODULE,
	.family = AF_TIPC,
	.release = release,
	.bind = bind,
	.connect = connect,
	.socketpair = no_skpair,
	.accept = accept,
	.getname = get_name,
	.poll = poll,
	.ioctl = ioctl,
	.listen = listen,
	.shutdown = shutdown,
	.setsockopt = setsockopt,
	.getsockopt = getsockopt,
	.sendmsg = send_msg,
	.recvmsg = recv_msg,
	.mmap = no_mmap,
	.sendpage = no_sendpage
};
1632
/* Ops table routing sendmsg to send_packet() (recvmsg is recv_msg()) */
static struct proto_ops packet_ops = {
	.owner = THIS_MODULE,
	.family = AF_TIPC,
	.release = release,
	.bind = bind,
	.connect = connect,
	.socketpair = no_skpair,
	.accept = accept,
	.getname = get_name,
	.poll = poll,
	.ioctl = ioctl,
	.listen = listen,
	.shutdown = shutdown,
	.setsockopt = setsockopt,
	.getsockopt = getsockopt,
	.sendmsg = send_packet,
	.recvmsg = recv_msg,
	.mmap = no_mmap,
	.sendpage = no_sendpage
};
1653
/* Ops table routing sendmsg/recvmsg to send_stream()/recv_stream() */
static struct proto_ops stream_ops = {
	.owner = THIS_MODULE,
	.family = AF_TIPC,
	.release = release,
	.bind = bind,
	.connect = connect,
	.socketpair = no_skpair,
	.accept = accept,
	.getname = get_name,
	.poll = poll,
	.ioctl = ioctl,
	.listen = listen,
	.shutdown = shutdown,
	.setsockopt = setsockopt,
	.getsockopt = getsockopt,
	.sendmsg = send_stream,
	.recvmsg = recv_stream,
	.mmap = no_mmap,
	.sendpage = no_sendpage
};
1674
/* AF_TIPC family handler, registered via sock_register() in socket_init() */
static struct net_proto_family tipc_family_ops = {
	.owner = THIS_MODULE,
	.family = AF_TIPC,
	.create = tipc_create
};
1680
/* Protocol descriptor registered in socket_init(); obj_size covers struct tipc_sock */
static struct proto tipc_proto = {
	.name = "TIPC",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct tipc_sock)
};
1686
1687/**
1688 * socket_init - initialize TIPC socket interface
1689 *
1690 * Returns 0 on success, errno otherwise
1691 */
1692int socket_init(void)
1693{
1694 int res;
1695
1696 res = proto_register(&tipc_proto, 1);
1697 if (res) {
1698 err("Failed to register TIPC protocol type\n");
1699 goto out;
1700 }
1701
1702 res = sock_register(&tipc_family_ops);
1703 if (res) {
1704 err("Failed to register TIPC socket type\n");
1705 proto_unregister(&tipc_proto);
1706 goto out;
1707 }
1708
1709 sockets_enabled = 1;
1710 out:
1711 return res;
1712}
1713
1714/**
1715 * sock_stop - stop TIPC socket interface
1716 */
1717void socket_stop(void)
1718{
1719 if (!sockets_enabled)
1720 return;
1721
1722 sockets_enabled = 0;
1723 sock_unregister(tipc_family_ops.family);
1724 proto_unregister(&tipc_proto);
1725}
1726
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
new file mode 100644
index 000000000000..80e219ba527d
--- /dev/null
+++ b/net/tipc/subscr.c
@@ -0,0 +1,527 @@
1/*
2 * net/tipc/subscr.c: TIPC subscription service
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "dbg.h"
39#include "subscr.h"
40#include "name_table.h"
41#include "ref.h"
42
/**
 * struct subscriber - TIPC network topology subscriber
 * @ref: object reference to subscriber object itself
 * @lock: pointer to spinlock controlling access to subscriber object
 *	(lock storage is supplied by the object reference table via
 *	ref_acquire() -- see subscr_named_msg_event())
 * @subscriber_list: adjacent subscribers in top. server's list of subscribers
 * @subscription_list: list of subscription objects for this subscriber
 * @port_ref: object reference to port used to communicate with subscriber
 * @swap: indicates if subscriber uses opposite endianness in its messages
 */

struct subscriber {
	u32 ref;
	spinlock_t *lock;
	struct list_head subscriber_list;
	struct list_head subscription_list;
	u32 port_ref;
	int swap;
};
61
/**
 * struct top_srv - TIPC network topology subscription service
 * @user_ref: TIPC userid of subscription service
 * @setup_port: reference to TIPC port that handles subscription requests
 * @subscription_count: number of active subscriptions (not subscribers!)
 * @subscriber_list: list of ports subscribing to service
 * @lock: spinlock governing access to subscriber list
 */

struct top_srv {
	u32 user_ref;
	u32 setup_port;
	atomic_t subscription_count;
	struct list_head subscriber_list;
	spinlock_t lock;
};
78
/* The single, node-global instance of the topology service */
static struct top_srv topsrv = { 0 };
80
81/**
82 * htohl - convert value to endianness used by destination
83 * @in: value to convert
84 * @swap: non-zero if endianness must be reversed
85 *
86 * Returns converted value
87 */
88
89static inline u32 htohl(u32 in, int swap)
90{
91 char *c = (char *)&in;
92
93 return swap ? ((c[3] << 3) + (c[2] << 2) + (c[1] << 1) + c[0]) : in;
94}
95
96/**
97 * subscr_send_event - send a message containing a tipc_event to the subscriber
98 */
99
100static void subscr_send_event(struct subscription *sub,
101 u32 found_lower,
102 u32 found_upper,
103 u32 event,
104 u32 port_ref,
105 u32 node)
106{
107 struct iovec msg_sect;
108
109 msg_sect.iov_base = (void *)&sub->evt;
110 msg_sect.iov_len = sizeof(struct tipc_event);
111
112 sub->evt.event = htohl(event, sub->owner->swap);
113 sub->evt.found_lower = htohl(found_lower, sub->owner->swap);
114 sub->evt.found_upper = htohl(found_upper, sub->owner->swap);
115 sub->evt.port.ref = htohl(port_ref, sub->owner->swap);
116 sub->evt.port.node = htohl(node, sub->owner->swap);
117 tipc_send(sub->owner->port_ref, 1, &msg_sect);
118}
119
120/**
121 * subscr_overlap - test for subscription overlap with the given values
122 *
123 * Returns 1 if there is overlap, otherwise 0.
124 */
125
126int subscr_overlap(struct subscription *sub,
127 u32 found_lower,
128 u32 found_upper)
129
130{
131 if (found_lower < sub->seq.lower)
132 found_lower = sub->seq.lower;
133 if (found_upper > sub->seq.upper)
134 found_upper = sub->seq.upper;
135 if (found_lower > found_upper)
136 return 0;
137 return 1;
138}
139
140/**
141 * subscr_report_overlap - issue event if there is subscription overlap
142 *
143 * Protected by nameseq.lock in name_table.c
144 */
145
146void subscr_report_overlap(struct subscription *sub,
147 u32 found_lower,
148 u32 found_upper,
149 u32 event,
150 u32 port_ref,
151 u32 node,
152 int must)
153{
154 dbg("Rep overlap %u:%u,%u<->%u,%u\n", sub->seq.type, sub->seq.lower,
155 sub->seq.upper, found_lower, found_upper);
156 if (!subscr_overlap(sub, found_lower, found_upper))
157 return;
158 if (!must && (sub->filter != TIPC_SUB_PORTS))
159 return;
160 subscr_send_event(sub, found_lower, found_upper, event, port_ref, node);
161}
162
/**
 * subscr_timeout - subscription timeout has occurred
 *
 * Runs as the subscription's timer handler.  Notifies the subscriber of
 * the timeout and destroys the subscription.
 */

static void subscr_timeout(struct subscription *sub)
{
	struct subscriber *subscriber;
	u32 subscriber_ref;

	/* Validate subscriber reference (in case subscriber is terminating) */

	subscriber_ref = sub->owner->ref;
	subscriber = (struct subscriber *)ref_lock(subscriber_ref);
	if (subscriber == NULL)
		return;

	/* Unlink subscription from name table */

	nametbl_unsubscribe(sub);

	/* Notify subscriber of timeout, then unlink subscription */

	/* NOTE(review): evt.s holds the raw request values, already in the
	 * subscriber's byte order, yet subscr_send_event() will run them
	 * through htohl() again when swap != 0 -- verify this doesn't
	 * double-swap the reported range for opposite-endian subscribers.
	 */
	subscr_send_event(sub,
			  sub->evt.s.seq.lower,
			  sub->evt.s.seq.upper,
			  TIPC_SUBSCR_TIMEOUT,
			  0,
			  0);
	list_del(&sub->subscription_list);

	/* Now destroy subscription */

	ref_unlock(subscriber_ref);
	k_term_timer(&sub->timer);
	kfree(sub);
	atomic_dec(&topsrv.subscription_count);
}
200
/**
 * subscr_terminate - terminate communication with a subscriber
 *
 * Called with subscriber locked. Routine must temporarily release this lock
 * to enable subscription timeout routine(s) to finish without deadlocking;
 * the lock is then reclaimed to allow caller to release it upon return.
 * (This should work even in the unlikely event some other thread creates
 * a new object reference in the interim that uses this lock; this routine will
 * simply wait for it to be released, then claim it.)
 */

static void subscr_terminate(struct subscriber *subscriber)
{
	struct subscription *sub;
	struct subscription *sub_temp;

	/* Invalidate subscriber reference */

	ref_discard(subscriber->ref);
	spin_unlock_bh(subscriber->lock);

	/* Destroy any existing subscriptions for subscriber */

	list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
				 subscription_list) {
		/* Stop & tear down the timer first so subscr_timeout()
		   cannot fire against a subscription being freed */
		if (sub->timeout != TIPC_WAIT_FOREVER) {
			k_cancel_timer(&sub->timer);
			k_term_timer(&sub->timer);
		}
		nametbl_unsubscribe(sub);
		list_del(&sub->subscription_list);
		dbg("Term: Removed sub %u,%u,%u from subscriber %x list\n",
		    sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber);
		kfree(sub);
		atomic_dec(&topsrv.subscription_count);
	}

	/* Sever connection to subscriber */

	tipc_shutdown(subscriber->port_ref);
	tipc_deleteport(subscriber->port_ref);

	/* Remove subscriber from topology server's subscriber list */

	spin_lock_bh(&topsrv.lock);
	list_del(&subscriber->subscriber_list);
	spin_unlock_bh(&topsrv.lock);

	/* Now destroy subscriber */

	/* Reclaim the lock (whose storage outlives the subscriber struct)
	   so the caller's subsequent unlock stays balanced */
	spin_lock_bh(subscriber->lock);
	kfree(subscriber);
}
254
/**
 * subscr_subscribe - create subscription for subscriber
 *
 * Called with subscriber locked.  On any failure the entire subscriber
 * connection is terminated (via subscr_terminate()).
 */

static void subscr_subscribe(struct tipc_subscr *s,
			     struct subscriber *subscriber)
{
	struct subscription *sub;

	/* Refuse subscription if global limit exceeded */

	if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) {
		warn("Failed: max %u subscriptions\n", tipc_max_subscriptions);
		subscr_terminate(subscriber);
		return;
	}

	/* Allocate subscription object */

	sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
	if (sub == NULL) {
		warn("Memory squeeze; ignoring subscription\n");
		subscr_terminate(subscriber);
		return;
	}

	/* Determine/update subscriber's endianness */

	/* Heuristic: the filter field can only hold one of two small
	   constants, so a value that doesn't match in native byte order
	   means the request (and subscriber) is byte-swapped */
	if ((s->filter == TIPC_SUB_PORTS) || (s->filter == TIPC_SUB_SERVICE))
		subscriber->swap = 0;
	else
		subscriber->swap = 1;

	/* Initialize subscription object */

	memset(sub, 0, sizeof(*sub));
	sub->seq.type = htohl(s->seq.type, subscriber->swap);
	sub->seq.lower = htohl(s->seq.lower, subscriber->swap);
	sub->seq.upper = htohl(s->seq.upper, subscriber->swap);
	sub->timeout = htohl(s->timeout, subscriber->swap);
	sub->filter = htohl(s->filter, subscriber->swap);
	if ((((sub->filter != TIPC_SUB_PORTS)
	      && (sub->filter != TIPC_SUB_SERVICE)))
	    || (sub->seq.lower > sub->seq.upper)) {
		warn("Rejecting illegal subscription %u,%u,%u\n",
		     sub->seq.type, sub->seq.lower, sub->seq.upper);
		kfree(sub);
		subscr_terminate(subscriber);
		return;
	}
	/* Keep a copy of the raw request for echoing back in events */
	memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr));
	INIT_LIST_HEAD(&sub->subscription_list);
	INIT_LIST_HEAD(&sub->nameseq_list);
	list_add(&sub->subscription_list, &subscriber->subscription_list);
	atomic_inc(&topsrv.subscription_count);
	if (sub->timeout != TIPC_WAIT_FOREVER) {
		k_init_timer(&sub->timer,
			     (Handler)subscr_timeout, (unsigned long)sub);
		k_start_timer(&sub->timer, sub->timeout);
	}
	sub->owner = subscriber;
	nametbl_subscribe(sub);
}
320
/**
 * subscr_conn_shutdown_event - handle termination request from subscriber
 */

static void subscr_conn_shutdown_event(void *usr_handle,
				       u32 portref,
				       struct sk_buff **buf,
				       unsigned char const *data,
				       unsigned int size,
				       int reason)
{
	struct subscriber *subscriber;
	spinlock_t *subscriber_lock;

	/* usr_handle carries the subscriber's reference; a NULL result
	   means the subscriber is already being terminated */
	subscriber = ref_lock((u32)(unsigned long)usr_handle);
	if (subscriber == NULL)
		return;

	/* Save the lock pointer first: subscr_terminate() frees
	   "subscriber", so it can't be dereferenced for the unlock */
	subscriber_lock = subscriber->lock;
	subscr_terminate(subscriber);
	spin_unlock_bh(subscriber_lock);
}
343
/**
 * subscr_conn_msg_event - handle new subscription request from subscriber
 */

static void subscr_conn_msg_event(void *usr_handle,
				  u32 port_ref,
				  struct sk_buff **buf,
				  const unchar *data,
				  u32 size)
{
	struct subscriber *subscriber;
	spinlock_t *subscriber_lock;

	/* Validate subscriber reference (subscriber may be terminating) */
	subscriber = ref_lock((u32)(unsigned long)usr_handle);
	if (subscriber == NULL)
		return;

	/* Save lock pointer first: both branches below may free
	   "subscriber" via subscr_terminate() */
	subscriber_lock = subscriber->lock;
	if (size != sizeof(struct tipc_subscr))
		subscr_terminate(subscriber);
	else
		subscr_subscribe((struct tipc_subscr *)data, subscriber);

	spin_unlock_bh(subscriber_lock);
}
369
/**
 * subscr_named_msg_event - handle request to establish a new subscriber
 */

static void subscr_named_msg_event(void *usr_handle,
				   u32 port_ref,
				   struct sk_buff **buf,
				   const unchar *data,
				   u32 size,
				   u32 importance,
				   struct tipc_portid const *orig,
				   struct tipc_name_seq const *dest)
{
	struct subscriber *subscriber;
	struct iovec msg_sect = {0, 0};
	spinlock_t *subscriber_lock;

	dbg("subscr_named_msg_event: orig = %x own = %x,\n",
	    orig->node, tipc_own_addr);
	/* A setup message is either empty or carries one subscription */
	if (size && (size != sizeof(struct tipc_subscr))) {
		warn("Received tipc_subscr of invalid size\n");
		return;
	}

	/* Create subscriber object */

	subscriber = kmalloc(sizeof(struct subscriber), GFP_ATOMIC);
	if (subscriber == NULL) {
		warn("Memory squeeze; ignoring subscriber setup\n");
		return;
	}
	memset(subscriber, 0, sizeof(struct subscriber));
	INIT_LIST_HEAD(&subscriber->subscription_list);
	INIT_LIST_HEAD(&subscriber->subscriber_list);
	/* Reference table supplies both the handle and the lock storage */
	subscriber->ref = ref_acquire(subscriber, &subscriber->lock);
	if (subscriber->ref == 0) {
		warn("Failed to acquire subscriber reference\n");
		kfree(subscriber);
		return;
	}

	/* Establish a connection to subscriber */

	/* The subscriber's reference (not a pointer) becomes the new port's
	   user handle, so callbacks can re-validate it via ref_lock() */
	tipc_createport(topsrv.user_ref,
			(void *)(unsigned long)subscriber->ref,
			importance,
			0,
			0,
			subscr_conn_shutdown_event,
			0,
			0,
			subscr_conn_msg_event,
			0,
			&subscriber->port_ref);
	if (subscriber->port_ref == 0) {
		warn("Memory squeeze; failed to create subscription port\n");
		ref_discard(subscriber->ref);
		kfree(subscriber);
		return;
	}
	tipc_connect2port(subscriber->port_ref, orig);


	/* Add subscriber to topology server's subscriber list */

	ref_lock(subscriber->ref);
	spin_lock_bh(&topsrv.lock);
	list_add(&subscriber->subscriber_list, &topsrv.subscriber_list);
	spin_unlock_bh(&topsrv.lock);

	/*
	 * Subscribe now if message contains a subscription,
	 * otherwise send an empty response to complete connection handshaking
	 */

	/* Save lock pointer: subscr_subscribe() may free "subscriber" */
	subscriber_lock = subscriber->lock;
	if (size)
		subscr_subscribe((struct tipc_subscr *)data, subscriber);
	else
		tipc_send(subscriber->port_ref, 1, &msg_sect);

	spin_unlock_bh(subscriber_lock);
}
453
/**
 * subscr_start - start the topology subscription service
 *
 * Returns 0 on success, a non-zero error value otherwise
 */

int subscr_start(void)
{
	struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV};
	int res = -1;

	memset(&topsrv, 0, sizeof (topsrv));
	topsrv.lock = SPIN_LOCK_UNLOCKED;
	INIT_LIST_HEAD(&topsrv.subscriber_list);

	/* Hold the service lock throughout setup so no request can reach
	   a half-initialized service */
	spin_lock_bh(&topsrv.lock);
	res = tipc_attach(&topsrv.user_ref, 0, 0);
	if (res) {
		spin_unlock_bh(&topsrv.lock);
		return res;
	}

	/* Create the port that fields subscriber setup requests */
	res = tipc_createport(topsrv.user_ref,
			      0,
			      TIPC_CRITICAL_IMPORTANCE,
			      0,
			      0,
			      0,
			      0,
			      subscr_named_msg_event,
			      0,
			      0,
			      &topsrv.setup_port);
	if (res)
		goto failed;

	/* Publish the well-known topology service name for that port */
	res = nametbl_publish_rsv(topsrv.setup_port, TIPC_NODE_SCOPE, &seq);
	if (res)
		goto failed;

	spin_unlock_bh(&topsrv.lock);
	return 0;

failed:
	err("Failed to create subscription service\n");
	tipc_detach(topsrv.user_ref);
	topsrv.user_ref = 0;
	spin_unlock_bh(&topsrv.lock);
	return res;
}
498
/**
 * subscr_stop - stop the topology subscription service
 *
 * Terminates all remaining subscribers, then detaches the service's
 * TIPC user.  A no-op if the service was never started.
 */

void subscr_stop(void)
{
	struct subscriber *subscriber;
	struct subscriber *subscriber_temp;
	spinlock_t *subscriber_lock;

	if (topsrv.user_ref) {
		tipc_deleteport(topsrv.setup_port);
		list_for_each_entry_safe(subscriber, subscriber_temp,
					 &topsrv.subscriber_list,
					 subscriber_list) {
			/* Lock via reference; save the lock pointer because
			   subscr_terminate() frees the subscriber */
			ref_lock(subscriber->ref);
			subscriber_lock = subscriber->lock;
			subscr_terminate(subscriber);
			spin_unlock_bh(subscriber_lock);
		}
		tipc_detach(topsrv.user_ref);
		topsrv.user_ref = 0;
	}
}
519
520
521int tipc_ispublished(struct tipc_name const *name)
522{
523 u32 domain = 0;
524
525 return(nametbl_translate(name->type, name->instance,&domain) != 0);
526}
527
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
new file mode 100644
index 000000000000..ccff4efcb755
--- /dev/null
+++ b/net/tipc/subscr.h
@@ -0,0 +1,80 @@
1/*
2 * net/tipc/subscr.h: Include file for TIPC subscription service
3 *
4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_SUBSCR_H
38#define _TIPC_SUBSCR_H
39
/**
 * struct subscription - TIPC network topology subscription object
 * @seq: name sequence associated with subscription
 * @timeout: duration of subscription (in ms)
 * @filter: event filtering to be done for subscription
 * @evt: template for events generated by subscription
 * @subscription_list: adjacent subscriptions in subscriber's subscription list
 * @nameseq_list: adjacent subscriptions in name sequence's subscription list
 * @timer: timer governing subscription duration
 * @owner: pointer to subscriber object associated with this subscription
 */

struct subscription {
	struct tipc_name_seq seq;
	u32 timeout;
	u32 filter;
	struct tipc_event evt;
	struct list_head subscription_list;
	struct list_head nameseq_list;
	struct timer_list timer;
	struct subscriber *owner;
};
62
63int subscr_overlap(struct subscription * sub,
64 u32 found_lower,
65 u32 found_upper);
66
67void subscr_report_overlap(struct subscription * sub,
68 u32 found_lower,
69 u32 found_upper,
70 u32 event,
71 u32 port_ref,
72 u32 node,
73 int must_report);
74
75int subscr_start(void);
76
77void subscr_stop(void);
78
79
80#endif
diff --git a/net/tipc/user_reg.c b/net/tipc/user_reg.c
new file mode 100644
index 000000000000..35ec7dc8211d
--- /dev/null
+++ b/net/tipc/user_reg.c
@@ -0,0 +1,265 @@
1/*
2 * net/tipc/user_reg.c: TIPC user registry code
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "user_reg.h"
39
40/*
41 * TIPC user registry keeps track of users of the tipc_port interface.
42 *
43 * The registry utilizes an array of "TIPC user" entries;
44 * a user's ID is the index of their associated array entry.
45 * Array entry 0 is not used, so userid 0 is not valid;
46 * TIPC sometimes uses this value to denote an anonymous user.
47 * The list of free entries is initially chained from last entry to entry 1.
48 */
49
/**
 * struct tipc_user - registered TIPC user info
 * @next: index of next free registry entry (or -1 for an allocated entry)
 * @callback: ptr to routine to call when TIPC mode changes (NULL if none)
 * @usr_handle: user-defined value passed to callback routine
 * @ports: list of user ports owned by the user
 */

struct tipc_user {
	int next;			/* doubles as free-list link and "in use" flag */
	tipc_mode_event callback;
	void *usr_handle;
	struct list_head ports;
};
64
65#define MAX_USERID 64
66#define USER_LIST_SIZE ((MAX_USERID + 1) * sizeof(struct tipc_user))
67
68static struct tipc_user *users = 0;
69static u32 next_free_user = MAX_USERID + 1;
70static spinlock_t reg_lock = SPIN_LOCK_UNLOCKED;
71
72/**
73 * reg_init - create TIPC user registry (but don't activate it)
74 *
75 * If registry has been pre-initialized it is left "as is".
76 * NOTE: This routine may be called when TIPC is inactive.
77 */
78
79static int reg_init(void)
80{
81 u32 i;
82
83 spin_lock_bh(&reg_lock);
84 if (!users) {
85 users = (struct tipc_user *)kmalloc(USER_LIST_SIZE, GFP_ATOMIC);
86 if (users) {
87 memset(users, 0, USER_LIST_SIZE);
88 for (i = 1; i <= MAX_USERID; i++) {
89 users[i].next = i - 1;
90 }
91 next_free_user = MAX_USERID;
92 }
93 }
94 spin_unlock_bh(&reg_lock);
95 return users ? TIPC_OK : -ENOMEM;
96}
97
98/**
99 * reg_callback - inform TIPC user about current operating mode
100 */
101
102static void reg_callback(struct tipc_user *user_ptr)
103{
104 tipc_mode_event cb;
105 void *arg;
106
107 spin_lock_bh(&reg_lock);
108 cb = user_ptr->callback;
109 arg = user_ptr->usr_handle;
110 spin_unlock_bh(&reg_lock);
111
112 if (cb)
113 cb(arg, tipc_mode, tipc_own_addr);
114}
115
116/**
117 * reg_start - activate TIPC user registry
118 */
119
120int reg_start(void)
121{
122 u32 u;
123 int res;
124
125 if ((res = reg_init()))
126 return res;
127
128 for (u = 1; u <= MAX_USERID; u++) {
129 if (users[u].callback)
130 k_signal((Handler)reg_callback,
131 (unsigned long)&users[u]);
132 }
133 return TIPC_OK;
134}
135
136/**
137 * reg_stop - shut down & delete TIPC user registry
138 */
139
140void reg_stop(void)
141{
142 int id;
143
144 if (!users)
145 return;
146
147 for (id = 1; id <= MAX_USERID; id++) {
148 if (users[id].callback)
149 reg_callback(&users[id]);
150 }
151 kfree(users);
152 users = 0;
153}
154
/**
 * tipc_attach - register a TIPC user
 * @userid: filled in with the allocated user ID (1..MAX_USERID)
 * @cb: mode-change callback (may be NULL)
 * @usr_handle: opaque value passed back to @cb
 *
 * Returns TIPC_OK on success, -EBUSY if no free registry entry remains,
 * -ENOPROTOOPT if TIPC is not running and no callback was supplied.
 *
 * NOTE: This routine may be called when TIPC is inactive.
 */

int tipc_attach(u32 *userid, tipc_mode_event cb, void *usr_handle)
{
	struct tipc_user *user_ptr;

	if ((tipc_mode == TIPC_NOT_RUNNING) && !cb)
		return -ENOPROTOOPT;
	if (!users)
		reg_init();

	/* Take the entry at the head of the free list */
	spin_lock_bh(&reg_lock);
	if (!next_free_user) {
		spin_unlock_bh(&reg_lock);
		return -EBUSY;
	}
	user_ptr = &users[next_free_user];
	*userid = next_free_user;
	next_free_user = user_ptr->next;
	user_ptr->next = -1;	/* next == -1 marks the entry as allocated */
	spin_unlock_bh(&reg_lock);

	/* NOTE(review): the entry's fields are filled in after reg_lock is
	 * dropped; presumably nothing can reach the entry until the caller
	 * uses the returned ID -- confirm no concurrent reader exists */
	user_ptr->callback = cb;
	user_ptr->usr_handle = usr_handle;
	INIT_LIST_HEAD(&user_ptr->ports);
	atomic_inc(&tipc_user_count);

	/* Tell the new user the current mode (deferred via signal handler) */
	if (cb && (tipc_mode != TIPC_NOT_RUNNING))
		k_signal((Handler)reg_callback, (unsigned long)user_ptr);
	return TIPC_OK;
}
190
/**
 * tipc_detach - deregister a TIPC user
 * @userid: ID previously returned by tipc_attach()
 *
 * Returns the registry entry to the free list, then deletes any ports
 * still owned by the user.  Invalid or already-free IDs are ignored.
 */

void tipc_detach(u32 userid)
{
	struct tipc_user *user_ptr;
	struct list_head ports_temp;
	struct user_port *up_ptr, *temp_up_ptr;

	/* ID 0 denotes the anonymous user; IDs above MAX_USERID don't exist */
	if ((userid == 0) || (userid > MAX_USERID))
		return;

	spin_lock_bh(&reg_lock);
	/* An entry with next >= 0 is on the free list, i.e. not allocated */
	if ((!users) || (users[userid].next >= 0)) {
		spin_unlock_bh(&reg_lock);
		return;
	}

	user_ptr = &users[userid];
	user_ptr->callback = NULL;
	/* Detach the user's port list so the ports can be deleted below,
	 * after reg_lock has been released */
	INIT_LIST_HEAD(&ports_temp);
	list_splice(&user_ptr->ports, &ports_temp);
	/* Push the entry back onto the head of the free list */
	user_ptr->next = next_free_user;
	next_free_user = userid;
	spin_unlock_bh(&reg_lock);

	atomic_dec(&tipc_user_count);

	/* NOTE(review): port deletion happens outside reg_lock, presumably
	 * because tipc_deleteport acquires other locks -- confirm */
	list_for_each_entry_safe(up_ptr, temp_up_ptr, &ports_temp, uport_list) {
		tipc_deleteport(up_ptr->ref);
	}
}
224
225/**
226 * reg_add_port - register a user's driver port
227 */
228
229int reg_add_port(struct user_port *up_ptr)
230{
231 struct tipc_user *user_ptr;
232
233 if (up_ptr->user_ref == 0)
234 return TIPC_OK;
235 if (up_ptr->user_ref > MAX_USERID)
236 return -EINVAL;
237 if ((tipc_mode == TIPC_NOT_RUNNING) || !users )
238 return -ENOPROTOOPT;
239
240 spin_lock_bh(&reg_lock);
241 user_ptr = &users[up_ptr->user_ref];
242 list_add(&up_ptr->uport_list, &user_ptr->ports);
243 spin_unlock_bh(&reg_lock);
244 return TIPC_OK;
245}
246
247/**
248 * reg_remove_port - deregister a user's driver port
249 */
250
251int reg_remove_port(struct user_port *up_ptr)
252{
253 if (up_ptr->user_ref == 0)
254 return TIPC_OK;
255 if (up_ptr->user_ref > MAX_USERID)
256 return -EINVAL;
257 if (!users )
258 return -ENOPROTOOPT;
259
260 spin_lock_bh(&reg_lock);
261 list_del_init(&up_ptr->uport_list);
262 spin_unlock_bh(&reg_lock);
263 return TIPC_OK;
264}
265
diff --git a/net/tipc/user_reg.h b/net/tipc/user_reg.h
new file mode 100644
index 000000000000..122ca9be3671
--- /dev/null
+++ b/net/tipc/user_reg.h
@@ -0,0 +1,48 @@
1/*
2 * net/tipc/user_reg.h: Include file for TIPC user registry code
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_USER_REG_H
38#define _TIPC_USER_REG_H
39
40#include "port.h"
41
42int reg_start(void);
43void reg_stop(void);
44
45int reg_add_port(struct user_port *up_ptr);
46int reg_remove_port(struct user_port *up_ptr);
47
48#endif
diff --git a/net/tipc/zone.c b/net/tipc/zone.c
new file mode 100644
index 000000000000..4eaef662d568
--- /dev/null
+++ b/net/tipc/zone.c
@@ -0,0 +1,169 @@
1/*
2 * net/tipc/zone.c: TIPC zone management routines
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "zone.h"
39#include "net.h"
40#include "addr.h"
41#include "node_subscr.h"
42#include "cluster.h"
43#include "node.h"
44
45struct _zone *zone_create(u32 addr)
46{
47 struct _zone *z_ptr = 0;
48 u32 z_num;
49
50 if (!addr_domain_valid(addr))
51 return 0;
52
53 z_ptr = (struct _zone *)kmalloc(sizeof(*z_ptr), GFP_ATOMIC);
54 if (z_ptr != NULL) {
55 memset(z_ptr, 0, sizeof(*z_ptr));
56 z_num = tipc_zone(addr);
57 z_ptr->addr = tipc_addr(z_num, 0, 0);
58 net.zones[z_num] = z_ptr;
59 }
60 return z_ptr;
61}
62
63void zone_delete(struct _zone *z_ptr)
64{
65 u32 c_num;
66
67 if (!z_ptr)
68 return;
69 for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
70 cluster_delete(z_ptr->clusters[c_num]);
71 }
72 kfree(z_ptr);
73}
74
/**
 * zone_attach_cluster - link a cluster into its zone's cluster table
 *
 * The cluster is stored at the index given by its cluster number.
 */

void zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr)
{
	u32 c_num = tipc_cluster(c_ptr->addr);

	/* Cluster must have a valid address, fit in the zone's table,
	 * and must not already be attached */
	assert(c_ptr->addr);
	assert(c_num <= tipc_max_clusters);
	assert(z_ptr->clusters[c_num] == 0);
	z_ptr->clusters[c_num] = c_ptr;
}
84
85void zone_remove_as_router(struct _zone *z_ptr, u32 router)
86{
87 u32 c_num;
88
89 for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
90 if (z_ptr->clusters[c_num]) {
91 cluster_remove_as_router(z_ptr->clusters[c_num],
92 router);
93 }
94 }
95}
96
97void zone_send_external_routes(struct _zone *z_ptr, u32 dest)
98{
99 u32 c_num;
100
101 for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
102 if (z_ptr->clusters[c_num]) {
103 if (in_own_cluster(z_ptr->addr))
104 continue;
105 cluster_send_ext_routes(z_ptr->clusters[c_num], dest);
106 }
107 }
108}
109
/**
 * zone_select_remote_node - select a node to route through toward an address
 *
 * Tries the cluster containing the destination address first, then any
 * other cluster within the zone.  Returns 0 (NULL) if no node is found.
 */

struct node *zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref)
{
	struct cluster *c_ptr;
	struct node *n_ptr;
	u32 c_num;

	if (!z_ptr)
		return 0;
	/* First try the cluster the destination address belongs to */
	c_ptr = z_ptr->clusters[tipc_cluster(addr)];
	if (!c_ptr)
		return 0;
	n_ptr = cluster_select_node(c_ptr, ref);
	if (n_ptr)
		return n_ptr;

	/* Links to any other clusters within this zone ? */
	for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
		c_ptr = z_ptr->clusters[c_num];
		/* NOTE(review): a NULL slot aborts the whole search instead of
		 * skipping to the next cluster; presumably the cluster table
		 * is populated densely from index 1 -- confirm */
		if (!c_ptr)
			return 0;
		n_ptr = cluster_select_node(c_ptr, ref);
		if (n_ptr)
			return n_ptr;
	}
	return 0;
}
136
137u32 zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref)
138{
139 struct cluster *c_ptr;
140 u32 c_num;
141 u32 router;
142
143 if (!z_ptr)
144 return 0;
145 c_ptr = z_ptr->clusters[tipc_cluster(addr)];
146 router = c_ptr ? cluster_select_router(c_ptr, ref) : 0;
147 if (router)
148 return router;
149
150 /* Links to any other clusters within the zone? */
151 for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
152 c_ptr = z_ptr->clusters[c_num];
153 router = c_ptr ? cluster_select_router(c_ptr, ref) : 0;
154 if (router)
155 return router;
156 }
157 return 0;
158}
159
160
161u32 zone_next_node(u32 addr)
162{
163 struct cluster *c_ptr = cluster_find(addr);
164
165 if (c_ptr)
166 return cluster_next_node(c_ptr, addr);
167 return 0;
168}
169
diff --git a/net/tipc/zone.h b/net/tipc/zone.h
new file mode 100644
index 000000000000..4326f78d8292
--- /dev/null
+++ b/net/tipc/zone.h
@@ -0,0 +1,71 @@
1/*
2 * net/tipc/zone.h: Include file for TIPC zone management routines
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_ZONE_H
38#define _TIPC_ZONE_H
39
40#include "node_subscr.h"
41#include "net.h"
42
43
/**
 * struct _zone - TIPC zone structure
 * @addr: network address of zone
 * @clusters: array of pointers to all clusters within zone,
 *            indexed by cluster number (slot 0 unused)
 * @links: (used for inter-zone communication)
 */

struct _zone {
	u32 addr;
	struct cluster *clusters[2]; /* currently limited to just 1 cluster */
	u32 links;
};
56
57struct node *zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref);
58u32 zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref);
59void zone_remove_as_router(struct _zone *z_ptr, u32 router);
60void zone_send_external_routes(struct _zone *z_ptr, u32 dest);
61struct _zone *zone_create(u32 addr);
62void zone_delete(struct _zone *z_ptr);
63void zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr);
64u32 zone_next_node(u32 addr);
65
/* Look up the zone object covering the zone portion of a network address */
static inline struct _zone *zone_find(u32 addr)
{
	return net.zones[tipc_zone(addr)];
}
70
71#endif