path: root/net/tipc
author     Dave Kleikamp <shaggy@austin.ibm.com>   2006-01-24 15:34:47 -0500
committer  Dave Kleikamp <shaggy@austin.ibm.com>   2006-01-24 15:34:47 -0500
commit     0a0fc0ddbe732779366ab6b1b879f62195e65967 (patch)
tree       7b42490a676cf39ae0691b6859ecf7fd410f229b /net/tipc
parent     4d5dbd0945d9e0833dd7964a3d6ee33157f7cc7a (diff)
parent     3ee68c4af3fd7228c1be63254b9f884614f9ebb2 (diff)
Merge with /home/shaggy/git/linus-clean/
Diffstat (limited to 'net/tipc')
-rw-r--r--  net/tipc/Kconfig          117
-rw-r--r--  net/tipc/Makefile          13
-rw-r--r--  net/tipc/addr.c            94
-rw-r--r--  net/tipc/addr.h           128
-rw-r--r--  net/tipc/bcast.c          806
-rw-r--r--  net/tipc/bcast.h          223
-rw-r--r--  net/tipc/bearer.c         699
-rw-r--r--  net/tipc/bearer.h         170
-rw-r--r--  net/tipc/cluster.c        576
-rw-r--r--  net/tipc/cluster.h         92
-rw-r--r--  net/tipc/config.c         718
-rw-r--r--  net/tipc/config.h          79
-rw-r--r--  net/tipc/core.c           284
-rw-r--r--  net/tipc/core.h           321
-rw-r--r--  net/tipc/dbg.c            395
-rw-r--r--  net/tipc/dbg.h             59
-rw-r--r--  net/tipc/discover.c       318
-rw-r--r--  net/tipc/discover.h        58
-rw-r--r--  net/tipc/eth_media.c      297
-rw-r--r--  net/tipc/handler.c        132
-rw-r--r--  net/tipc/link.c          3166
-rw-r--r--  net/tipc/link.h           295
-rw-r--r--  net/tipc/msg.c            323
-rw-r--r--  net/tipc/msg.h            818
-rw-r--r--  net/tipc/name_distr.c     309
-rw-r--r--  net/tipc/name_distr.h      48
-rw-r--r--  net/tipc/name_table.c    1079
-rw-r--r--  net/tipc/name_table.h     108
-rw-r--r--  net/tipc/net.c            311
-rw-r--r--  net/tipc/net.h             64
-rw-r--r--  net/tipc/netlink.c        112
-rw-r--r--  net/tipc/node.c           678
-rw-r--r--  net/tipc/node.h           144
-rw-r--r--  net/tipc/node_subscr.c     79
-rw-r--r--  net/tipc/node_subscr.h     63
-rw-r--r--  net/tipc/port.c          1708
-rw-r--r--  net/tipc/port.h           209
-rw-r--r--  net/tipc/ref.c            189
-rw-r--r--  net/tipc/ref.h            131
-rw-r--r--  net/tipc/socket.c        1724
-rw-r--r--  net/tipc/subscr.c         527
-rw-r--r--  net/tipc/subscr.h          80
-rw-r--r--  net/tipc/user_reg.c       265
-rw-r--r--  net/tipc/user_reg.h        48
-rw-r--r--  net/tipc/zone.c           169
-rw-r--r--  net/tipc/zone.h            71
46 files changed, 18297 insertions, 0 deletions
diff --git a/net/tipc/Kconfig b/net/tipc/Kconfig
new file mode 100644
index 000000000000..3891cc00087d
--- /dev/null
+++ b/net/tipc/Kconfig
@@ -0,0 +1,117 @@
1#
2# TIPC configuration
3#
4
5menu "TIPC Configuration (EXPERIMENTAL)"
6 depends on INET && EXPERIMENTAL
7
8config TIPC
9 tristate "The TIPC Protocol (EXPERIMENTAL)"
10 ---help---
11 The Transparent Inter Process Communication (TIPC) protocol is
12 specially designed for intra-cluster communication. This protocol
13 originates from Ericsson, where it has been used in carrier-grade
14 cluster applications for many years.
15
16 For more information about TIPC, see http://tipc.sourceforge.net.
17
18 This protocol support is also available as a module ( = code which
19 can be inserted in and removed from the running kernel whenever you
20 want). The module will be called tipc. If you want to compile it
21 as a module, say M here and read <file:Documentation/modules.txt>.
22
23 If in doubt, say N.
24
25config TIPC_ADVANCED
26 bool "TIPC: Advanced configuration"
27 depends on TIPC
28 default n
29 help
30 Saying Y here will open some advanced configuration
31 for TIPC. Most users do not need to bother, so if
32 unsure, just say N.
33
34config TIPC_ZONES
35 int "Maximum number of zones in network"
36 depends on TIPC && TIPC_ADVANCED
37 default "3"
38 help
39 Maximum number of zones inside a TIPC network. Maximum supported
40 value is 255 zones, minimum is 1.
41
42 Default is 3 zones in a network; setting this higher
43 allows more zones but might use more memory.
44
45config TIPC_CLUSTERS
46 int "Maximum number of clusters in a zone"
47 depends on TIPC && TIPC_ADVANCED
48 default "1"
49 help
50 ***Only 1 (one cluster in a zone) is supported by current code.
51 Any value set here will be overridden.***
52
53 (Maximum number of clusters inside a TIPC zone. Maximum supported
54 value is 4095 clusters, minimum is 1.
55
56 Default is 1; setting this to a smaller value might save
57 some memory, while setting it higher
58 allows more clusters and might consume more memory.)
59
60config TIPC_NODES
61 int "Maximum number of nodes in cluster"
62 depends on TIPC && TIPC_ADVANCED
63 default "255"
64 help
65 Maximum number of nodes inside a TIPC cluster. Maximum
66 supported value is 2047 nodes, minimum is 8.
67
68 Setting this to a smaller value saves some memory;
69 setting it higher allows more nodes.
70
71config TIPC_SLAVE_NODES
72 int "Maximum number of slave nodes in cluster"
73 depends on TIPC && TIPC_ADVANCED
74 default "0"
75 help
76 ***This capability is not supported by current code.***
77
78 Maximum number of slave nodes inside a TIPC cluster. Maximum
79 supported value is 2047 nodes, minimum is 0.
80
81 Setting this to a smaller value saves some memory;
82 setting it higher allows more nodes.
83
84config TIPC_PORTS
85 int "Maximum number of ports in a node"
86 depends on TIPC && TIPC_ADVANCED
87 default "8191"
88 help
89 Maximum number of ports within a node. Maximum
90 supported value is 64535 ports, minimum is 127.
91
92 Setting this to a smaller value saves some memory;
93 setting it higher allows more ports.
94
95config TIPC_LOG
96 int "Size of log buffer"
97 depends on TIPC && TIPC_ADVANCED
98 default 0
99 help
100 Size (in bytes) of TIPC's internal log buffer, which records the
101 occurrence of significant events. Maximum supported value
102 is 32768 bytes, minimum is 0.
103
104 There is no need to enable the log buffer unless the node will be
105 managed remotely via TIPC.
106
107config TIPC_DEBUG
108 bool "Enable debugging support"
109 depends on TIPC
110 default n
111 help
112 This will enable debugging of TIPC.
113
114 Only say Y here if you are having trouble with TIPC. It will
115 enable the display of detailed information about what is going on.
116
117endmenu
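A kernel configured with this patch applied ends up with a .config fragment along these lines (illustrative only; the values shown are the defaults from the entries above, and the TIPC_* limits are only prompted for when TIPC_ADVANCED is enabled):

  CONFIG_TIPC=m
  CONFIG_TIPC_ADVANCED=y
  CONFIG_TIPC_ZONES=3
  CONFIG_TIPC_CLUSTERS=1
  CONFIG_TIPC_NODES=255
  CONFIG_TIPC_SLAVE_NODES=0
  CONFIG_TIPC_PORTS=8191
  CONFIG_TIPC_LOG=0
  # CONFIG_TIPC_DEBUG is not set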
diff --git a/net/tipc/Makefile b/net/tipc/Makefile
new file mode 100644
index 000000000000..dceb7027946c
--- /dev/null
+++ b/net/tipc/Makefile
@@ -0,0 +1,13 @@
1#
2# Makefile for the Linux TIPC layer
3#
4
5obj-$(CONFIG_TIPC) := tipc.o
6
7tipc-y += addr.o bcast.o bearer.o config.o cluster.o \
8 core.o handler.o link.o discover.o msg.o \
9 name_distr.o subscr.o name_table.o net.o \
10 netlink.o node.o node_subscr.o port.o ref.o \
11 socket.o user_reg.o zone.o dbg.o eth_media.o
12
13# End of file
diff --git a/net/tipc/addr.c b/net/tipc/addr.c
new file mode 100644
index 000000000000..0be25e175b93
--- /dev/null
+++ b/net/tipc/addr.c
@@ -0,0 +1,94 @@
1/*
2 * net/tipc/addr.c: TIPC address utility routines
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "dbg.h"
39#include "addr.h"
40#include "zone.h"
41#include "cluster.h"
42#include "net.h"
43
44u32 tipc_get_addr(void)
45{
46 return tipc_own_addr;
47}
48
49/**
50 * tipc_addr_domain_valid - validates a network domain address
51 *
52 * Accepts <Z.C.N>, <Z.C.0>, <Z.0.0>, and <0.0.0>,
53 * where Z, C, and N are non-zero and do not exceed the configured limits.
54 *
55 * Returns 1 if domain address is valid, otherwise 0
56 */
57
58int tipc_addr_domain_valid(u32 addr)
59{
60 u32 n = tipc_node(addr);
61 u32 c = tipc_cluster(addr);
62 u32 z = tipc_zone(addr);
63 u32 max_nodes = tipc_max_nodes;
64
65 if (is_slave(addr))
66 max_nodes = LOWEST_SLAVE + tipc_max_slaves;
67 if (n > max_nodes)
68 return 0;
69 if (c > tipc_max_clusters)
70 return 0;
71 if (z > tipc_max_zones)
72 return 0;
73
74 if (n && (!z || !c))
75 return 0;
76 if (c && !z)
77 return 0;
78 return 1;
79}
80
81/**
82 * tipc_addr_node_valid - validates a proposed network address for this node
83 *
84 * Accepts <Z.C.N>, where Z, C, and N are non-zero and do not exceed
85 * the configured limits.
86 *
87 * Returns 1 if address can be used, otherwise 0
88 */
89
90int tipc_addr_node_valid(u32 addr)
91{
92 return (tipc_addr_domain_valid(addr) && tipc_node(addr));
93}
94
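Concretely, tipc_addr_domain_valid() accepts the four progressively wider domain forms and rejects addresses with a "hole" in them. A sketch of how that plays out (tipc_addr() stands in for the <Z.C.N> address constructor from TIPC's exported headers, which are not part of this diff; the limits assume the Kconfig defaults of 3 zones, 1 cluster and 255 nodes):

  tipc_addr_domain_valid(tipc_addr(0, 0, 0));   /* <0.0.0> whole network   -> 1 */
  tipc_addr_domain_valid(tipc_addr(1, 0, 0));   /* <Z.0.0> one zone        -> 1 */
  tipc_addr_domain_valid(tipc_addr(1, 1, 0));   /* <Z.C.0> one cluster     -> 1 */
  tipc_addr_domain_valid(tipc_addr(1, 1, 10));  /* <Z.C.N> one node        -> 1 */
  tipc_addr_domain_valid(tipc_addr(1, 0, 10));  /* node without a cluster  -> 0 */
  tipc_addr_domain_valid(tipc_addr(4, 1, 10));  /* zone > tipc_max_zones   -> 0 */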
diff --git a/net/tipc/addr.h b/net/tipc/addr.h
new file mode 100644
index 000000000000..bcfebb3cbbf3
--- /dev/null
+++ b/net/tipc/addr.h
@@ -0,0 +1,128 @@
1/*
2 * net/tipc/addr.h: Include file for TIPC address utility routines
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_ADDR_H
38#define _TIPC_ADDR_H
39
40static inline u32 own_node(void)
41{
42 return tipc_node(tipc_own_addr);
43}
44
45static inline u32 own_cluster(void)
46{
47 return tipc_cluster(tipc_own_addr);
48}
49
50static inline u32 own_zone(void)
51{
52 return tipc_zone(tipc_own_addr);
53}
54
55static inline int in_own_cluster(u32 addr)
56{
57 return !((addr ^ tipc_own_addr) >> 12);
58}
59
60static inline int in_own_zone(u32 addr)
61{
62 return !((addr ^ tipc_own_addr) >> 24);
63}
64
65static inline int is_slave(u32 addr)
66{
67 return addr & 0x800;
68}
69
70static inline int may_route(u32 addr)
71{
72 return (addr ^ tipc_own_addr) >> 11;
73}
74
75static inline int in_scope(u32 domain, u32 addr)
76{
77 if (!domain || (domain == addr))
78 return 1;
79 if (domain == (addr & 0xfffff000u)) /* domain <Z.C.0> */
80 return 1;
81 if (domain == (addr & 0xff000000u)) /* domain <Z.0.0> */
82 return 1;
83 return 0;
84}
85
86/**
87 * addr_scope - convert message lookup domain to equivalent 2-bit scope value
88 */
89
90static inline int addr_scope(u32 domain)
91{
92 if (likely(!domain))
93 return TIPC_ZONE_SCOPE;
94 if (tipc_node(domain))
95 return TIPC_NODE_SCOPE;
96 if (tipc_cluster(domain))
97 return TIPC_CLUSTER_SCOPE;
98 return TIPC_ZONE_SCOPE;
99}
100
101/**
102 * addr_domain - convert 2-bit scope value to equivalent message lookup domain
103 *
104 * Needed when address of a named message must be looked up a second time
105 * after a network hop.
106 */
107
108static inline int addr_domain(int sc)
109{
110 if (likely(sc == TIPC_NODE_SCOPE))
111 return tipc_own_addr;
112 if (sc == TIPC_CLUSTER_SCOPE)
113 return tipc_addr(tipc_zone(tipc_own_addr),
114 tipc_cluster(tipc_own_addr), 0);
115 return tipc_addr(tipc_zone(tipc_own_addr), 0, 0);
116}
117
118static inline char *addr_string_fill(char *string, u32 addr)
119{
120 snprintf(string, 16, "<%u.%u.%u>",
121 tipc_zone(addr), tipc_cluster(addr), tipc_node(addr));
122 return string;
123}
124
125int tipc_addr_domain_valid(u32);
126int tipc_addr_node_valid(u32 addr);
127
128#endif
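All of these helpers assume the same packed 32-bit address layout, which the >>24 and >>12 shifts, the 0xfffff000/0xff000000 masks and the 0x800 slave bit above imply. A sketch of the packing (the real tipc_addr()/tipc_zone()/tipc_cluster()/tipc_node() accessors live in TIPC's exported headers and are not shown in this diff):

  /* <Z.C.N>: zone in bits 31-24, cluster in bits 23-12, node in bits 11-0 */
  static inline u32 example_tipc_addr(u32 z, u32 c, u32 n)
  {
          return (z << 24) | (c << 12) | n;
  }
  /* zone    = addr >> 24            -- so in_own_zone() compares addr >> 24    */
  /* cluster = (addr >> 12) & 0xfff  -- hence the <Z.C.0> mask 0xfffff000       */
  /* node    = addr & 0xfff          -- bit 0x800 of this field marks slaves    */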
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
new file mode 100644
index 000000000000..a7b04f397c12
--- /dev/null
+++ b/net/tipc/bcast.c
@@ -0,0 +1,806 @@
1/*
2 * net/tipc/bcast.c: TIPC broadcast code
3 *
4 * Copyright (c) 2004-2006, Ericsson AB
5 * Copyright (c) 2004, Intel Corporation.
6 * Copyright (c) 2005, Wind River Systems
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions are met:
11 *
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
17 * 3. Neither the names of the copyright holders nor the names of its
18 * contributors may be used to endorse or promote products derived from
19 * this software without specific prior written permission.
20 *
21 * Alternatively, this software may be distributed under the terms of the
22 * GNU General Public License ("GPL") version 2 as published by the Free
23 * Software Foundation.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
26 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
29 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
30 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
31 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
32 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
33 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
34 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
35 * POSSIBILITY OF SUCH DAMAGE.
36 */
37
38#include "core.h"
39#include "msg.h"
40#include "dbg.h"
41#include "link.h"
42#include "net.h"
43#include "node.h"
44#include "port.h"
45#include "addr.h"
46#include "node_subscr.h"
47#include "name_distr.h"
48#include "bearer.h"
49#include "name_table.h"
50#include "bcast.h"
51
52
53#define MAX_PKT_DEFAULT_MCAST 1500 /* bcast link max packet size (fixed) */
54
55#define BCLINK_WIN_DEFAULT 20 /* bcast link window size (default) */
56
57#define BCLINK_LOG_BUF_SIZE 0
58
59/**
60 * struct bcbearer_pair - a pair of bearers used by broadcast link
61 * @primary: pointer to primary bearer
62 * @secondary: pointer to secondary bearer
63 *
64 * Bearers must have same priority and same set of reachable destinations
65 * to be paired.
66 */
67
68struct bcbearer_pair {
69 struct bearer *primary;
70 struct bearer *secondary;
71};
72
73/**
74 * struct bcbearer - bearer used by broadcast link
75 * @bearer: (non-standard) broadcast bearer structure
76 * @media: (non-standard) broadcast media structure
77 * @bpairs: array of bearer pairs
78 * @bpairs_temp: array of bearer pairs used during creation of "bpairs"
79 */
80
81struct bcbearer {
82 struct bearer bearer;
83 struct media media;
84 struct bcbearer_pair bpairs[MAX_BEARERS];
85 struct bcbearer_pair bpairs_temp[TIPC_MAX_LINK_PRI + 1];
86};
87
88/**
89 * struct bclink - link used for broadcast messages
90 * @link: (non-standard) broadcast link structure
91 * @node: (non-standard) node structure representing b'cast link's peer node
92 *
93 * Handles sequence numbering, fragmentation, bundling, etc.
94 */
95
96struct bclink {
97 struct link link;
98 struct node node;
99};
100
101
102static struct bcbearer *bcbearer = NULL;
103static struct bclink *bclink = NULL;
104static struct link *bcl = NULL;
105static spinlock_t bc_lock = SPIN_LOCK_UNLOCKED;
106
107char tipc_bclink_name[] = "multicast-link";
108
109
110static inline u32 buf_seqno(struct sk_buff *buf)
111{
112 return msg_seqno(buf_msg(buf));
113}
114
115static inline u32 bcbuf_acks(struct sk_buff *buf)
116{
117 return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
118}
119
120static inline void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
121{
122 TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
123}
124
125static inline void bcbuf_decr_acks(struct sk_buff *buf)
126{
127 bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
128}
129
130
131/**
132 * bclink_set_gap - set gap according to contents of current deferred pkt queue
133 *
134 * Called with 'node' locked, bc_lock unlocked
135 */
136
137static inline void bclink_set_gap(struct node *n_ptr)
138{
139 struct sk_buff *buf = n_ptr->bclink.deferred_head;
140
141 n_ptr->bclink.gap_after = n_ptr->bclink.gap_to =
142 mod(n_ptr->bclink.last_in);
143 if (unlikely(buf != NULL))
144 n_ptr->bclink.gap_to = mod(buf_seqno(buf) - 1);
145}
146
147/**
148 * bclink_ack_allowed - test if ACK or NACK message can be sent at this moment
149 *
150 * This mechanism endeavours to prevent all nodes in the network from trying
151 * to ACK or NACK at the same time.
152 *
153 * Note: TIPC uses a different trigger to distribute ACKs than it does to
154 * distribute NACKs, but tries to use the same spacing (divide by 16).
155 */
156
157static inline int bclink_ack_allowed(u32 n)
158{
159 return((n % TIPC_MIN_LINK_WIN) == tipc_own_tag);
160}
161
162
163/**
164 * bclink_retransmit_pkt - retransmit broadcast packets
165 * @after: sequence number of last packet to *not* retransmit
166 * @to: sequence number of last packet to retransmit
167 *
168 * Called with 'node' locked, bc_lock unlocked
169 */
170
171static void bclink_retransmit_pkt(u32 after, u32 to)
172{
173 struct sk_buff *buf;
174
175 spin_lock_bh(&bc_lock);
176 buf = bcl->first_out;
177 while (buf && less_eq(buf_seqno(buf), after)) {
178 buf = buf->next;
179 }
180 if (buf != NULL)
181 tipc_link_retransmit(bcl, buf, mod(to - after));
182 spin_unlock_bh(&bc_lock);
183}
184
185/**
186 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
187 * @n_ptr: node that sent acknowledgement info
188 * @acked: broadcast sequence # that has been acknowledged
189 *
190 * Node is locked, bc_lock unlocked.
191 */
192
193void tipc_bclink_acknowledge(struct node *n_ptr, u32 acked)
194{
195 struct sk_buff *crs;
196 struct sk_buff *next;
197 unsigned int released = 0;
198
199 if (less_eq(acked, n_ptr->bclink.acked))
200 return;
201
202 spin_lock_bh(&bc_lock);
203
204 /* Skip over packets that node has previously acknowledged */
205
206 crs = bcl->first_out;
207 while (crs && less_eq(buf_seqno(crs), n_ptr->bclink.acked)) {
208 crs = crs->next;
209 }
210
211 /* Update packets that node is now acknowledging */
212
213 while (crs && less_eq(buf_seqno(crs), acked)) {
214 next = crs->next;
215 bcbuf_decr_acks(crs);
216 if (bcbuf_acks(crs) == 0) {
217 bcl->first_out = next;
218 bcl->out_queue_size--;
219 buf_discard(crs);
220 released = 1;
221 }
222 crs = next;
223 }
224 n_ptr->bclink.acked = acked;
225
226 /* Try resolving broadcast link congestion, if necessary */
227
228 if (unlikely(bcl->next_out))
229 tipc_link_push_queue(bcl);
230 if (unlikely(released && !list_empty(&bcl->waiting_ports)))
231 tipc_link_wakeup_ports(bcl, 0);
232 spin_unlock_bh(&bc_lock);
233}
234
235/**
236 * bclink_send_ack - unicast an ACK msg
237 *
238 * tipc_net_lock and node lock set
239 */
240
241static void bclink_send_ack(struct node *n_ptr)
242{
243 struct link *l_ptr = n_ptr->active_links[n_ptr->addr & 1];
244
245 if (l_ptr != NULL)
246 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
247}
248
249/**
250 * bclink_send_nack- broadcast a NACK msg
251 *
252 * tipc_net_lock and node lock set
253 */
254
255static void bclink_send_nack(struct node *n_ptr)
256{
257 struct sk_buff *buf;
258 struct tipc_msg *msg;
259
260 if (!less(n_ptr->bclink.gap_after, n_ptr->bclink.gap_to))
261 return;
262
263 buf = buf_acquire(INT_H_SIZE);
264 if (buf) {
265 msg = buf_msg(buf);
266 msg_init(msg, BCAST_PROTOCOL, STATE_MSG,
267 TIPC_OK, INT_H_SIZE, n_ptr->addr);
268 msg_set_mc_netid(msg, tipc_net_id);
269 msg_set_bcast_ack(msg, mod(n_ptr->bclink.last_in));
270 msg_set_bcgap_after(msg, n_ptr->bclink.gap_after);
271 msg_set_bcgap_to(msg, n_ptr->bclink.gap_to);
272 msg_set_bcast_tag(msg, tipc_own_tag);
273
274 if (tipc_bearer_send(&bcbearer->bearer, buf, 0)) {
275 bcl->stats.sent_nacks++;
276 buf_discard(buf);
277 } else {
278 tipc_bearer_schedule(bcl->b_ptr, bcl);
279 bcl->proto_msg_queue = buf;
280 bcl->stats.bearer_congs++;
281 }
282
283 /*
284 * Ensure we don't send another NACK msg to the node
285 * until 16 more deferred messages arrive from it
286 * (i.e. helps prevent all nodes from NACK'ing at same time)
287 */
288
289 n_ptr->bclink.nack_sync = tipc_own_tag;
290 }
291}
292
293/**
294 * tipc_bclink_check_gap - send a NACK if a sequence gap exists
295 *
296 * tipc_net_lock and node lock set
297 */
298
299void tipc_bclink_check_gap(struct node *n_ptr, u32 last_sent)
300{
301 if (!n_ptr->bclink.supported ||
302 less_eq(last_sent, mod(n_ptr->bclink.last_in)))
303 return;
304
305 bclink_set_gap(n_ptr);
306 if (n_ptr->bclink.gap_after == n_ptr->bclink.gap_to)
307 n_ptr->bclink.gap_to = last_sent;
308 bclink_send_nack(n_ptr);
309}
310
311/**
312 * tipc_bclink_peek_nack - process a NACK msg meant for another node
313 *
314 * Only tipc_net_lock set.
315 */
316
317void tipc_bclink_peek_nack(u32 dest, u32 sender_tag, u32 gap_after, u32 gap_to)
318{
319 struct node *n_ptr = tipc_node_find(dest);
320 u32 my_after, my_to;
321
322 if (unlikely(!n_ptr || !tipc_node_is_up(n_ptr)))
323 return;
324 tipc_node_lock(n_ptr);
325 /*
326 * Modify gap to suppress unnecessary NACKs from this node
327 */
328 my_after = n_ptr->bclink.gap_after;
329 my_to = n_ptr->bclink.gap_to;
330
331 if (less_eq(gap_after, my_after)) {
332 if (less(my_after, gap_to) && less(gap_to, my_to))
333 n_ptr->bclink.gap_after = gap_to;
334 else if (less_eq(my_to, gap_to))
335 n_ptr->bclink.gap_to = n_ptr->bclink.gap_after;
336 } else if (less_eq(gap_after, my_to)) {
337 if (less_eq(my_to, gap_to))
338 n_ptr->bclink.gap_to = gap_after;
339 } else {
340 /*
341 * Expand gap if missing bufs not in deferred queue:
342 */
343 struct sk_buff *buf = n_ptr->bclink.deferred_head;
344 u32 prev = n_ptr->bclink.gap_to;
345
346 for (; buf; buf = buf->next) {
347 u32 seqno = buf_seqno(buf);
348
349 if (mod(seqno - prev) != 1)
350 buf = NULL;
351 if (seqno == gap_after)
352 break;
353 prev = seqno;
354 }
355 if (buf == NULL)
356 n_ptr->bclink.gap_to = gap_after;
357 }
358 /*
359 * Some nodes may send a complementary NACK now:
360 */
361 if (bclink_ack_allowed(sender_tag + 1)) {
362 if (n_ptr->bclink.gap_to != n_ptr->bclink.gap_after) {
363 bclink_send_nack(n_ptr);
364 bclink_set_gap(n_ptr);
365 }
366 }
367 tipc_node_unlock(n_ptr);
368}
369
370/**
371 * tipc_bclink_send_msg - broadcast a packet to all nodes in cluster
372 */
373
374int tipc_bclink_send_msg(struct sk_buff *buf)
375{
376 int res;
377
378 spin_lock_bh(&bc_lock);
379
380 res = tipc_link_send_buf(bcl, buf);
381 if (unlikely(res == -ELINKCONG))
382 buf_discard(buf);
383 else
384 bcl->stats.sent_info++;
385
386 if (bcl->out_queue_size > bcl->stats.max_queue_sz)
387 bcl->stats.max_queue_sz = bcl->out_queue_size;
388 bcl->stats.queue_sz_counts++;
389 bcl->stats.accu_queue_sz += bcl->out_queue_size;
390
391 spin_unlock_bh(&bc_lock);
392 return res;
393}
394
395/**
396 * tipc_bclink_recv_pkt - receive a broadcast packet, and deliver upwards
397 *
398 * tipc_net_lock is read_locked, no other locks set
399 */
400
401void tipc_bclink_recv_pkt(struct sk_buff *buf)
402{
403 struct tipc_msg *msg = buf_msg(buf);
404 struct node* node = tipc_node_find(msg_prevnode(msg));
405 u32 next_in;
406 u32 seqno;
407 struct sk_buff *deferred;
408
409 msg_dbg(msg, "<BC<<<");
410
411 if (unlikely(!node || !tipc_node_is_up(node) || !node->bclink.supported ||
412 (msg_mc_netid(msg) != tipc_net_id))) {
413 buf_discard(buf);
414 return;
415 }
416
417 if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
418 msg_dbg(msg, "<BCNACK<<<");
419 if (msg_destnode(msg) == tipc_own_addr) {
420 tipc_node_lock(node);
421 tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
422 tipc_node_unlock(node);
423 bcl->stats.recv_nacks++;
424 bclink_retransmit_pkt(msg_bcgap_after(msg),
425 msg_bcgap_to(msg));
426 } else {
427 tipc_bclink_peek_nack(msg_destnode(msg),
428 msg_bcast_tag(msg),
429 msg_bcgap_after(msg),
430 msg_bcgap_to(msg));
431 }
432 buf_discard(buf);
433 return;
434 }
435
436 tipc_node_lock(node);
437receive:
438 deferred = node->bclink.deferred_head;
439 next_in = mod(node->bclink.last_in + 1);
440 seqno = msg_seqno(msg);
441
442 if (likely(seqno == next_in)) {
443 bcl->stats.recv_info++;
444 node->bclink.last_in++;
445 bclink_set_gap(node);
446 if (unlikely(bclink_ack_allowed(seqno))) {
447 bclink_send_ack(node);
448 bcl->stats.sent_acks++;
449 }
450 if (likely(msg_isdata(msg))) {
451 tipc_node_unlock(node);
452 tipc_port_recv_mcast(buf, NULL);
453 } else if (msg_user(msg) == MSG_BUNDLER) {
454 bcl->stats.recv_bundles++;
455 bcl->stats.recv_bundled += msg_msgcnt(msg);
456 tipc_node_unlock(node);
457 tipc_link_recv_bundle(buf);
458 } else if (msg_user(msg) == MSG_FRAGMENTER) {
459 bcl->stats.recv_fragments++;
460 if (tipc_link_recv_fragment(&node->bclink.defragm,
461 &buf, &msg))
462 bcl->stats.recv_fragmented++;
463 tipc_node_unlock(node);
464 tipc_net_route_msg(buf);
465 } else {
466 tipc_node_unlock(node);
467 tipc_net_route_msg(buf);
468 }
469 if (deferred && (buf_seqno(deferred) == mod(next_in + 1))) {
470 tipc_node_lock(node);
471 buf = deferred;
472 msg = buf_msg(buf);
473 node->bclink.deferred_head = deferred->next;
474 goto receive;
475 }
476 return;
477 } else if (less(next_in, seqno)) {
478 u32 gap_after = node->bclink.gap_after;
479 u32 gap_to = node->bclink.gap_to;
480
481 if (tipc_link_defer_pkt(&node->bclink.deferred_head,
482 &node->bclink.deferred_tail,
483 buf)) {
484 node->bclink.nack_sync++;
485 bcl->stats.deferred_recv++;
486 if (seqno == mod(gap_after + 1))
487 node->bclink.gap_after = seqno;
488 else if (less(gap_after, seqno) && less(seqno, gap_to))
489 node->bclink.gap_to = seqno;
490 }
491 if (bclink_ack_allowed(node->bclink.nack_sync)) {
492 if (gap_to != gap_after)
493 bclink_send_nack(node);
494 bclink_set_gap(node);
495 }
496 } else {
497 bcl->stats.duplicates++;
498 buf_discard(buf);
499 }
500 tipc_node_unlock(node);
501}
502
503u32 tipc_bclink_get_last_sent(void)
504{
505 u32 last_sent = mod(bcl->next_out_no - 1);
506
507 if (bcl->next_out)
508 last_sent = mod(buf_seqno(bcl->next_out) - 1);
509 return last_sent;
510}
511
512u32 tipc_bclink_acks_missing(struct node *n_ptr)
513{
514 return (n_ptr->bclink.supported &&
515 (tipc_bclink_get_last_sent() != n_ptr->bclink.acked));
516}
517
518
519/**
520 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
521 *
522 * Send through as many bearers as necessary to reach all nodes
523 * that support TIPC multicasting.
524 *
525 * Returns 0 if packet sent successfully, non-zero if not
526 */
527
528int tipc_bcbearer_send(struct sk_buff *buf,
529 struct tipc_bearer *unused1,
530 struct tipc_media_addr *unused2)
531{
532 static int send_count = 0;
533
534 struct node_map remains;
535 struct node_map remains_new;
536 int bp_index;
537 int swap_time;
538
539 /* Prepare buffer for broadcasting (if first time trying to send it) */
540
541 if (likely(!msg_non_seq(buf_msg(buf)))) {
542 struct tipc_msg *msg;
543
544 assert(tipc_cltr_bcast_nodes.count != 0);
545 bcbuf_set_acks(buf, tipc_cltr_bcast_nodes.count);
546 msg = buf_msg(buf);
547 msg_set_non_seq(msg);
548 msg_set_mc_netid(msg, tipc_net_id);
549 }
550
551 /* Determine if bearer pairs should be swapped following this attempt */
552
553 if ((swap_time = (++send_count >= 10)))
554 send_count = 0;
555
556 /* Send buffer over bearers until all targets reached */
557
558 remains = tipc_cltr_bcast_nodes;
559
560 for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
561 struct bearer *p = bcbearer->bpairs[bp_index].primary;
562 struct bearer *s = bcbearer->bpairs[bp_index].secondary;
563
564 if (!p)
565 break; /* no more bearers to try */
566
567 tipc_nmap_diff(&remains, &p->nodes, &remains_new);
568 if (remains_new.count == remains.count)
569 continue; /* bearer pair doesn't add anything */
570
571 if (!p->publ.blocked &&
572 !p->media->send_msg(buf, &p->publ, &p->media->bcast_addr)) {
573 if (swap_time && s && !s->publ.blocked)
574 goto swap;
575 else
576 goto update;
577 }
578
579 if (!s || s->publ.blocked ||
580 s->media->send_msg(buf, &s->publ, &s->media->bcast_addr))
581 continue; /* unable to send using bearer pair */
582swap:
583 bcbearer->bpairs[bp_index].primary = s;
584 bcbearer->bpairs[bp_index].secondary = p;
585update:
586 if (remains_new.count == 0)
587 return TIPC_OK;
588
589 remains = remains_new;
590 }
591
592 /* Unable to reach all targets */
593
594 bcbearer->bearer.publ.blocked = 1;
595 bcl->stats.bearer_congs++;
596 return ~TIPC_OK;
597}
598
599/**
600 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
601 */
602
603void tipc_bcbearer_sort(void)
604{
605 struct bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
606 struct bcbearer_pair *bp_curr;
607 int b_index;
608 int pri;
609
610 spin_lock_bh(&bc_lock);
611
612 /* Group bearers by priority (can assume max of two per priority) */
613
614 memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));
615
616 for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
617 struct bearer *b = &tipc_bearers[b_index];
618
619 if (!b->active || !b->nodes.count)
620 continue;
621
622 if (!bp_temp[b->priority].primary)
623 bp_temp[b->priority].primary = b;
624 else
625 bp_temp[b->priority].secondary = b;
626 }
627
628 /* Create array of bearer pairs for broadcasting */
629
630 bp_curr = bcbearer->bpairs;
631 memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));
632
633 for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {
634
635 if (!bp_temp[pri].primary)
636 continue;
637
638 bp_curr->primary = bp_temp[pri].primary;
639
640 if (bp_temp[pri].secondary) {
641 if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
642 &bp_temp[pri].secondary->nodes)) {
643 bp_curr->secondary = bp_temp[pri].secondary;
644 } else {
645 bp_curr++;
646 bp_curr->primary = bp_temp[pri].secondary;
647 }
648 }
649
650 bp_curr++;
651 }
652
653 spin_unlock_bh(&bc_lock);
654}
655
656/**
657 * tipc_bcbearer_push - resolve bearer congestion
658 *
659 * Forces bclink to push out any unsent packets, until all packets are gone
660 * or congestion reoccurs.
661 * No locks set when function called
662 */
663
664void tipc_bcbearer_push(void)
665{
666 struct bearer *b_ptr;
667
668 spin_lock_bh(&bc_lock);
669 b_ptr = &bcbearer->bearer;
670 if (b_ptr->publ.blocked) {
671 b_ptr->publ.blocked = 0;
672 tipc_bearer_lock_push(b_ptr);
673 }
674 spin_unlock_bh(&bc_lock);
675}
676
677
678int tipc_bclink_stats(char *buf, const u32 buf_size)
679{
680 struct print_buf pb;
681
682 if (!bcl)
683 return 0;
684
685 tipc_printbuf_init(&pb, buf, buf_size);
686
687 spin_lock_bh(&bc_lock);
688
689 tipc_printf(&pb, "Link <%s>\n"
690 " Window:%u packets\n",
691 bcl->name, bcl->queue_limit[0]);
692 tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
693 bcl->stats.recv_info,
694 bcl->stats.recv_fragments,
695 bcl->stats.recv_fragmented,
696 bcl->stats.recv_bundles,
697 bcl->stats.recv_bundled);
698 tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
699 bcl->stats.sent_info,
700 bcl->stats.sent_fragments,
701 bcl->stats.sent_fragmented,
702 bcl->stats.sent_bundles,
703 bcl->stats.sent_bundled);
704 tipc_printf(&pb, " RX naks:%u defs:%u dups:%u\n",
705 bcl->stats.recv_nacks,
706 bcl->stats.deferred_recv,
707 bcl->stats.duplicates);
708 tipc_printf(&pb, " TX naks:%u acks:%u dups:%u\n",
709 bcl->stats.sent_nacks,
710 bcl->stats.sent_acks,
711 bcl->stats.retransmitted);
712 tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n",
713 bcl->stats.bearer_congs,
714 bcl->stats.link_congs,
715 bcl->stats.max_queue_sz,
716 bcl->stats.queue_sz_counts
717 ? (bcl->stats.accu_queue_sz / bcl->stats.queue_sz_counts)
718 : 0);
719
720 spin_unlock_bh(&bc_lock);
721 return tipc_printbuf_validate(&pb);
722}
723
724int tipc_bclink_reset_stats(void)
725{
726 if (!bcl)
727 return -ENOPROTOOPT;
728
729 spin_lock_bh(&bc_lock);
730 memset(&bcl->stats, 0, sizeof(bcl->stats));
731 spin_unlock_bh(&bc_lock);
732 return TIPC_OK;
733}
734
735int tipc_bclink_set_queue_limits(u32 limit)
736{
737 if (!bcl)
738 return -ENOPROTOOPT;
739 if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
740 return -EINVAL;
741
742 spin_lock_bh(&bc_lock);
743 tipc_link_set_queue_limits(bcl, limit);
744 spin_unlock_bh(&bc_lock);
745 return TIPC_OK;
746}
747
748int tipc_bclink_init(void)
749{
750 bcbearer = kmalloc(sizeof(*bcbearer), GFP_ATOMIC);
751 bclink = kmalloc(sizeof(*bclink), GFP_ATOMIC);
752 if (!bcbearer || !bclink) {
753 nomem:
754 warn("Memory squeeze; Failed to create multicast link\n");
755 kfree(bcbearer);
756 bcbearer = NULL;
757 kfree(bclink);
758 bclink = NULL;
759 return -ENOMEM;
760 }
761
762 memset(bcbearer, 0, sizeof(struct bcbearer));
763 INIT_LIST_HEAD(&bcbearer->bearer.cong_links);
764 bcbearer->bearer.media = &bcbearer->media;
765 bcbearer->media.send_msg = tipc_bcbearer_send;
766 sprintf(bcbearer->media.name, "tipc-multicast");
767
768 bcl = &bclink->link;
769 memset(bclink, 0, sizeof(struct bclink));
770 INIT_LIST_HEAD(&bcl->waiting_ports);
771 bcl->next_out_no = 1;
772 bclink->node.lock = SPIN_LOCK_UNLOCKED;
773 bcl->owner = &bclink->node;
774 bcl->max_pkt = MAX_PKT_DEFAULT_MCAST;
775 tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
776 bcl->b_ptr = &bcbearer->bearer;
777 bcl->state = WORKING_WORKING;
778 sprintf(bcl->name, tipc_bclink_name);
779
780 if (BCLINK_LOG_BUF_SIZE) {
781 char *pb = kmalloc(BCLINK_LOG_BUF_SIZE, GFP_ATOMIC);
782
783 if (!pb)
784 goto nomem;
785 tipc_printbuf_init(&bcl->print_buf, pb, BCLINK_LOG_BUF_SIZE);
786 }
787
788 return TIPC_OK;
789}
790
791void tipc_bclink_stop(void)
792{
793 spin_lock_bh(&bc_lock);
794 if (bcbearer) {
795 tipc_link_stop(bcl);
796 if (BCLINK_LOG_BUF_SIZE)
797 kfree(bcl->print_buf.buf);
798 bcl = NULL;
799 kfree(bclink);
800 bclink = NULL;
801 kfree(bcbearer);
802 bcbearer = NULL;
803 }
804 spin_unlock_bh(&bc_lock);
805}
806
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
new file mode 100644
index 000000000000..0e3be2ab3307
--- /dev/null
+++ b/net/tipc/bcast.h
@@ -0,0 +1,223 @@
1/*
2 * net/tipc/bcast.h: Include file for TIPC broadcast code
3 *
4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_BCAST_H
38#define _TIPC_BCAST_H
39
40#define MAX_NODES 4096
41#define WSIZE 32
42
43/**
44 * struct node_map - set of node identifiers
45 * @count: # of nodes in set
46 * @map: bitmap of node identifiers that are in the set
47 */
48
49struct node_map {
50 u32 count;
51 u32 map[MAX_NODES / WSIZE];
52};
53
54
55#define PLSIZE 32
56
57/**
58 * struct port_list - set of node local destination ports
59 * @count: # of ports in set (only valid for first entry in list)
60 * @next: pointer to next entry in list
61 * @ports: array of port references
62 */
63
64struct port_list {
65 int count;
66 struct port_list *next;
67 u32 ports[PLSIZE];
68};
69
70
71struct node;
72
73extern char tipc_bclink_name[];
74
75
76/**
77 * nmap_get - determine if node exists in a node map
78 */
79
80static inline int tipc_nmap_get(struct node_map *nm_ptr, u32 node)
81{
82 int n = tipc_node(node);
83 int w = n / WSIZE;
84 int b = n % WSIZE;
85
86 return nm_ptr->map[w] & (1 << b);
87}
88
89/**
90 * nmap_add - add a node to a node map
91 */
92
93static inline void tipc_nmap_add(struct node_map *nm_ptr, u32 node)
94{
95 int n = tipc_node(node);
96 int w = n / WSIZE;
97 u32 mask = (1 << (n % WSIZE));
98
99 if ((nm_ptr->map[w] & mask) == 0) {
100 nm_ptr->count++;
101 nm_ptr->map[w] |= mask;
102 }
103}
104
105/**
106 * nmap_remove - remove a node from a node map
107 */
108
109static inline void tipc_nmap_remove(struct node_map *nm_ptr, u32 node)
110{
111 int n = tipc_node(node);
112 int w = n / WSIZE;
113 u32 mask = (1 << (n % WSIZE));
114
115 if ((nm_ptr->map[w] & mask) != 0) {
116 nm_ptr->map[w] &= ~mask;
117 nm_ptr->count--;
118 }
119}
120
121/**
122 * nmap_equal - test for equality of node maps
123 */
124
125static inline int tipc_nmap_equal(struct node_map *nm_a, struct node_map *nm_b)
126{
127 return !memcmp(nm_a, nm_b, sizeof(*nm_a));
128}
129
130/**
131 * nmap_diff - find differences between node maps
132 * @nm_a: input node map A
133 * @nm_b: input node map B
134 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
135 */
136
137static inline void tipc_nmap_diff(struct node_map *nm_a, struct node_map *nm_b,
138 struct node_map *nm_diff)
139{
140 int stop = sizeof(nm_a->map) / sizeof(u32);
141 int w;
142 int b;
143 u32 map;
144
145 memset(nm_diff, 0, sizeof(*nm_diff));
146 for (w = 0; w < stop; w++) {
147 map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
148 nm_diff->map[w] = map;
149 if (map != 0) {
150 for (b = 0 ; b < WSIZE; b++) {
151 if (map & (1 << b))
152 nm_diff->count++;
153 }
154 }
155 }
156}
157
158/**
159 * port_list_add - add a port to a port list, ensuring no duplicates
160 */
161
162static inline void tipc_port_list_add(struct port_list *pl_ptr, u32 port)
163{
164 struct port_list *item = pl_ptr;
165 int i;
166 int item_sz = PLSIZE;
167 int cnt = pl_ptr->count;
168
169 for (; ; cnt -= item_sz, item = item->next) {
170 if (cnt < PLSIZE)
171 item_sz = cnt;
172 for (i = 0; i < item_sz; i++)
173 if (item->ports[i] == port)
174 return;
175 if (i < PLSIZE) {
176 item->ports[i] = port;
177 pl_ptr->count++;
178 return;
179 }
180 if (!item->next) {
181 item->next = kmalloc(sizeof(*item), GFP_ATOMIC);
182 if (!item->next) {
183 warn("Memory squeeze: multicast destination port list is incomplete\n");
184 return;
185 }
186 item->next->next = NULL;
187 }
188 }
189}
190
191/**
192 * port_list_free - free dynamically created entries in port_list chain
193 *
194 * Note: First item is on stack, so it doesn't need to be released
195 */
196
197static inline void tipc_port_list_free(struct port_list *pl_ptr)
198{
199 struct port_list *item;
200 struct port_list *next;
201
202 for (item = pl_ptr->next; item; item = next) {
203 next = item->next;
204 kfree(item);
205 }
206}
207
208
209int tipc_bclink_init(void);
210void tipc_bclink_stop(void);
211void tipc_bclink_acknowledge(struct node *n_ptr, u32 acked);
212int tipc_bclink_send_msg(struct sk_buff *buf);
213void tipc_bclink_recv_pkt(struct sk_buff *buf);
214u32 tipc_bclink_get_last_sent(void);
215u32 tipc_bclink_acks_missing(struct node *n_ptr);
216void tipc_bclink_check_gap(struct node *n_ptr, u32 seqno);
217int tipc_bclink_stats(char *stats_buf, const u32 buf_size);
218int tipc_bclink_reset_stats(void);
219int tipc_bclink_set_queue_limits(u32 limit);
220void tipc_bcbearer_sort(void);
221void tipc_bcbearer_push(void);
222
223#endif
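Taken together, a node_map is a 4096-bit membership set with a cached population count; the broadcast code in bcast.c walks its bearer pairs, subtracting each bearer's reachable set until nothing remains. A rough usage sketch (tipc_addr() again stands in for the address constructor; tipc_cltr_bcast_nodes is the cluster-wide map referenced from bcast.c):

  struct node_map reached = { 0 };              /* nodes one bearer can reach  */
  struct node_map todo, remaining;

  tipc_nmap_add(&reached, tipc_addr(1, 1, 5));  /* mark <1.1.5> as reachable   */
  todo = tipc_cltr_bcast_nodes;                 /* all b'cast-capable nodes    */
  tipc_nmap_diff(&todo, &reached, &remaining);  /* nodes this bearer misses    */
  /* remaining.count == 0  =>  every destination has been covered              */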
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
new file mode 100644
index 000000000000..64dcb0f3a8b2
--- /dev/null
+++ b/net/tipc/bearer.c
@@ -0,0 +1,699 @@
1/*
2 * net/tipc/bearer.c: TIPC bearer code
3 *
4 * Copyright (c) 1996-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "config.h"
39#include "dbg.h"
40#include "bearer.h"
41#include "link.h"
42#include "port.h"
43#include "discover.h"
44#include "bcast.h"
45
46#define MAX_ADDR_STR 32
47
48static struct media *media_list = 0;
49static u32 media_count = 0;
50
51struct bearer *tipc_bearers = 0;
52
53/**
54 * media_name_valid - validate media name
55 *
56 * Returns 1 if media name is valid, otherwise 0.
57 */
58
59static int media_name_valid(const char *name)
60{
61 u32 len;
62
63 len = strlen(name);
64 if ((len + 1) > TIPC_MAX_MEDIA_NAME)
65 return 0;
66 return (strspn(name, tipc_alphabet) == len);
67}
68
69/**
70 * media_find - locates specified media object by name
71 */
72
73static struct media *media_find(const char *name)
74{
75 struct media *m_ptr;
76 u32 i;
77
78 for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
79 if (!strcmp(m_ptr->name, name))
80 return m_ptr;
81 }
82 return 0;
83}
84
85/**
86 * tipc_register_media - register a media type
87 *
88 * Bearers for this media type must be activated separately at a later stage.
89 */
90
91int tipc_register_media(u32 media_type,
92 char *name,
93 int (*enable)(struct tipc_bearer *),
94 void (*disable)(struct tipc_bearer *),
95 int (*send_msg)(struct sk_buff *,
96 struct tipc_bearer *,
97 struct tipc_media_addr *),
98 char *(*addr2str)(struct tipc_media_addr *a,
99 char *str_buf, int str_size),
100 struct tipc_media_addr *bcast_addr,
101 const u32 bearer_priority,
102 const u32 link_tolerance, /* [ms] */
103 const u32 send_window_limit)
104{
105 struct media *m_ptr;
106 u32 media_id;
107 u32 i;
108 int res = -EINVAL;
109
110 write_lock_bh(&tipc_net_lock);
111 if (!media_list)
112 goto exit;
113
114 if (!media_name_valid(name)) {
115 warn("Media registration error: illegal name <%s>\n", name);
116 goto exit;
117 }
118 if (!bcast_addr) {
119 warn("Media registration error: no broadcast address supplied\n");
120 goto exit;
121 }
122 if ((bearer_priority < TIPC_MIN_LINK_PRI) ||
123 (bearer_priority > TIPC_MAX_LINK_PRI)) {
124 warn("Media registration error: priority %u\n", bearer_priority);
125 goto exit;
126 }
127 if ((link_tolerance < TIPC_MIN_LINK_TOL) ||
128 (link_tolerance > TIPC_MAX_LINK_TOL)) {
129 warn("Media registration error: tolerance %u\n", link_tolerance);
130 goto exit;
131 }
132
133 media_id = media_count++;
134 if (media_id >= MAX_MEDIA) {
135 warn("Attempt to register more than %u media\n", MAX_MEDIA);
136 media_count--;
137 goto exit;
138 }
139 for (i = 0; i < media_id; i++) {
140 if (media_list[i].type_id == media_type) {
141 warn("Attempt to register second media with type %u\n",
142 media_type);
143 media_count--;
144 goto exit;
145 }
146 if (!strcmp(name, media_list[i].name)) {
147 warn("Attempt to re-register media name <%s>\n", name);
148 media_count--;
149 goto exit;
150 }
151 }
152
153 m_ptr = &media_list[media_id];
154 m_ptr->type_id = media_type;
155 m_ptr->send_msg = send_msg;
156 m_ptr->enable_bearer = enable;
157 m_ptr->disable_bearer = disable;
158 m_ptr->addr2str = addr2str;
159 memcpy(&m_ptr->bcast_addr, bcast_addr, sizeof(*bcast_addr));
160 m_ptr->bcast = 1;
161 strcpy(m_ptr->name, name);
162 m_ptr->priority = bearer_priority;
163 m_ptr->tolerance = link_tolerance;
164 m_ptr->window = send_window_limit;
165 dbg("Media <%s> registered\n", name);
166 res = 0;
167exit:
168 write_unlock_bh(&tipc_net_lock);
169 return res;
170}
171
172/**
173 * tipc_media_addr_printf - record media address in print buffer
174 */
175
176void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
177{
178 struct media *m_ptr;
179 u32 media_type;
180 u32 i;
181
182 media_type = ntohl(a->type);
183 for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
184 if (m_ptr->type_id == media_type)
185 break;
186 }
187
188 if ((i < media_count) && (m_ptr->addr2str != NULL)) {
189 char addr_str[MAX_ADDR_STR];
190
191 tipc_printf(pb, "%s(%s) ", m_ptr->name,
192 m_ptr->addr2str(a, addr_str, sizeof(addr_str)));
193 } else {
194 unchar *addr = (unchar *)&a->dev_addr;
195
196 tipc_printf(pb, "UNKNOWN(%u):", media_type);
197 for (i = 0; i < (sizeof(*a) - sizeof(a->type)); i++) {
198 tipc_printf(pb, "%02x ", addr[i]);
199 }
200 }
201}
202
203/**
204 * tipc_media_get_names - record names of registered media in buffer
205 */
206
207struct sk_buff *tipc_media_get_names(void)
208{
209 struct sk_buff *buf;
210 struct media *m_ptr;
211 int i;
212
213 buf = tipc_cfg_reply_alloc(MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME));
214 if (!buf)
215 return NULL;
216
217 read_lock_bh(&tipc_net_lock);
218 for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
219 tipc_cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME, m_ptr->name,
220 strlen(m_ptr->name) + 1);
221 }
222 read_unlock_bh(&tipc_net_lock);
223 return buf;
224}
225
226/**
227 * bearer_name_validate - validate & (optionally) deconstruct bearer name
228 * @name - ptr to bearer name string
229 * @name_parts - ptr to area for bearer name components (or NULL if not needed)
230 *
231 * Returns 1 if bearer name is valid, otherwise 0.
232 */
233
234static int bearer_name_validate(const char *name,
235 struct bearer_name *name_parts)
236{
237 char name_copy[TIPC_MAX_BEARER_NAME];
238 char *media_name;
239 char *if_name;
240 u32 media_len;
241 u32 if_len;
242
243 /* copy bearer name & ensure length is OK */
244
245 name_copy[TIPC_MAX_BEARER_NAME - 1] = 0;
246 /* need above in case non-Posix strncpy() doesn't pad with nulls */
247 strncpy(name_copy, name, TIPC_MAX_BEARER_NAME);
248 if (name_copy[TIPC_MAX_BEARER_NAME - 1] != 0)
249 return 0;
250
251 /* ensure all component parts of bearer name are present */
252
253 media_name = name_copy;
254 if ((if_name = strchr(media_name, ':')) == NULL)
255 return 0;
256 *(if_name++) = 0;
257 media_len = if_name - media_name;
258 if_len = strlen(if_name) + 1;
259
260 /* validate component parts of bearer name */
261
262 if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) ||
263 (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME) ||
264 (strspn(media_name, tipc_alphabet) != (media_len - 1)) ||
265 (strspn(if_name, tipc_alphabet) != (if_len - 1)))
266 return 0;
267
268 /* return bearer name components, if necessary */
269
270 if (name_parts) {
271 strcpy(name_parts->media_name, media_name);
272 strcpy(name_parts->if_name, if_name);
273 }
274 return 1;
275}
276
277/**
278 * bearer_find - locates bearer object with matching bearer name
279 */
280
281static struct bearer *bearer_find(const char *name)
282{
283 struct bearer *b_ptr;
284 u32 i;
285
286 for (i = 0, b_ptr = tipc_bearers; i < MAX_BEARERS; i++, b_ptr++) {
287 if (b_ptr->active && (!strcmp(b_ptr->publ.name, name)))
288 return b_ptr;
289 }
290 return 0;
291}
292
293/**
294 * tipc_bearer_find_interface - locates bearer object with matching interface name
295 */
296
297struct bearer *tipc_bearer_find_interface(const char *if_name)
298{
299 struct bearer *b_ptr;
300 char *b_if_name;
301 u32 i;
302
303 for (i = 0, b_ptr = tipc_bearers; i < MAX_BEARERS; i++, b_ptr++) {
304 if (!b_ptr->active)
305 continue;
306 b_if_name = strchr(b_ptr->publ.name, ':') + 1;
307 if (!strcmp(b_if_name, if_name))
308 return b_ptr;
309 }
310 return 0;
311}
312
313/**
314 * tipc_bearer_get_names - record names of bearers in buffer
315 */
316
317struct sk_buff *tipc_bearer_get_names(void)
318{
319 struct sk_buff *buf;
320 struct media *m_ptr;
321 struct bearer *b_ptr;
322 int i, j;
323
324 buf = tipc_cfg_reply_alloc(MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME));
325 if (!buf)
326 return NULL;
327
328 read_lock_bh(&tipc_net_lock);
329 for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
330 for (j = 0; j < MAX_BEARERS; j++) {
331 b_ptr = &tipc_bearers[j];
332 if (b_ptr->active && (b_ptr->media == m_ptr)) {
333 tipc_cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME,
334 b_ptr->publ.name,
335 strlen(b_ptr->publ.name) + 1);
336 }
337 }
338 }
339 read_unlock_bh(&tipc_net_lock);
340 return buf;
341}
342
343void tipc_bearer_add_dest(struct bearer *b_ptr, u32 dest)
344{
345 tipc_nmap_add(&b_ptr->nodes, dest);
346 tipc_disc_update_link_req(b_ptr->link_req);
347 tipc_bcbearer_sort();
348}
349
350void tipc_bearer_remove_dest(struct bearer *b_ptr, u32 dest)
351{
352 tipc_nmap_remove(&b_ptr->nodes, dest);
353 tipc_disc_update_link_req(b_ptr->link_req);
354 tipc_bcbearer_sort();
355}
356
357/*
358 * bearer_push(): Resolve bearer congestion. Force the waiting
359 * links to push out their unsent packets, one packet per link
360 * per iteration, until all packets are gone or congestion reoccurs.
361 * 'tipc_net_lock' is read_locked when this function is called
362 * bearer.lock must be taken before calling
363 * Returns binary true (1) or false (0)
364 */
365static int bearer_push(struct bearer *b_ptr)
366{
367 u32 res = TIPC_OK;
368 struct link *ln, *tln;
369
370 if (b_ptr->publ.blocked)
371 return 0;
372
373 while (!list_empty(&b_ptr->cong_links) && (res != PUSH_FAILED)) {
374 list_for_each_entry_safe(ln, tln, &b_ptr->cong_links, link_list) {
375 res = tipc_link_push_packet(ln);
376 if (res == PUSH_FAILED)
377 break;
378 if (res == PUSH_FINISHED)
379 list_move_tail(&ln->link_list, &b_ptr->links);
380 }
381 }
382 return list_empty(&b_ptr->cong_links);
383}
384
385void tipc_bearer_lock_push(struct bearer *b_ptr)
386{
387 int res;
388
389 spin_lock_bh(&b_ptr->publ.lock);
390 res = bearer_push(b_ptr);
391 spin_unlock_bh(&b_ptr->publ.lock);
392 if (res)
393 tipc_bcbearer_push();
394}
395
396
397/*
398 * Interrupt enabling new requests after bearer congestion or blocking:
399 * See bearer_send().
400 */
401void tipc_continue(struct tipc_bearer *tb_ptr)
402{
403 struct bearer *b_ptr = (struct bearer *)tb_ptr;
404
405 spin_lock_bh(&b_ptr->publ.lock);
406 b_ptr->continue_count++;
407 if (!list_empty(&b_ptr->cong_links))
408 tipc_k_signal((Handler)tipc_bearer_lock_push, (unsigned long)b_ptr);
409 b_ptr->publ.blocked = 0;
410 spin_unlock_bh(&b_ptr->publ.lock);
411}
412
413/*
414 * Schedule link for sending of messages after the bearer
415 * has been deblocked by 'continue()'. This method is called
416 * when somebody tries to send a message via this link while
417 * the bearer is congested. 'tipc_net_lock' is in read_lock here
418 * bearer.lock is busy
419 */
420
421static void tipc_bearer_schedule_unlocked(struct bearer *b_ptr, struct link *l_ptr)
422{
423 list_move_tail(&l_ptr->link_list, &b_ptr->cong_links);
424}
425
426/*
427 * Schedule link for sending of messages after the bearer
428 * has been deblocked by 'continue()'. This method is called
429 * when somebody tries to send a message via this link while
430 * the bearer is congested. 'tipc_net_lock' is in read_lock here,
431 * bearer.lock is free
432 */
433
434void tipc_bearer_schedule(struct bearer *b_ptr, struct link *l_ptr)
435{
436 spin_lock_bh(&b_ptr->publ.lock);
437 tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
438 spin_unlock_bh(&b_ptr->publ.lock);
439}
440
441
442/*
443 * tipc_bearer_resolve_congestion(): Check if there is bearer congestion,
444 * and if there is, try to resolve it before returning.
445 * 'tipc_net_lock' is read_locked when this function is called
446 */
447int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr)
448{
449 int res = 1;
450
451 if (list_empty(&b_ptr->cong_links))
452 return 1;
453 spin_lock_bh(&b_ptr->publ.lock);
454 if (!bearer_push(b_ptr)) {
455 tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
456 res = 0;
457 }
458 spin_unlock_bh(&b_ptr->publ.lock);
459 return res;
460}
461
462
463/**
464 * tipc_enable_bearer - enable bearer with the given name
465 */
466
467int tipc_enable_bearer(const char *name, u32 bcast_scope, u32 priority)
468{
469 struct bearer *b_ptr;
470 struct media *m_ptr;
471 struct bearer_name b_name;
472 char addr_string[16];
473 u32 bearer_id;
474 u32 with_this_prio;
475 u32 i;
476 int res = -EINVAL;
477
478 if (tipc_mode != TIPC_NET_MODE)
479 return -ENOPROTOOPT;
480
481 if (!bearer_name_validate(name, &b_name) ||
482 !tipc_addr_domain_valid(bcast_scope) ||
483 !in_scope(bcast_scope, tipc_own_addr))
484 return -EINVAL;
485
486 if ((priority < TIPC_MIN_LINK_PRI ||
487 priority > TIPC_MAX_LINK_PRI) &&
488 (priority != TIPC_MEDIA_LINK_PRI))
489 return -EINVAL;
490
491 write_lock_bh(&tipc_net_lock);
492 if (!tipc_bearers)
493 goto failed;
494
495 m_ptr = media_find(b_name.media_name);
496 if (!m_ptr) {
497 warn("No media <%s>\n", b_name.media_name);
498 goto failed;
499 }
500
501 if (priority == TIPC_MEDIA_LINK_PRI)
502 priority = m_ptr->priority;
503
504restart:
505 bearer_id = MAX_BEARERS;
506 with_this_prio = 1;
507 for (i = MAX_BEARERS; i-- != 0; ) {
508 if (!tipc_bearers[i].active) {
509 bearer_id = i;
510 continue;
511 }
512 if (!strcmp(name, tipc_bearers[i].publ.name)) {
513 warn("Bearer <%s> already enabled\n", name);
514 goto failed;
515 }
516 if ((tipc_bearers[i].priority == priority) &&
517 (++with_this_prio > 2)) {
518 if (priority-- == 0) {
519 warn("Third bearer <%s> with priority %u, unable to lower to %u\n",
520 name, priority + 1, priority);
521 goto failed;
522 }
523 warn("Third bearer <%s> with priority %u, lowering to %u\n",
524 name, priority + 1, priority);
525 goto restart;
526 }
527 }
528 if (bearer_id >= MAX_BEARERS) {
529 warn("Attempt to enable more than %d bearers\n", MAX_BEARERS);
530 goto failed;
531 }
532
533 b_ptr = &tipc_bearers[bearer_id];
534 memset(b_ptr, 0, sizeof(struct bearer));
535
536 strcpy(b_ptr->publ.name, name);
537 res = m_ptr->enable_bearer(&b_ptr->publ);
538 if (res) {
539 warn("Failed to enable bearer <%s>\n", name);
540 goto failed;
541 }
542
543 b_ptr->identity = bearer_id;
544 b_ptr->media = m_ptr;
545 b_ptr->net_plane = bearer_id + 'A';
546 b_ptr->active = 1;
547 b_ptr->detect_scope = bcast_scope;
548 b_ptr->priority = priority;
549 INIT_LIST_HEAD(&b_ptr->cong_links);
550 INIT_LIST_HEAD(&b_ptr->links);
551 if (m_ptr->bcast) {
552 b_ptr->link_req = tipc_disc_init_link_req(b_ptr, &m_ptr->bcast_addr,
553 bcast_scope, 2);
554 }
555 b_ptr->publ.lock = SPIN_LOCK_UNLOCKED;
556 write_unlock_bh(&tipc_net_lock);
557 info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
558 name, addr_string_fill(addr_string, bcast_scope), priority);
559 return 0;
560failed:
561 write_unlock_bh(&tipc_net_lock);
562 return res;
563}
564
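The restart loop above enforces that at most two enabled bearers may share a link priority; a third request at the same priority is retried one level lower (with a warning) rather than being rejected outright. A minimal illustrative sketch of that behaviour, assuming a node already in TIPC_NET_MODE and hypothetical Ethernet interface names (not part of this patch; return values ignored for brevity):

static void demo_enable_three_bearers(u32 bcast_scope)
{
	tipc_enable_bearer("eth:eth0", bcast_scope, 5);	/* enabled at priority 5 */
	tipc_enable_bearer("eth:eth1", bcast_scope, 5);	/* enabled at priority 5 */
	tipc_enable_bearer("eth:eth2", bcast_scope, 5);	/* two bearers already use
							 * priority 5, so this one is
							 * lowered to 4 with a warning */
}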
565/**
566 * tipc_block_bearer(): Block the bearer with the given name,
567 * and reset all its links
568 */
569
570int tipc_block_bearer(const char *name)
571{
572 struct bearer *b_ptr = 0;
573 struct link *l_ptr;
574 struct link *temp_l_ptr;
575
576 if (tipc_mode != TIPC_NET_MODE)
577 return -ENOPROTOOPT;
578
579 read_lock_bh(&tipc_net_lock);
580 b_ptr = bearer_find(name);
581 if (!b_ptr) {
582 warn("Attempt to block unknown bearer <%s>\n", name);
583 read_unlock_bh(&tipc_net_lock);
584 return -EINVAL;
585 }
586
587 spin_lock_bh(&b_ptr->publ.lock);
588 b_ptr->publ.blocked = 1;
589 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
590 struct node *n_ptr = l_ptr->owner;
591
592 spin_lock_bh(&n_ptr->lock);
593 tipc_link_reset(l_ptr);
594 spin_unlock_bh(&n_ptr->lock);
595 }
596 spin_unlock_bh(&b_ptr->publ.lock);
597 read_unlock_bh(&tipc_net_lock);
598 info("Blocked bearer <%s>\n", name);
599 return TIPC_OK;
600}
601
602/**
603 * bearer_disable - disable a bearer and delete its associated links
604 *
605 * Note: This routine assumes caller holds tipc_net_lock.
606 */
607
608static int bearer_disable(const char *name)
609{
610 struct bearer *b_ptr;
611 struct link *l_ptr;
612 struct link *temp_l_ptr;
613
614 if (tipc_mode != TIPC_NET_MODE)
615 return -ENOPROTOOPT;
616
617 b_ptr = bearer_find(name);
618 if (!b_ptr) {
619 warn("Attempt to disable unknown bearer <%s>\n", name);
620 return -EINVAL;
621 }
622
623 tipc_disc_stop_link_req(b_ptr->link_req);
624 spin_lock_bh(&b_ptr->publ.lock);
625 b_ptr->link_req = NULL;
626 b_ptr->publ.blocked = 1;
627 if (b_ptr->media->disable_bearer) {
628 spin_unlock_bh(&b_ptr->publ.lock);
629 write_unlock_bh(&tipc_net_lock);
630 b_ptr->media->disable_bearer(&b_ptr->publ);
631 write_lock_bh(&tipc_net_lock);
632 spin_lock_bh(&b_ptr->publ.lock);
633 }
634 list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
635 tipc_link_delete(l_ptr);
636 }
637 spin_unlock_bh(&b_ptr->publ.lock);
638 info("Disabled bearer <%s>\n", name);
639 memset(b_ptr, 0, sizeof(struct bearer));
640 return TIPC_OK;
641}
642
643int tipc_disable_bearer(const char *name)
644{
645 int res;
646
647 write_lock_bh(&tipc_net_lock);
648 res = bearer_disable(name);
649 write_unlock_bh(&tipc_net_lock);
650 return res;
651}
652
653
654
655int tipc_bearer_init(void)
656{
657 int res;
658
659 write_lock_bh(&tipc_net_lock);
660 tipc_bearers = kmalloc(MAX_BEARERS * sizeof(struct bearer), GFP_ATOMIC);
661 media_list = kmalloc(MAX_MEDIA * sizeof(struct media), GFP_ATOMIC);
662 if (tipc_bearers && media_list) {
663 memset(tipc_bearers, 0, MAX_BEARERS * sizeof(struct bearer));
664 memset(media_list, 0, MAX_MEDIA * sizeof(struct media));
665 res = TIPC_OK;
666 } else {
667 kfree(tipc_bearers);
668 kfree(media_list);
669 tipc_bearers = 0;
670 media_list = 0;
671 res = -ENOMEM;
672 }
673 write_unlock_bh(&tipc_net_lock);
674 return res;
675}
676
677void tipc_bearer_stop(void)
678{
679 u32 i;
680
681 if (!tipc_bearers)
682 return;
683
684 for (i = 0; i < MAX_BEARERS; i++) {
685 if (tipc_bearers[i].active)
686 tipc_bearers[i].publ.blocked = 1;
687 }
688 for (i = 0; i < MAX_BEARERS; i++) {
689 if (tipc_bearers[i].active)
690 bearer_disable(tipc_bearers[i].publ.name);
691 }
692 kfree(tipc_bearers);
693 kfree(media_list);
694 tipc_bearers = 0;
695 media_list = 0;
696 media_count = 0;
697}
698
699
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
new file mode 100644
index 000000000000..c4e7c1c3655b
--- /dev/null
+++ b/net/tipc/bearer.h
@@ -0,0 +1,170 @@
1/*
2 * net/tipc/bearer.h: Include file for TIPC bearer code
3 *
4 * Copyright (c) 1996-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_BEARER_H
38#define _TIPC_BEARER_H
39
40#include "core.h"
41#include "bcast.h"
42
43#define MAX_BEARERS 8
44#define MAX_MEDIA 4
45
46
47/**
48 * struct media - TIPC media information available to internal users
49 * @send_msg: routine which handles buffer transmission
50 * @enable_bearer: routine which enables a bearer
51 * @disable_bearer: routine which disables a bearer
52 * @addr2str: routine which converts bearer's address to string form
53 * @bcast_addr: media address used in broadcasting
54 * @bcast: non-zero if media supports broadcasting [currently mandatory]
55 * @priority: default link (and bearer) priority
56 * @tolerance: default time (in ms) before declaring link failure
57 * @window: default window (in packets) before declaring link congestion
58 * @type_id: TIPC media identifier [defined in tipc_bearer.h]
59 * @name: media name
60 */
61
62struct media {
63 int (*send_msg)(struct sk_buff *buf,
64 struct tipc_bearer *b_ptr,
65 struct tipc_media_addr *dest);
66 int (*enable_bearer)(struct tipc_bearer *b_ptr);
67 void (*disable_bearer)(struct tipc_bearer *b_ptr);
68 char *(*addr2str)(struct tipc_media_addr *a,
69 char *str_buf, int str_size);
70 struct tipc_media_addr bcast_addr;
71 int bcast;
72 u32 priority;
73 u32 tolerance;
74 u32 window;
75 u32 type_id;
76 char name[TIPC_MAX_MEDIA_NAME];
77};
78
79/**
80 * struct bearer - TIPC bearer information available to internal users
81 * @publ: bearer information available to privileged users
82 * @media: ptr to media structure associated with bearer
83 * @priority: default link priority for bearer
84 * @detect_scope: network address mask used during automatic link creation
85 * @identity: array index of this bearer within TIPC bearer array
86 * @link_req: ptr to (optional) structure making periodic link setup requests
87 * @links: list of non-congested links associated with bearer
88 * @cong_links: list of congested links associated with bearer
89 * @continue_count: # of times bearer has resumed after congestion or blocking
90 * @active: non-zero if bearer structure represents an enabled bearer
91 * @net_plane: network plane ('A' through 'H') currently associated with bearer
92 * @nodes: indicates which nodes in cluster can be reached through bearer
93 */
94
95struct bearer {
96 struct tipc_bearer publ;
97 struct media *media;
98 u32 priority;
99 u32 detect_scope;
100 u32 identity;
101 struct link_req *link_req;
102 struct list_head links;
103 struct list_head cong_links;
104 u32 continue_count;
105 int active;
106 char net_plane;
107 struct node_map nodes;
108};
109
110struct bearer_name {
111 char media_name[TIPC_MAX_MEDIA_NAME];
112 char if_name[TIPC_MAX_IF_NAME];
113};
114
115struct link;
116
117extern struct bearer *tipc_bearers;
118
119void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a);
120struct sk_buff *tipc_media_get_names(void);
121
122struct sk_buff *tipc_bearer_get_names(void);
123void tipc_bearer_add_dest(struct bearer *b_ptr, u32 dest);
124void tipc_bearer_remove_dest(struct bearer *b_ptr, u32 dest);
125void tipc_bearer_schedule(struct bearer *b_ptr, struct link *l_ptr);
126struct bearer *tipc_bearer_find_interface(const char *if_name);
127int tipc_bearer_resolve_congestion(struct bearer *b_ptr, struct link *l_ptr);
128int tipc_bearer_init(void);
129void tipc_bearer_stop(void);
130void tipc_bearer_lock_push(struct bearer *b_ptr);
131
132
133/**
134 * tipc_bearer_send - sends buffer to destination over bearer
135 *
136 * Returns true (1) if successful, or false (0) if unable to send
137 *
138 * IMPORTANT:
139 * The media send routine must not alter the buffer being passed in
140 * as it may be needed for later retransmission!
141 *
142 * If the media send routine returns a non-zero value (indicating that
143 * it was unable to send the buffer), it must:
144 * 1) mark the bearer as blocked,
145 * 2) call tipc_continue() once the bearer is able to send again.
146 * Media types that are unable to meet these two criteria must ensure their
147 * send routine always returns success -- even if the buffer was not sent --
148 * and let TIPC's link code deal with the undelivered message.
149 */
150
151static inline int tipc_bearer_send(struct bearer *b_ptr, struct sk_buff *buf,
152 struct tipc_media_addr *dest)
153{
154 return !b_ptr->media->send_msg(buf, &b_ptr->publ, dest);
155}
156
157/**
158 * tipc_bearer_congested - determines if bearer is currently congested
159 */
160
161static inline int tipc_bearer_congested(struct bearer *b_ptr, struct link *l_ptr)
162{
163 if (unlikely(b_ptr->publ.blocked))
164 return 1;
165 if (likely(list_empty(&b_ptr->cong_links)))
166 return 0;
167 return !tipc_bearer_resolve_congestion(b_ptr, l_ptr);
168}
169
170#endif
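The send contract documented above is easiest to see from the media driver's side. Below is a minimal illustrative sketch (the demo_* names are hypothetical and not part of this patch), assuming the tipc_bearer and tipc_continue() declarations from the TIPC headers: the send routine leaves the buffer untouched, marks the bearer blocked when it cannot transmit, and the driver's resume path later calls tipc_continue() once sending is possible again.

static int demo_send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
			 struct tipc_media_addr *dest)
{
	/* Never modify 'buf'; TIPC may need it for retransmission. */
	if (!demo_hw_can_transmit()) {		/* hypothetical driver check */
		tb_ptr->blocked = 1;		/* 1) mark bearer as blocked */
		return 1;			/* non-zero: buffer not sent */
	}
	demo_hw_transmit(buf, dest);		/* hypothetical transmit call */
	return 0;				/* zero: buffer sent */
}

/* Driver's transmit-resume path (e.g. a TX-done interrupt handler): */
static void demo_tx_resumed(struct tipc_bearer *tb_ptr)
{
	tipc_continue(tb_ptr);			/* 2) re-enable the bearer */
}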
diff --git a/net/tipc/cluster.c b/net/tipc/cluster.c
new file mode 100644
index 000000000000..ab974ca19371
--- /dev/null
+++ b/net/tipc/cluster.c
@@ -0,0 +1,576 @@
1/*
2 * net/tipc/cluster.c: TIPC cluster management routines
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "cluster.h"
39#include "addr.h"
40#include "node_subscr.h"
41#include "link.h"
42#include "node.h"
43#include "net.h"
44#include "msg.h"
45#include "bearer.h"
46
47void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf,
48 u32 lower, u32 upper);
49struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest);
50
51struct node **tipc_local_nodes = 0;
52struct node_map tipc_cltr_bcast_nodes = {0,{0,}};
53u32 tipc_highest_allowed_slave = 0;
54
55struct cluster *tipc_cltr_create(u32 addr)
56{
57 struct _zone *z_ptr;
58 struct cluster *c_ptr;
59 int max_nodes;
60 int alloc;
61
62 c_ptr = (struct cluster *)kmalloc(sizeof(*c_ptr), GFP_ATOMIC);
63 if (c_ptr == NULL)
64 return 0;
65 memset(c_ptr, 0, sizeof(*c_ptr));
66
67 c_ptr->addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
68 if (in_own_cluster(addr))
69 max_nodes = LOWEST_SLAVE + tipc_max_slaves;
70 else
71 max_nodes = tipc_max_nodes + 1;
72 alloc = sizeof(void *) * (max_nodes + 1);
73 c_ptr->nodes = (struct node **)kmalloc(alloc, GFP_ATOMIC);
74 if (c_ptr->nodes == NULL) {
75 kfree(c_ptr);
76 return 0;
77 }
78 memset(c_ptr->nodes, 0, alloc);
79 if (in_own_cluster(addr))
80 tipc_local_nodes = c_ptr->nodes;
81 c_ptr->highest_slave = LOWEST_SLAVE - 1;
82 c_ptr->highest_node = 0;
83
84 z_ptr = tipc_zone_find(tipc_zone(addr));
85 if (z_ptr == NULL) {
86 z_ptr = tipc_zone_create(addr);
87 }
88 if (z_ptr != NULL) {
89 tipc_zone_attach_cluster(z_ptr, c_ptr);
90 c_ptr->owner = z_ptr;
91 }
92 else {
93 kfree(c_ptr);
94 c_ptr = 0;
95 }
96
97 return c_ptr;
98}
99
100void tipc_cltr_delete(struct cluster *c_ptr)
101{
102 u32 n_num;
103
104 if (!c_ptr)
105 return;
106 for (n_num = 1; n_num <= c_ptr->highest_node; n_num++) {
107 tipc_node_delete(c_ptr->nodes[n_num]);
108 }
109 for (n_num = LOWEST_SLAVE; n_num <= c_ptr->highest_slave; n_num++) {
110 tipc_node_delete(c_ptr->nodes[n_num]);
111 }
112 kfree(c_ptr->nodes);
113 kfree(c_ptr);
114}
115
116u32 tipc_cltr_next_node(struct cluster *c_ptr, u32 addr)
117{
118 struct node *n_ptr;
119 u32 n_num = tipc_node(addr) + 1;
120
121 if (!c_ptr)
122 return addr;
123 for (; n_num <= c_ptr->highest_node; n_num++) {
124 n_ptr = c_ptr->nodes[n_num];
125 if (n_ptr && tipc_node_has_active_links(n_ptr))
126 return n_ptr->addr;
127 }
128 for (n_num = 1; n_num < tipc_node(addr); n_num++) {
129 n_ptr = c_ptr->nodes[n_num];
130 if (n_ptr && tipc_node_has_active_links(n_ptr))
131 return n_ptr->addr;
132 }
133 return 0;
134}
135
136void tipc_cltr_attach_node(struct cluster *c_ptr, struct node *n_ptr)
137{
138 u32 n_num = tipc_node(n_ptr->addr);
139 u32 max_n_num = tipc_max_nodes;
140
141 if (in_own_cluster(n_ptr->addr))
142 max_n_num = tipc_highest_allowed_slave;
143 assert(n_num > 0);
144 assert(n_num <= max_n_num);
145 assert(c_ptr->nodes[n_num] == 0);
146 c_ptr->nodes[n_num] = n_ptr;
147 if (n_num > c_ptr->highest_node)
148 c_ptr->highest_node = n_num;
149}
150
151/**
152 * tipc_cltr_select_router - select router to a cluster
153 *
154 * Uses deterministic and fair algorithm.
155 */
156
157u32 tipc_cltr_select_router(struct cluster *c_ptr, u32 ref)
158{
159 u32 n_num;
160 u32 ulim = c_ptr->highest_node;
161 u32 mask;
162 u32 tstart;
163
164 assert(!in_own_cluster(c_ptr->addr));
165 if (!ulim)
166 return 0;
167
168 /* Start entry must be random */
169 mask = tipc_max_nodes;
170 while (mask > ulim)
171 mask >>= 1;
172 tstart = ref & mask;
173 n_num = tstart;
174
175 /* Lookup upwards with wrap-around */
176 do {
177 if (tipc_node_is_up(c_ptr->nodes[n_num]))
178 break;
179 } while (++n_num <= ulim);
180 if (n_num > ulim) {
181 n_num = 1;
182 do {
183 if (tipc_node_is_up(c_ptr->nodes[n_num]))
184 break;
185 } while (++n_num < tstart);
186 if (n_num == tstart)
187 return 0;
188 }
189 assert(n_num <= ulim);
190 return tipc_node_select_router(c_ptr->nodes[n_num], ref);
191}
192
193/**
194 * tipc_cltr_select_node - select destination node within a remote cluster
195 *
196 * Uses deterministic and fair algorithm.
197 */
198
199struct node *tipc_cltr_select_node(struct cluster *c_ptr, u32 selector)
200{
201 u32 n_num;
202 u32 mask = tipc_max_nodes;
203 u32 start_entry;
204
205 assert(!in_own_cluster(c_ptr->addr));
206 if (!c_ptr->highest_node)
207 return 0;
208
209 /* Start entry must be random */
210 while (mask > c_ptr->highest_node) {
211 mask >>= 1;
212 }
213 start_entry = (selector & mask) ? selector & mask : 1u;
214 assert(start_entry <= c_ptr->highest_node);
215
216 /* Lookup upwards with wrap-around */
217 for (n_num = start_entry; n_num <= c_ptr->highest_node; n_num++) {
218 if (tipc_node_has_active_links(c_ptr->nodes[n_num]))
219 return c_ptr->nodes[n_num];
220 }
221 for (n_num = 1; n_num < start_entry; n_num++) {
222 if (tipc_node_has_active_links(c_ptr->nodes[n_num]))
223 return c_ptr->nodes[n_num];
224 }
225 return 0;
226}
227
228/*
229 * Routing table management: See description in node.c
230 */
231
232struct sk_buff *tipc_cltr_prepare_routing_msg(u32 data_size, u32 dest)
233{
234 u32 size = INT_H_SIZE + data_size;
235 struct sk_buff *buf = buf_acquire(size);
236 struct tipc_msg *msg;
237
238 if (buf) {
239 msg = buf_msg(buf);
240 memset((char *)msg, 0, size);
241 msg_init(msg, ROUTE_DISTRIBUTOR, 0, TIPC_OK, INT_H_SIZE, dest);
242 }
243 return buf;
244}
245
246void tipc_cltr_bcast_new_route(struct cluster *c_ptr, u32 dest,
247 u32 lower, u32 upper)
248{
249 struct sk_buff *buf = tipc_cltr_prepare_routing_msg(0, c_ptr->addr);
250 struct tipc_msg *msg;
251
252 if (buf) {
253 msg = buf_msg(buf);
254 msg_set_remote_node(msg, dest);
255 msg_set_type(msg, ROUTE_ADDITION);
256 tipc_cltr_multicast(c_ptr, buf, lower, upper);
257 } else {
258 warn("Memory squeeze: broadcast of new route failed\n");
259 }
260}
261
262void tipc_cltr_bcast_lost_route(struct cluster *c_ptr, u32 dest,
263 u32 lower, u32 upper)
264{
265 struct sk_buff *buf = tipc_cltr_prepare_routing_msg(0, c_ptr->addr);
266 struct tipc_msg *msg;
267
268 if (buf) {
269 msg = buf_msg(buf);
270 msg_set_remote_node(msg, dest);
271 msg_set_type(msg, ROUTE_REMOVAL);
272 tipc_cltr_multicast(c_ptr, buf, lower, upper);
273 } else {
274 warn("Memory squeeze: broadcast of lost route failed\n");
275 }
276}
277
278void tipc_cltr_send_slave_routes(struct cluster *c_ptr, u32 dest)
279{
280 struct sk_buff *buf;
281 struct tipc_msg *msg;
282 u32 highest = c_ptr->highest_slave;
283 u32 n_num;
284 int send = 0;
285
286 assert(!is_slave(dest));
287 assert(in_own_cluster(dest));
288 assert(in_own_cluster(c_ptr->addr));
289 if (highest <= LOWEST_SLAVE)
290 return;
291 buf = tipc_cltr_prepare_routing_msg(highest - LOWEST_SLAVE + 1,
292 c_ptr->addr);
293 if (buf) {
294 msg = buf_msg(buf);
295 msg_set_remote_node(msg, c_ptr->addr);
296 msg_set_type(msg, SLAVE_ROUTING_TABLE);
297 for (n_num = LOWEST_SLAVE; n_num <= highest; n_num++) {
298 if (c_ptr->nodes[n_num] &&
299 tipc_node_has_active_links(c_ptr->nodes[n_num])) {
300 send = 1;
301 msg_set_dataoctet(msg, n_num);
302 }
303 }
304 if (send)
305 tipc_link_send(buf, dest, dest);
306 else
307 buf_discard(buf);
308 } else {
309 		warn("Memory squeeze: sending of slave routes failed\n");
310 }
311}
312
313void tipc_cltr_send_ext_routes(struct cluster *c_ptr, u32 dest)
314{
315 struct sk_buff *buf;
316 struct tipc_msg *msg;
317 u32 highest = c_ptr->highest_node;
318 u32 n_num;
319 int send = 0;
320
321 if (in_own_cluster(c_ptr->addr))
322 return;
323 assert(!is_slave(dest));
324 assert(in_own_cluster(dest));
325 highest = c_ptr->highest_node;
326 buf = tipc_cltr_prepare_routing_msg(highest + 1, c_ptr->addr);
327 if (buf) {
328 msg = buf_msg(buf);
329 msg_set_remote_node(msg, c_ptr->addr);
330 msg_set_type(msg, EXT_ROUTING_TABLE);
331 for (n_num = 1; n_num <= highest; n_num++) {
332 if (c_ptr->nodes[n_num] &&
333 tipc_node_has_active_links(c_ptr->nodes[n_num])) {
334 send = 1;
335 msg_set_dataoctet(msg, n_num);
336 }
337 }
338 if (send)
339 tipc_link_send(buf, dest, dest);
340 else
341 buf_discard(buf);
342 } else {
343 warn("Memory squeeze: broadcast of external route failed\n");
344 }
345}
346
347void tipc_cltr_send_local_routes(struct cluster *c_ptr, u32 dest)
348{
349 struct sk_buff *buf;
350 struct tipc_msg *msg;
351 u32 highest = c_ptr->highest_node;
352 u32 n_num;
353 int send = 0;
354
355 assert(is_slave(dest));
356 assert(in_own_cluster(c_ptr->addr));
357 buf = tipc_cltr_prepare_routing_msg(highest, c_ptr->addr);
358 if (buf) {
359 msg = buf_msg(buf);
360 msg_set_remote_node(msg, c_ptr->addr);
361 msg_set_type(msg, LOCAL_ROUTING_TABLE);
362 for (n_num = 1; n_num <= highest; n_num++) {
363 if (c_ptr->nodes[n_num] &&
364 tipc_node_has_active_links(c_ptr->nodes[n_num])) {
365 send = 1;
366 msg_set_dataoctet(msg, n_num);
367 }
368 }
369 if (send)
370 tipc_link_send(buf, dest, dest);
371 else
372 buf_discard(buf);
373 } else {
374 warn("Memory squeeze: broadcast of local route failed\n");
375 }
376}
377
378void tipc_cltr_recv_routing_table(struct sk_buff *buf)
379{
380 struct tipc_msg *msg = buf_msg(buf);
381 struct cluster *c_ptr;
382 struct node *n_ptr;
383 unchar *node_table;
384 u32 table_size;
385 u32 router;
386 u32 rem_node = msg_remote_node(msg);
387 u32 z_num;
388 u32 c_num;
389 u32 n_num;
390
391 c_ptr = tipc_cltr_find(rem_node);
392 if (!c_ptr) {
393 c_ptr = tipc_cltr_create(rem_node);
394 if (!c_ptr) {
395 buf_discard(buf);
396 return;
397 }
398 }
399
400 node_table = buf->data + msg_hdr_sz(msg);
401 table_size = msg_size(msg) - msg_hdr_sz(msg);
402 router = msg_prevnode(msg);
403 z_num = tipc_zone(rem_node);
404 c_num = tipc_cluster(rem_node);
405
406 switch (msg_type(msg)) {
407 case LOCAL_ROUTING_TABLE:
408 assert(is_slave(tipc_own_addr));
409 case EXT_ROUTING_TABLE:
410 for (n_num = 1; n_num < table_size; n_num++) {
411 if (node_table[n_num]) {
412 u32 addr = tipc_addr(z_num, c_num, n_num);
413 n_ptr = c_ptr->nodes[n_num];
414 if (!n_ptr) {
415 n_ptr = tipc_node_create(addr);
416 }
417 if (n_ptr)
418 tipc_node_add_router(n_ptr, router);
419 }
420 }
421 break;
422 case SLAVE_ROUTING_TABLE:
423 assert(!is_slave(tipc_own_addr));
424 assert(in_own_cluster(c_ptr->addr));
425 for (n_num = 1; n_num < table_size; n_num++) {
426 if (node_table[n_num]) {
427 u32 slave_num = n_num + LOWEST_SLAVE;
428 u32 addr = tipc_addr(z_num, c_num, slave_num);
429 n_ptr = c_ptr->nodes[slave_num];
430 if (!n_ptr) {
431 n_ptr = tipc_node_create(addr);
432 }
433 if (n_ptr)
434 tipc_node_add_router(n_ptr, router);
435 }
436 }
437 break;
438 case ROUTE_ADDITION:
439 if (!is_slave(tipc_own_addr)) {
440 assert(!in_own_cluster(c_ptr->addr)
441 || is_slave(rem_node));
442 } else {
443 assert(in_own_cluster(c_ptr->addr)
444 && !is_slave(rem_node));
445 }
446 n_ptr = c_ptr->nodes[tipc_node(rem_node)];
447 if (!n_ptr)
448 n_ptr = tipc_node_create(rem_node);
449 if (n_ptr)
450 tipc_node_add_router(n_ptr, router);
451 break;
452 case ROUTE_REMOVAL:
453 if (!is_slave(tipc_own_addr)) {
454 assert(!in_own_cluster(c_ptr->addr)
455 || is_slave(rem_node));
456 } else {
457 assert(in_own_cluster(c_ptr->addr)
458 && !is_slave(rem_node));
459 }
460 n_ptr = c_ptr->nodes[tipc_node(rem_node)];
461 if (n_ptr)
462 tipc_node_remove_router(n_ptr, router);
463 break;
464 default:
465 assert(!"Illegal routing manager message received\n");
466 }
467 buf_discard(buf);
468}
469
470void tipc_cltr_remove_as_router(struct cluster *c_ptr, u32 router)
471{
472 u32 start_entry;
473 u32 tstop;
474 u32 n_num;
475
476 if (is_slave(router))
477 		return;	/* Slave nodes cannot be routers */
478
479 if (in_own_cluster(c_ptr->addr)) {
480 start_entry = LOWEST_SLAVE;
481 tstop = c_ptr->highest_slave;
482 } else {
483 start_entry = 1;
484 tstop = c_ptr->highest_node;
485 }
486
487 for (n_num = start_entry; n_num <= tstop; n_num++) {
488 if (c_ptr->nodes[n_num]) {
489 tipc_node_remove_router(c_ptr->nodes[n_num], router);
490 }
491 }
492}
493
494/**
495 * tipc_cltr_multicast - multicast message to local nodes
496 */
497
498void tipc_cltr_multicast(struct cluster *c_ptr, struct sk_buff *buf,
499 u32 lower, u32 upper)
500{
501 struct sk_buff *buf_copy;
502 struct node *n_ptr;
503 u32 n_num;
504 u32 tstop;
505
506 assert(lower <= upper);
507 assert(((lower >= 1) && (lower <= tipc_max_nodes)) ||
508 ((lower >= LOWEST_SLAVE) && (lower <= tipc_highest_allowed_slave)));
509 assert(((upper >= 1) && (upper <= tipc_max_nodes)) ||
510 ((upper >= LOWEST_SLAVE) && (upper <= tipc_highest_allowed_slave)));
511 assert(in_own_cluster(c_ptr->addr));
512
513 tstop = is_slave(upper) ? c_ptr->highest_slave : c_ptr->highest_node;
514 if (tstop > upper)
515 tstop = upper;
516 for (n_num = lower; n_num <= tstop; n_num++) {
517 n_ptr = c_ptr->nodes[n_num];
518 if (n_ptr && tipc_node_has_active_links(n_ptr)) {
519 buf_copy = skb_copy(buf, GFP_ATOMIC);
520 if (buf_copy == NULL)
521 break;
522 msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
523 tipc_link_send(buf_copy, n_ptr->addr, n_ptr->addr);
524 }
525 }
526 buf_discard(buf);
527}
528
529/**
530 * tipc_cltr_broadcast - broadcast message to all nodes within cluster
531 */
532
533void tipc_cltr_broadcast(struct sk_buff *buf)
534{
535 struct sk_buff *buf_copy;
536 struct cluster *c_ptr;
537 struct node *n_ptr;
538 u32 n_num;
539 u32 tstart;
540 u32 tstop;
541 u32 node_type;
542
543 if (tipc_mode == TIPC_NET_MODE) {
544 c_ptr = tipc_cltr_find(tipc_own_addr);
545 assert(in_own_cluster(c_ptr->addr)); /* For now */
546
547 /* Send to standard nodes, then repeat loop sending to slaves */
548 tstart = 1;
549 tstop = c_ptr->highest_node;
550 for (node_type = 1; node_type <= 2; node_type++) {
551 for (n_num = tstart; n_num <= tstop; n_num++) {
552 n_ptr = c_ptr->nodes[n_num];
553 if (n_ptr && tipc_node_has_active_links(n_ptr)) {
554 buf_copy = skb_copy(buf, GFP_ATOMIC);
555 if (buf_copy == NULL)
556 goto exit;
557 msg_set_destnode(buf_msg(buf_copy),
558 n_ptr->addr);
559 tipc_link_send(buf_copy, n_ptr->addr,
560 n_ptr->addr);
561 }
562 }
563 tstart = LOWEST_SLAVE;
564 tstop = c_ptr->highest_slave;
565 }
566 }
567exit:
568 buf_discard(buf);
569}
570
571int tipc_cltr_init(void)
572{
573 tipc_highest_allowed_slave = LOWEST_SLAVE + tipc_max_slaves;
574 return tipc_cltr_create(tipc_own_addr) ? TIPC_OK : -ENOMEM;
575}
576
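The two selection routines above hash the caller-supplied reference/selector into a start index before scanning upwards with wrap-around, which is what makes the choice deterministic yet spread across nodes. A small illustrative sketch of that start-index calculation, assuming the default tipc_max_nodes of 255 (not part of this patch):

static u32 demo_start_entry(u32 selector, u32 highest_node)
{
	u32 mask = 255;			/* tipc_max_nodes (default) */

	while (mask > highest_node)
		mask >>= 1;
	return (selector & mask) ? (selector & mask) : 1u;
}

/* Example: with highest_node = 20 the mask shrinks to 15, so a selector
 * of 37 starts the wrap-around search at node 5. */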
diff --git a/net/tipc/cluster.h b/net/tipc/cluster.h
new file mode 100644
index 000000000000..9963642e1058
--- /dev/null
+++ b/net/tipc/cluster.h
@@ -0,0 +1,92 @@
1/*
2 * net/tipc/cluster.h: Include file for TIPC cluster management routines
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_CLUSTER_H
38#define _TIPC_CLUSTER_H
39
40#include "addr.h"
41#include "zone.h"
42
43#define LOWEST_SLAVE 2048u
44
45/**
46 * struct cluster - TIPC cluster structure
47 * @addr: network address of cluster
48 * @owner: pointer to zone that cluster belongs to
49 * @nodes: array of pointers to all nodes within cluster
50 * @highest_node: id of highest numbered node within cluster
51 * @highest_slave: id of highest numbered slave node within cluster (used for secondary node support)
52 */
53
54struct cluster {
55 u32 addr;
56 struct _zone *owner;
57 struct node **nodes;
58 u32 highest_node;
59 u32 highest_slave;
60};
61
62
63extern struct node **tipc_local_nodes;
64extern u32 tipc_highest_allowed_slave;
65extern struct node_map tipc_cltr_bcast_nodes;
66
67void tipc_cltr_remove_as_router(struct cluster *c_ptr, u32 router);
68void tipc_cltr_send_ext_routes(struct cluster *c_ptr, u32 dest);
69struct node *tipc_cltr_select_node(struct cluster *c_ptr, u32 selector);
70u32 tipc_cltr_select_router(struct cluster *c_ptr, u32 ref);
71void tipc_cltr_recv_routing_table(struct sk_buff *buf);
72struct cluster *tipc_cltr_create(u32 addr);
73void tipc_cltr_delete(struct cluster *c_ptr);
74void tipc_cltr_attach_node(struct cluster *c_ptr, struct node *n_ptr);
75void tipc_cltr_send_slave_routes(struct cluster *c_ptr, u32 dest);
76void tipc_cltr_broadcast(struct sk_buff *buf);
77int tipc_cltr_init(void);
78u32 tipc_cltr_next_node(struct cluster *c_ptr, u32 addr);
79void tipc_cltr_bcast_new_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi);
80void tipc_cltr_send_local_routes(struct cluster *c_ptr, u32 dest);
81void tipc_cltr_bcast_lost_route(struct cluster *c_ptr, u32 dest, u32 lo, u32 hi);
82
83static inline struct cluster *tipc_cltr_find(u32 addr)
84{
85 struct _zone *z_ptr = tipc_zone_find(addr);
86
87 if (z_ptr)
88 return z_ptr->clusters[1];
89 return 0;
90}
91
92#endif
diff --git a/net/tipc/config.c b/net/tipc/config.c
new file mode 100644
index 000000000000..3c8e6740e5ae
--- /dev/null
+++ b/net/tipc/config.c
@@ -0,0 +1,718 @@
1/*
2 * net/tipc/config.c: TIPC configuration management code
3 *
4 * Copyright (c) 2002-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "dbg.h"
39#include "bearer.h"
40#include "port.h"
41#include "link.h"
42#include "zone.h"
43#include "addr.h"
44#include "name_table.h"
45#include "node.h"
46#include "config.h"
47#include "discover.h"
48
49struct subscr_data {
50 char usr_handle[8];
51 u32 domain;
52 u32 port_ref;
53 struct list_head subd_list;
54};
55
56struct manager {
57 u32 user_ref;
58 u32 port_ref;
59 u32 subscr_ref;
60 u32 link_subscriptions;
61 struct list_head link_subscribers;
62};
63
64static struct manager mng = { 0};
65
66static spinlock_t config_lock = SPIN_LOCK_UNLOCKED;
67
68static const void *req_tlv_area; /* request message TLV area */
69static int req_tlv_space; /* request message TLV area size */
70static int rep_headroom; /* reply message headroom to use */
71
72
73void tipc_cfg_link_event(u32 addr, char *name, int up)
74{
75 /* TIPC DOESN'T HANDLE LINK EVENT SUBSCRIPTIONS AT THE MOMENT */
76}
77
78
79struct sk_buff *tipc_cfg_reply_alloc(int payload_size)
80{
81 struct sk_buff *buf;
82
83 buf = alloc_skb(rep_headroom + payload_size, GFP_ATOMIC);
84 if (buf)
85 skb_reserve(buf, rep_headroom);
86 return buf;
87}
88
89int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type,
90 void *tlv_data, int tlv_data_size)
91{
92 struct tlv_desc *tlv = (struct tlv_desc *)buf->tail;
93 int new_tlv_space = TLV_SPACE(tlv_data_size);
94
95 if (skb_tailroom(buf) < new_tlv_space) {
96 dbg("tipc_cfg_append_tlv unable to append TLV\n");
97 return 0;
98 }
99 skb_put(buf, new_tlv_space);
100 tlv->tlv_type = htons(tlv_type);
101 tlv->tlv_len = htons(TLV_LENGTH(tlv_data_size));
102 if (tlv_data_size && tlv_data)
103 memcpy(TLV_DATA(tlv), tlv_data, tlv_data_size);
104 return 1;
105}
106
107struct sk_buff *tipc_cfg_reply_unsigned_type(u16 tlv_type, u32 value)
108{
109 struct sk_buff *buf;
110 u32 value_net;
111
112 buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(value)));
113 if (buf) {
114 value_net = htonl(value);
115 tipc_cfg_append_tlv(buf, tlv_type, &value_net,
116 sizeof(value_net));
117 }
118 return buf;
119}
120
121struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string)
122{
123 struct sk_buff *buf;
124 int string_len = strlen(string) + 1;
125
126 buf = tipc_cfg_reply_alloc(TLV_SPACE(string_len));
127 if (buf)
128 tipc_cfg_append_tlv(buf, tlv_type, string, string_len);
129 return buf;
130}
131
132
133
134
135#if 0
136
137/* Now-obsolete code for handling commands that have not yet been reimplemented the new way */
138
139int tipc_cfg_cmd(const struct tipc_cmd_msg * msg,
140 char *data,
141 u32 sz,
142 u32 *ret_size,
143 struct tipc_portid *orig)
144{
145 int rv = -EINVAL;
146 u32 cmd = msg->cmd;
147
148 *ret_size = 0;
149 switch (cmd) {
150 case TIPC_REMOVE_LINK:
151 case TIPC_CMD_BLOCK_LINK:
152 case TIPC_CMD_UNBLOCK_LINK:
153 if (!cfg_check_connection(orig))
154 rv = link_control(msg->argv.link_name, msg->cmd, 0);
155 break;
156 case TIPC_ESTABLISH:
157 {
158 int connected;
159
160 tipc_isconnected(mng.conn_port_ref, &connected);
161 if (connected || !orig) {
162 rv = TIPC_FAILURE;
163 break;
164 }
165 rv = tipc_connect2port(mng.conn_port_ref, orig);
166 if (rv == TIPC_OK)
167 orig = 0;
168 break;
169 }
170 case TIPC_GET_PEER_ADDRESS:
171 *ret_size = link_peer_addr(msg->argv.link_name, data, sz);
172 break;
173 case TIPC_GET_ROUTES:
174 rv = TIPC_OK;
175 break;
176 default: {}
177 }
178 if (*ret_size)
179 rv = TIPC_OK;
180 return rv;
181}
182
183static void cfg_cmd_event(struct tipc_cmd_msg *msg,
184 char *data,
185 u32 sz,
186 struct tipc_portid const *orig)
187{
188 int rv = -EINVAL;
189 struct tipc_cmd_result_msg rmsg;
190 struct iovec msg_sect[2];
191 int *arg;
192
193 msg->cmd = ntohl(msg->cmd);
194
195 cfg_prepare_res_msg(msg->cmd, msg->usr_handle, rv, &rmsg, msg_sect,
196 data, 0);
197 if (ntohl(msg->magic) != TIPC_MAGIC)
198 goto exit;
199
200 switch (msg->cmd) {
201 case TIPC_CREATE_LINK:
202 if (!cfg_check_connection(orig))
203 rv = disc_create_link(&msg->argv.create_link);
204 break;
205 case TIPC_LINK_SUBSCRIBE:
206 {
207 struct subscr_data *sub;
208
209 if (mng.link_subscriptions > 64)
210 break;
211 sub = (struct subscr_data *)kmalloc(sizeof(*sub),
212 GFP_ATOMIC);
213 if (sub == NULL) {
214 warn("Memory squeeze; dropped remote link subscription\n");
215 break;
216 }
217 INIT_LIST_HEAD(&sub->subd_list);
218 tipc_createport(mng.user_ref,
219 (void *)sub,
220 TIPC_HIGH_IMPORTANCE,
221 0,
222 0,
223 (tipc_conn_shutdown_event)cfg_linksubscr_cancel,
224 0,
225 0,
226 (tipc_conn_msg_event)cfg_linksubscr_cancel,
227 0,
228 &sub->port_ref);
229 if (!sub->port_ref) {
230 kfree(sub);
231 break;
232 }
233 memcpy(sub->usr_handle,msg->usr_handle,
234 sizeof(sub->usr_handle));
235 sub->domain = msg->argv.domain;
236 list_add_tail(&sub->subd_list, &mng.link_subscribers);
237 tipc_connect2port(sub->port_ref, orig);
238 rmsg.retval = TIPC_OK;
239 tipc_send(sub->port_ref, 2u, msg_sect);
240 mng.link_subscriptions++;
241 return;
242 }
243 default:
244 rv = tipc_cfg_cmd(msg, data, sz, (u32 *)&msg_sect[1].iov_len, orig);
245 }
246 exit:
247 rmsg.result_len = htonl(msg_sect[1].iov_len);
248 rmsg.retval = htonl(rv);
249 tipc_cfg_respond(msg_sect, 2u, orig);
250}
251#endif
252
253static struct sk_buff *cfg_enable_bearer(void)
254{
255 struct tipc_bearer_config *args;
256
257 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_CONFIG))
258 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
259
260 args = (struct tipc_bearer_config *)TLV_DATA(req_tlv_area);
261 if (tipc_enable_bearer(args->name,
262 ntohl(args->detect_scope),
263 ntohl(args->priority)))
264 return tipc_cfg_reply_error_string("unable to enable bearer");
265
266 return tipc_cfg_reply_none();
267}
268
269static struct sk_buff *cfg_disable_bearer(void)
270{
271 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_NAME))
272 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
273
274 if (tipc_disable_bearer((char *)TLV_DATA(req_tlv_area)))
275 return tipc_cfg_reply_error_string("unable to disable bearer");
276
277 return tipc_cfg_reply_none();
278}
279
280static struct sk_buff *cfg_set_own_addr(void)
281{
282 u32 addr;
283
284 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
285 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
286
287 addr = *(u32 *)TLV_DATA(req_tlv_area);
288 addr = ntohl(addr);
289 if (addr == tipc_own_addr)
290 return tipc_cfg_reply_none();
291 if (!tipc_addr_node_valid(addr))
292 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
293 " (node address)");
294 if (tipc_own_addr)
295 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
296 " (cannot change node address once assigned)");
297
298 spin_unlock_bh(&config_lock);
299 tipc_core_stop_net();
300 tipc_own_addr = addr;
301 tipc_core_start_net();
302 spin_lock_bh(&config_lock);
303 return tipc_cfg_reply_none();
304}
305
306static struct sk_buff *cfg_set_remote_mng(void)
307{
308 u32 value;
309
310 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
311 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
312
313 value = *(u32 *)TLV_DATA(req_tlv_area);
314 value = ntohl(value);
315 tipc_remote_management = (value != 0);
316 return tipc_cfg_reply_none();
317}
318
319static struct sk_buff *cfg_set_max_publications(void)
320{
321 u32 value;
322
323 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
324 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
325
326 value = *(u32 *)TLV_DATA(req_tlv_area);
327 value = ntohl(value);
328 if (value != delimit(value, 1, 65535))
329 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
330 " (max publications must be 1-65535)");
331 tipc_max_publications = value;
332 return tipc_cfg_reply_none();
333}
334
335static struct sk_buff *cfg_set_max_subscriptions(void)
336{
337 u32 value;
338
339 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
340 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
341
342 value = *(u32 *)TLV_DATA(req_tlv_area);
343 value = ntohl(value);
344 if (value != delimit(value, 1, 65535))
345 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
346 					   " (max subscriptions must be 1-65535)");
347 tipc_max_subscriptions = value;
348 return tipc_cfg_reply_none();
349}
350
351static struct sk_buff *cfg_set_max_ports(void)
352{
353 int orig_mode;
354 u32 value;
355
356 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
357 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
358 value = *(u32 *)TLV_DATA(req_tlv_area);
359 value = ntohl(value);
360 if (value != delimit(value, 127, 65535))
361 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
362 " (max ports must be 127-65535)");
363
364 if (value == tipc_max_ports)
365 return tipc_cfg_reply_none();
366
367 if (atomic_read(&tipc_user_count) > 2)
368 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
369 " (cannot change max ports while TIPC users exist)");
370
371 spin_unlock_bh(&config_lock);
372 orig_mode = tipc_get_mode();
373 if (orig_mode == TIPC_NET_MODE)
374 tipc_core_stop_net();
375 tipc_core_stop();
376 tipc_max_ports = value;
377 tipc_core_start();
378 if (orig_mode == TIPC_NET_MODE)
379 tipc_core_start_net();
380 spin_lock_bh(&config_lock);
381 return tipc_cfg_reply_none();
382}
383
384static struct sk_buff *set_net_max(int value, int *parameter)
385{
386 int orig_mode;
387
388 if (value != *parameter) {
389 orig_mode = tipc_get_mode();
390 if (orig_mode == TIPC_NET_MODE)
391 tipc_core_stop_net();
392 *parameter = value;
393 if (orig_mode == TIPC_NET_MODE)
394 tipc_core_start_net();
395 }
396
397 return tipc_cfg_reply_none();
398}
399
400static struct sk_buff *cfg_set_max_zones(void)
401{
402 u32 value;
403
404 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
405 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
406 value = *(u32 *)TLV_DATA(req_tlv_area);
407 value = ntohl(value);
408 if (value != delimit(value, 1, 255))
409 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
410 " (max zones must be 1-255)");
411 return set_net_max(value, &tipc_max_zones);
412}
413
414static struct sk_buff *cfg_set_max_clusters(void)
415{
416 u32 value;
417
418 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
419 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
420 value = *(u32 *)TLV_DATA(req_tlv_area);
421 value = ntohl(value);
422 if (value != 1)
423 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
424 " (max clusters fixed at 1)");
425 return tipc_cfg_reply_none();
426}
427
428static struct sk_buff *cfg_set_max_nodes(void)
429{
430 u32 value;
431
432 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
433 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
434 value = *(u32 *)TLV_DATA(req_tlv_area);
435 value = ntohl(value);
436 if (value != delimit(value, 8, 2047))
437 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
438 " (max nodes must be 8-2047)");
439 return set_net_max(value, &tipc_max_nodes);
440}
441
442static struct sk_buff *cfg_set_max_slaves(void)
443{
444 u32 value;
445
446 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
447 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
448 value = *(u32 *)TLV_DATA(req_tlv_area);
449 value = ntohl(value);
450 if (value != 0)
451 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
452 " (max secondary nodes fixed at 0)");
453 return tipc_cfg_reply_none();
454}
455
456static struct sk_buff *cfg_set_netid(void)
457{
458 u32 value;
459
460 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
461 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
462 value = *(u32 *)TLV_DATA(req_tlv_area);
463 value = ntohl(value);
464 if (value != delimit(value, 1, 9999))
465 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
466 " (network id must be 1-9999)");
467
468 if (tipc_own_addr)
469 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
470 " (cannot change network id once part of network)");
471
472 return set_net_max(value, &tipc_net_id);
473}
474
475struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd, const void *request_area,
476 int request_space, int reply_headroom)
477{
478 struct sk_buff *rep_tlv_buf;
479
480 spin_lock_bh(&config_lock);
481
482 /* Save request and reply details in a well-known location */
483
484 req_tlv_area = request_area;
485 req_tlv_space = request_space;
486 rep_headroom = reply_headroom;
487
488 /* Check command authorization */
489
490 if (likely(orig_node == tipc_own_addr)) {
491 /* command is permitted */
492 } else if (cmd >= 0x8000) {
493 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
494 " (cannot be done remotely)");
495 goto exit;
496 } else if (!tipc_remote_management) {
497 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NO_REMOTE);
498 goto exit;
499 }
500 else if (cmd >= 0x4000) {
501 u32 domain = 0;
502
503 if ((tipc_nametbl_translate(TIPC_ZM_SRV, 0, &domain) == 0) ||
504 (domain != orig_node)) {
505 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_ZONE_MSTR);
506 goto exit;
507 }
508 }
509
510 /* Call appropriate processing routine */
511
512 switch (cmd) {
513 case TIPC_CMD_NOOP:
514 rep_tlv_buf = tipc_cfg_reply_none();
515 break;
516 case TIPC_CMD_GET_NODES:
517 rep_tlv_buf = tipc_node_get_nodes(req_tlv_area, req_tlv_space);
518 break;
519 case TIPC_CMD_GET_LINKS:
520 rep_tlv_buf = tipc_node_get_links(req_tlv_area, req_tlv_space);
521 break;
522 case TIPC_CMD_SHOW_LINK_STATS:
523 rep_tlv_buf = tipc_link_cmd_show_stats(req_tlv_area, req_tlv_space);
524 break;
525 case TIPC_CMD_RESET_LINK_STATS:
526 rep_tlv_buf = tipc_link_cmd_reset_stats(req_tlv_area, req_tlv_space);
527 break;
528 case TIPC_CMD_SHOW_NAME_TABLE:
529 rep_tlv_buf = tipc_nametbl_get(req_tlv_area, req_tlv_space);
530 break;
531 case TIPC_CMD_GET_BEARER_NAMES:
532 rep_tlv_buf = tipc_bearer_get_names();
533 break;
534 case TIPC_CMD_GET_MEDIA_NAMES:
535 rep_tlv_buf = tipc_media_get_names();
536 break;
537 case TIPC_CMD_SHOW_PORTS:
538 rep_tlv_buf = tipc_port_get_ports();
539 break;
540#if 0
541 case TIPC_CMD_SHOW_PORT_STATS:
542 rep_tlv_buf = port_show_stats(req_tlv_area, req_tlv_space);
543 break;
544 case TIPC_CMD_RESET_PORT_STATS:
545 rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED);
546 break;
547#endif
548 case TIPC_CMD_SET_LOG_SIZE:
549 rep_tlv_buf = tipc_log_resize(req_tlv_area, req_tlv_space);
550 break;
551 case TIPC_CMD_DUMP_LOG:
552 rep_tlv_buf = tipc_log_dump();
553 break;
554 case TIPC_CMD_SET_LINK_TOL:
555 case TIPC_CMD_SET_LINK_PRI:
556 case TIPC_CMD_SET_LINK_WINDOW:
557 rep_tlv_buf = tipc_link_cmd_config(req_tlv_area, req_tlv_space, cmd);
558 break;
559 case TIPC_CMD_ENABLE_BEARER:
560 rep_tlv_buf = cfg_enable_bearer();
561 break;
562 case TIPC_CMD_DISABLE_BEARER:
563 rep_tlv_buf = cfg_disable_bearer();
564 break;
565 case TIPC_CMD_SET_NODE_ADDR:
566 rep_tlv_buf = cfg_set_own_addr();
567 break;
568 case TIPC_CMD_SET_REMOTE_MNG:
569 rep_tlv_buf = cfg_set_remote_mng();
570 break;
571 case TIPC_CMD_SET_MAX_PORTS:
572 rep_tlv_buf = cfg_set_max_ports();
573 break;
574 case TIPC_CMD_SET_MAX_PUBL:
575 rep_tlv_buf = cfg_set_max_publications();
576 break;
577 case TIPC_CMD_SET_MAX_SUBSCR:
578 rep_tlv_buf = cfg_set_max_subscriptions();
579 break;
580 case TIPC_CMD_SET_MAX_ZONES:
581 rep_tlv_buf = cfg_set_max_zones();
582 break;
583 case TIPC_CMD_SET_MAX_CLUSTERS:
584 rep_tlv_buf = cfg_set_max_clusters();
585 break;
586 case TIPC_CMD_SET_MAX_NODES:
587 rep_tlv_buf = cfg_set_max_nodes();
588 break;
589 case TIPC_CMD_SET_MAX_SLAVES:
590 rep_tlv_buf = cfg_set_max_slaves();
591 break;
592 case TIPC_CMD_SET_NETID:
593 rep_tlv_buf = cfg_set_netid();
594 break;
595 case TIPC_CMD_GET_REMOTE_MNG:
596 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_remote_management);
597 break;
598 case TIPC_CMD_GET_MAX_PORTS:
599 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_ports);
600 break;
601 case TIPC_CMD_GET_MAX_PUBL:
602 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_publications);
603 break;
604 case TIPC_CMD_GET_MAX_SUBSCR:
605 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_subscriptions);
606 break;
607 case TIPC_CMD_GET_MAX_ZONES:
608 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_zones);
609 break;
610 case TIPC_CMD_GET_MAX_CLUSTERS:
611 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_clusters);
612 break;
613 case TIPC_CMD_GET_MAX_NODES:
614 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_nodes);
615 break;
616 case TIPC_CMD_GET_MAX_SLAVES:
617 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_slaves);
618 break;
619 case TIPC_CMD_GET_NETID:
620 rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id);
621 break;
622 default:
623 rep_tlv_buf = NULL;
624 break;
625 }
626
627 /* Return reply buffer */
628exit:
629 spin_unlock_bh(&config_lock);
630 return rep_tlv_buf;
631}
632
633static void cfg_named_msg_event(void *userdata,
634 u32 port_ref,
635 struct sk_buff **buf,
636 const unchar *msg,
637 u32 size,
638 u32 importance,
639 struct tipc_portid const *orig,
640 struct tipc_name_seq const *dest)
641{
642 struct tipc_cfg_msg_hdr *req_hdr;
643 struct tipc_cfg_msg_hdr *rep_hdr;
644 struct sk_buff *rep_buf;
645
646 /* Validate configuration message header (ignore invalid message) */
647
648 req_hdr = (struct tipc_cfg_msg_hdr *)msg;
649 if ((size < sizeof(*req_hdr)) ||
650 (size != TCM_ALIGN(ntohl(req_hdr->tcm_len))) ||
651 (ntohs(req_hdr->tcm_flags) != TCM_F_REQUEST)) {
652 warn("discarded invalid configuration message\n");
653 return;
654 }
655
656 /* Generate reply for request (if can't, return request) */
657
658 rep_buf = tipc_cfg_do_cmd(orig->node,
659 ntohs(req_hdr->tcm_type),
660 msg + sizeof(*req_hdr),
661 size - sizeof(*req_hdr),
662 BUF_HEADROOM + MAX_H_SIZE + sizeof(*rep_hdr));
663 if (rep_buf) {
664 skb_push(rep_buf, sizeof(*rep_hdr));
665 rep_hdr = (struct tipc_cfg_msg_hdr *)rep_buf->data;
666 memcpy(rep_hdr, req_hdr, sizeof(*rep_hdr));
667 rep_hdr->tcm_len = htonl(rep_buf->len);
668 rep_hdr->tcm_flags &= htons(~TCM_F_REQUEST);
669 } else {
670 rep_buf = *buf;
671 *buf = NULL;
672 }
673
674 /* NEED TO ADD CODE TO HANDLE FAILED SEND (SUCH AS CONGESTION) */
675 tipc_send_buf2port(port_ref, orig, rep_buf, rep_buf->len);
676}
677
678int tipc_cfg_init(void)
679{
680 struct tipc_name_seq seq;
681 int res;
682
683 memset(&mng, 0, sizeof(mng));
684 INIT_LIST_HEAD(&mng.link_subscribers);
685
686 res = tipc_attach(&mng.user_ref, 0, 0);
687 if (res)
688 goto failed;
689
690 res = tipc_createport(mng.user_ref, 0, TIPC_CRITICAL_IMPORTANCE,
691 NULL, NULL, NULL,
692 NULL, cfg_named_msg_event, NULL,
693 NULL, &mng.port_ref);
694 if (res)
695 goto failed;
696
697 seq.type = TIPC_CFG_SRV;
698 seq.lower = seq.upper = tipc_own_addr;
699 res = tipc_nametbl_publish_rsv(mng.port_ref, TIPC_ZONE_SCOPE, &seq);
700 if (res)
701 goto failed;
702
703 return 0;
704
705failed:
706 err("Unable to create configuration service\n");
707 tipc_detach(mng.user_ref);
708 mng.user_ref = 0;
709 return res;
710}
711
712void tipc_cfg_stop(void)
713{
714 if (mng.user_ref) {
715 tipc_detach(mng.user_ref);
716 mng.user_ref = 0;
717 }
718}
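The reply helpers above always allocate a buffer with rep_headroom reserved and then append a single TLV; the same primitives can be combined to build multi-TLV replies. An illustrative sketch (the handler name is hypothetical and not part of this patch):

static struct sk_buff *demo_reply_two_values(u32 first, u32 second)
{
	struct sk_buff *buf;
	u32 net_val;

	buf = tipc_cfg_reply_alloc(2 * TLV_SPACE(sizeof(u32)));
	if (!buf)
		return NULL;

	net_val = htonl(first);
	tipc_cfg_append_tlv(buf, TIPC_TLV_UNSIGNED, &net_val, sizeof(net_val));
	net_val = htonl(second);
	tipc_cfg_append_tlv(buf, TIPC_TLV_UNSIGNED, &net_val, sizeof(net_val));
	return buf;
}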
diff --git a/net/tipc/config.h b/net/tipc/config.h
new file mode 100644
index 000000000000..7a728f954d84
--- /dev/null
+++ b/net/tipc/config.h
@@ -0,0 +1,79 @@
1/*
2 * net/tipc/config.h: Include file for TIPC configuration service code
3 *
4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_CONFIG_H
38#define _TIPC_CONFIG_H
39
40/* ---------------------------------------------------------------------- */
41
42#include "core.h"
43#include "link.h"
44
45struct sk_buff *tipc_cfg_reply_alloc(int payload_size);
46int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type,
47 void *tlv_data, int tlv_data_size);
48struct sk_buff *tipc_cfg_reply_unsigned_type(u16 tlv_type, u32 value);
49struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string);
50
51static inline struct sk_buff *tipc_cfg_reply_none(void)
52{
53 return tipc_cfg_reply_alloc(0);
54}
55
56static inline struct sk_buff *tipc_cfg_reply_unsigned(u32 value)
57{
58 return tipc_cfg_reply_unsigned_type(TIPC_TLV_UNSIGNED, value);
59}
60
61static inline struct sk_buff *tipc_cfg_reply_error_string(char *string)
62{
63 return tipc_cfg_reply_string_type(TIPC_TLV_ERROR_STRING, string);
64}
65
66static inline struct sk_buff *tipc_cfg_reply_ultra_string(char *string)
67{
68 return tipc_cfg_reply_string_type(TIPC_TLV_ULTRA_STRING, string);
69}
70
71struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd,
72 const void *req_tlv_area, int req_tlv_space,
73 int headroom);
74
75void tipc_cfg_link_event(u32 addr, char *name, int up);
76int tipc_cfg_init(void);
77void tipc_cfg_stop(void);
78
79#endif
diff --git a/net/tipc/core.c b/net/tipc/core.c
new file mode 100644
index 000000000000..3d0a8ee4e1d3
--- /dev/null
+++ b/net/tipc/core.c
@@ -0,0 +1,284 @@
1/*
2 * net/tipc/core.c: TIPC module code
3 *
4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <linux/init.h>
38#include <linux/module.h>
39#include <linux/kernel.h>
40#include <linux/random.h>
41
42#include "core.h"
43#include "dbg.h"
44#include "ref.h"
45#include "net.h"
46#include "user_reg.h"
47#include "name_table.h"
48#include "subscr.h"
49#include "config.h"
50
51int tipc_eth_media_start(void);
52void tipc_eth_media_stop(void);
53int tipc_handler_start(void);
54void tipc_handler_stop(void);
55int tipc_socket_init(void);
56void tipc_socket_stop(void);
57int tipc_netlink_start(void);
58void tipc_netlink_stop(void);
59
60#define MOD_NAME "tipc_start: "
61
62#ifndef CONFIG_TIPC_ZONES
63#define CONFIG_TIPC_ZONES 3
64#endif
65
66#ifndef CONFIG_TIPC_CLUSTERS
67#define CONFIG_TIPC_CLUSTERS 1
68#endif
69
70#ifndef CONFIG_TIPC_NODES
71#define CONFIG_TIPC_NODES 255
72#endif
73
74#ifndef CONFIG_TIPC_SLAVE_NODES
75#define CONFIG_TIPC_SLAVE_NODES 0
76#endif
77
78#ifndef CONFIG_TIPC_PORTS
79#define CONFIG_TIPC_PORTS 8191
80#endif
81
82#ifndef CONFIG_TIPC_LOG
83#define CONFIG_TIPC_LOG 0
84#endif
85
86/* global variables used by multiple sub-systems within TIPC */
87
88int tipc_mode = TIPC_NOT_RUNNING;
89int tipc_random;
90atomic_t tipc_user_count = ATOMIC_INIT(0);
91
92const char tipc_alphabet[] =
93 "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789_";
94
95/* configurable TIPC parameters */
96
97u32 tipc_own_addr;
98int tipc_max_zones;
99int tipc_max_clusters;
100int tipc_max_nodes;
101int tipc_max_slaves;
102int tipc_max_ports;
103int tipc_max_subscriptions;
104int tipc_max_publications;
105int tipc_net_id;
106int tipc_remote_management;
107
108
109int tipc_get_mode(void)
110{
111 return tipc_mode;
112}
113
114/**
115 * tipc_core_stop_net - shut down TIPC networking sub-systems
116 */
117
118void tipc_core_stop_net(void)
119{
120 tipc_eth_media_stop();
121 tipc_net_stop();
122}
123
124/**
125 * tipc_core_start_net - start TIPC networking sub-systems
126 */
127
128int tipc_core_start_net(void)
129{
130 int res;
131
132 if ((res = tipc_net_start()) ||
133 (res = tipc_eth_media_start())) {
134 tipc_core_stop_net();
135 }
136 return res;
137}
138
139/**
140 * tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode
141 */
142
143void tipc_core_stop(void)
144{
145 if (tipc_mode != TIPC_NODE_MODE)
146 return;
147
148 tipc_mode = TIPC_NOT_RUNNING;
149
150 tipc_netlink_stop();
151 tipc_handler_stop();
152 tipc_cfg_stop();
153 tipc_subscr_stop();
154 tipc_reg_stop();
155 tipc_nametbl_stop();
156 tipc_ref_table_stop();
157 tipc_socket_stop();
158}
159
160/**
161 * tipc_core_start - switch TIPC from NOT RUNNING to SINGLE NODE mode
162 */
163
164int tipc_core_start(void)
165{
166 int res;
167
168 if (tipc_mode != TIPC_NOT_RUNNING)
169 return -ENOPROTOOPT;
170
171 get_random_bytes(&tipc_random, sizeof(tipc_random));
172 tipc_mode = TIPC_NODE_MODE;
173
174 if ((res = tipc_handler_start()) ||
175 (res = tipc_ref_table_init(tipc_max_ports + tipc_max_subscriptions,
176 tipc_random)) ||
177 (res = tipc_reg_start()) ||
178 (res = tipc_nametbl_init()) ||
179 (res = tipc_k_signal((Handler)tipc_subscr_start, 0)) ||
180 (res = tipc_k_signal((Handler)tipc_cfg_init, 0)) ||
181 (res = tipc_netlink_start()) ||
182 (res = tipc_socket_init())) {
183 tipc_core_stop();
184 }
185 return res;
186}
187
188
189static int __init tipc_init(void)
190{
191 int res;
192
193 tipc_log_reinit(CONFIG_TIPC_LOG);
194 info("Activated (compiled " __DATE__ " " __TIME__ ")\n");
195
196 tipc_own_addr = 0;
197 tipc_remote_management = 1;
198 tipc_max_publications = 10000;
199 tipc_max_subscriptions = 2000;
200 tipc_max_ports = delimit(CONFIG_TIPC_PORTS, 127, 65536);
201 tipc_max_zones = delimit(CONFIG_TIPC_ZONES, 1, 511);
202 tipc_max_clusters = delimit(CONFIG_TIPC_CLUSTERS, 1, 1);
203 tipc_max_nodes = delimit(CONFIG_TIPC_NODES, 8, 2047);
204 tipc_max_slaves = delimit(CONFIG_TIPC_SLAVE_NODES, 0, 2047);
205 tipc_net_id = 4711;
206
207 if ((res = tipc_core_start()))
208 err("Unable to start in single node mode\n");
209 else
210 info("Started in single node mode\n");
211 return res;
212}
213
214static void __exit tipc_exit(void)
215{
216 tipc_core_stop_net();
217 tipc_core_stop();
218 info("Deactivated\n");
219 tipc_log_stop();
220}
221
222module_init(tipc_init);
223module_exit(tipc_exit);
224
225MODULE_DESCRIPTION("TIPC: Transparent Inter Process Communication");
226MODULE_LICENSE("Dual BSD/GPL");
227
228/* Native TIPC API for kernel-space applications (see tipc.h) */
229
230EXPORT_SYMBOL(tipc_attach);
231EXPORT_SYMBOL(tipc_detach);
232EXPORT_SYMBOL(tipc_get_addr);
233EXPORT_SYMBOL(tipc_get_mode);
234EXPORT_SYMBOL(tipc_createport);
235EXPORT_SYMBOL(tipc_deleteport);
236EXPORT_SYMBOL(tipc_ownidentity);
237EXPORT_SYMBOL(tipc_portimportance);
238EXPORT_SYMBOL(tipc_set_portimportance);
239EXPORT_SYMBOL(tipc_portunreliable);
240EXPORT_SYMBOL(tipc_set_portunreliable);
241EXPORT_SYMBOL(tipc_portunreturnable);
242EXPORT_SYMBOL(tipc_set_portunreturnable);
243EXPORT_SYMBOL(tipc_publish);
244EXPORT_SYMBOL(tipc_withdraw);
245EXPORT_SYMBOL(tipc_connect2port);
246EXPORT_SYMBOL(tipc_disconnect);
247EXPORT_SYMBOL(tipc_shutdown);
248EXPORT_SYMBOL(tipc_isconnected);
249EXPORT_SYMBOL(tipc_peer);
250EXPORT_SYMBOL(tipc_ref_valid);
251EXPORT_SYMBOL(tipc_send);
252EXPORT_SYMBOL(tipc_send_buf);
253EXPORT_SYMBOL(tipc_send2name);
254EXPORT_SYMBOL(tipc_forward2name);
255EXPORT_SYMBOL(tipc_send_buf2name);
256EXPORT_SYMBOL(tipc_forward_buf2name);
257EXPORT_SYMBOL(tipc_send2port);
258EXPORT_SYMBOL(tipc_forward2port);
259EXPORT_SYMBOL(tipc_send_buf2port);
260EXPORT_SYMBOL(tipc_forward_buf2port);
261EXPORT_SYMBOL(tipc_multicast);
262/* EXPORT_SYMBOL(tipc_multicast_buf); not available yet */
263EXPORT_SYMBOL(tipc_ispublished);
264EXPORT_SYMBOL(tipc_available_nodes);
265
266/* TIPC API for external bearers (see tipc_bearer.h) */
267
268EXPORT_SYMBOL(tipc_block_bearer);
269EXPORT_SYMBOL(tipc_continue);
270EXPORT_SYMBOL(tipc_disable_bearer);
271EXPORT_SYMBOL(tipc_enable_bearer);
272EXPORT_SYMBOL(tipc_recv_msg);
273EXPORT_SYMBOL(tipc_register_media);
274
275/* TIPC API for external APIs (see tipc_port.h) */
276
277EXPORT_SYMBOL(tipc_createport_raw);
278EXPORT_SYMBOL(tipc_set_msg_option);
279EXPORT_SYMBOL(tipc_reject_msg);
280EXPORT_SYMBOL(tipc_send_buf_fast);
281EXPORT_SYMBOL(tipc_acknowledge);
282EXPORT_SYMBOL(tipc_get_port);
283EXPORT_SYMBOL(tipc_get_handle);
284
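tipc_core_start() and tipc_core_start_net() above both rely on the same idiom: chain the sub-system initialisations with short-circuiting ||, and let a single stop routine unwind whatever managed to come up. A hedged sketch of that pattern in isolation; subsys_a/subsys_b and example_stop() are invented names, not TIPC code:

/* Illustration of the start/stop idiom only. */
static int subsys_a_start(void) { return 0; }
static int subsys_b_start(void) { return 0; }

static void example_stop(void)
{
        /* must tolerate a partially started state, like tipc_core_stop() */
}

static int example_start(void)
{
        int res;

        /* the first failing call short-circuits the chain, leaving res set */
        if ((res = subsys_a_start()) ||
            (res = subsys_b_start()))
                example_stop();
        return res;
}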
diff --git a/net/tipc/core.h b/net/tipc/core.h
new file mode 100644
index 000000000000..1f2e8b27a13f
--- /dev/null
+++ b/net/tipc/core.h
@@ -0,0 +1,321 @@
1/*
2 * net/tipc/core.h: Include file for TIPC global declarations
3 *
4 * Copyright (c) 2005-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_CORE_H
38#define _TIPC_CORE_H
39
40#include <linux/tipc.h>
41#include <linux/tipc_config.h>
42#include <net/tipc/tipc_msg.h>
43#include <net/tipc/tipc_port.h>
44#include <net/tipc/tipc_bearer.h>
45#include <net/tipc/tipc.h>
46#include <linux/types.h>
47#include <linux/kernel.h>
48#include <linux/errno.h>
49#include <linux/mm.h>
50#include <linux/timer.h>
51#include <linux/string.h>
52#include <asm/uaccess.h>
53#include <linux/interrupt.h>
54#include <asm/atomic.h>
55#include <asm/hardirq.h>
56#include <linux/netdevice.h>
57#include <linux/in.h>
58#include <linux/list.h>
59#include <linux/vmalloc.h>
60
61/*
62 * TIPC debugging code
63 */
64
65#define assert(i) BUG_ON(!(i))
66
67struct tipc_msg;
68extern struct print_buf *TIPC_CONS, *TIPC_LOG;
69extern struct print_buf *TIPC_TEE(struct print_buf *, struct print_buf *);
70void tipc_msg_print(struct print_buf*,struct tipc_msg *,const char*);
71void tipc_printf(struct print_buf *, const char *fmt, ...);
72void tipc_dump(struct print_buf*,const char *fmt, ...);
73
74#ifdef CONFIG_TIPC_DEBUG
75
76/*
77 * TIPC debug support included:
78 * - system messages are printed to TIPC_OUTPUT print buffer
79 * - debug messages are printed to DBG_OUTPUT print buffer
80 */
81
82#define err(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_ERR "TIPC: " fmt, ## arg)
83#define warn(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_WARNING "TIPC: " fmt, ## arg)
84#define info(fmt, arg...) tipc_printf(TIPC_OUTPUT, KERN_NOTICE "TIPC: " fmt, ## arg)
85
86#define dbg(fmt, arg...) do {if (DBG_OUTPUT) tipc_printf(DBG_OUTPUT, fmt, ## arg);} while(0)
87#define msg_dbg(msg, txt) do {if (DBG_OUTPUT) tipc_msg_print(DBG_OUTPUT, msg, txt);} while(0)
88#define dump(fmt, arg...) do {if (DBG_OUTPUT) tipc_dump(DBG_OUTPUT, fmt, ##arg);} while(0)
89
90
91/*
92 * By default, TIPC_OUTPUT is defined to be system console and TIPC log buffer,
93 * while DBG_OUTPUT is the null print buffer. These defaults can be changed
94 * here, or on a per .c file basis, by redefining these symbols. The following
95 * print buffer options are available:
96 *
97 * NULL : Output to null print buffer (i.e. print nowhere)
98 * TIPC_CONS : Output to system console
99 * TIPC_LOG : Output to TIPC log buffer
100 * &buf : Output to user-defined buffer (struct print_buf *)
101 * TIPC_TEE(&buf_a,&buf_b) : Output to two print buffers (eg. TIPC_TEE(TIPC_CONS,TIPC_LOG) )
102 */
103
104#ifndef TIPC_OUTPUT
105#define TIPC_OUTPUT TIPC_TEE(TIPC_CONS,TIPC_LOG)
106#endif
107
108#ifndef DBG_OUTPUT
109#define DBG_OUTPUT NULL
110#endif
111
112#else
113
114#ifndef DBG_OUTPUT
115#define DBG_OUTPUT NULL
116#endif
117
118/*
119 * TIPC debug support not included:
120 * - system messages are printed to system console
121 * - debug messages are not printed
122 */
123
124#define err(fmt, arg...) printk(KERN_ERR "TIPC: " fmt , ## arg)
125#define info(fmt, arg...) printk(KERN_INFO "TIPC: " fmt , ## arg)
126#define warn(fmt, arg...) printk(KERN_WARNING "TIPC: " fmt , ## arg)
127
128#define dbg(fmt, arg...) do {} while (0)
129#define msg_dbg(msg,txt) do {} while (0)
130#define dump(fmt,arg...) do {} while (0)
131
132#endif
133
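The comment above notes that TIPC_OUTPUT and DBG_OUTPUT can be overridden on a per .c file basis. As an illustration only (not necessarily something any file in this patch does), a file built with CONFIG_TIPC_DEBUG that wants its dbg() output kept in the global log rather than discarded could define the symbol before pulling in core.h:

/* Illustrative per-file override of the debug print buffer. */
#define DBG_OUTPUT TIPC_LOG
#include "core.h"

Every other file continues to use the defaults chosen above; without CONFIG_TIPC_DEBUG the dbg()/msg_dbg()/dump() macros compile to nothing regardless.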
134
135/*
136 * TIPC-specific error codes
137 */
138
139#define ELINKCONG EAGAIN /* link congestion <=> resource unavailable */
140
141/*
142 * Global configuration variables
143 */
144
145extern u32 tipc_own_addr;
146extern int tipc_max_zones;
147extern int tipc_max_clusters;
148extern int tipc_max_nodes;
149extern int tipc_max_slaves;
150extern int tipc_max_ports;
151extern int tipc_max_subscriptions;
152extern int tipc_max_publications;
153extern int tipc_net_id;
154extern int tipc_remote_management;
155
156/*
157 * Other global variables
158 */
159
160extern int tipc_mode;
161extern int tipc_random;
162extern const char tipc_alphabet[];
163extern atomic_t tipc_user_count;
164
165
166/*
167 * Routines available to privileged subsystems
168 */
169
170extern int tipc_core_start(void);
171extern void tipc_core_stop(void);
172extern int tipc_core_start_net(void);
173extern void tipc_core_stop_net(void);
174
175static inline int delimit(int val, int min, int max)
176{
177 if (val > max)
178 return max;
179 if (val < min)
180 return min;
181 return val;
182}
183
184
185/*
186 * TIPC timer and signal code
187 */
188
189typedef void (*Handler) (unsigned long);
190
191u32 tipc_k_signal(Handler routine, unsigned long argument);
192
193/**
194 * k_init_timer - initialize a timer
195 * @timer: pointer to timer structure
196 * @routine: pointer to routine to invoke when timer expires
197 * @argument: value to pass to routine when timer expires
198 *
199 * Timer must be initialized before use (and terminated when no longer needed).
200 */
201
202static inline void k_init_timer(struct timer_list *timer, Handler routine,
203 unsigned long argument)
204{
205 dbg("initializing timer %p\n", timer);
206 init_timer(timer);
207 timer->function = routine;
208 timer->data = argument;
209}
210
211/**
212 * k_start_timer - start a timer
213 * @timer: pointer to timer structure
214 * @msec: time to delay (in ms)
215 *
216 * Schedules a previously initialized timer for later execution.
217 * If timer is already running, the new timeout overrides the previous request.
218 *
219 * To ensure the timer doesn't expire before the specified delay elapses,
220 * the amount of delay is rounded up when converting to the jiffies
221 * then an additional jiffy is added to account for the fact that
222 * the starting time may be in the middle of the current jiffy.
223 */
224
225static inline void k_start_timer(struct timer_list *timer, unsigned long msec)
226{
227 dbg("starting timer %p for %u\n", timer, msec);
228 mod_timer(timer, jiffies + msecs_to_jiffies(msec) + 1);
229}
230
231/**
232 * k_cancel_timer - cancel a timer
233 * @timer: pointer to timer structure
234 *
235 * Cancels a previously initialized timer.
236 * Can be called safely even if the timer is already inactive.
237 *
238 * WARNING: Must not be called when holding locks required by the timer's
239 * timeout routine, otherwise deadlock can occur on SMP systems!
240 */
241
242static inline void k_cancel_timer(struct timer_list *timer)
243{
244 dbg("cancelling timer %p\n", timer);
245 del_timer_sync(timer);
246}
247
248/**
249 * k_term_timer - terminate a timer
250 * @timer: pointer to timer structure
251 *
252 * Prevents further use of a previously initialized timer.
253 *
254 * WARNING: Caller must ensure timer isn't currently running.
255 *
256 * (Do not "enhance" this routine to automatically cancel an active timer,
257 * otherwise deadlock can arise when a timeout routine calls k_term_timer.)
258 */
259
260static inline void k_term_timer(struct timer_list *timer)
261{
262 dbg("terminating timer %p\n", timer);
263}
264
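The k_init_timer()/k_start_timer()/k_cancel_timer()/k_term_timer() wrappers above form a small life-cycle: initialise once, (re)start as needed, cancel before tearing down the owning structure, terminate last. A hedged sketch of that life-cycle for an invented periodic job; struct my_unit and my_poll() are illustrative, not TIPC code:

struct my_unit {
        struct timer_list poll_timer;
};

static void my_poll(unsigned long data)
{
        struct my_unit *u = (struct my_unit *)data;

        /* ... periodic work on 'u' ... */
        k_start_timer(&u->poll_timer, 500);     /* re-arm after 500 ms */
}

static void my_unit_init(struct my_unit *u)
{
        k_init_timer(&u->poll_timer, my_poll, (unsigned long)u);
        k_start_timer(&u->poll_timer, 500);
}

static void my_unit_destroy(struct my_unit *u)
{
        /* caller must not hold locks that my_poll() needs (see warning above) */
        k_cancel_timer(&u->poll_timer);
        k_term_timer(&u->poll_timer);
}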
265
266/*
267 * TIPC message buffer code
268 *
269 * TIPC message buffer headroom leaves room for 14 byte Ethernet header,
270 * while ensuring TIPC header is word aligned for quicker access
271 */
272
273#define BUF_HEADROOM 16u
274
275struct tipc_skb_cb {
276 void *handle;
277};
278
279#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
280
281
282static inline struct tipc_msg *buf_msg(struct sk_buff *skb)
283{
284 return (struct tipc_msg *)skb->data;
285}
286
287/**
288 * buf_acquire - creates a TIPC message buffer
289 * @size: message size (including TIPC header)
290 *
291 * Returns a new buffer. Space is reserved for a data link header.
292 */
293
294static inline struct sk_buff *buf_acquire(u32 size)
295{
296 struct sk_buff *skb;
297 unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
298
299 skb = alloc_skb(buf_size, GFP_ATOMIC);
300 if (skb) {
301 skb_reserve(skb, BUF_HEADROOM);
302 skb_put(skb, size);
303 skb->next = NULL;
304 }
305 return skb;
306}
307
308/**
309 * buf_discard - frees a TIPC message buffer
310 * @skb: message buffer
311 *
312 * Frees a message buffer. If passed NULL, just returns.
313 */
314
315static inline void buf_discard(struct sk_buff *skb)
316{
317 if (likely(skb != NULL))
318 kfree_skb(skb);
319}
320
321#endif
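buf_acquire() and buf_discard() above pair the allocation and release of a TIPC message buffer, with buf_msg() giving typed access to the header at skb->data. A minimal sketch of their use; example_build() and its hdr_size parameter are invented for illustration:

#include "core.h"

static struct sk_buff *example_build(u32 hdr_size)
{
        struct sk_buff *buf = buf_acquire(hdr_size);

        if (buf) {
                struct tipc_msg *msg = buf_msg(buf);
                /* ... fill in 'msg' using the accessors from msg.h ... */
                (void)msg;
        }
        /* caller hands 'buf' to a send routine, or buf_discard()s it */
        return buf;
}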
diff --git a/net/tipc/dbg.c b/net/tipc/dbg.c
new file mode 100644
index 000000000000..4f4beefa7830
--- /dev/null
+++ b/net/tipc/dbg.c
@@ -0,0 +1,395 @@
1/*
2 * net/tipc/dbg.c: TIPC print buffer routines for debugging
3 *
4 * Copyright (c) 1996-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "config.h"
39#include "dbg.h"
40
41#define MAX_STRING 512
42
43static char print_string[MAX_STRING];
44static spinlock_t print_lock = SPIN_LOCK_UNLOCKED;
45
46static struct print_buf cons_buf = { NULL, 0, NULL, NULL };
47struct print_buf *TIPC_CONS = &cons_buf;
48
49static struct print_buf log_buf = { NULL, 0, NULL, NULL };
50struct print_buf *TIPC_LOG = &log_buf;
51
52
53#define FORMAT(PTR,LEN,FMT) \
54{\
55 va_list args;\
56 va_start(args, FMT);\
57 LEN = vsprintf(PTR, FMT, args);\
58 va_end(args);\
59 *(PTR + LEN) = '\0';\
60}
61
62/*
63 * Locking policy when using print buffers.
64 *
65 * 1) Routines of the form printbuf_XXX() rely on the caller to prevent
66 * simultaneous use of the print buffer(s) being manipulated.
67 * 2) tipc_printf() uses 'print_lock' to prevent simultaneous use of
68 * 'print_string' and to protect its print buffer(s).
69 * 3) TIPC_TEE() uses 'print_lock' to protect its print buffer(s).
71 * 4) Routines of the form log_XXX() use 'print_lock' to protect TIPC_LOG.
71 */
72
73/**
74 * tipc_printbuf_init - initialize print buffer to empty
75 */
76
77void tipc_printbuf_init(struct print_buf *pb, char *raw, u32 sz)
78{
79 if (!pb || !raw || (sz < (MAX_STRING + 1)))
80 return;
81
82 pb->crs = pb->buf = raw;
83 pb->size = sz;
84 pb->next = 0;
85 pb->buf[0] = 0;
86 pb->buf[sz-1] = ~0;
87}
88
89/**
90 * tipc_printbuf_reset - reinitialize print buffer to empty state
91 */
92
93void tipc_printbuf_reset(struct print_buf *pb)
94{
95 if (pb && pb->buf)
96 tipc_printbuf_init(pb, pb->buf, pb->size);
97}
98
99/**
100 * tipc_printbuf_empty - test if print buffer is in empty state
101 */
102
103int tipc_printbuf_empty(struct print_buf *pb)
104{
105 return (!pb || !pb->buf || (pb->crs == pb->buf));
106}
107
108/**
109 * tipc_printbuf_validate - check for print buffer overflow
110 *
111 * Verifies that a print buffer has captured all data written to it.
112 * If data has been lost, linearize buffer and prepend an error message
113 *
114 * Returns length of print buffer data string (including trailing NULL)
115 */
116
117int tipc_printbuf_validate(struct print_buf *pb)
118{
119 char *err = " *** PRINT BUFFER WRAPPED AROUND ***\n";
120 char *cp_buf;
121 struct print_buf cb;
122
123 if (!pb || !pb->buf)
124 return 0;
125
126 if (pb->buf[pb->size - 1] == '\0') {
127 cp_buf = kmalloc(pb->size, GFP_ATOMIC);
128 if (cp_buf != NULL){
129 tipc_printbuf_init(&cb, cp_buf, pb->size);
130 tipc_printbuf_move(&cb, pb);
131 tipc_printbuf_move(pb, &cb);
132 kfree(cp_buf);
133 memcpy(pb->buf, err, strlen(err));
134 } else {
135 tipc_printbuf_reset(pb);
136 tipc_printf(pb, err);
137 }
138 }
139 return (pb->crs - pb->buf + 1);
140}
141
142/**
143 * tipc_printbuf_move - move print buffer contents to another print buffer
144 *
145 * Current contents of destination print buffer (if any) are discarded.
146 * Source print buffer becomes empty if a successful move occurs.
147 */
148
149void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from)
150{
151 int len;
152
153 /* Handle the cases where contents can't be moved */
154
155 if (!pb_to || !pb_to->buf)
156 return;
157
158 if (!pb_from || !pb_from->buf) {
159 tipc_printbuf_reset(pb_to);
160 return;
161 }
162
163 if (pb_to->size < pb_from->size) {
164 tipc_printbuf_reset(pb_to);
165 tipc_printf(pb_to, "*** PRINT BUFFER OVERFLOW ***");
166 return;
167 }
168
169 /* Copy data from char after cursor to end (if used) */
170 len = pb_from->buf + pb_from->size - pb_from->crs - 2;
171 if ((pb_from->buf[pb_from->size-1] == 0) && (len > 0)) {
172 strcpy(pb_to->buf, pb_from->crs + 1);
173 pb_to->crs = pb_to->buf + len;
174 } else
175 pb_to->crs = pb_to->buf;
176
177 /* Copy data from start to cursor (always) */
178 len = pb_from->crs - pb_from->buf;
179 strcpy(pb_to->crs, pb_from->buf);
180 pb_to->crs += len;
181
182 tipc_printbuf_reset(pb_from);
183}
184
185/**
186 * tipc_printf - append formatted output to print buffer chain
187 */
188
189void tipc_printf(struct print_buf *pb, const char *fmt, ...)
190{
191 int chars_to_add;
192 int chars_left;
193 char save_char;
194 struct print_buf *pb_next;
195
196 spin_lock_bh(&print_lock);
197 FORMAT(print_string, chars_to_add, fmt);
198 if (chars_to_add >= MAX_STRING)
199 strcpy(print_string, "*** STRING TOO LONG ***");
200
201 while (pb) {
202 if (pb == TIPC_CONS)
203 printk(print_string);
204 else if (pb->buf) {
205 chars_left = pb->buf + pb->size - pb->crs - 1;
206 if (chars_to_add <= chars_left) {
207 strcpy(pb->crs, print_string);
208 pb->crs += chars_to_add;
209 } else {
210 strcpy(pb->buf, print_string + chars_left);
211 save_char = print_string[chars_left];
212 print_string[chars_left] = 0;
213 strcpy(pb->crs, print_string);
214 print_string[chars_left] = save_char;
215 pb->crs = pb->buf + chars_to_add - chars_left;
216 }
217 }
218 pb_next = pb->next;
219 pb->next = 0;
220 pb = pb_next;
221 }
222 spin_unlock_bh(&print_lock);
223}
224
225/**
226 * TIPC_TEE - perform next output operation on both print buffers
227 */
228
229struct print_buf *TIPC_TEE(struct print_buf *b0, struct print_buf *b1)
230{
231 struct print_buf *pb = b0;
232
233 if (!b0 || (b0 == b1))
234 return b1;
235 if (!b1)
236 return b0;
237
238 spin_lock_bh(&print_lock);
239 while (pb->next) {
240 if ((pb->next == b1) || (pb->next == b0))
241 pb->next = pb->next->next;
242 else
243 pb = pb->next;
244 }
245 pb->next = b1;
246 spin_unlock_bh(&print_lock);
247 return b0;
248}
249
250/**
251 * print_to_console - write string of bytes to console in multiple chunks
252 */
253
254static void print_to_console(char *crs, int len)
255{
256 int rest = len;
257
258 while (rest > 0) {
259 int sz = rest < MAX_STRING ? rest : MAX_STRING;
260 char c = crs[sz];
261
262 crs[sz] = 0;
263 printk((const char *)crs);
264 crs[sz] = c;
265 rest -= sz;
266 crs += sz;
267 }
268}
269
270/**
271 * printbuf_dump - write print buffer contents to console
272 */
273
274static void printbuf_dump(struct print_buf *pb)
275{
276 int len;
277
278 /* Dump print buffer from char after cursor to end (if used) */
279 len = pb->buf + pb->size - pb->crs - 2;
280 if ((pb->buf[pb->size - 1] == 0) && (len > 0))
281 print_to_console(pb->crs + 1, len);
282
283 /* Dump print buffer from start to cursor (always) */
284 len = pb->crs - pb->buf;
285 print_to_console(pb->buf, len);
286}
287
288/**
289 * tipc_dump - dump non-console print buffer(s) to console
290 */
291
292void tipc_dump(struct print_buf *pb, const char *fmt, ...)
293{
294 int len;
295
296 spin_lock_bh(&print_lock);
297 FORMAT(TIPC_CONS->buf, len, fmt);
298 printk(TIPC_CONS->buf);
299
300 for (; pb; pb = pb->next) {
301 if (pb == TIPC_CONS)
302 continue;
303 printk("\n---- Start of dump,%s log ----\n\n",
304 (pb == TIPC_LOG) ? "global" : "local");
305 printbuf_dump(pb);
306 tipc_printbuf_reset(pb);
307 printk("\n-------- End of dump --------\n");
308 }
309 spin_unlock_bh(&print_lock);
310}
311
312/**
313 * tipc_log_stop - free up TIPC log print buffer
314 */
315
316void tipc_log_stop(void)
317{
318 spin_lock_bh(&print_lock);
319 if (TIPC_LOG->buf) {
320 kfree(TIPC_LOG->buf);
321 TIPC_LOG->buf = NULL;
322 }
323 spin_unlock_bh(&print_lock);
324}
325
326/**
327 * tipc_log_reinit - set TIPC log print buffer to specified size
328 */
329
330void tipc_log_reinit(int log_size)
331{
332 tipc_log_stop();
333
334 if (log_size) {
335 if (log_size <= MAX_STRING)
336 log_size = MAX_STRING + 1;
337 spin_lock_bh(&print_lock);
338 tipc_printbuf_init(TIPC_LOG, kmalloc(log_size, GFP_ATOMIC), log_size);
339 spin_unlock_bh(&print_lock);
340 }
341}
342
343/**
344 * tipc_log_resize - reconfigure size of TIPC log buffer
345 */
346
347struct sk_buff *tipc_log_resize(const void *req_tlv_area, int req_tlv_space)
348{
349 u32 value;
350
351 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
352 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
353
354 value = *(u32 *)TLV_DATA(req_tlv_area);
355 value = ntohl(value);
356 if (value != delimit(value, 0, 32768))
357 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
358 " (log size must be 0-32768)");
359 tipc_log_reinit(value);
360 return tipc_cfg_reply_none();
361}
362
363/**
364 * tipc_log_dump - capture TIPC log buffer contents in configuration message
365 */
366
367struct sk_buff *tipc_log_dump(void)
368{
369 struct sk_buff *reply;
370
371 spin_lock_bh(&print_lock);
372 if (!TIPC_LOG->buf)
373 reply = tipc_cfg_reply_ultra_string("log not activated\n");
374 else if (tipc_printbuf_empty(TIPC_LOG))
375 reply = tipc_cfg_reply_ultra_string("log is empty\n");
376 else {
377 struct tlv_desc *rep_tlv;
378 struct print_buf pb;
379 int str_len;
380
381 str_len = min(TIPC_LOG->size, 32768u);
382 reply = tipc_cfg_reply_alloc(TLV_SPACE(str_len));
383 if (reply) {
384 rep_tlv = (struct tlv_desc *)reply->data;
385 tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), str_len);
386 tipc_printbuf_move(&pb, TIPC_LOG);
387 str_len = strlen(TLV_DATA(rep_tlv)) + 1;
388 skb_put(reply, TLV_SPACE(str_len));
389 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
390 }
391 }
392 spin_unlock_bh(&print_lock);
393 return reply;
394}
395
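The print buffer API above (tipc_printbuf_init(), tipc_printf(), tipc_printbuf_move()) expects a caller-supplied backing store larger than MAX_STRING; tipc_printbuf_init() silently ignores anything of 512 bytes or less. A hedged sketch of a private debug buffer that is later flushed into the global log; my_raw, my_pb and the my_dbg_* functions are invented for illustration:

#include "core.h"
#include "dbg.h"

static char my_raw[1024];               /* must exceed MAX_STRING (512) */
static struct print_buf my_pb;

static void my_dbg_setup(void)
{
        tipc_printbuf_init(&my_pb, my_raw, sizeof(my_raw));
}

static void my_dbg_event(u32 seqno)
{
        tipc_printf(&my_pb, "saw packet %u\n", seqno);
}

static void my_dbg_flush(void)
{
        /* push everything captured so far into the global log buffer */
        tipc_printbuf_move(TIPC_LOG, &my_pb);
}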
diff --git a/net/tipc/dbg.h b/net/tipc/dbg.h
new file mode 100644
index 000000000000..227f050d2a52
--- /dev/null
+++ b/net/tipc/dbg.h
@@ -0,0 +1,59 @@
1/*
2 * net/tipc/dbg.h: Include file for TIPC print buffer routines
3 *
4 * Copyright (c) 1997-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_DBG_H
38#define _TIPC_DBG_H
39
40struct print_buf {
41 char *buf;
42 u32 size;
43 char *crs;
44 struct print_buf *next;
45};
46
47void tipc_printbuf_init(struct print_buf *pb, char *buf, u32 sz);
48void tipc_printbuf_reset(struct print_buf *pb);
49int tipc_printbuf_empty(struct print_buf *pb);
50int tipc_printbuf_validate(struct print_buf *pb);
51void tipc_printbuf_move(struct print_buf *pb_to, struct print_buf *pb_from);
52
53void tipc_log_reinit(int log_size);
54void tipc_log_stop(void);
55
56struct sk_buff *tipc_log_resize(const void *req_tlv_area, int req_tlv_space);
57struct sk_buff *tipc_log_dump(void);
58
59#endif
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
new file mode 100644
index 000000000000..53ba4630c10d
--- /dev/null
+++ b/net/tipc/discover.c
@@ -0,0 +1,318 @@
1/*
2 * net/tipc/discover.c
3 *
4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "dbg.h"
39#include "link.h"
40#include "zone.h"
41#include "discover.h"
42#include "port.h"
43#include "name_table.h"
44
45#define TIPC_LINK_REQ_INIT 125 /* min delay during bearer start up */
46#define TIPC_LINK_REQ_FAST 2000 /* normal delay if bearer has no links */
47#define TIPC_LINK_REQ_SLOW 600000 /* normal delay if bearer has links */
48
49#if 0
50#define GET_NODE_INFO 300
51#define GET_NODE_INFO_RESULT 301
52#define FORWARD_LINK_PROBE 302
53#define LINK_REQUEST_REJECTED 303
54#define LINK_REQUEST_ACCEPTED 304
55#define DROP_LINK_REQUEST 305
56#define CHECK_LINK_COUNT 306
57#endif
58
59/*
60 * TODO: Most of the inter-cluster setup stuff should be
61 * rewritten, and be made conformant with specification.
62 */
63
64
65/**
66 * struct link_req - information about an ongoing link setup request
67 * @bearer: bearer issuing requests
68 * @dest: destination address for request messages
69 * @buf: request message to be (repeatedly) sent
70 * @timer: timer governing period between requests
71 * @timer_intv: current interval between requests (in ms)
72 */
73struct link_req {
74 struct bearer *bearer;
75 struct tipc_media_addr dest;
76 struct sk_buff *buf;
77 struct timer_list timer;
78 unsigned int timer_intv;
79};
80
81
82#if 0
83int disc_create_link(const struct tipc_link_create *argv)
84{
85 /*
86 * Code for inter cluster link setup here
87 */
88 return TIPC_OK;
89}
90#endif
91
92/*
93 * disc_lost_link(): A link has lost contact
94 */
95
96void tipc_disc_link_event(u32 addr, char *name, int up)
97{
98 if (in_own_cluster(addr))
99 return;
100 /*
101 * Code for inter cluster link setup here
102 */
103}
104
105/**
106 * tipc_disc_init_msg - initialize a link setup message
107 * @type: message type (request or response)
108 * @req_links: number of links associated with message
109 * @dest_domain: network domain of node(s) which should respond to message
110 * @b_ptr: ptr to bearer issuing message
111 */
112
113struct sk_buff *tipc_disc_init_msg(u32 type,
114 u32 req_links,
115 u32 dest_domain,
116 struct bearer *b_ptr)
117{
118 struct sk_buff *buf = buf_acquire(DSC_H_SIZE);
119 struct tipc_msg *msg;
120
121 if (buf) {
122 msg = buf_msg(buf);
123 msg_init(msg, LINK_CONFIG, type, TIPC_OK, DSC_H_SIZE,
124 dest_domain);
125 msg_set_non_seq(msg);
126 msg_set_req_links(msg, req_links);
127 msg_set_dest_domain(msg, dest_domain);
128 msg_set_bc_netid(msg, tipc_net_id);
129 msg_set_media_addr(msg, &b_ptr->publ.addr);
130 }
131 return buf;
132}
133
134/**
135 * tipc_disc_recv_msg - handle incoming link setup message (request or response)
136 * @buf: buffer containing message
137 */
138
139void tipc_disc_recv_msg(struct sk_buff *buf)
140{
141 struct bearer *b_ptr = (struct bearer *)TIPC_SKB_CB(buf)->handle;
142 struct link *link;
143 struct tipc_media_addr media_addr;
144 struct tipc_msg *msg = buf_msg(buf);
145 u32 dest = msg_dest_domain(msg);
146 u32 orig = msg_prevnode(msg);
147 u32 net_id = msg_bc_netid(msg);
148 u32 type = msg_type(msg);
149
150 msg_get_media_addr(msg,&media_addr);
151 msg_dbg(msg, "RECV:");
152 buf_discard(buf);
153
154 if (net_id != tipc_net_id)
155 return;
156 if (!tipc_addr_domain_valid(dest))
157 return;
158 if (!tipc_addr_node_valid(orig))
159 return;
160 if (orig == tipc_own_addr)
161 return;
162 if (!in_scope(dest, tipc_own_addr))
163 return;
164 if (is_slave(tipc_own_addr) && is_slave(orig))
165 return;
166 if (is_slave(orig) && !in_own_cluster(orig))
167 return;
168 if (in_own_cluster(orig)) {
169 /* Always accept link here */
170 struct sk_buff *rbuf;
171 struct tipc_media_addr *addr;
172 struct node *n_ptr = tipc_node_find(orig);
173 int link_up;
174 dbg(" in own cluster\n");
175 if (n_ptr == NULL) {
176 n_ptr = tipc_node_create(orig);
177 }
178 if (n_ptr == NULL) {
179 warn("Memory squeeze; Failed to create node\n");
180 return;
181 }
182 spin_lock_bh(&n_ptr->lock);
183 link = n_ptr->links[b_ptr->identity];
184 if (!link) {
185 dbg("creating link\n");
186 link = tipc_link_create(b_ptr, orig, &media_addr);
187 if (!link) {
188 spin_unlock_bh(&n_ptr->lock);
189 return;
190 }
191 }
192 addr = &link->media_addr;
193 if (memcmp(addr, &media_addr, sizeof(*addr))) {
194 char addr_string[16];
195
196 warn("New bearer address for %s\n",
197 addr_string_fill(addr_string, orig));
198 memcpy(addr, &media_addr, sizeof(*addr));
199 tipc_link_reset(link);
200 }
201 link_up = tipc_link_is_up(link);
202 spin_unlock_bh(&n_ptr->lock);
203 if ((type == DSC_RESP_MSG) || link_up)
204 return;
205 rbuf = tipc_disc_init_msg(DSC_RESP_MSG, 1, orig, b_ptr);
206 if (rbuf != NULL) {
207 msg_dbg(buf_msg(rbuf),"SEND:");
208 b_ptr->media->send_msg(rbuf, &b_ptr->publ, &media_addr);
209 buf_discard(rbuf);
210 }
211 }
212}
213
214/**
215 * tipc_disc_stop_link_req - stop sending periodic link setup requests
216 * @req: ptr to link request structure
217 */
218
219void tipc_disc_stop_link_req(struct link_req *req)
220{
221 if (!req)
222 return;
223
224 k_cancel_timer(&req->timer);
225 k_term_timer(&req->timer);
226 buf_discard(req->buf);
227 kfree(req);
228}
229
230/**
231 * tipc_disc_update_link_req - update frequency of periodic link setup requests
232 * @req: ptr to link request structure
233 */
234
235void tipc_disc_update_link_req(struct link_req *req)
236{
237 if (!req)
238 return;
239
240 if (req->timer_intv == TIPC_LINK_REQ_SLOW) {
241 if (!req->bearer->nodes.count) {
242 req->timer_intv = TIPC_LINK_REQ_FAST;
243 k_start_timer(&req->timer, req->timer_intv);
244 }
245 } else if (req->timer_intv == TIPC_LINK_REQ_FAST) {
246 if (req->bearer->nodes.count) {
247 req->timer_intv = TIPC_LINK_REQ_SLOW;
248 k_start_timer(&req->timer, req->timer_intv);
249 }
250 } else {
251 /* leave timer "as is" if it hasn't yet reached a "normal" rate */
252 }
253}
254
255/**
256 * disc_timeout - send a periodic link setup request
257 * @req: ptr to link request structure
258 *
259 * Called whenever a link setup request timer associated with a bearer expires.
260 */
261
262static void disc_timeout(struct link_req *req)
263{
264 spin_lock_bh(&req->bearer->publ.lock);
265
266 req->bearer->media->send_msg(req->buf, &req->bearer->publ, &req->dest);
267
268 if ((req->timer_intv == TIPC_LINK_REQ_SLOW) ||
269 (req->timer_intv == TIPC_LINK_REQ_FAST)) {
270 /* leave timer interval "as is" if already at a "normal" rate */
271 } else {
272 req->timer_intv *= 2;
273 if (req->timer_intv > TIPC_LINK_REQ_SLOW)
274 req->timer_intv = TIPC_LINK_REQ_SLOW;
275 if ((req->timer_intv == TIPC_LINK_REQ_FAST) &&
276 (req->bearer->nodes.count))
277 req->timer_intv = TIPC_LINK_REQ_SLOW;
278 }
279 k_start_timer(&req->timer, req->timer_intv);
280
281 spin_unlock_bh(&req->bearer->publ.lock);
282}
283
284/**
285 * tipc_disc_init_link_req - start sending periodic link setup requests
286 * @b_ptr: ptr to bearer issuing requests
287 * @dest: destination address for request messages
288 * @dest_domain: network domain of node(s) which should respond to message
289 * @req_links: max number of desired links
290 *
291 * Returns pointer to link request structure, or NULL if unable to create.
292 */
293
294struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
295 const struct tipc_media_addr *dest,
296 u32 dest_domain,
297 u32 req_links)
298{
299 struct link_req *req;
300
301 req = (struct link_req *)kmalloc(sizeof(*req), GFP_ATOMIC);
302 if (!req)
303 return NULL;
304
305 req->buf = tipc_disc_init_msg(DSC_REQ_MSG, req_links, dest_domain, b_ptr);
306 if (!req->buf) {
307 kfree(req);
308 return NULL;
309 }
310
311 memcpy(&req->dest, dest, sizeof(*dest));
312 req->bearer = b_ptr;
313 req->timer_intv = TIPC_LINK_REQ_INIT;
314 k_init_timer(&req->timer, (Handler)disc_timeout, (unsigned long)req);
315 k_start_timer(&req->timer, req->timer_intv);
316 return req;
317}
318
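The routines above are driven by the bearer code elsewhere in this patch: a request object is created when a bearer comes up, re-paced whenever the bearer's node count changes, and stopped when the bearer is disabled. A hedged sketch of that calling order; the my_bearer_* functions are invented, only the tipc_disc_* calls are from discover.h:

#include "core.h"
#include "bearer.h"
#include "discover.h"

static struct link_req *my_req;

static int my_bearer_enable(struct bearer *b_ptr,
                            const struct tipc_media_addr *bcast,
                            u32 domain)
{
        my_req = tipc_disc_init_link_req(b_ptr, bcast, domain, 1);
        return my_req ? 0 : -ENOMEM;
}

static void my_bearer_links_changed(void)
{
        /* re-pace between TIPC_LINK_REQ_FAST and TIPC_LINK_REQ_SLOW */
        tipc_disc_update_link_req(my_req);
}

static void my_bearer_disable(void)
{
        tipc_disc_stop_link_req(my_req);
        my_req = NULL;
}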
diff --git a/net/tipc/discover.h b/net/tipc/discover.h
new file mode 100644
index 000000000000..0454fd1ae7f3
--- /dev/null
+++ b/net/tipc/discover.h
@@ -0,0 +1,58 @@
1/*
2 * net/tipc/discover.h
3 *
4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_DISCOVER_H
38#define _TIPC_DISCOVER_H
39
40#include "core.h"
41
42struct link_req;
43
44struct link_req *tipc_disc_init_link_req(struct bearer *b_ptr,
45 const struct tipc_media_addr *dest,
46 u32 dest_domain,
47 u32 req_links);
48void tipc_disc_update_link_req(struct link_req *req);
49void tipc_disc_stop_link_req(struct link_req *req);
50
51void tipc_disc_recv_msg(struct sk_buff *buf);
52
53void tipc_disc_link_event(u32 addr, char *name, int up);
54#if 0
55int disc_create_link(const struct tipc_link_create *argv);
56#endif
57
58#endif
diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c
new file mode 100644
index 000000000000..1f8d83b9c8b4
--- /dev/null
+++ b/net/tipc/eth_media.c
@@ -0,0 +1,297 @@
1/*
2 * net/tipc/eth_media.c: Ethernet bearer support for TIPC
3 *
4 * Copyright (c) 2001-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <net/tipc/tipc.h>
38#include <net/tipc/tipc_bearer.h>
39#include <net/tipc/tipc_msg.h>
40#include <linux/netdevice.h>
41
42#define MAX_ETH_BEARERS 2
43#define ETH_LINK_PRIORITY TIPC_DEF_LINK_PRI
44#define ETH_LINK_TOLERANCE TIPC_DEF_LINK_TOL
45#define ETH_LINK_WINDOW TIPC_DEF_LINK_WIN
46
47/**
48 * struct eth_bearer - Ethernet bearer data structure
49 * @bearer: ptr to associated "generic" bearer structure
50 * @dev: ptr to associated Ethernet network device
51 * @tipc_packet_type: used in binding TIPC to Ethernet driver
52 */
53
54struct eth_bearer {
55 struct tipc_bearer *bearer;
56 struct net_device *dev;
57 struct packet_type tipc_packet_type;
58};
59
60static struct eth_bearer eth_bearers[MAX_ETH_BEARERS];
61static int eth_started = 0;
62static struct notifier_block notifier;
63
64/**
65 * send_msg - send a TIPC message out over an Ethernet interface
66 */
67
68static int send_msg(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
69 struct tipc_media_addr *dest)
70{
71 struct sk_buff *clone;
72 struct net_device *dev;
73
74 clone = skb_clone(buf, GFP_ATOMIC);
75 if (clone) {
76 clone->nh.raw = clone->data;
77 dev = ((struct eth_bearer *)(tb_ptr->usr_handle))->dev;
78 clone->dev = dev;
79 dev->hard_header(clone, dev, ETH_P_TIPC,
80 &dest->dev_addr.eth_addr,
81 dev->dev_addr, clone->len);
82 dev_queue_xmit(clone);
83 }
84 return TIPC_OK;
85}
86
87/**
88 * recv_msg - handle incoming TIPC message from an Ethernet interface
89 *
90 * Routine truncates any Ethernet padding/CRC appended to the message,
91 * and ensures message size matches actual length
92 */
93
94static int recv_msg(struct sk_buff *buf, struct net_device *dev,
95 struct packet_type *pt, struct net_device *orig_dev)
96{
97 struct eth_bearer *eb_ptr = (struct eth_bearer *)pt->af_packet_priv;
98 u32 size;
99
100 if (likely(eb_ptr->bearer)) {
101 size = msg_size((struct tipc_msg *)buf->data);
102 skb_trim(buf, size);
103 if (likely(buf->len == size)) {
104 buf->next = NULL;
105 tipc_recv_msg(buf, eb_ptr->bearer);
106 } else {
107 kfree_skb(buf);
108 }
109 } else {
110 kfree_skb(buf);
111 }
112 return TIPC_OK;
113}
114
115/**
116 * enable_bearer - attach TIPC bearer to an Ethernet interface
117 */
118
119static int enable_bearer(struct tipc_bearer *tb_ptr)
120{
121 struct net_device *dev = dev_base;
122 struct eth_bearer *eb_ptr = &eth_bearers[0];
123 struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
124 char *driver_name = strchr((const char *)tb_ptr->name, ':') + 1;
125
126 /* Find device with specified name */
127
128 while (dev && dev->name &&
129 (memcmp(dev->name, driver_name, strlen(dev->name)))) {
130 dev = dev->next;
131 }
132 if (!dev)
133 return -ENODEV;
134
135 /* Find Ethernet bearer for device (or create one) */
136
137 for (;(eb_ptr != stop) && eb_ptr->dev && (eb_ptr->dev != dev); eb_ptr++);
138 if (eb_ptr == stop)
139 return -EDQUOT;
140 if (!eb_ptr->dev) {
141 eb_ptr->dev = dev;
142 eb_ptr->tipc_packet_type.type = __constant_htons(ETH_P_TIPC);
143 eb_ptr->tipc_packet_type.dev = dev;
144 eb_ptr->tipc_packet_type.func = recv_msg;
145 eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr;
146 INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list));
147 dev_hold(dev);
148 dev_add_pack(&eb_ptr->tipc_packet_type);
149 }
150
151 /* Associate TIPC bearer with Ethernet bearer */
152
153 eb_ptr->bearer = tb_ptr;
154 tb_ptr->usr_handle = (void *)eb_ptr;
155 tb_ptr->mtu = dev->mtu;
156 tb_ptr->blocked = 0;
157 tb_ptr->addr.type = htonl(TIPC_MEDIA_TYPE_ETH);
158 memcpy(&tb_ptr->addr.dev_addr, &dev->dev_addr, ETH_ALEN);
159 return 0;
160}
161
162/**
163 * disable_bearer - detach TIPC bearer from an Ethernet interface
164 *
165 * We really should do dev_remove_pack() here, but this function cannot be
166 * called at tasklet level, so eth_bearer->bearer is used as a flag to throw
167 * away incoming buffers, and dev_remove_pack() is postponed until eth_media_stop().
168 */
169
170static void disable_bearer(struct tipc_bearer *tb_ptr)
171{
172 ((struct eth_bearer *)tb_ptr->usr_handle)->bearer = 0;
173}
174
175/**
176 * recv_notification - handle device updates from OS
177 *
178 * Change the state of the Ethernet bearer (if any) associated with the
179 * specified device.
180 */
181
182static int recv_notification(struct notifier_block *nb, unsigned long evt,
183 void *dv)
184{
185 struct net_device *dev = (struct net_device *)dv;
186 struct eth_bearer *eb_ptr = &eth_bearers[0];
187 struct eth_bearer *stop = &eth_bearers[MAX_ETH_BEARERS];
188
189 while ((eb_ptr->dev != dev)) {
190 if (++eb_ptr == stop)
191 return NOTIFY_DONE; /* couldn't find device */
192 }
193 if (!eb_ptr->bearer)
194 return NOTIFY_DONE; /* bearer had been disabled */
195
196 eb_ptr->bearer->mtu = dev->mtu;
197
198 switch (evt) {
199 case NETDEV_CHANGE:
200 if (netif_carrier_ok(dev))
201 tipc_continue(eb_ptr->bearer);
202 else
203 tipc_block_bearer(eb_ptr->bearer->name);
204 break;
205 case NETDEV_UP:
206 tipc_continue(eb_ptr->bearer);
207 break;
208 case NETDEV_DOWN:
209 tipc_block_bearer(eb_ptr->bearer->name);
210 break;
211 case NETDEV_CHANGEMTU:
212 case NETDEV_CHANGEADDR:
213 tipc_block_bearer(eb_ptr->bearer->name);
214 tipc_continue(eb_ptr->bearer);
215 break;
216 case NETDEV_UNREGISTER:
217 case NETDEV_CHANGENAME:
218 tipc_disable_bearer(eb_ptr->bearer->name);
219 break;
220 }
221 return NOTIFY_OK;
222}
223
224/**
225 * eth_addr2str - convert Ethernet address to string
226 */
227
228static char *eth_addr2str(struct tipc_media_addr *a, char *str_buf, int str_size)
229{
230 unchar *addr = (unchar *)&a->dev_addr;
231
232 if (str_size < 18)
233 *str_buf = '\0';
234 else
235 sprintf(str_buf, "%02x:%02x:%02x:%02x:%02x:%02x",
236 addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
237 return str_buf;
238}
239
240/**
241 * tipc_eth_media_start - activate Ethernet bearer support
242 *
243 * Register Ethernet media type with TIPC bearer code. Also register
244 * with OS for notifications about device state changes.
245 */
246
247int tipc_eth_media_start(void)
248{
249 struct tipc_media_addr bcast_addr;
250 int res;
251
252 if (eth_started)
253 return -EINVAL;
254
255 memset(&bcast_addr, 0xff, sizeof(bcast_addr));
256 memset(eth_bearers, 0, sizeof(eth_bearers));
257
258 res = tipc_register_media(TIPC_MEDIA_TYPE_ETH, "eth",
259 enable_bearer, disable_bearer, send_msg,
260 eth_addr2str, &bcast_addr, ETH_LINK_PRIORITY,
261 ETH_LINK_TOLERANCE, ETH_LINK_WINDOW);
262 if (res)
263 return res;
264
265 notifier.notifier_call = &recv_notification;
266 notifier.priority = 0;
267 res = register_netdevice_notifier(&notifier);
268 if (!res)
269 eth_started = 1;
270 return res;
271}
272
273/**
274 * tipc_eth_media_stop - deactivate Ethernet bearer support
275 */
276
277void tipc_eth_media_stop(void)
278{
279 int i;
280
281 if (!eth_started)
282 return;
283
284 unregister_netdevice_notifier(&notifier);
285 for (i = 0; i < MAX_ETH_BEARERS ; i++) {
286 if (eth_bearers[i].bearer) {
287 eth_bearers[i].bearer->blocked = 1;
288 eth_bearers[i].bearer = 0;
289 }
290 if (eth_bearers[i].dev) {
291 dev_remove_pack(&eth_bearers[i].tipc_packet_type);
292 dev_put(eth_bearers[i].dev);
293 }
294 }
295 memset(&eth_bearers, 0, sizeof(eth_bearers));
296 eth_started = 0;
297}
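tipc_eth_media_start() above shows the registration contract for a media module: enable/disable/send/addr2str callbacks, a broadcast address, and default link parameters. A hedged sketch of a skeleton for some other media type, simply mirroring the argument order of the Ethernet call above; MY_MEDIA_TYPE, the "mymedia" name and every my_* function are placeholders, not real TIPC identifiers:

#include <net/tipc/tipc.h>
#include <net/tipc/tipc_bearer.h>
#include <linux/string.h>

#define MY_MEDIA_TYPE 0x12      /* placeholder value, not a registered media type */

static int my_enable(struct tipc_bearer *tb_ptr) { return 0; }
static void my_disable(struct tipc_bearer *tb_ptr) { }
static int my_send(struct sk_buff *buf, struct tipc_bearer *tb_ptr,
                   struct tipc_media_addr *dest) { return TIPC_OK; }
static char *my_addr2str(struct tipc_media_addr *a, char *buf, int sz)
{
        if (sz > 0)
                buf[0] = '\0';
        return buf;
}

static int my_media_start(void)
{
        struct tipc_media_addr bcast;

        memset(&bcast, 0xff, sizeof(bcast));
        return tipc_register_media(MY_MEDIA_TYPE, "mymedia",
                                   my_enable, my_disable, my_send,
                                   my_addr2str, &bcast, TIPC_DEF_LINK_PRI,
                                   TIPC_DEF_LINK_TOL, TIPC_DEF_LINK_WIN);
}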
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
new file mode 100644
index 000000000000..966f70a1b608
--- /dev/null
+++ b/net/tipc/handler.c
@@ -0,0 +1,132 @@
1/*
2 * net/tipc/handler.c: TIPC signal handling
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38
39struct queue_item {
40 struct list_head next_signal;
41 void (*handler) (unsigned long);
42 unsigned long data;
43};
44
45static kmem_cache_t *tipc_queue_item_cache;
46static struct list_head signal_queue_head;
47static spinlock_t qitem_lock = SPIN_LOCK_UNLOCKED;
48static int handler_enabled = 0;
49
50static void process_signal_queue(unsigned long dummy);
51
52static DECLARE_TASKLET_DISABLED(tipc_tasklet, process_signal_queue, 0);
53
54
55unsigned int tipc_k_signal(Handler routine, unsigned long argument)
56{
57 struct queue_item *item;
58
59 if (!handler_enabled) {
60 err("Signal request ignored by handler\n");
61 return -ENOPROTOOPT;
62 }
63
64 spin_lock_bh(&qitem_lock);
65 item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC);
66 if (!item) {
67 err("Signal queue out of memory\n");
68 spin_unlock_bh(&qitem_lock);
69 return -ENOMEM;
70 }
71 item->handler = routine;
72 item->data = argument;
73 list_add_tail(&item->next_signal, &signal_queue_head);
74 spin_unlock_bh(&qitem_lock);
75 tasklet_schedule(&tipc_tasklet);
76 return 0;
77}
78
79static void process_signal_queue(unsigned long dummy)
80{
81 struct queue_item *__volatile__ item;
82 struct list_head *l, *n;
83
84 spin_lock_bh(&qitem_lock);
85 list_for_each_safe(l, n, &signal_queue_head) {
86 item = list_entry(l, struct queue_item, next_signal);
87 list_del(&item->next_signal);
88 spin_unlock_bh(&qitem_lock);
89 item->handler(item->data);
90 spin_lock_bh(&qitem_lock);
91 kmem_cache_free(tipc_queue_item_cache, item);
92 }
93 spin_unlock_bh(&qitem_lock);
94}
95
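The two routines above form a defer-then-drain pattern: tipc_k_signal() queues a (handler, argument) pair under a spinlock, and the tasklet later drains the queue and invokes each handler outside the caller's context. Below is a minimal single-threaded userspace sketch of that pattern; it is an editorial illustration, not part of the patch, and omits the slab cache, the spinlock and the tasklet (defer, drain, hello are made-up names).

#include <stdio.h>

typedef void (*handler_t)(unsigned long);

struct item { handler_t fn; unsigned long arg; };

static struct item queue[8];
static int n_items;

/* cf. tipc_k_signal(): remember the call instead of making it now */
static void defer(handler_t fn, unsigned long arg)
{
	if (n_items >= 8)
		return;		/* sketch only: the kernel version allocates items from a cache */
	queue[n_items].fn = fn;
	queue[n_items].arg = arg;
	n_items++;
}

/* cf. process_signal_queue(): run everything that was deferred */
static void drain(void)
{
	int i;

	for (i = 0; i < n_items; i++)
		queue[i].fn(queue[i].arg);
	n_items = 0;
}

static void hello(unsigned long x)
{
	printf("deferred call, arg = %lu\n", x);
}

int main(void)
{
	defer(hello, 42);
	drain();	/* in the kernel this step runs in tasklet (softirq) context */
	return 0;
}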
96int tipc_handler_start(void)
97{
98 tipc_queue_item_cache =
99 kmem_cache_create("tipc_queue_items", sizeof(struct queue_item),
100 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
101 if (!tipc_queue_item_cache)
102 return -ENOMEM;
103
104 INIT_LIST_HEAD(&signal_queue_head);
105 tasklet_enable(&tipc_tasklet);
106 handler_enabled = 1;
107 return 0;
108}
109
110void tipc_handler_stop(void)
111{
112 struct list_head *l, *n;
113 struct queue_item *item;
114
115 if (!handler_enabled)
116 return;
117
118 handler_enabled = 0;
119 tasklet_disable(&tipc_tasklet);
120 tasklet_kill(&tipc_tasklet);
121
122 spin_lock_bh(&qitem_lock);
123 list_for_each_safe(l, n, &signal_queue_head) {
124 item = list_entry(l, struct queue_item, next_signal);
125 list_del(&item->next_signal);
126 kmem_cache_free(tipc_queue_item_cache, item);
127 }
128 spin_unlock_bh(&qitem_lock);
129
130 kmem_cache_destroy(tipc_queue_item_cache);
131}
132
diff --git a/net/tipc/link.c b/net/tipc/link.c
new file mode 100644
index 000000000000..511872afa459
--- /dev/null
+++ b/net/tipc/link.c
@@ -0,0 +1,3166 @@
1/*
2 * net/tipc/link.c: TIPC link code
3 *
4 * Copyright (c) 1996-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "dbg.h"
39#include "link.h"
40#include "net.h"
41#include "node.h"
42#include "port.h"
43#include "addr.h"
44#include "node_subscr.h"
45#include "name_distr.h"
46#include "bearer.h"
47#include "name_table.h"
48#include "discover.h"
49#include "config.h"
50#include "bcast.h"
51
52
53/*
54 * Limit for deferred reception queue:
55 */
56
57#define DEF_QUEUE_LIMIT 256u
58
59/*
60 * Link state events:
61 */
62
63#define STARTING_EVT 856384768 /* link processing trigger */
64#define TRAFFIC_MSG_EVT 560815u /* rx'd ??? */
65#define TIMEOUT_EVT 560817u /* link timer expired */
66
67/*
68 * The following two 'message types' is really just implementation
69 * data conveniently stored in the message header.
70 * They must not be considered part of the protocol
71 */
72#define OPEN_MSG 0
73#define CLOSED_MSG 1
74
75/*
76 * State value stored in 'exp_msg_count'
77 */
78
79#define START_CHANGEOVER 100000u
80
81/**
82 * struct link_name - deconstructed link name
83 * @addr_local: network address of node at this end
84 * @if_local: name of interface at this end
85 * @addr_peer: network address of node at far end
86 * @if_peer: name of interface at far end
87 */
88
89struct link_name {
90 u32 addr_local;
91 char if_local[TIPC_MAX_IF_NAME];
92 u32 addr_peer;
93 char if_peer[TIPC_MAX_IF_NAME];
94};
95
96#if 0
97
98/* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
99
100/**
101 * struct link_event - link up/down event notification
102 */
103
104struct link_event {
105 u32 addr;
106 int up;
107 void (*fcn)(u32, char *, int);
108 char name[TIPC_MAX_LINK_NAME];
109};
110
111#endif
112
113static void link_handle_out_of_seq_msg(struct link *l_ptr,
114 struct sk_buff *buf);
115static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf);
116static int link_recv_changeover_msg(struct link **l_ptr, struct sk_buff **buf);
117static void link_set_supervision_props(struct link *l_ptr, u32 tolerance);
118static int link_send_sections_long(struct port *sender,
119 struct iovec const *msg_sect,
120 u32 num_sect, u32 destnode);
121static void link_check_defragm_bufs(struct link *l_ptr);
122static void link_state_event(struct link *l_ptr, u32 event);
123static void link_reset_statistics(struct link *l_ptr);
124static void link_print(struct link *l_ptr, struct print_buf *buf,
125 const char *str);
126
127/*
128 * Debugging code used by link routines only
129 *
130 * When debugging link problems on a system that has multiple links,
131 * the standard TIPC debugging routines may not be useful since they
132 * allow the output from multiple links to be intermixed. For this reason
133 * routines of the form "dbg_link_XXX()" have been created that will capture
134 * debug info into a link's personal print buffer, which can then be dumped
135 * into the TIPC system log (LOG) upon request.
136 *
137 * To enable per-link debugging, use LINK_LOG_BUF_SIZE to specify the size
138 * of the print buffer used by each link. If LINK_LOG_BUF_SIZE is set to 0,
139 * the dbg_link_XXX() routines simply send their output to the standard
140 * debug print buffer (DBG_OUTPUT), if it has been defined; this can be useful
141 * when there is only a single link in the system being debugged.
142 *
143 * Notes:
144 * - When enabled, LINK_LOG_BUF_SIZE should be set to at least 1000 (bytes)
145 * - "l_ptr" must be valid when using dbg_link_XXX() macros
146 */
147
148#define LINK_LOG_BUF_SIZE 0
149
150#define dbg_link(fmt, arg...) do {if (LINK_LOG_BUF_SIZE) tipc_printf(&l_ptr->print_buf, fmt, ## arg); } while(0)
151#define dbg_link_msg(msg, txt) do {if (LINK_LOG_BUF_SIZE) tipc_msg_print(&l_ptr->print_buf, msg, txt); } while(0)
152#define dbg_link_state(txt) do {if (LINK_LOG_BUF_SIZE) link_print(l_ptr, &l_ptr->print_buf, txt); } while(0)
153#define dbg_link_dump() do { \
154 if (LINK_LOG_BUF_SIZE) { \
155 tipc_printf(LOG, "\n\nDumping link <%s>:\n", l_ptr->name); \
156 tipc_printbuf_move(LOG, &l_ptr->print_buf); \
157 } \
158} while (0)
159
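As a concrete illustration of the note above: enabling per-link capture only requires changing the LINK_LOG_BUF_SIZE definition a few lines up, e.g. as below. The value 2000 is arbitrary but satisfies the "at least 1000 bytes" guideline; this is an editorial example, not part of the patch.

#define LINK_LOG_BUF_SIZE 2000	/* per-link print buffer of 2000 bytes */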
160static inline void dbg_print_link(struct link *l_ptr, const char *str)
161{
162 if (DBG_OUTPUT)
163 link_print(l_ptr, DBG_OUTPUT, str);
164}
165
166static inline void dbg_print_buf_chain(struct sk_buff *root_buf)
167{
168 if (DBG_OUTPUT) {
169 struct sk_buff *buf = root_buf;
170
171 while (buf) {
172 msg_dbg(buf_msg(buf), "In chain: ");
173 buf = buf->next;
174 }
175 }
176}
177
178/*
179 * Simple inlined link routines
180 */
181
182static inline unsigned int align(unsigned int i)
183{
184 return (i + 3) & ~3u;
185}
186
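The align() helper above rounds a length up to the next multiple of 4 octets. A standalone sketch demonstrating the behaviour (align4 and main are illustrative names, not TIPC code):

#include <assert.h>

static unsigned int align4(unsigned int i)	/* same formula as align() */
{
	return (i + 3) & ~3u;
}

int main(void)
{
	assert(align4(0) == 0);
	assert(align4(1) == 4);	/* 1..4 all round up to 4 */
	assert(align4(5) == 8);
	assert(align4(8) == 8);	/* already-aligned values are unchanged */
	return 0;
}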
187static inline int link_working_working(struct link *l_ptr)
188{
189 return (l_ptr->state == WORKING_WORKING);
190}
191
192static inline int link_working_unknown(struct link *l_ptr)
193{
194 return (l_ptr->state == WORKING_UNKNOWN);
195}
196
197static inline int link_reset_unknown(struct link *l_ptr)
198{
199 return (l_ptr->state == RESET_UNKNOWN);
200}
201
202static inline int link_reset_reset(struct link *l_ptr)
203{
204 return (l_ptr->state == RESET_RESET);
205}
206
207static inline int link_blocked(struct link *l_ptr)
208{
209 return (l_ptr->exp_msg_count || l_ptr->blocked);
210}
211
212static inline int link_congested(struct link *l_ptr)
213{
214 return (l_ptr->out_queue_size >= l_ptr->queue_limit[0]);
215}
216
217static inline u32 link_max_pkt(struct link *l_ptr)
218{
219 return l_ptr->max_pkt;
220}
221
222static inline void link_init_max_pkt(struct link *l_ptr)
223{
224 u32 max_pkt;
225
226 max_pkt = (l_ptr->b_ptr->publ.mtu & ~3);
227 if (max_pkt > MAX_MSG_SIZE)
228 max_pkt = MAX_MSG_SIZE;
229
230 l_ptr->max_pkt_target = max_pkt;
231 if (l_ptr->max_pkt_target < MAX_PKT_DEFAULT)
232 l_ptr->max_pkt = l_ptr->max_pkt_target;
233 else
234 l_ptr->max_pkt = MAX_PKT_DEFAULT;
235
236 l_ptr->max_pkt_probes = 0;
237}
238
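link_init_max_pkt() derives the negotiation target from the bearer MTU (rounded down to a multiple of 4 and capped at MAX_MSG_SIZE) and starts the working packet size at no more than MAX_PKT_DEFAULT. A userspace sketch of the same arithmetic follows; the constant values are illustrative, the real ones live in msg.h and are not shown in this hunk:

#include <stdio.h>

#define MAX_MSG_SIZE	66000u	/* illustrative value */
#define MAX_PKT_DEFAULT	1500u	/* illustrative value */

/* returns the initial max_pkt for a bearer MTU; the larger (capped) value
 * becomes max_pkt_target and is approached later via probe messages */
static unsigned int initial_max_pkt(unsigned int mtu)
{
	unsigned int target = mtu & ~3u;	/* round down to multiple of 4 */

	if (target > MAX_MSG_SIZE)
		target = MAX_MSG_SIZE;
	return (target < MAX_PKT_DEFAULT) ? target : MAX_PKT_DEFAULT;
}

int main(void)
{
	printf("MTU 1500 -> start at %u\n", initial_max_pkt(1500));	/* 1500 */
	printf("MTU 9000 -> start at %u\n", initial_max_pkt(9000));	/* 1500, target 9000 */
	return 0;
}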
239static inline u32 link_next_sent(struct link *l_ptr)
240{
241 if (l_ptr->next_out)
242 return msg_seqno(buf_msg(l_ptr->next_out));
243 return mod(l_ptr->next_out_no);
244}
245
246static inline u32 link_last_sent(struct link *l_ptr)
247{
248 return mod(link_next_sent(l_ptr) - 1);
249}
250
251/*
252 * Simple non-inlined link routines (i.e. referenced outside this file)
253 */
254
255int tipc_link_is_up(struct link *l_ptr)
256{
257 if (!l_ptr)
258 return 0;
259 return (link_working_working(l_ptr) || link_working_unknown(l_ptr));
260}
261
262int tipc_link_is_active(struct link *l_ptr)
263{
264 return ((l_ptr->owner->active_links[0] == l_ptr) ||
265 (l_ptr->owner->active_links[1] == l_ptr));
266}
267
268/**
269 * link_name_validate - validate & (optionally) deconstruct link name
270 * @name: ptr to link name string
271 * @name_parts: ptr to area for link name components (or NULL if not needed)
272 *
273 * Returns 1 if link name is valid, otherwise 0.
274 */
275
276static int link_name_validate(const char *name, struct link_name *name_parts)
277{
278 char name_copy[TIPC_MAX_LINK_NAME];
279 char *addr_local;
280 char *if_local;
281 char *addr_peer;
282 char *if_peer;
283 char dummy;
284 u32 z_local, c_local, n_local;
285 u32 z_peer, c_peer, n_peer;
286 u32 if_local_len;
287 u32 if_peer_len;
288
289 /* copy link name & ensure length is OK */
290
291 name_copy[TIPC_MAX_LINK_NAME - 1] = 0;
292 /* need above in case non-Posix strncpy() doesn't pad with nulls */
293 strncpy(name_copy, name, TIPC_MAX_LINK_NAME);
294 if (name_copy[TIPC_MAX_LINK_NAME - 1] != 0)
295 return 0;
296
297 /* ensure all component parts of link name are present */
298
299 addr_local = name_copy;
300 if ((if_local = strchr(addr_local, ':')) == NULL)
301 return 0;
302 *(if_local++) = 0;
303 if ((addr_peer = strchr(if_local, '-')) == NULL)
304 return 0;
305 *(addr_peer++) = 0;
306 if_local_len = addr_peer - if_local;
307 if ((if_peer = strchr(addr_peer, ':')) == NULL)
308 return 0;
309 *(if_peer++) = 0;
310 if_peer_len = strlen(if_peer) + 1;
311
312 /* validate component parts of link name */
313
314 if ((sscanf(addr_local, "%u.%u.%u%c",
315 &z_local, &c_local, &n_local, &dummy) != 3) ||
316 (sscanf(addr_peer, "%u.%u.%u%c",
317 &z_peer, &c_peer, &n_peer, &dummy) != 3) ||
318 (z_local > 255) || (c_local > 4095) || (n_local > 4095) ||
319 (z_peer > 255) || (c_peer > 4095) || (n_peer > 4095) ||
320 (if_local_len <= 1) || (if_local_len > TIPC_MAX_IF_NAME) ||
321 (if_peer_len <= 1) || (if_peer_len > TIPC_MAX_IF_NAME) ||
322 (strspn(if_local, tipc_alphabet) != (if_local_len - 1)) ||
323 (strspn(if_peer, tipc_alphabet) != (if_peer_len - 1)))
324 return 0;
325
326 /* return link name components, if necessary */
327
328 if (name_parts) {
329 name_parts->addr_local = tipc_addr(z_local, c_local, n_local);
330 strcpy(name_parts->if_local, if_local);
331 name_parts->addr_peer = tipc_addr(z_peer, c_peer, n_peer);
332 strcpy(name_parts->if_peer, if_peer);
333 }
334 return 1;
335}
336
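A link name, as built by tipc_link_create() further down and taken apart by link_name_validate() above, has the form '<zone>.<cluster>.<node>:<local if>-<zone>.<cluster>.<node>:<peer if>'. A small standalone parse of a hypothetical name (the address values and interface name 'eth0' are made up for illustration; the real validator splits on the separators and range-checks every field):

#include <stdio.h>

int main(void)
{
	const char *name = "1.1.10:eth0-1.1.12:eth0";	/* hypothetical link name */
	unsigned int z1, c1, n1, z2, c2, n2;
	char if_local[16], if_peer[16];

	if (sscanf(name, "%u.%u.%u:%15[^-]-%u.%u.%u:%15s",
		   &z1, &c1, &n1, if_local, &z2, &c2, &n2, if_peer) == 8)
		printf("local <%u.%u.%u> via %s, peer <%u.%u.%u> via %s\n",
		       z1, c1, n1, if_local, z2, c2, n2, if_peer);
	return 0;
}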
337/**
338 * link_timeout - handle expiration of link timer
339 * @l_ptr: pointer to link
340 *
341 * This routine must not grab "tipc_net_lock" to avoid a potential deadlock conflict
342 * with tipc_link_delete(). (There is no risk that the node will be deleted by
343 * another thread because tipc_link_delete() always cancels the link timer before
344 * tipc_node_delete() is called.)
345 */
346
347static void link_timeout(struct link *l_ptr)
348{
349 tipc_node_lock(l_ptr->owner);
350
351 /* update counters used in statistical profiling of send traffic */
352
353 l_ptr->stats.accu_queue_sz += l_ptr->out_queue_size;
354 l_ptr->stats.queue_sz_counts++;
355
356 if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
357 l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
358
359 if (l_ptr->first_out) {
360 struct tipc_msg *msg = buf_msg(l_ptr->first_out);
361 u32 length = msg_size(msg);
362
363 if ((msg_user(msg) == MSG_FRAGMENTER)
364 && (msg_type(msg) == FIRST_FRAGMENT)) {
365 length = msg_size(msg_get_wrapped(msg));
366 }
367 if (length) {
368 l_ptr->stats.msg_lengths_total += length;
369 l_ptr->stats.msg_length_counts++;
370 if (length <= 64)
371 l_ptr->stats.msg_length_profile[0]++;
372 else if (length <= 256)
373 l_ptr->stats.msg_length_profile[1]++;
374 else if (length <= 1024)
375 l_ptr->stats.msg_length_profile[2]++;
376 else if (length <= 4096)
377 l_ptr->stats.msg_length_profile[3]++;
378 else if (length <= 16384)
379 l_ptr->stats.msg_length_profile[4]++;
380 else if (length <= 32768)
381 l_ptr->stats.msg_length_profile[5]++;
382 else
383 l_ptr->stats.msg_length_profile[6]++;
384 }
385 }
386
387 /* do all other link processing performed on a periodic basis */
388
389 link_check_defragm_bufs(l_ptr);
390
391 link_state_event(l_ptr, TIMEOUT_EVT);
392
393 if (l_ptr->next_out)
394 tipc_link_push_queue(l_ptr);
395
396 tipc_node_unlock(l_ptr->owner);
397}
398
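The statistics block in link_timeout() above spreads message lengths over seven buckets with upper bounds 64, 256, 1024, 4096, 16384 and 32768 octets, plus a catch-all. A compact sketch of the same classification (length_bucket is an illustrative helper, not TIPC code):

#include <stdio.h>

static int length_bucket(unsigned int length)
{
	static const unsigned int limit[6] = { 64, 256, 1024, 4096, 16384, 32768 };
	int i;

	for (i = 0; i < 6; i++)
		if (length <= limit[i])
			return i;	/* index into msg_length_profile[] */
	return 6;			/* longer than 32768 octets */
}

int main(void)
{
	printf("%d %d %d\n", length_bucket(60), length_bucket(1500),
	       length_bucket(40000));	/* prints: 0 3 6 */
	return 0;
}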
399static inline void link_set_timer(struct link *l_ptr, u32 time)
400{
401 k_start_timer(&l_ptr->timer, time);
402}
403
404/**
405 * tipc_link_create - create a new link
406 * @b_ptr: pointer to associated bearer
407 * @peer: network address of node at other end of link
408 * @media_addr: media address to use when sending messages over link
409 *
410 * Returns pointer to link.
411 */
412
413struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
414 const struct tipc_media_addr *media_addr)
415{
416 struct link *l_ptr;
417 struct tipc_msg *msg;
418 char *if_name;
419
420 l_ptr = (struct link *)kmalloc(sizeof(*l_ptr), GFP_ATOMIC);
421 if (!l_ptr) {
422 warn("Memory squeeze; Failed to create link\n");
423 return NULL;
424 }
425 memset(l_ptr, 0, sizeof(*l_ptr));
426
427 l_ptr->addr = peer;
428 if_name = strchr(b_ptr->publ.name, ':') + 1;
429 sprintf(l_ptr->name, "%u.%u.%u:%s-%u.%u.%u:",
430 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
431 tipc_node(tipc_own_addr),
432 if_name,
433 tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
434 /* note: peer i/f is appended to link name by reset/activate */
435 memcpy(&l_ptr->media_addr, media_addr, sizeof(*media_addr));
436 k_init_timer(&l_ptr->timer, (Handler)link_timeout, (unsigned long)l_ptr);
437 list_add_tail(&l_ptr->link_list, &b_ptr->links);
438 l_ptr->checkpoint = 1;
439 l_ptr->b_ptr = b_ptr;
440 link_set_supervision_props(l_ptr, b_ptr->media->tolerance);
441 l_ptr->state = RESET_UNKNOWN;
442
443 l_ptr->pmsg = (struct tipc_msg *)&l_ptr->proto_msg;
444 msg = l_ptr->pmsg;
445 msg_init(msg, LINK_PROTOCOL, RESET_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
446 msg_set_size(msg, sizeof(l_ptr->proto_msg));
447 msg_set_session(msg, tipc_random);
448 msg_set_bearer_id(msg, b_ptr->identity);
449 strcpy((char *)msg_data(msg), if_name);
450
451 l_ptr->priority = b_ptr->priority;
452 tipc_link_set_queue_limits(l_ptr, b_ptr->media->window);
453
454 link_init_max_pkt(l_ptr);
455
456 l_ptr->next_out_no = 1;
457 INIT_LIST_HEAD(&l_ptr->waiting_ports);
458
459 link_reset_statistics(l_ptr);
460
461 l_ptr->owner = tipc_node_attach_link(l_ptr);
462 if (!l_ptr->owner) {
463 kfree(l_ptr);
464 return NULL;
465 }
466
467 if (LINK_LOG_BUF_SIZE) {
468 char *pb = kmalloc(LINK_LOG_BUF_SIZE, GFP_ATOMIC);
469
470 if (!pb) {
471 kfree(l_ptr);
472 warn("Memory squeeze; Failed to create link\n");
473 return NULL;
474 }
475 tipc_printbuf_init(&l_ptr->print_buf, pb, LINK_LOG_BUF_SIZE);
476 }
477
478 tipc_k_signal((Handler)tipc_link_start, (unsigned long)l_ptr);
479
480 dbg("tipc_link_create(): tolerance = %u,cont intv = %u, abort_limit = %u\n",
481 l_ptr->tolerance, l_ptr->continuity_interval, l_ptr->abort_limit);
482
483 return l_ptr;
484}
485
486/**
487 * tipc_link_delete - delete a link
488 * @l_ptr: pointer to link
489 *
490 * Note: 'tipc_net_lock' is write_locked, bearer is locked.
491 * This routine must not grab the node lock until after link timer cancellation
492 * to avoid a potential deadlock situation.
493 */
494
495void tipc_link_delete(struct link *l_ptr)
496{
497 if (!l_ptr) {
498 err("Attempt to delete non-existent link\n");
499 return;
500 }
501
502 dbg("tipc_link_delete()\n");
503
504 k_cancel_timer(&l_ptr->timer);
505
506 tipc_node_lock(l_ptr->owner);
507 tipc_link_reset(l_ptr);
508 tipc_node_detach_link(l_ptr->owner, l_ptr);
509 tipc_link_stop(l_ptr);
510 list_del_init(&l_ptr->link_list);
511 if (LINK_LOG_BUF_SIZE)
512 kfree(l_ptr->print_buf.buf);
513 tipc_node_unlock(l_ptr->owner);
514 k_term_timer(&l_ptr->timer);
515 kfree(l_ptr);
516}
517
518void tipc_link_start(struct link *l_ptr)
519{
520 dbg("tipc_link_start %x\n", l_ptr);
521 link_state_event(l_ptr, STARTING_EVT);
522}
523
524/**
525 * link_schedule_port - schedule port for deferred sending
526 * @l_ptr: pointer to link
527 * @origport: reference to sending port
528 * @sz: amount of data to be sent
529 *
530 * Schedules port for renewed sending of messages after link congestion
531 * has abated.
532 */
533
534static int link_schedule_port(struct link *l_ptr, u32 origport, u32 sz)
535{
536 struct port *p_ptr;
537
538 spin_lock_bh(&tipc_port_list_lock);
539 p_ptr = tipc_port_lock(origport);
540 if (p_ptr) {
541 if (!p_ptr->wakeup)
542 goto exit;
543 if (!list_empty(&p_ptr->wait_list))
544 goto exit;
545 p_ptr->congested_link = l_ptr;
546 p_ptr->publ.congested = 1;
547 p_ptr->waiting_pkts = 1 + ((sz - 1) / link_max_pkt(l_ptr));
548 list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
549 l_ptr->stats.link_congs++;
550exit:
551 tipc_port_unlock(p_ptr);
552 }
553 spin_unlock_bh(&tipc_port_list_lock);
554 return -ELINKCONG;
555}
556
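The waiting_pkts computation in link_schedule_port() above, 1 + ((sz - 1) / link_max_pkt(l_ptr)), is simply a ceiling division: how many link packets the blocked send will occupy once congestion abates. A tiny sketch (pkts_needed is an illustrative name):

#include <stdio.h>

static unsigned int pkts_needed(unsigned int sz, unsigned int max_pkt)
{
	return 1 + ((sz - 1) / max_pkt);	/* ceil(sz / max_pkt) for sz >= 1 */
}

int main(void)
{
	printf("%u\n", pkts_needed(1500, 1500));	/* 1 */
	printf("%u\n", pkts_needed(1501, 1500));	/* 2 */
	printf("%u\n", pkts_needed(6000, 1500));	/* 4 */
	return 0;
}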
557void tipc_link_wakeup_ports(struct link *l_ptr, int all)
558{
559 struct port *p_ptr;
560 struct port *temp_p_ptr;
561 int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
562
563 if (all)
564 win = 100000;
565 if (win <= 0)
566 return;
567 if (!spin_trylock_bh(&tipc_port_list_lock))
568 return;
569 if (link_congested(l_ptr))
570 goto exit;
571 list_for_each_entry_safe(p_ptr, temp_p_ptr, &l_ptr->waiting_ports,
572 wait_list) {
573 if (win <= 0)
574 break;
575 list_del_init(&p_ptr->wait_list);
576 p_ptr->congested_link = NULL;
577 assert(p_ptr->wakeup);
578 spin_lock_bh(p_ptr->publ.lock);
579 p_ptr->publ.congested = 0;
580 p_ptr->wakeup(&p_ptr->publ);
581 win -= p_ptr->waiting_pkts;
582 spin_unlock_bh(p_ptr->publ.lock);
583 }
584
585exit:
586 spin_unlock_bh(&tipc_port_list_lock);
587}
588
589/**
590 * link_release_outqueue - purge link's outbound message queue
591 * @l_ptr: pointer to link
592 */
593
594static void link_release_outqueue(struct link *l_ptr)
595{
596 struct sk_buff *buf = l_ptr->first_out;
597 struct sk_buff *next;
598
599 while (buf) {
600 next = buf->next;
601 buf_discard(buf);
602 buf = next;
603 }
604 l_ptr->first_out = NULL;
605 l_ptr->out_queue_size = 0;
606}
607
608/**
609 * tipc_link_reset_fragments - purge link's inbound message fragments queue
610 * @l_ptr: pointer to link
611 */
612
613void tipc_link_reset_fragments(struct link *l_ptr)
614{
615 struct sk_buff *buf = l_ptr->defragm_buf;
616 struct sk_buff *next;
617
618 while (buf) {
619 next = buf->next;
620 buf_discard(buf);
621 buf = next;
622 }
623 l_ptr->defragm_buf = NULL;
624}
625
626/**
627 * tipc_link_stop - purge all inbound and outbound messages associated with link
628 * @l_ptr: pointer to link
629 */
630
631void tipc_link_stop(struct link *l_ptr)
632{
633 struct sk_buff *buf;
634 struct sk_buff *next;
635
636 buf = l_ptr->oldest_deferred_in;
637 while (buf) {
638 next = buf->next;
639 buf_discard(buf);
640 buf = next;
641 }
642
643 buf = l_ptr->first_out;
644 while (buf) {
645 next = buf->next;
646 buf_discard(buf);
647 buf = next;
648 }
649
650 tipc_link_reset_fragments(l_ptr);
651
652 buf_discard(l_ptr->proto_msg_queue);
653 l_ptr->proto_msg_queue = NULL;
654}
655
656#if 0
657
658/* LINK EVENT CODE IS NOT SUPPORTED AT PRESENT */
659
660static void link_recv_event(struct link_event *ev)
661{
662 ev->fcn(ev->addr, ev->name, ev->up);
663 kfree(ev);
664}
665
666static void link_send_event(void (*fcn)(u32 a, char *n, int up),
667 struct link *l_ptr, int up)
668{
669 struct link_event *ev;
670
671 ev = kmalloc(sizeof(*ev), GFP_ATOMIC);
672 if (!ev) {
673 warn("Link event allocation failure\n");
674 return;
675 }
676 ev->addr = l_ptr->addr;
677 ev->up = up;
678 ev->fcn = fcn;
679 memcpy(ev->name, l_ptr->name, TIPC_MAX_LINK_NAME);
680 tipc_k_signal((Handler)link_recv_event, (unsigned long)ev);
681}
682
683#else
684
685#define link_send_event(fcn, l_ptr, up) do { } while (0)
686
687#endif
688
689void tipc_link_reset(struct link *l_ptr)
690{
691 struct sk_buff *buf;
692 u32 prev_state = l_ptr->state;
693 u32 checkpoint = l_ptr->next_in_no;
694
695 msg_set_session(l_ptr->pmsg, msg_session(l_ptr->pmsg) + 1);
696
697 /* Link is down, accept any session: */
698 l_ptr->peer_session = 0;
699
700 /* Prepare for max packet size negotiation */
701 link_init_max_pkt(l_ptr);
702
703 l_ptr->state = RESET_UNKNOWN;
704 dbg_link_state("Resetting Link\n");
705
706 if ((prev_state == RESET_UNKNOWN) || (prev_state == RESET_RESET))
707 return;
708
709 tipc_node_link_down(l_ptr->owner, l_ptr);
710 tipc_bearer_remove_dest(l_ptr->b_ptr, l_ptr->addr);
711#if 0
712 tipc_printf(TIPC_CONS, "\nReset link <%s>\n", l_ptr->name);
713 dbg_link_dump();
714#endif
715 if (tipc_node_has_active_links(l_ptr->owner) &&
716 l_ptr->owner->permit_changeover) {
717 l_ptr->reset_checkpoint = checkpoint;
718 l_ptr->exp_msg_count = START_CHANGEOVER;
719 }
720
721 /* Clean up all queues: */
722
723 link_release_outqueue(l_ptr);
724 buf_discard(l_ptr->proto_msg_queue);
725 l_ptr->proto_msg_queue = NULL;
726 buf = l_ptr->oldest_deferred_in;
727 while (buf) {
728 struct sk_buff *next = buf->next;
729 buf_discard(buf);
730 buf = next;
731 }
732 if (!list_empty(&l_ptr->waiting_ports))
733 tipc_link_wakeup_ports(l_ptr, 1);
734
735 l_ptr->retransm_queue_head = 0;
736 l_ptr->retransm_queue_size = 0;
737 l_ptr->last_out = NULL;
738 l_ptr->first_out = NULL;
739 l_ptr->next_out = NULL;
740 l_ptr->unacked_window = 0;
741 l_ptr->checkpoint = 1;
742 l_ptr->next_out_no = 1;
743 l_ptr->deferred_inqueue_sz = 0;
744 l_ptr->oldest_deferred_in = NULL;
745 l_ptr->newest_deferred_in = NULL;
746 l_ptr->fsm_msg_cnt = 0;
747 l_ptr->stale_count = 0;
748 link_reset_statistics(l_ptr);
749
750 link_send_event(tipc_cfg_link_event, l_ptr, 0);
751 if (!in_own_cluster(l_ptr->addr))
752 link_send_event(tipc_disc_link_event, l_ptr, 0);
753}
754
755
756static void link_activate(struct link *l_ptr)
757{
758 l_ptr->next_in_no = 1;
759 tipc_node_link_up(l_ptr->owner, l_ptr);
760 tipc_bearer_add_dest(l_ptr->b_ptr, l_ptr->addr);
761 link_send_event(tipc_cfg_link_event, l_ptr, 1);
762 if (!in_own_cluster(l_ptr->addr))
763 link_send_event(tipc_disc_link_event, l_ptr, 1);
764}
765
766/**
767 * link_state_event - link finite state machine
768 * @l_ptr: pointer to link
769 * @event: state machine event to process
770 */
771
772static void link_state_event(struct link *l_ptr, unsigned event)
773{
774 struct link *other;
775 u32 cont_intv = l_ptr->continuity_interval;
776
777 if (!l_ptr->started && (event != STARTING_EVT))
778 return; /* Not yet. */
779
780 if (link_blocked(l_ptr)) {
781 if (event == TIMEOUT_EVT) {
782 link_set_timer(l_ptr, cont_intv);
783 }
784 return; /* Changeover going on */
785 }
786 dbg_link("STATE_EV: <%s> ", l_ptr->name);
787
788 switch (l_ptr->state) {
789 case WORKING_WORKING:
790 dbg_link("WW/");
791 switch (event) {
792 case TRAFFIC_MSG_EVT:
793 dbg_link("TRF-");
794 /* fall through */
795 case ACTIVATE_MSG:
796 dbg_link("ACT\n");
797 break;
798 case TIMEOUT_EVT:
799 dbg_link("TIM ");
800 if (l_ptr->next_in_no != l_ptr->checkpoint) {
801 l_ptr->checkpoint = l_ptr->next_in_no;
802 if (tipc_bclink_acks_missing(l_ptr->owner)) {
803 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
804 0, 0, 0, 0, 0);
805 l_ptr->fsm_msg_cnt++;
806 } else if (l_ptr->max_pkt < l_ptr->max_pkt_target) {
807 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
808 1, 0, 0, 0, 0);
809 l_ptr->fsm_msg_cnt++;
810 }
811 link_set_timer(l_ptr, cont_intv);
812 break;
813 }
814 dbg_link(" -> WU\n");
815 l_ptr->state = WORKING_UNKNOWN;
816 l_ptr->fsm_msg_cnt = 0;
817 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
818 l_ptr->fsm_msg_cnt++;
819 link_set_timer(l_ptr, cont_intv / 4);
820 break;
821 case RESET_MSG:
822 dbg_link("RES -> RR\n");
823 tipc_link_reset(l_ptr);
824 l_ptr->state = RESET_RESET;
825 l_ptr->fsm_msg_cnt = 0;
826 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
827 l_ptr->fsm_msg_cnt++;
828 link_set_timer(l_ptr, cont_intv);
829 break;
830 default:
831 err("Unknown link event %u in WW state\n", event);
832 }
833 break;
834 case WORKING_UNKNOWN:
835 dbg_link("WU/");
836 switch (event) {
837 case TRAFFIC_MSG_EVT:
838 dbg_link("TRF-"); /* fall through */
839 case ACTIVATE_MSG:
840 dbg_link("ACT -> WW\n");
841 l_ptr->state = WORKING_WORKING;
842 l_ptr->fsm_msg_cnt = 0;
843 link_set_timer(l_ptr, cont_intv);
844 break;
845 case RESET_MSG:
846 dbg_link("RES -> RR\n");
847 tipc_link_reset(l_ptr);
848 l_ptr->state = RESET_RESET;
849 l_ptr->fsm_msg_cnt = 0;
850 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
851 l_ptr->fsm_msg_cnt++;
852 link_set_timer(l_ptr, cont_intv);
853 break;
854 case TIMEOUT_EVT:
855 dbg_link("TIM ");
856 if (l_ptr->next_in_no != l_ptr->checkpoint) {
857 dbg_link("-> WW \n");
858 l_ptr->state = WORKING_WORKING;
859 l_ptr->fsm_msg_cnt = 0;
860 l_ptr->checkpoint = l_ptr->next_in_no;
861 if (tipc_bclink_acks_missing(l_ptr->owner)) {
862 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
863 0, 0, 0, 0, 0);
864 l_ptr->fsm_msg_cnt++;
865 }
866 link_set_timer(l_ptr, cont_intv);
867 } else if (l_ptr->fsm_msg_cnt < l_ptr->abort_limit) {
868 dbg_link("Probing %u/%u,timer = %u ms)\n",
869 l_ptr->fsm_msg_cnt, l_ptr->abort_limit,
870 cont_intv / 4);
871 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
872 1, 0, 0, 0, 0);
873 l_ptr->fsm_msg_cnt++;
874 link_set_timer(l_ptr, cont_intv / 4);
875 } else { /* Link has failed */
876 dbg_link("-> RU (%u probes unanswered)\n",
877 l_ptr->fsm_msg_cnt);
878 tipc_link_reset(l_ptr);
879 l_ptr->state = RESET_UNKNOWN;
880 l_ptr->fsm_msg_cnt = 0;
881 tipc_link_send_proto_msg(l_ptr, RESET_MSG,
882 0, 0, 0, 0, 0);
883 l_ptr->fsm_msg_cnt++;
884 link_set_timer(l_ptr, cont_intv);
885 }
886 break;
887 default:
888 err("Unknown link event %u in WU state\n", event);
889 }
890 break;
891 case RESET_UNKNOWN:
892 dbg_link("RU/");
893 switch (event) {
894 case TRAFFIC_MSG_EVT:
895 dbg_link("TRF-\n");
896 break;
897 case ACTIVATE_MSG:
898 other = l_ptr->owner->active_links[0];
899 if (other && link_working_unknown(other)) {
900 dbg_link("ACT\n");
901 break;
902 }
903 dbg_link("ACT -> WW\n");
904 l_ptr->state = WORKING_WORKING;
905 l_ptr->fsm_msg_cnt = 0;
906 link_activate(l_ptr);
907 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
908 l_ptr->fsm_msg_cnt++;
909 link_set_timer(l_ptr, cont_intv);
910 break;
911 case RESET_MSG:
912 dbg_link("RES \n");
913 dbg_link(" -> RR\n");
914 l_ptr->state = RESET_RESET;
915 l_ptr->fsm_msg_cnt = 0;
916 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 1, 0, 0, 0, 0);
917 l_ptr->fsm_msg_cnt++;
918 link_set_timer(l_ptr, cont_intv);
919 break;
920 case STARTING_EVT:
921 dbg_link("START-");
922 l_ptr->started = 1;
923 /* fall through */
924 case TIMEOUT_EVT:
925 dbg_link("TIM \n");
926 tipc_link_send_proto_msg(l_ptr, RESET_MSG, 0, 0, 0, 0, 0);
927 l_ptr->fsm_msg_cnt++;
928 link_set_timer(l_ptr, cont_intv);
929 break;
930 default:
931 err("Unknown link event %u in RU state\n", event);
932 }
933 break;
934 case RESET_RESET:
935 dbg_link("RR/ ");
936 switch (event) {
937 case TRAFFIC_MSG_EVT:
938 dbg_link("TRF-");
939 /* fall through */
940 case ACTIVATE_MSG:
941 other = l_ptr->owner->active_links[0];
942 if (other && link_working_unknown(other)) {
943 dbg_link("ACT\n");
944 break;
945 }
946 dbg_link("ACT -> WW\n");
947 l_ptr->state = WORKING_WORKING;
948 l_ptr->fsm_msg_cnt = 0;
949 link_activate(l_ptr);
950 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 1, 0, 0, 0, 0);
951 l_ptr->fsm_msg_cnt++;
952 link_set_timer(l_ptr, cont_intv);
953 break;
954 case RESET_MSG:
955 dbg_link("RES\n");
956 break;
957 case TIMEOUT_EVT:
958 dbg_link("TIM\n");
959 tipc_link_send_proto_msg(l_ptr, ACTIVATE_MSG, 0, 0, 0, 0, 0);
960 l_ptr->fsm_msg_cnt++;
961 link_set_timer(l_ptr, cont_intv);
962 dbg_link("fsm_msg_cnt %u\n", l_ptr->fsm_msg_cnt);
963 break;
964 default:
965 err("Unknown link event %u in RR state\n", event);
966 }
967 break;
968 default:
969 err("Unknown link state %u/%u\n", l_ptr->state, event);
970 }
971}
972
973/*
974 * link_bundle_buf(): Append contents of a buffer to
975 * the tail of an existing one.
976 */
977
978static int link_bundle_buf(struct link *l_ptr,
979 struct sk_buff *bundler,
980 struct sk_buff *buf)
981{
982 struct tipc_msg *bundler_msg = buf_msg(bundler);
983 struct tipc_msg *msg = buf_msg(buf);
984 u32 size = msg_size(msg);
985 u32 to_pos = align(msg_size(bundler_msg));
986 u32 rest = link_max_pkt(l_ptr) - to_pos;
987
988 if (msg_user(bundler_msg) != MSG_BUNDLER)
989 return 0;
990 if (msg_type(bundler_msg) != OPEN_MSG)
991 return 0;
992 if (rest < align(size))
993 return 0;
994
995 skb_put(bundler, (to_pos - msg_size(bundler_msg)) + size);
996 memcpy(bundler->data + to_pos, buf->data, size);
997 msg_set_size(bundler_msg, to_pos + size);
998 msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
999 dbg("Packed msg # %u(%u octets) into pos %u in buf(#%u)\n",
1000 msg_msgcnt(bundler_msg), size, to_pos, msg_seqno(bundler_msg));
1001 msg_dbg(msg, "PACKD:");
1002 buf_discard(buf);
1003 l_ptr->stats.sent_bundled++;
1004 return 1;
1005}
1006
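link_bundle_buf() above only appends a message when its aligned copy still fits inside the bundler's maximum packet (the 'rest < align(size)' test rejects it otherwise). A standalone sketch of that admission test, with made-up sizes (bundle_fits and the numbers are illustrative):

#include <stdio.h>

static unsigned int align4(unsigned int i)
{
	return (i + 3) & ~3u;
}

/* does a 'size'-octet message still fit behind 'bundle_size' octets already
 * packed into a bundle limited to 'max_pkt' octets? */
static int bundle_fits(unsigned int max_pkt, unsigned int bundle_size,
		       unsigned int size)
{
	unsigned int to_pos = align4(bundle_size);

	return (max_pkt - to_pos) >= align4(size);
}

int main(void)
{
	printf("%d\n", bundle_fits(1500, 1400, 96));	/* 1: 96 octets still fit   */
	printf("%d\n", bundle_fits(1500, 1400, 104));	/* 0: would overflow bundle */
	return 0;
}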
1007static inline void link_add_to_outqueue(struct link *l_ptr,
1008 struct sk_buff *buf,
1009 struct tipc_msg *msg)
1010{
1011 u32 ack = mod(l_ptr->next_in_no - 1);
1012 u32 seqno = mod(l_ptr->next_out_no++);
1013
1014 msg_set_word(msg, 2, ((ack << 16) | seqno));
1015 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1016 buf->next = NULL;
1017 if (l_ptr->first_out) {
1018 l_ptr->last_out->next = buf;
1019 l_ptr->last_out = buf;
1020 } else
1021 l_ptr->first_out = l_ptr->last_out = buf;
1022 l_ptr->out_queue_size++;
1023}
1024
1025/*
1026 * tipc_link_send_buf() is the 'full path' for messages, called from
1027 * inside TIPC when the 'fast path' in tipc_send_buf
1028 * has failed, and from link_send()
1029 */
1030
1031int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf)
1032{
1033 struct tipc_msg *msg = buf_msg(buf);
1034 u32 size = msg_size(msg);
1035 u32 dsz = msg_data_sz(msg);
1036 u32 queue_size = l_ptr->out_queue_size;
1037 u32 imp = msg_tot_importance(msg);
1038 u32 queue_limit = l_ptr->queue_limit[imp];
1039 u32 max_packet = link_max_pkt(l_ptr);
1040
1041 msg_set_prevnode(msg, tipc_own_addr); /* If routed message */
1042
1043 /* Match msg importance against queue limits: */
1044
1045 if (unlikely(queue_size >= queue_limit)) {
1046 if (imp <= TIPC_CRITICAL_IMPORTANCE) {
1047 return link_schedule_port(l_ptr, msg_origport(msg),
1048 size);
1049 }
1050 msg_dbg(msg, "TIPC: Congestion, throwing away\n");
1051 buf_discard(buf);
1052 if (imp > CONN_MANAGER) {
1053 warn("Resetting <%s>, send queue full", l_ptr->name);
1054 tipc_link_reset(l_ptr);
1055 }
1056 return dsz;
1057 }
1058
1059 /* Fragmentation needed ? */
1060
1061 if (size > max_packet)
1062 return tipc_link_send_long_buf(l_ptr, buf);
1063
1064 /* Packet can be queued or sent: */
1065
1066 if (queue_size > l_ptr->stats.max_queue_sz)
1067 l_ptr->stats.max_queue_sz = queue_size;
1068
1069 if (likely(!tipc_bearer_congested(l_ptr->b_ptr, l_ptr) &&
1070 !link_congested(l_ptr))) {
1071 link_add_to_outqueue(l_ptr, buf, msg);
1072
1073 if (likely(tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr))) {
1074 l_ptr->unacked_window = 0;
1075 } else {
1076 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1077 l_ptr->stats.bearer_congs++;
1078 l_ptr->next_out = buf;
1079 }
1080 return dsz;
1081 }
1082 /* Congestion: can message be bundled ?: */
1083
1084 if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
1085 (msg_user(msg) != MSG_FRAGMENTER)) {
1086
1087 /* Try adding message to an existing bundle */
1088
1089 if (l_ptr->next_out &&
1090 link_bundle_buf(l_ptr, l_ptr->last_out, buf)) {
1091 tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
1092 return dsz;
1093 }
1094
1095 /* Try creating a new bundle */
1096
1097 if (size <= max_packet * 2 / 3) {
1098 struct sk_buff *bundler = buf_acquire(max_packet);
1099 struct tipc_msg bundler_hdr;
1100
1101 if (bundler) {
1102 msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
1103 TIPC_OK, INT_H_SIZE, l_ptr->addr);
1104 memcpy(bundler->data, (unchar *)&bundler_hdr,
1105 INT_H_SIZE);
1106 skb_trim(bundler, INT_H_SIZE);
1107 link_bundle_buf(l_ptr, bundler, buf);
1108 buf = bundler;
1109 msg = buf_msg(buf);
1110 l_ptr->stats.sent_bundles++;
1111 }
1112 }
1113 }
1114 if (!l_ptr->next_out)
1115 l_ptr->next_out = buf;
1116 link_add_to_outqueue(l_ptr, buf, msg);
1117 tipc_bearer_resolve_congestion(l_ptr->b_ptr, l_ptr);
1118 return dsz;
1119}
1120
1121/*
1122 * tipc_link_send(): same as tipc_link_send_buf(), but the link to use has
1123 * not been selected yet, and the owner node is not locked
1124 * Called by TIPC internal users, e.g. the name distributor
1125 */
1126
1127int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector)
1128{
1129 struct link *l_ptr;
1130 struct node *n_ptr;
1131 int res = -ELINKCONG;
1132
1133 read_lock_bh(&tipc_net_lock);
1134 n_ptr = tipc_node_select(dest, selector);
1135 if (n_ptr) {
1136 tipc_node_lock(n_ptr);
1137 l_ptr = n_ptr->active_links[selector & 1];
1138 dbg("tipc_link_send: found link %x for dest %x\n", l_ptr, dest);
1139 if (l_ptr) {
1140 res = tipc_link_send_buf(l_ptr, buf);
1141 }
1142 tipc_node_unlock(n_ptr);
1143 } else {
1144 dbg("Attempt to send msg to unknown node:\n");
1145 msg_dbg(buf_msg(buf),">>>");
1146 buf_discard(buf);
1147 }
1148 read_unlock_bh(&tipc_net_lock);
1149 return res;
1150}
1151
1152/*
1153 * link_send_buf_fast: Entry for data messages where the
1154 * destination link is known and the header is complete,
1155 * inclusive total message length. Very time critical.
1156 * Link is locked. Returns user data length.
1157 */
1158
1159static inline int link_send_buf_fast(struct link *l_ptr, struct sk_buff *buf,
1160 u32 *used_max_pkt)
1161{
1162 struct tipc_msg *msg = buf_msg(buf);
1163 int res = msg_data_sz(msg);
1164
1165 if (likely(!link_congested(l_ptr))) {
1166 if (likely(msg_size(msg) <= link_max_pkt(l_ptr))) {
1167 if (likely(list_empty(&l_ptr->b_ptr->cong_links))) {
1168 link_add_to_outqueue(l_ptr, buf, msg);
1169 if (likely(tipc_bearer_send(l_ptr->b_ptr, buf,
1170 &l_ptr->media_addr))) {
1171 l_ptr->unacked_window = 0;
1172 msg_dbg(msg,"SENT_FAST:");
1173 return res;
1174 }
1175 dbg("failed sent fast...\n");
1176 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1177 l_ptr->stats.bearer_congs++;
1178 l_ptr->next_out = buf;
1179 return res;
1180 }
1181 }
1182 else
1183 *used_max_pkt = link_max_pkt(l_ptr);
1184 }
1185 return tipc_link_send_buf(l_ptr, buf); /* All other cases */
1186}
1187
1188/*
1189 * tipc_send_buf_fast: Entry for data messages where the
1190 * destination node is known and the header is complete,
1191 * inclusive total message length.
1192 * Returns user data length.
1193 */
1194int tipc_send_buf_fast(struct sk_buff *buf, u32 destnode)
1195{
1196 struct link *l_ptr;
1197 struct node *n_ptr;
1198 int res;
1199 u32 selector = msg_origport(buf_msg(buf)) & 1;
1200 u32 dummy;
1201
1202 if (destnode == tipc_own_addr)
1203 return tipc_port_recv_msg(buf);
1204
1205 read_lock_bh(&tipc_net_lock);
1206 n_ptr = tipc_node_select(destnode, selector);
1207 if (likely(n_ptr)) {
1208 tipc_node_lock(n_ptr);
1209 l_ptr = n_ptr->active_links[selector];
1210 dbg("send_fast: buf %x selected %x, destnode = %x\n",
1211 buf, l_ptr, destnode);
1212 if (likely(l_ptr)) {
1213 res = link_send_buf_fast(l_ptr, buf, &dummy);
1214 tipc_node_unlock(n_ptr);
1215 read_unlock_bh(&tipc_net_lock);
1216 return res;
1217 }
1218 tipc_node_unlock(n_ptr);
1219 }
1220 read_unlock_bh(&tipc_net_lock);
1221 res = msg_data_sz(buf_msg(buf));
1222 tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1223 return res;
1224}
1225
1226
1227/*
1228 * tipc_link_send_sections_fast: Entry for messages where the
1229 * destination processor is known and the header is complete,
1230 * except for total message length.
1231 * Returns user data length or errno.
1232 */
1233int tipc_link_send_sections_fast(struct port *sender,
1234 struct iovec const *msg_sect,
1235 const u32 num_sect,
1236 u32 destaddr)
1237{
1238 struct tipc_msg *hdr = &sender->publ.phdr;
1239 struct link *l_ptr;
1240 struct sk_buff *buf;
1241 struct node *node;
1242 int res;
1243 u32 selector = msg_origport(hdr) & 1;
1244
1245 assert(destaddr != tipc_own_addr);
1246
1247again:
1248 /*
1249 * Try building message using port's max_pkt hint.
1250 * (Must not hold any locks while building message.)
1251 */
1252
1253 res = msg_build(hdr, msg_sect, num_sect, sender->max_pkt,
1254 !sender->user_port, &buf);
1255
1256 read_lock_bh(&tipc_net_lock);
1257 node = tipc_node_select(destaddr, selector);
1258 if (likely(node)) {
1259 tipc_node_lock(node);
1260 l_ptr = node->active_links[selector];
1261 if (likely(l_ptr)) {
1262 if (likely(buf)) {
1263 res = link_send_buf_fast(l_ptr, buf,
1264 &sender->max_pkt);
1265 if (unlikely(res < 0))
1266 buf_discard(buf);
1267exit:
1268 tipc_node_unlock(node);
1269 read_unlock_bh(&tipc_net_lock);
1270 return res;
1271 }
1272
1273 /* Exit if build request was invalid */
1274
1275 if (unlikely(res < 0))
1276 goto exit;
1277
1278 /* Exit if link (or bearer) is congested */
1279
1280 if (link_congested(l_ptr) ||
1281 !list_empty(&l_ptr->b_ptr->cong_links)) {
1282 res = link_schedule_port(l_ptr,
1283 sender->publ.ref, res);
1284 goto exit;
1285 }
1286
1287 /*
1288 * Message size exceeds max_pkt hint; update hint,
1289 * then re-try fast path or fragment the message
1290 */
1291
1292 sender->max_pkt = link_max_pkt(l_ptr);
1293 tipc_node_unlock(node);
1294 read_unlock_bh(&tipc_net_lock);
1295
1296
1297 if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
1298 goto again;
1299
1300 return link_send_sections_long(sender, msg_sect,
1301 num_sect, destaddr);
1302 }
1303 tipc_node_unlock(node);
1304 }
1305 read_unlock_bh(&tipc_net_lock);
1306
1307 /* Couldn't find a link to the destination node */
1308
1309 if (buf)
1310 return tipc_reject_msg(buf, TIPC_ERR_NO_NODE);
1311 if (res >= 0)
1312 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
1313 TIPC_ERR_NO_NODE);
1314 return res;
1315}
1316
1317/*
1318 * link_send_sections_long(): Entry for long messages where the
1319 * destination node is known and the header is complete,
1320 * inclusive total message length.
1321 * Link and bearer congestion status have been checked to be ok,
1322 * and are ignored if they change.
1323 *
1324 * Note that fragments do not use the full link MTU so that they won't have
1325 * to undergo refragmentation if link changeover causes them to be sent
1326 * over another link with an additional tunnel header added as prefix.
1327 * (Refragmentation will still occur if the other link has a smaller MTU.)
1328 *
1329 * Returns user data length or errno.
1330 */
1331static int link_send_sections_long(struct port *sender,
1332 struct iovec const *msg_sect,
1333 u32 num_sect,
1334 u32 destaddr)
1335{
1336 struct link *l_ptr;
1337 struct node *node;
1338 struct tipc_msg *hdr = &sender->publ.phdr;
1339 u32 dsz = msg_data_sz(hdr);
1340 u32 max_pkt, fragm_sz, rest;
1341 struct tipc_msg fragm_hdr;
1342 struct sk_buff *buf, *buf_chain, *prev;
1343 u32 fragm_crs, fragm_rest, hsz, sect_rest;
1344 const unchar *sect_crs;
1345 int curr_sect;
1346 u32 fragm_no;
1347
1348again:
1349 fragm_no = 1;
1350 max_pkt = sender->max_pkt - INT_H_SIZE;
1351 /* leave room for tunnel header in case of link changeover */
1352 fragm_sz = max_pkt - INT_H_SIZE;
1353 /* leave room for fragmentation header in each fragment */
1354 rest = dsz;
1355 fragm_crs = 0;
1356 fragm_rest = 0;
1357 sect_rest = 0;
1358 sect_crs = 0;
1359 curr_sect = -1;
1360
1361 /* Prepare reusable fragment header: */
1362
1363 msg_dbg(hdr, ">FRAGMENTING>");
1364 msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
1365 TIPC_OK, INT_H_SIZE, msg_destnode(hdr));
1366 msg_set_link_selector(&fragm_hdr, sender->publ.ref);
1367 msg_set_size(&fragm_hdr, max_pkt);
1368 msg_set_fragm_no(&fragm_hdr, 1);
1369
1370 /* Prepare header of first fragment: */
1371
1372 buf_chain = buf = buf_acquire(max_pkt);
1373 if (!buf)
1374 return -ENOMEM;
1375 buf->next = NULL;
1376 memcpy(buf->data, (unchar *)&fragm_hdr, INT_H_SIZE);
1377 hsz = msg_hdr_sz(hdr);
1378 memcpy(buf->data + INT_H_SIZE, (unchar *)hdr, hsz);
1379 msg_dbg(buf_msg(buf), ">BUILD>");
1380
1381 /* Chop up message: */
1382
1383 fragm_crs = INT_H_SIZE + hsz;
1384 fragm_rest = fragm_sz - hsz;
1385
1386 do { /* For all sections */
1387 u32 sz;
1388
1389 if (!sect_rest) {
1390 sect_rest = msg_sect[++curr_sect].iov_len;
1391 sect_crs = (const unchar *)msg_sect[curr_sect].iov_base;
1392 }
1393
1394 if (sect_rest < fragm_rest)
1395 sz = sect_rest;
1396 else
1397 sz = fragm_rest;
1398
1399 if (likely(!sender->user_port)) {
1400 if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
1401error:
1402 for (; buf_chain; buf_chain = buf) {
1403 buf = buf_chain->next;
1404 buf_discard(buf_chain);
1405 }
1406 return -EFAULT;
1407 }
1408 } else
1409 memcpy(buf->data + fragm_crs, sect_crs, sz);
1410
1411 sect_crs += sz;
1412 sect_rest -= sz;
1413 fragm_crs += sz;
1414 fragm_rest -= sz;
1415 rest -= sz;
1416
1417 if (!fragm_rest && rest) {
1418
1419 /* Initiate new fragment: */
1420 if (rest <= fragm_sz) {
1421 fragm_sz = rest;
1422 msg_set_type(&fragm_hdr,LAST_FRAGMENT);
1423 } else {
1424 msg_set_type(&fragm_hdr, FRAGMENT);
1425 }
1426 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
1427 msg_set_fragm_no(&fragm_hdr, ++fragm_no);
1428 prev = buf;
1429 buf = buf_acquire(fragm_sz + INT_H_SIZE);
1430 if (!buf)
1431 goto error;
1432
1433 buf->next = NULL;
1434 prev->next = buf;
1435 memcpy(buf->data, (unchar *)&fragm_hdr, INT_H_SIZE);
1436 fragm_crs = INT_H_SIZE;
1437 fragm_rest = fragm_sz;
1438 msg_dbg(buf_msg(buf)," >BUILD>");
1439 }
1440 }
1441 while (rest > 0);
1442
1443 /*
1444 * Now we have a buffer chain. Select a link and check
1445 * that packet size is still OK
1446 */
1447 node = tipc_node_select(destaddr, sender->publ.ref & 1);
1448 if (likely(node)) {
1449 tipc_node_lock(node);
1450 l_ptr = node->active_links[sender->publ.ref & 1];
1451 if (!l_ptr) {
1452 tipc_node_unlock(node);
1453 goto reject;
1454 }
1455 if (link_max_pkt(l_ptr) < max_pkt) {
1456 sender->max_pkt = link_max_pkt(l_ptr);
1457 tipc_node_unlock(node);
1458 for (; buf_chain; buf_chain = buf) {
1459 buf = buf_chain->next;
1460 buf_discard(buf_chain);
1461 }
1462 goto again;
1463 }
1464 } else {
1465reject:
1466 for (; buf_chain; buf_chain = buf) {
1467 buf = buf_chain->next;
1468 buf_discard(buf_chain);
1469 }
1470 return tipc_port_reject_sections(sender, hdr, msg_sect, num_sect,
1471 TIPC_ERR_NO_NODE);
1472 }
1473
1474 /* Append whole chain to send queue: */
1475
1476 buf = buf_chain;
1477 l_ptr->long_msg_seq_no = mod(l_ptr->long_msg_seq_no + 1);
1478 if (!l_ptr->next_out)
1479 l_ptr->next_out = buf_chain;
1480 l_ptr->stats.sent_fragmented++;
1481 while (buf) {
1482 struct sk_buff *next = buf->next;
1483 struct tipc_msg *msg = buf_msg(buf);
1484
1485 l_ptr->stats.sent_fragments++;
1486 msg_set_long_msgno(msg, l_ptr->long_msg_seq_no);
1487 link_add_to_outqueue(l_ptr, buf, msg);
1488 msg_dbg(msg, ">ADD>");
1489 buf = next;
1490 }
1491
1492 /* Send it, if possible: */
1493
1494 tipc_link_push_queue(l_ptr);
1495 tipc_node_unlock(node);
1496 return dsz;
1497}
1498
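link_send_sections_long() reserves INT_H_SIZE twice: once so a tunnelled copy still fits during link changeover and once for the fragment header itself; the first fragment additionally carries the original user header. The following sketch estimates the fragment count for a hypothetical send (all sizes, including the INT_H_SIZE value, are illustrative; the real constants are defined in msg.h):

#include <stdio.h>

#define INT_H_SIZE 40u	/* illustrative; the real value is in msg.h */

int main(void)
{
	unsigned int sender_max = 1500;			/* port's max_pkt hint        */
	unsigned int max_pkt    = sender_max - INT_H_SIZE;	/* room for tunnel header */
	unsigned int fragm_sz   = max_pkt - INT_H_SIZE;	/* room for fragment header   */
	unsigned int dsz        = 10000;		/* user data to send          */
	unsigned int hdr_sz     = 24;			/* illustrative user header   */
	unsigned int fragments  = 1;
	unsigned int rest       = dsz;
	unsigned int room       = fragm_sz - hdr_sz;	/* first fragment also carries
							   the copied user header     */

	while (rest > room) {
		rest -= room;
		room = fragm_sz;	/* later fragments carry data only */
		fragments++;
	}
	printf("%u octets -> %u fragments of at most %u octets payload each\n",
	       dsz, fragments, fragm_sz);
	return 0;
}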
1499/*
1500 * tipc_link_push_packet: Push one unsent packet to the media
1501 */
1502u32 tipc_link_push_packet(struct link *l_ptr)
1503{
1504 struct sk_buff *buf = l_ptr->first_out;
1505 u32 r_q_size = l_ptr->retransm_queue_size;
1506 u32 r_q_head = l_ptr->retransm_queue_head;
1507
1508 /* Step to position where retransmission failed, if any, */
1509 /* consider that buffers may have been released in meantime */
1510
1511 if (r_q_size && buf) {
1512 u32 last = lesser(mod(r_q_head + r_q_size),
1513 link_last_sent(l_ptr));
1514 u32 first = msg_seqno(buf_msg(buf));
1515
1516 while (buf && less(first, r_q_head)) {
1517 first = mod(first + 1);
1518 buf = buf->next;
1519 }
1520 l_ptr->retransm_queue_head = r_q_head = first;
1521 l_ptr->retransm_queue_size = r_q_size = mod(last - first);
1522 }
1523
1524 /* Continue retransmission now, if there is anything: */
1525
1526 if (r_q_size && buf && !skb_cloned(buf)) {
1527 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1528 msg_set_bcast_ack(buf_msg(buf), l_ptr->owner->bclink.last_in);
1529 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1530 msg_dbg(buf_msg(buf), ">DEF-RETR>");
1531 l_ptr->retransm_queue_head = mod(++r_q_head);
1532 l_ptr->retransm_queue_size = --r_q_size;
1533 l_ptr->stats.retransmitted++;
1534 return TIPC_OK;
1535 } else {
1536 l_ptr->stats.bearer_congs++;
1537 msg_dbg(buf_msg(buf), "|>DEF-RETR>");
1538 return PUSH_FAILED;
1539 }
1540 }
1541
1542 /* Send deferred protocol message, if any: */
1543
1544 buf = l_ptr->proto_msg_queue;
1545 if (buf) {
1546 msg_set_ack(buf_msg(buf), mod(l_ptr->next_in_no - 1));
1547 msg_set_bcast_ack(buf_msg(buf),l_ptr->owner->bclink.last_in);
1548 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1549 msg_dbg(buf_msg(buf), ">DEF-PROT>");
1550 l_ptr->unacked_window = 0;
1551 buf_discard(buf);
1552 l_ptr->proto_msg_queue = NULL;
1553 return TIPC_OK;
1554 } else {
1555 msg_dbg(buf_msg(buf), "|>DEF-PROT>");
1556 l_ptr->stats.bearer_congs++;
1557 return PUSH_FAILED;
1558 }
1559 }
1560
1561 /* Send one deferred data message, if send window not full: */
1562
1563 buf = l_ptr->next_out;
1564 if (buf) {
1565 struct tipc_msg *msg = buf_msg(buf);
1566 u32 next = msg_seqno(msg);
1567 u32 first = msg_seqno(buf_msg(l_ptr->first_out));
1568
1569 if (mod(next - first) < l_ptr->queue_limit[0]) {
1570 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1571 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1572 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1573 if (msg_user(msg) == MSG_BUNDLER)
1574 msg_set_type(msg, CLOSED_MSG);
1575 msg_dbg(msg, ">PUSH-DATA>");
1576 l_ptr->next_out = buf->next;
1577 return TIPC_OK;
1578 } else {
1579 msg_dbg(msg, "|PUSH-DATA|");
1580 l_ptr->stats.bearer_congs++;
1581 return PUSH_FAILED;
1582 }
1583 }
1584 }
1585 return PUSH_FINISHED;
1586}
1587
1588/*
1589 * push_queue(): push out the unsent messages of a link where
1590 * congestion has abated. Node is locked
1591 */
1592void tipc_link_push_queue(struct link *l_ptr)
1593{
1594 u32 res;
1595
1596 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr))
1597 return;
1598
1599 do {
1600 res = tipc_link_push_packet(l_ptr);
1601 }
1602 while (res == TIPC_OK);
1603 if (res == PUSH_FAILED)
1604 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1605}
1606
1607void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1608 u32 retransmits)
1609{
1610 struct tipc_msg *msg;
1611
1612 dbg("Retransmitting %u in link %x\n", retransmits, l_ptr);
1613
1614 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr) && buf && !skb_cloned(buf)) {
1615 msg_dbg(buf_msg(buf), ">NO_RETR->BCONG>");
1616 dbg_print_link(l_ptr, " ");
1617 l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
1618 l_ptr->retransm_queue_size = retransmits;
1619 return;
1620 }
1621 while (retransmits && (buf != l_ptr->next_out) && buf && !skb_cloned(buf)) {
1622 msg = buf_msg(buf);
1623 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1624 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
1625 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
1626 /* Catch if retransmissions fail repeatedly: */
1627 if (l_ptr->last_retransmitted == msg_seqno(msg)) {
1628 if (++l_ptr->stale_count > 100) {
1629 tipc_msg_print(TIPC_CONS, buf_msg(buf), ">RETR>");
1630 info("...Retransmitted %u times\n",
1631 l_ptr->stale_count);
1632 link_print(l_ptr, TIPC_CONS, "Resetting Link\n");
1633 tipc_link_reset(l_ptr);
1634 break;
1635 }
1636 } else {
1637 l_ptr->stale_count = 0;
1638 }
1639 l_ptr->last_retransmitted = msg_seqno(msg);
1640
1641 msg_dbg(buf_msg(buf), ">RETR>");
1642 buf = buf->next;
1643 retransmits--;
1644 l_ptr->stats.retransmitted++;
1645 } else {
1646 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
1647 l_ptr->stats.bearer_congs++;
1648 l_ptr->retransm_queue_head = msg_seqno(buf_msg(buf));
1649 l_ptr->retransm_queue_size = retransmits;
1650 return;
1651 }
1652 }
1653 l_ptr->retransm_queue_head = l_ptr->retransm_queue_size = 0;
1654}
1655
1656/*
1657 * link_recv_non_seq: Receive packets which are outside
1658 * the link sequence flow
1659 */
1660
1661static void link_recv_non_seq(struct sk_buff *buf)
1662{
1663 struct tipc_msg *msg = buf_msg(buf);
1664
1665 if (msg_user(msg) == LINK_CONFIG)
1666 tipc_disc_recv_msg(buf);
1667 else
1668 tipc_bclink_recv_pkt(buf);
1669}
1670
1671/**
1672 * link_insert_deferred_queue - insert deferred messages back into receive chain
1673 */
1674
1675static struct sk_buff *link_insert_deferred_queue(struct link *l_ptr,
1676 struct sk_buff *buf)
1677{
1678 u32 seq_no;
1679
1680 if (l_ptr->oldest_deferred_in == NULL)
1681 return buf;
1682
1683 seq_no = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
1684 if (seq_no == mod(l_ptr->next_in_no)) {
1685 l_ptr->newest_deferred_in->next = buf;
1686 buf = l_ptr->oldest_deferred_in;
1687 l_ptr->oldest_deferred_in = NULL;
1688 l_ptr->deferred_inqueue_sz = 0;
1689 }
1690 return buf;
1691}
1692
1693void tipc_recv_msg(struct sk_buff *head, struct tipc_bearer *tb_ptr)
1694{
1695 read_lock_bh(&tipc_net_lock);
1696 while (head) {
1697 struct bearer *b_ptr;
1698 struct node *n_ptr;
1699 struct link *l_ptr;
1700 struct sk_buff *crs;
1701 struct sk_buff *buf = head;
1702 struct tipc_msg *msg = buf_msg(buf);
1703 u32 seq_no = msg_seqno(msg);
1704 u32 ackd = msg_ack(msg);
1705 u32 released = 0;
1706 int type;
1707
1708 b_ptr = (struct bearer *)tb_ptr;
1709 TIPC_SKB_CB(buf)->handle = b_ptr;
1710
1711 head = head->next;
1712 if (unlikely(msg_version(msg) != TIPC_VERSION))
1713 goto cont;
1714#if 0
1715 if (msg_user(msg) != LINK_PROTOCOL)
1716#endif
1717 msg_dbg(msg,"<REC<");
1718
1719 if (unlikely(msg_non_seq(msg))) {
1720 link_recv_non_seq(buf);
1721 continue;
1722 }
1723 n_ptr = tipc_node_find(msg_prevnode(msg));
1724 if (unlikely(!n_ptr))
1725 goto cont;
1726
1727 tipc_node_lock(n_ptr);
1728 l_ptr = n_ptr->links[b_ptr->identity];
1729 if (unlikely(!l_ptr)) {
1730 tipc_node_unlock(n_ptr);
1731 goto cont;
1732 }
1733 /*
1734 * Release acked messages
1735 */
1736 if (less(n_ptr->bclink.acked, msg_bcast_ack(msg))) {
1737 if (tipc_node_is_up(n_ptr) && n_ptr->bclink.supported)
1738 tipc_bclink_acknowledge(n_ptr, msg_bcast_ack(msg));
1739 }
1740
1741 crs = l_ptr->first_out;
1742 while ((crs != l_ptr->next_out) &&
1743 less_eq(msg_seqno(buf_msg(crs)), ackd)) {
1744 struct sk_buff *next = crs->next;
1745
1746 buf_discard(crs);
1747 crs = next;
1748 released++;
1749 }
1750 if (released) {
1751 l_ptr->first_out = crs;
1752 l_ptr->out_queue_size -= released;
1753 }
1754 if (unlikely(l_ptr->next_out))
1755 tipc_link_push_queue(l_ptr);
1756 if (unlikely(!list_empty(&l_ptr->waiting_ports)))
1757 tipc_link_wakeup_ports(l_ptr, 0);
1758 if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
1759 l_ptr->stats.sent_acks++;
1760 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1761 }
1762
1763protocol_check:
1764 if (likely(link_working_working(l_ptr))) {
1765 if (likely(seq_no == mod(l_ptr->next_in_no))) {
1766 l_ptr->next_in_no++;
1767 if (unlikely(l_ptr->oldest_deferred_in))
1768 head = link_insert_deferred_queue(l_ptr,
1769 head);
1770 if (likely(msg_is_dest(msg, tipc_own_addr))) {
1771deliver:
1772 if (likely(msg_isdata(msg))) {
1773 tipc_node_unlock(n_ptr);
1774 tipc_port_recv_msg(buf);
1775 continue;
1776 }
1777 switch (msg_user(msg)) {
1778 case MSG_BUNDLER:
1779 l_ptr->stats.recv_bundles++;
1780 l_ptr->stats.recv_bundled +=
1781 msg_msgcnt(msg);
1782 tipc_node_unlock(n_ptr);
1783 tipc_link_recv_bundle(buf);
1784 continue;
1785 case ROUTE_DISTRIBUTOR:
1786 tipc_node_unlock(n_ptr);
1787 tipc_cltr_recv_routing_table(buf);
1788 continue;
1789 case NAME_DISTRIBUTOR:
1790 tipc_node_unlock(n_ptr);
1791 tipc_named_recv(buf);
1792 continue;
1793 case CONN_MANAGER:
1794 tipc_node_unlock(n_ptr);
1795 tipc_port_recv_proto_msg(buf);
1796 continue;
1797 case MSG_FRAGMENTER:
1798 l_ptr->stats.recv_fragments++;
1799 if (tipc_link_recv_fragment(&l_ptr->defragm_buf,
1800 &buf, &msg)) {
1801 l_ptr->stats.recv_fragmented++;
1802 goto deliver;
1803 }
1804 break;
1805 case CHANGEOVER_PROTOCOL:
1806 type = msg_type(msg);
1807 if (link_recv_changeover_msg(&l_ptr, &buf)) {
1808 msg = buf_msg(buf);
1809 seq_no = msg_seqno(msg);
1810 TIPC_SKB_CB(buf)->handle
1811 = b_ptr;
1812 if (type == ORIGINAL_MSG)
1813 goto deliver;
1814 goto protocol_check;
1815 }
1816 break;
1817 }
1818 }
1819 tipc_node_unlock(n_ptr);
1820 tipc_net_route_msg(buf);
1821 continue;
1822 }
1823 link_handle_out_of_seq_msg(l_ptr, buf);
1824 head = link_insert_deferred_queue(l_ptr, head);
1825 tipc_node_unlock(n_ptr);
1826 continue;
1827 }
1828
1829 if (msg_user(msg) == LINK_PROTOCOL) {
1830 link_recv_proto_msg(l_ptr, buf);
1831 head = link_insert_deferred_queue(l_ptr, head);
1832 tipc_node_unlock(n_ptr);
1833 continue;
1834 }
1835 msg_dbg(msg,"NSEQ<REC<");
1836 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
1837
1838 if (link_working_working(l_ptr)) {
1839 /* Re-insert in front of queue */
1840 msg_dbg(msg,"RECV-REINS:");
1841 buf->next = head;
1842 head = buf;
1843 tipc_node_unlock(n_ptr);
1844 continue;
1845 }
1846 tipc_node_unlock(n_ptr);
1847cont:
1848 buf_discard(buf);
1849 }
1850 read_unlock_bh(&tipc_net_lock);
1851}
1852
1853/*
1854 * tipc_link_defer_pkt(): Sort a received out-of-sequence packet
1855 * into the deferred reception queue.
1856 * Returns the increase of the queue length, i.e. 0 or 1
1857 */
1858
1859u32 tipc_link_defer_pkt(struct sk_buff **head,
1860 struct sk_buff **tail,
1861 struct sk_buff *buf)
1862{
1863 struct sk_buff *prev = NULL;
1864 struct sk_buff *crs = *head;
1865 u32 seq_no = msg_seqno(buf_msg(buf));
1866
1867 buf->next = NULL;
1868
1869 /* Empty queue ? */
1870 if (*head == NULL) {
1871 *head = *tail = buf;
1872 return 1;
1873 }
1874
1875 /* Last ? */
1876 if (less(msg_seqno(buf_msg(*tail)), seq_no)) {
1877 (*tail)->next = buf;
1878 *tail = buf;
1879 return 1;
1880 }
1881
1882 /* Scan through queue and sort it in */
1883 do {
1884 struct tipc_msg *msg = buf_msg(crs);
1885
1886 if (less(seq_no, msg_seqno(msg))) {
1887 buf->next = crs;
1888 if (prev)
1889 prev->next = buf;
1890 else
1891 *head = buf;
1892 return 1;
1893 }
1894 if (seq_no == msg_seqno(msg)) {
1895 break;
1896 }
1897 prev = crs;
1898 crs = crs->next;
1899 }
1900 while (crs);
1901
1902 /* Message is a duplicate of an existing message */
1903
1904 buf_discard(buf);
1905 return 0;
1906}
1907
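tipc_link_defer_pkt() keeps the deferred queue sorted using the modulo-2^16 sequence helpers (mod(), less(), less_eq() from the headers, which are not shown in this hunk). The sketch below shows one common way such a wrap-around "strictly less than" test can be written; it illustrates the idea and is not a copy of the TIPC macros:

#include <stdio.h>

static unsigned int mod16(unsigned int x)
{
	return x & 0xffffu;	/* sequence numbers live in a 16-bit space */
}

/* is sequence number 'a' strictly before 'b', allowing for wrap-around? */
static int seq_less(unsigned int a, unsigned int b)
{
	return mod16(b - a - 1) < 0x8000u;
}

int main(void)
{
	printf("%d\n", seq_less(10, 11));	/* 1 */
	printf("%d\n", seq_less(0xffff, 3));	/* 1: 3 follows 0xffff after wrap */
	printf("%d\n", seq_less(11, 10));	/* 0 */
	return 0;
}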
1908/**
1909 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
1910 */
1911
1912static void link_handle_out_of_seq_msg(struct link *l_ptr,
1913 struct sk_buff *buf)
1914{
1915 u32 seq_no = msg_seqno(buf_msg(buf));
1916
1917 if (likely(msg_user(buf_msg(buf)) == LINK_PROTOCOL)) {
1918 link_recv_proto_msg(l_ptr, buf);
1919 return;
1920 }
1921
1922 dbg("rx OOS msg: seq_no %u, expecting %u (%u)\n",
1923 seq_no, mod(l_ptr->next_in_no), l_ptr->next_in_no);
1924
1925 /* Record OOS packet arrival (force mismatch on next timeout) */
1926
1927 l_ptr->checkpoint--;
1928
1929 /*
1930 * Discard packet if a duplicate; otherwise add it to deferred queue
1931 * and notify peer of gap as per protocol specification
1932 */
1933
1934 if (less(seq_no, mod(l_ptr->next_in_no))) {
1935 l_ptr->stats.duplicates++;
1936 buf_discard(buf);
1937 return;
1938 }
1939
1940 if (tipc_link_defer_pkt(&l_ptr->oldest_deferred_in,
1941 &l_ptr->newest_deferred_in, buf)) {
1942 l_ptr->deferred_inqueue_sz++;
1943 l_ptr->stats.deferred_recv++;
1944 if ((l_ptr->deferred_inqueue_sz % 16) == 1)
1945 tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
1946 } else
1947 l_ptr->stats.duplicates++;
1948}
1949
1950/*
1951 * Send protocol message to the other endpoint.
1952 */
1953void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int probe_msg,
1954 u32 gap, u32 tolerance, u32 priority, u32 ack_mtu)
1955{
1956 struct sk_buff *buf = 0;
1957 struct tipc_msg *msg = l_ptr->pmsg;
1958 u32 msg_size = sizeof(l_ptr->proto_msg);
1959
1960 if (link_blocked(l_ptr))
1961 return;
1962 msg_set_type(msg, msg_typ);
1963 msg_set_net_plane(msg, l_ptr->b_ptr->net_plane);
1964 msg_set_bcast_ack(msg, mod(l_ptr->owner->bclink.last_in));
1965 msg_set_last_bcast(msg, tipc_bclink_get_last_sent());
1966
1967 if (msg_typ == STATE_MSG) {
1968 u32 next_sent = mod(l_ptr->next_out_no);
1969
1970 if (!tipc_link_is_up(l_ptr))
1971 return;
1972 if (l_ptr->next_out)
1973 next_sent = msg_seqno(buf_msg(l_ptr->next_out));
1974 msg_set_next_sent(msg, next_sent);
1975 if (l_ptr->oldest_deferred_in) {
1976 u32 rec = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
1977 gap = mod(rec - mod(l_ptr->next_in_no));
1978 }
1979 msg_set_seq_gap(msg, gap);
1980 if (gap)
1981 l_ptr->stats.sent_nacks++;
1982 msg_set_link_tolerance(msg, tolerance);
1983 msg_set_linkprio(msg, priority);
1984 msg_set_max_pkt(msg, ack_mtu);
1985 msg_set_ack(msg, mod(l_ptr->next_in_no - 1));
1986 msg_set_probe(msg, probe_msg != 0);
1987 if (probe_msg) {
1988 u32 mtu = l_ptr->max_pkt;
1989
1990 if ((mtu < l_ptr->max_pkt_target) &&
1991 link_working_working(l_ptr) &&
1992 l_ptr->fsm_msg_cnt) {
1993 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1994 if (l_ptr->max_pkt_probes == 10) {
1995 l_ptr->max_pkt_target = (msg_size - 4);
1996 l_ptr->max_pkt_probes = 0;
1997 msg_size = (mtu + (l_ptr->max_pkt_target - mtu)/2 + 2) & ~3;
1998 }
1999 l_ptr->max_pkt_probes++;
2000 }
2001
2002 l_ptr->stats.sent_probes++;
2003 }
2004 l_ptr->stats.sent_states++;
2005 } else { /* RESET_MSG or ACTIVATE_MSG */
2006 msg_set_ack(msg, mod(l_ptr->reset_checkpoint - 1));
2007 msg_set_seq_gap(msg, 0);
2008 msg_set_next_sent(msg, 1);
2009 msg_set_link_tolerance(msg, l_ptr->tolerance);
2010 msg_set_linkprio(msg, l_ptr->priority);
2011 msg_set_max_pkt(msg, l_ptr->max_pkt_target);
2012 }
2013
2014 if (tipc_node_has_redundant_links(l_ptr->owner)) {
2015 msg_set_redundant_link(msg);
2016 } else {
2017 msg_clear_redundant_link(msg);
2018 }
2019 msg_set_linkprio(msg, l_ptr->priority);
2020
2021 /* Ensure sequence number will not fit within the receive window: */
2022
2023 msg_set_seqno(msg, mod(l_ptr->next_out_no + (0xffff/2)));
2024
2025 /* Congestion? */
2026
2027 if (tipc_bearer_congested(l_ptr->b_ptr, l_ptr)) {
2028 if (!l_ptr->proto_msg_queue) {
2029 l_ptr->proto_msg_queue =
2030 buf_acquire(sizeof(l_ptr->proto_msg));
2031 }
2032 buf = l_ptr->proto_msg_queue;
2033 if (!buf)
2034 return;
2035 memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg));
2036 return;
2037 }
2038 msg_set_timestamp(msg, jiffies_to_msecs(jiffies));
2039
2040 /* Message can be sent */
2041
2042 msg_dbg(msg, ">>");
2043
2044 buf = buf_acquire(msg_size);
2045 if (!buf)
2046 return;
2047
2048 memcpy(buf->data, (unchar *)msg, sizeof(l_ptr->proto_msg));
2049 msg_set_size(buf_msg(buf), msg_size);
2050
2051 if (tipc_bearer_send(l_ptr->b_ptr, buf, &l_ptr->media_addr)) {
2052 l_ptr->unacked_window = 0;
2053 buf_discard(buf);
2054 return;
2055 }
2056
2057 /* New congestion */
2058 tipc_bearer_schedule(l_ptr->b_ptr, l_ptr);
2059 l_ptr->proto_msg_queue = buf;
2060 l_ptr->stats.bearer_congs++;
2061}
2062
2063/*
2064 * Receive protocol message:
2065 * Note that the network plane id propagates through the network, and may
2066 * change at any time. The node with the lowest address takes precedence.
2067 */
2068
2069static void link_recv_proto_msg(struct link *l_ptr, struct sk_buff *buf)
2070{
2071 u32 rec_gap = 0;
2072 u32 max_pkt_info;
2073 u32 max_pkt_ack;
2074 u32 msg_tol;
2075 struct tipc_msg *msg = buf_msg(buf);
2076
2077 dbg("AT(%u):", jiffies_to_msecs(jiffies));
2078 msg_dbg(msg, "<<");
2079 if (link_blocked(l_ptr))
2080 goto exit;
2081
2082 /* record unnumbered packet arrival (force mismatch on next timeout) */
2083
2084 l_ptr->checkpoint--;
2085
2086 if (l_ptr->b_ptr->net_plane != msg_net_plane(msg))
2087 if (tipc_own_addr > msg_prevnode(msg))
2088 l_ptr->b_ptr->net_plane = msg_net_plane(msg);
2089
2090 l_ptr->owner->permit_changeover = msg_redundant_link(msg);
2091
2092 switch (msg_type(msg)) {
2093
2094 case RESET_MSG:
2095 if (!link_working_unknown(l_ptr) && l_ptr->peer_session) {
2096 if (msg_session(msg) == l_ptr->peer_session) {
2097 dbg("Duplicate RESET: %u<->%u\n",
2098 msg_session(msg), l_ptr->peer_session);
2099 break; /* duplicate: ignore */
2100 }
2101 }
2102 /* fall thru' */
2103 case ACTIVATE_MSG:
2104 /* Update link settings according to other endpoint's values */
2105
2106 strcpy((strrchr(l_ptr->name, ':') + 1), (char *)msg_data(msg));
2107
2108 if ((msg_tol = msg_link_tolerance(msg)) &&
2109 (msg_tol > l_ptr->tolerance))
2110 link_set_supervision_props(l_ptr, msg_tol);
2111
2112 if (msg_linkprio(msg) > l_ptr->priority)
2113 l_ptr->priority = msg_linkprio(msg);
2114
2115 max_pkt_info = msg_max_pkt(msg);
2116 if (max_pkt_info) {
2117 if (max_pkt_info < l_ptr->max_pkt_target)
2118 l_ptr->max_pkt_target = max_pkt_info;
2119 if (l_ptr->max_pkt > l_ptr->max_pkt_target)
2120 l_ptr->max_pkt = l_ptr->max_pkt_target;
2121 } else {
2122 l_ptr->max_pkt = l_ptr->max_pkt_target;
2123 }
2124 l_ptr->owner->bclink.supported = (max_pkt_info != 0);
2125
2126 link_state_event(l_ptr, msg_type(msg));
2127
2128 l_ptr->peer_session = msg_session(msg);
2129 l_ptr->peer_bearer_id = msg_bearer_id(msg);
2130
2131 /* Synchronize broadcast sequence numbers */
2132 if (!tipc_node_has_redundant_links(l_ptr->owner)) {
2133 l_ptr->owner->bclink.last_in = mod(msg_last_bcast(msg));
2134 }
2135 break;
2136 case STATE_MSG:
2137
2138 if ((msg_tol = msg_link_tolerance(msg)))
2139 link_set_supervision_props(l_ptr, msg_tol);
2140
2141 if (msg_linkprio(msg) &&
2142 (msg_linkprio(msg) != l_ptr->priority)) {
2143 warn("Changing prio <%s>: %u->%u\n",
2144 l_ptr->name, l_ptr->priority, msg_linkprio(msg));
2145 l_ptr->priority = msg_linkprio(msg);
2146 tipc_link_reset(l_ptr); /* Enforce change to take effect */
2147 break;
2148 }
2149 link_state_event(l_ptr, TRAFFIC_MSG_EVT);
2150 l_ptr->stats.recv_states++;
2151 if (link_reset_unknown(l_ptr))
2152 break;
2153
2154 if (less_eq(mod(l_ptr->next_in_no), msg_next_sent(msg))) {
2155 rec_gap = mod(msg_next_sent(msg) -
2156 mod(l_ptr->next_in_no));
2157 }
2158
2159 max_pkt_ack = msg_max_pkt(msg);
2160 if (max_pkt_ack > l_ptr->max_pkt) {
2161 dbg("Link <%s> updated MTU %u -> %u\n",
2162 l_ptr->name, l_ptr->max_pkt, max_pkt_ack);
2163 l_ptr->max_pkt = max_pkt_ack;
2164 l_ptr->max_pkt_probes = 0;
2165 }
2166
2167 max_pkt_ack = 0;
2168 if (msg_probe(msg)) {
2169 l_ptr->stats.recv_probes++;
2170 if (msg_size(msg) > sizeof(l_ptr->proto_msg)) {
2171 max_pkt_ack = msg_size(msg);
2172 }
2173 }
2174
2175 /* Protocol message before retransmits, reduce loss risk */
2176
2177 tipc_bclink_check_gap(l_ptr->owner, msg_last_bcast(msg));
2178
2179 if (rec_gap || (msg_probe(msg))) {
2180 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2181 0, rec_gap, 0, 0, max_pkt_ack);
2182 }
2183 if (msg_seq_gap(msg)) {
2184 msg_dbg(msg, "With Gap:");
2185 l_ptr->stats.recv_nacks++;
2186 tipc_link_retransmit(l_ptr, l_ptr->first_out,
2187 msg_seq_gap(msg));
2188 }
2189 break;
2190 default:
2191 msg_dbg(buf_msg(buf), "<DISCARDING UNKNOWN<");
2192 }
2193exit:
2194 buf_discard(buf);
2195}
2196
2197
2198/*
2199 * tipc_link_tunnel(): Send one message via a link belonging to
2200 * another bearer. Owner node is locked.
2201 */
2202void tipc_link_tunnel(struct link *l_ptr,
2203 struct tipc_msg *tunnel_hdr,
2204 struct tipc_msg *msg,
2205 u32 selector)
2206{
2207 struct link *tunnel;
2208 struct sk_buff *buf;
2209 u32 length = msg_size(msg);
2210
2211 tunnel = l_ptr->owner->active_links[selector & 1];
2212 if (!tipc_link_is_up(tunnel))
2213 return;
2214 msg_set_size(tunnel_hdr, length + INT_H_SIZE);
2215 buf = buf_acquire(length + INT_H_SIZE);
2216 if (!buf)
2217 return;
2218 memcpy(buf->data, (unchar *)tunnel_hdr, INT_H_SIZE);
2219 memcpy(buf->data + INT_H_SIZE, (unchar *)msg, length);
2220 dbg("%c->%c:", l_ptr->b_ptr->net_plane, tunnel->b_ptr->net_plane);
2221 msg_dbg(buf_msg(buf), ">SEND>");
2222 assert(tunnel);
2223 tipc_link_send_buf(tunnel, buf);
2224}
2225
2226
2227
2228/*
2229 * changeover(): Send whole message queue via the remaining link
2230 * Owner node is locked.
2231 */
2232
2233void tipc_link_changeover(struct link *l_ptr)
2234{
2235 u32 msgcount = l_ptr->out_queue_size;
2236 struct sk_buff *crs = l_ptr->first_out;
2237 struct link *tunnel = l_ptr->owner->active_links[0];
2238 int split_bundles = tipc_node_has_redundant_links(l_ptr->owner);
2239 struct tipc_msg tunnel_hdr;
2240
2241 if (!tunnel)
2242 return;
2243
2244 if (!l_ptr->owner->permit_changeover)
2245 return;
2246
2247 msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2248 ORIGINAL_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
2249 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2250 msg_set_msgcnt(&tunnel_hdr, msgcount);
2251 if (!l_ptr->first_out) {
2252 struct sk_buff *buf;
2253
2254 assert(!msgcount);
2255 buf = buf_acquire(INT_H_SIZE);
2256 if (buf) {
2257 memcpy(buf->data, (unchar *)&tunnel_hdr, INT_H_SIZE);
2258 msg_set_size(&tunnel_hdr, INT_H_SIZE);
2259 dbg("%c->%c:", l_ptr->b_ptr->net_plane,
2260 tunnel->b_ptr->net_plane);
2261 msg_dbg(&tunnel_hdr, "EMPTY>SEND>");
2262 tipc_link_send_buf(tunnel, buf);
2263 } else {
2264 warn("Memory squeeze; link changeover failed\n");
2265 }
2266 return;
2267 }
2268 while (crs) {
2269 struct tipc_msg *msg = buf_msg(crs);
2270
2271 if ((msg_user(msg) == MSG_BUNDLER) && split_bundles) {
2272 u32 msgcount = msg_msgcnt(msg);
2273 struct tipc_msg *m = msg_get_wrapped(msg);
2274 unchar* pos = (unchar*)m;
2275
2276 while (msgcount--) {
2277 msg_set_seqno(m,msg_seqno(msg));
2278 tipc_link_tunnel(l_ptr, &tunnel_hdr, m,
2279 msg_link_selector(m));
2280 pos += align(msg_size(m));
2281 m = (struct tipc_msg *)pos;
2282 }
2283 } else {
2284 tipc_link_tunnel(l_ptr, &tunnel_hdr, msg,
2285 msg_link_selector(msg));
2286 }
2287 crs = crs->next;
2288 }
2289}
2290
2291void tipc_link_send_duplicate(struct link *l_ptr, struct link *tunnel)
2292{
2293 struct sk_buff *iter;
2294 struct tipc_msg tunnel_hdr;
2295
2296 msg_init(&tunnel_hdr, CHANGEOVER_PROTOCOL,
2297 DUPLICATE_MSG, TIPC_OK, INT_H_SIZE, l_ptr->addr);
2298 msg_set_msgcnt(&tunnel_hdr, l_ptr->out_queue_size);
2299 msg_set_bearer_id(&tunnel_hdr, l_ptr->peer_bearer_id);
2300 iter = l_ptr->first_out;
2301 while (iter) {
2302 struct sk_buff *outbuf;
2303 struct tipc_msg *msg = buf_msg(iter);
2304 u32 length = msg_size(msg);
2305
2306 if (msg_user(msg) == MSG_BUNDLER)
2307 msg_set_type(msg, CLOSED_MSG);
2308 msg_set_ack(msg, mod(l_ptr->next_in_no - 1)); /* Update */
2309 msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
2310 msg_set_size(&tunnel_hdr, length + INT_H_SIZE);
2311 outbuf = buf_acquire(length + INT_H_SIZE);
2312 if (outbuf == NULL) {
2313 warn("Memory squeeze; buffer duplication failed\n");
2314 return;
2315 }
2316 memcpy(outbuf->data, (unchar *)&tunnel_hdr, INT_H_SIZE);
2317 memcpy(outbuf->data + INT_H_SIZE, iter->data, length);
2318 dbg("%c->%c:", l_ptr->b_ptr->net_plane,
2319 tunnel->b_ptr->net_plane);
2320 msg_dbg(buf_msg(outbuf), ">SEND>");
2321 tipc_link_send_buf(tunnel, outbuf);
2322 if (!tipc_link_is_up(l_ptr))
2323 return;
2324 iter = iter->next;
2325 }
2326}
2327
2328
2329
2330/**
2331 * buf_extract - extracts embedded TIPC message from another message
2332 * @skb: encapsulating message buffer
2333 * @from_pos: offset to extract from
2334 *
2335 * Returns a new message buffer containing an embedded message. The
2336 * encapsulating message itself is left unchanged.
2337 */
2338
2339static struct sk_buff *buf_extract(struct sk_buff *skb, u32 from_pos)
2340{
2341 struct tipc_msg *msg = (struct tipc_msg *)(skb->data + from_pos);
2342 u32 size = msg_size(msg);
2343 struct sk_buff *eb;
2344
2345 eb = buf_acquire(size);
2346 if (eb)
2347 memcpy(eb->data, (unchar *)msg, size);
2348 return eb;
2349}
2350
2351/*
2352 * link_recv_changeover_msg(): Receive tunneled packet sent
2353 * via other link. Node is locked. Return extracted buffer.
2354 */
2355
2356static int link_recv_changeover_msg(struct link **l_ptr,
2357 struct sk_buff **buf)
2358{
2359 struct sk_buff *tunnel_buf = *buf;
2360 struct link *dest_link;
2361 struct tipc_msg *msg;
2362 struct tipc_msg *tunnel_msg = buf_msg(tunnel_buf);
2363 u32 msg_typ = msg_type(tunnel_msg);
2364 u32 msg_count = msg_msgcnt(tunnel_msg);
2365
2366 dest_link = (*l_ptr)->owner->links[msg_bearer_id(tunnel_msg)];
2367 assert(dest_link != *l_ptr);
2368 if (!dest_link) {
2369 msg_dbg(tunnel_msg, "NOLINK/<REC<");
2370 goto exit;
2371 }
2372 dbg("%c<-%c:", dest_link->b_ptr->net_plane,
2373 (*l_ptr)->b_ptr->net_plane);
2374 *l_ptr = dest_link;
2375 msg = msg_get_wrapped(tunnel_msg);
2376
2377 if (msg_typ == DUPLICATE_MSG) {
2378 if (less(msg_seqno(msg), mod(dest_link->next_in_no))) {
2379 msg_dbg(tunnel_msg, "DROP/<REC<");
2380 goto exit;
2381 }
2382 *buf = buf_extract(tunnel_buf,INT_H_SIZE);
2383 if (*buf == NULL) {
2384 warn("Memory squeeze; failed to extract msg\n");
2385 goto exit;
2386 }
2387 msg_dbg(tunnel_msg, "TNL<REC<");
2388 buf_discard(tunnel_buf);
2389 return 1;
2390 }
2391
2392 /* First original message? */
2393
2394 if (tipc_link_is_up(dest_link)) {
2395 msg_dbg(tunnel_msg, "UP/FIRST/<REC<");
2396 tipc_link_reset(dest_link);
2397 dest_link->exp_msg_count = msg_count;
2398 if (!msg_count)
2399 goto exit;
2400 } else if (dest_link->exp_msg_count == START_CHANGEOVER) {
2401 msg_dbg(tunnel_msg, "BLK/FIRST/<REC<");
2402 dest_link->exp_msg_count = msg_count;
2403 if (!msg_count)
2404 goto exit;
2405 }
2406
2407 /* Receive original message */
2408
2409 if (dest_link->exp_msg_count == 0) {
2410 msg_dbg(tunnel_msg, "OVERDUE/DROP/<REC<");
2411 dbg_print_link(dest_link, "LINK:");
2412 goto exit;
2413 }
2414 dest_link->exp_msg_count--;
2415 if (less(msg_seqno(msg), dest_link->reset_checkpoint)) {
2416 msg_dbg(tunnel_msg, "DROP/DUPL/<REC<");
2417 goto exit;
2418 } else {
2419 *buf = buf_extract(tunnel_buf, INT_H_SIZE);
2420 if (*buf != NULL) {
2421 msg_dbg(tunnel_msg, "TNL<REC<");
2422 buf_discard(tunnel_buf);
2423 return 1;
2424 } else {
2425 warn("Memory squeeze; dropped incoming msg\n");
2426 }
2427 }
2428exit:
2429 *buf = 0;
2430 buf_discard(tunnel_buf);
2431 return 0;
2432}
2433
2434/*
2435 * Bundler functionality:
2436 */
2437void tipc_link_recv_bundle(struct sk_buff *buf)
2438{
2439 u32 msgcount = msg_msgcnt(buf_msg(buf));
2440 u32 pos = INT_H_SIZE;
2441 struct sk_buff *obuf;
2442
2443 msg_dbg(buf_msg(buf), "<BNDL<: ");
2444 while (msgcount--) {
2445 obuf = buf_extract(buf, pos);
2446 if (obuf == NULL) {
2447 char addr_string[16];
2448
2449 warn("Buffer allocation failure;\n");
2450 warn(" incoming message(s) from %s lost\n",
2451 addr_string_fill(addr_string,
2452 msg_orignode(buf_msg(buf))));
2453 return;
2454 }
2455 pos += align(msg_size(buf_msg(obuf)));
2456 msg_dbg(buf_msg(obuf), " /");
2457 tipc_net_route_msg(obuf);
2458 }
2459 buf_discard(buf);
2460}
2461
2462/*
2463 * Fragmentation/defragmentation:
2464 */
2465
2466
2467/*
2468 * tipc_link_send_long_buf: Entry for buffers needing fragmentation.
2469 * The buffer is complete, including total message length.
2470 * Returns user data length.
2471 */
2472int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf)
2473{
2474 struct tipc_msg *inmsg = buf_msg(buf);
2475 struct tipc_msg fragm_hdr;
2476 u32 insize = msg_size(inmsg);
2477 u32 dsz = msg_data_sz(inmsg);
2478 unchar *crs = buf->data;
2479 u32 rest = insize;
2480 u32 pack_sz = link_max_pkt(l_ptr);
2481 u32 fragm_sz = pack_sz - INT_H_SIZE;
2482 u32 fragm_no = 1;
2483 u32 destaddr = msg_destnode(inmsg);
2484
2485 if (msg_short(inmsg))
2486 destaddr = l_ptr->addr;
2487
2488 if (msg_routed(inmsg))
2489 msg_set_prevnode(inmsg, tipc_own_addr);
2490
2491 /* Prepare reusable fragment header: */
2492
2493 msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
2494 TIPC_OK, INT_H_SIZE, destaddr);
2495 msg_set_link_selector(&fragm_hdr, msg_link_selector(inmsg));
2496 msg_set_long_msgno(&fragm_hdr, mod(l_ptr->long_msg_seq_no++));
2497 msg_set_fragm_no(&fragm_hdr, fragm_no);
2498 l_ptr->stats.sent_fragmented++;
2499
2500 /* Chop up message: */
2501
2502 while (rest > 0) {
2503 struct sk_buff *fragm;
2504
2505 if (rest <= fragm_sz) {
2506 fragm_sz = rest;
2507 msg_set_type(&fragm_hdr, LAST_FRAGMENT);
2508 }
2509 fragm = buf_acquire(fragm_sz + INT_H_SIZE);
2510 if (fragm == NULL) {
2511 warn("Memory squeeze; failed to fragment msg\n");
2512 dsz = -ENOMEM;
2513 goto exit;
2514 }
2515 msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
2516 memcpy(fragm->data, (unchar *)&fragm_hdr, INT_H_SIZE);
2517 memcpy(fragm->data + INT_H_SIZE, crs, fragm_sz);
2518
2519 /* Send queued messages first, if any: */
2520
2521 l_ptr->stats.sent_fragments++;
2522 tipc_link_send_buf(l_ptr, fragm);
2523 if (!tipc_link_is_up(l_ptr))
2524 return dsz;
2525 msg_set_fragm_no(&fragm_hdr, ++fragm_no);
2526 rest -= fragm_sz;
2527 crs += fragm_sz;
2528 msg_set_type(&fragm_hdr, FRAGMENT);
2529 }
2530exit:
2531 buf_discard(buf);
2532 return dsz;
2533}
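/*
 * Example: a buffer of insize bytes yields
 * ceil(insize / (link_max_pkt(l_ptr) - INT_H_SIZE)) fragments, each
 * carrying a fresh INT_H_SIZE fragment header; only the final fragment is
 * marked LAST_FRAGMENT and may be shorter than the preceding ones.
 */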
2534
2535/*
2536 * A pending message being re-assembled must store certain values
2537 * to handle subsequent fragments correctly. The following functions
2538 * help store these values in unused, available fields in the
2539 * pending message. This makes dynamic memory allocation unnecessary.
2540 */
2541
2542static inline u32 get_long_msg_seqno(struct sk_buff *buf)
2543{
2544 return msg_seqno(buf_msg(buf));
2545}
2546
2547static inline void set_long_msg_seqno(struct sk_buff *buf, u32 seqno)
2548{
2549 msg_set_seqno(buf_msg(buf), seqno);
2550}
2551
2552static inline u32 get_fragm_size(struct sk_buff *buf)
2553{
2554 return msg_ack(buf_msg(buf));
2555}
2556
2557static inline void set_fragm_size(struct sk_buff *buf, u32 sz)
2558{
2559 msg_set_ack(buf_msg(buf), sz);
2560}
2561
2562static inline u32 get_expected_frags(struct sk_buff *buf)
2563{
2564 return msg_bcast_ack(buf_msg(buf));
2565}
2566
2567static inline void set_expected_frags(struct sk_buff *buf, u32 exp)
2568{
2569 msg_set_bcast_ack(buf_msg(buf), exp);
2570}
2571
2572static inline u32 get_timer_cnt(struct sk_buff *buf)
2573{
2574 return msg_reroute_cnt(buf_msg(buf));
2575}
2576
2577static inline void incr_timer_cnt(struct sk_buff *buf)
2578{
2579 msg_incr_reroute_cnt(buf_msg(buf));
2580}
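/*
 * In short, the pending buffer's header is reused as follows: the seqno
 * field holds the long message number, the ack field the fragment size,
 * the bcast_ack field the number of fragments still expected, and the
 * reroute count field the defragmentation timer count.
 */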
2581
2582/*
2583 * tipc_link_recv_fragment(): Called with node lock on. Returns
2584 * the reassembled buffer if message is complete.
2585 */
2586int tipc_link_recv_fragment(struct sk_buff **pending, struct sk_buff **fb,
2587 struct tipc_msg **m)
2588{
2589 struct sk_buff *prev = 0;
2590 struct sk_buff *fbuf = *fb;
2591 struct tipc_msg *fragm = buf_msg(fbuf);
2592 struct sk_buff *pbuf = *pending;
2593 u32 long_msg_seq_no = msg_long_msgno(fragm);
2594
2595 *fb = 0;
2596 msg_dbg(fragm,"FRG<REC<");
2597
2598 /* Is there an incomplete message waiting for this fragment? */
2599
2600 while (pbuf && ((msg_seqno(buf_msg(pbuf)) != long_msg_seq_no)
2601 || (msg_orignode(fragm) != msg_orignode(buf_msg(pbuf))))) {
2602 prev = pbuf;
2603 pbuf = pbuf->next;
2604 }
2605
2606 if (!pbuf && (msg_type(fragm) == FIRST_FRAGMENT)) {
2607 struct tipc_msg *imsg = (struct tipc_msg *)msg_data(fragm);
2608 u32 msg_sz = msg_size(imsg);
2609 u32 fragm_sz = msg_data_sz(fragm);
2610 u32 exp_fragm_cnt = msg_sz/fragm_sz + !!(msg_sz % fragm_sz);
2611 u32 max = TIPC_MAX_USER_MSG_SIZE + LONG_H_SIZE;
2612 if (msg_type(imsg) == TIPC_MCAST_MSG)
2613 max = TIPC_MAX_USER_MSG_SIZE + MCAST_H_SIZE;
2614 if (msg_size(imsg) > max) {
2615 msg_dbg(fragm,"<REC<Oversized: ");
2616 buf_discard(fbuf);
2617 return 0;
2618 }
2619 pbuf = buf_acquire(msg_size(imsg));
2620 if (pbuf != NULL) {
2621 pbuf->next = *pending;
2622 *pending = pbuf;
2623 memcpy(pbuf->data, (unchar *)imsg, msg_data_sz(fragm));
2624
2625 /* Prepare buffer for subsequent fragments. */
2626
2627 set_long_msg_seqno(pbuf, long_msg_seq_no);
2628 set_fragm_size(pbuf,fragm_sz);
2629 set_expected_frags(pbuf,exp_fragm_cnt - 1);
2630 } else {
2631 warn("Memory squeeze; got no defragmenting buffer\n");
2632 }
2633 buf_discard(fbuf);
2634 return 0;
2635 } else if (pbuf && (msg_type(fragm) != FIRST_FRAGMENT)) {
2636 u32 dsz = msg_data_sz(fragm);
2637 u32 fsz = get_fragm_size(pbuf);
2638 u32 crs = ((msg_fragm_no(fragm) - 1) * fsz);
2639 u32 exp_frags = get_expected_frags(pbuf) - 1;
2640 memcpy(pbuf->data + crs, msg_data(fragm), dsz);
2641 buf_discard(fbuf);
2642
2643 /* Is message complete? */
2644
2645 if (exp_frags == 0) {
2646 if (prev)
2647 prev->next = pbuf->next;
2648 else
2649 *pending = pbuf->next;
2650 msg_reset_reroute_cnt(buf_msg(pbuf));
2651 *fb = pbuf;
2652 *m = buf_msg(pbuf);
2653 return 1;
2654 }
2655 set_expected_frags(pbuf,exp_frags);
2656 return 0;
2657 }
2658 dbg(" Discarding orphan fragment %x\n",fbuf);
2659 msg_dbg(fragm,"ORPHAN:");
2660 dbg("Pending long buffers:\n");
2661 dbg_print_buf_chain(*pending);
2662 buf_discard(fbuf);
2663 return 0;
2664}
2665
2666/**
2667 * link_check_defragm_bufs - flush stale incoming message fragments
2668 * @l_ptr: pointer to link
2669 */
2670
2671static void link_check_defragm_bufs(struct link *l_ptr)
2672{
2673 struct sk_buff *prev = 0;
2674 struct sk_buff *next = 0;
2675 struct sk_buff *buf = l_ptr->defragm_buf;
2676
2677 if (!buf)
2678 return;
2679 if (!link_working_working(l_ptr))
2680 return;
2681 while (buf) {
2682 u32 cnt = get_timer_cnt(buf);
2683
2684 next = buf->next;
2685 if (cnt < 4) {
2686 incr_timer_cnt(buf);
2687 prev = buf;
2688 } else {
2689 dbg(" Discarding incomplete long buffer\n");
2690 msg_dbg(buf_msg(buf), "LONG:");
2691 dbg_print_link(l_ptr, "curr:");
2692 dbg("Pending long buffers:\n");
2693 dbg_print_buf_chain(l_ptr->defragm_buf);
2694 if (prev)
2695 prev->next = buf->next;
2696 else
2697 l_ptr->defragm_buf = buf->next;
2698 buf_discard(buf);
2699 }
2700 buf = next;
2701 }
2702}
2703
2704
2705
2706static void link_set_supervision_props(struct link *l_ptr, u32 tolerance)
2707{
2708 l_ptr->tolerance = tolerance;
2709 l_ptr->continuity_interval =
2710 ((tolerance / 4) > 500) ? 500 : tolerance / 4;
2711 l_ptr->abort_limit = tolerance / (l_ptr->continuity_interval / 4);
2712}
2713
2714
2715void tipc_link_set_queue_limits(struct link *l_ptr, u32 window)
2716{
2717 /* Data messages from this node, inclusive FIRST_FRAGM */
2718 l_ptr->queue_limit[DATA_LOW] = window;
2719 l_ptr->queue_limit[DATA_MEDIUM] = (window / 3) * 4;
2720 l_ptr->queue_limit[DATA_HIGH] = (window / 3) * 5;
2721 l_ptr->queue_limit[DATA_CRITICAL] = (window / 3) * 6;
2722 /* Transiting data messages, inclusive FIRST_FRAGM */
2723 l_ptr->queue_limit[DATA_LOW + 4] = 300;
2724 l_ptr->queue_limit[DATA_MEDIUM + 4] = 600;
2725 l_ptr->queue_limit[DATA_HIGH + 4] = 900;
2726 l_ptr->queue_limit[DATA_CRITICAL + 4] = 1200;
2727 l_ptr->queue_limit[CONN_MANAGER] = 1200;
2728 l_ptr->queue_limit[ROUTE_DISTRIBUTOR] = 1200;
2729 l_ptr->queue_limit[CHANGEOVER_PROTOCOL] = 2500;
2730 l_ptr->queue_limit[NAME_DISTRIBUTOR] = 3000;
2731 /* FRAGMENT and LAST_FRAGMENT packets */
2732 l_ptr->queue_limit[MSG_FRAGMENTER] = 4000;
2733}
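/*
 * Example: window = 50 gives queue_limit[DATA_LOW] = 50,
 * [DATA_MEDIUM] = 64, [DATA_HIGH] = 80 and [DATA_CRITICAL] = 96
 * (integer division: 50 / 3 = 16); the remaining limits are fixed and
 * independent of the window argument.
 */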
2734
2735/**
2736 * link_find_link - locate link by name
2737 * @name: ptr to link name string
2738 * @node: ptr to area to be filled with ptr to associated node
2739 *
2740 * Caller must hold 'tipc_net_lock' to ensure node and bearer are not deleted;
2741 * this also prevents link deletion.
2742 *
2743 * Returns pointer to link (or 0 if invalid link name).
2744 */
2745
2746static struct link *link_find_link(const char *name, struct node **node)
2747{
2748 struct link_name link_name_parts;
2749 struct bearer *b_ptr;
2750 struct link *l_ptr;
2751
2752 if (!link_name_validate(name, &link_name_parts))
2753 return 0;
2754
2755 b_ptr = tipc_bearer_find_interface(link_name_parts.if_local);
2756 if (!b_ptr)
2757 return 0;
2758
2759 *node = tipc_node_find(link_name_parts.addr_peer);
2760 if (!*node)
2761 return 0;
2762
2763 l_ptr = (*node)->links[b_ptr->identity];
2764 if (!l_ptr || strcmp(l_ptr->name, name))
2765 return 0;
2766
2767 return l_ptr;
2768}
2769
2770struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space,
2771 u16 cmd)
2772{
2773 struct tipc_link_config *args;
2774 u32 new_value;
2775 struct link *l_ptr;
2776 struct node *node;
2777 int res;
2778
2779 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_CONFIG))
2780 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2781
2782 args = (struct tipc_link_config *)TLV_DATA(req_tlv_area);
2783 new_value = ntohl(args->value);
2784
2785 if (!strcmp(args->name, tipc_bclink_name)) {
2786 if ((cmd == TIPC_CMD_SET_LINK_WINDOW) &&
2787 (tipc_bclink_set_queue_limits(new_value) == 0))
2788 return tipc_cfg_reply_none();
2789 return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
2790 " (cannot change setting on broadcast link)");
2791 }
2792
2793 read_lock_bh(&tipc_net_lock);
2794 l_ptr = link_find_link(args->name, &node);
2795 if (!l_ptr) {
2796 read_unlock_bh(&tipc_net_lock);
2797 return tipc_cfg_reply_error_string("link not found");
2798 }
2799
2800 tipc_node_lock(node);
2801 res = -EINVAL;
2802 switch (cmd) {
2803 case TIPC_CMD_SET_LINK_TOL:
2804 if ((new_value >= TIPC_MIN_LINK_TOL) &&
2805 (new_value <= TIPC_MAX_LINK_TOL)) {
2806 link_set_supervision_props(l_ptr, new_value);
2807 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2808 0, 0, new_value, 0, 0);
2809 res = TIPC_OK;
2810 }
2811 break;
2812 case TIPC_CMD_SET_LINK_PRI:
2813 if ((new_value >= TIPC_MIN_LINK_PRI) &&
2814 (new_value <= TIPC_MAX_LINK_PRI)) {
2815 l_ptr->priority = new_value;
2816 tipc_link_send_proto_msg(l_ptr, STATE_MSG,
2817 0, 0, 0, new_value, 0);
2818 res = TIPC_OK;
2819 }
2820 break;
2821 case TIPC_CMD_SET_LINK_WINDOW:
2822 if ((new_value >= TIPC_MIN_LINK_WIN) &&
2823 (new_value <= TIPC_MAX_LINK_WIN)) {
2824 tipc_link_set_queue_limits(l_ptr, new_value);
2825 res = TIPC_OK;
2826 }
2827 break;
2828 }
2829 tipc_node_unlock(node);
2830
2831 read_unlock_bh(&tipc_net_lock);
2832 if (res)
2833 return tipc_cfg_reply_error_string("cannot change link setting");
2834
2835 return tipc_cfg_reply_none();
2836}
2837
2838/**
2839 * link_reset_statistics - reset link statistics
2840 * @l_ptr: pointer to link
2841 */
2842
2843static void link_reset_statistics(struct link *l_ptr)
2844{
2845 memset(&l_ptr->stats, 0, sizeof(l_ptr->stats));
2846 l_ptr->stats.sent_info = l_ptr->next_out_no;
2847 l_ptr->stats.recv_info = l_ptr->next_in_no;
2848}
2849
2850struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space)
2851{
2852 char *link_name;
2853 struct link *l_ptr;
2854 struct node *node;
2855
2856 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2857 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2858
2859 link_name = (char *)TLV_DATA(req_tlv_area);
2860 if (!strcmp(link_name, tipc_bclink_name)) {
2861 if (tipc_bclink_reset_stats())
2862 return tipc_cfg_reply_error_string("link not found");
2863 return tipc_cfg_reply_none();
2864 }
2865
2866 read_lock_bh(&tipc_net_lock);
2867 l_ptr = link_find_link(link_name, &node);
2868 if (!l_ptr) {
2869 read_unlock_bh(&tipc_net_lock);
2870 return tipc_cfg_reply_error_string("link not found");
2871 }
2872
2873 tipc_node_lock(node);
2874 link_reset_statistics(l_ptr);
2875 tipc_node_unlock(node);
2876 read_unlock_bh(&tipc_net_lock);
2877 return tipc_cfg_reply_none();
2878}
2879
2880/**
2881 * percent - convert count to a percentage of total (rounding up or down)
2882 */
2883
2884static u32 percent(u32 count, u32 total)
2885{
2886 return (count * 100 + (total / 2)) / total;
2887}
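/*
 * Example: percent(1, 3) = (100 + 1) / 3 = 33 and percent(2, 3) =
 * (200 + 1) / 3 = 67, i.e. the result is rounded to the nearest percent.
 */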
2888
2889/**
2890 * tipc_link_stats - print link statistics
2891 * @name: link name
2892 * @buf: print buffer area
2893 * @buf_size: size of print buffer area
2894 *
2895 * Returns length of print buffer data string (or 0 if error)
2896 */
2897
2898static int tipc_link_stats(const char *name, char *buf, const u32 buf_size)
2899{
2900 struct print_buf pb;
2901 struct link *l_ptr;
2902 struct node *node;
2903 char *status;
2904 u32 profile_total = 0;
2905
2906 if (!strcmp(name, tipc_bclink_name))
2907 return tipc_bclink_stats(buf, buf_size);
2908
2909 tipc_printbuf_init(&pb, buf, buf_size);
2910
2911 read_lock_bh(&tipc_net_lock);
2912 l_ptr = link_find_link(name, &node);
2913 if (!l_ptr) {
2914 read_unlock_bh(&tipc_net_lock);
2915 return 0;
2916 }
2917 tipc_node_lock(node);
2918
2919 if (tipc_link_is_active(l_ptr))
2920 status = "ACTIVE";
2921 else if (tipc_link_is_up(l_ptr))
2922 status = "STANDBY";
2923 else
2924 status = "DEFUNCT";
2925 tipc_printf(&pb, "Link <%s>\n"
2926 " %s MTU:%u Priority:%u Tolerance:%u ms"
2927 " Window:%u packets\n",
2928 l_ptr->name, status, link_max_pkt(l_ptr),
2929 l_ptr->priority, l_ptr->tolerance, l_ptr->queue_limit[0]);
2930 tipc_printf(&pb, " RX packets:%u fragments:%u/%u bundles:%u/%u\n",
2931 l_ptr->next_in_no - l_ptr->stats.recv_info,
2932 l_ptr->stats.recv_fragments,
2933 l_ptr->stats.recv_fragmented,
2934 l_ptr->stats.recv_bundles,
2935 l_ptr->stats.recv_bundled);
2936 tipc_printf(&pb, " TX packets:%u fragments:%u/%u bundles:%u/%u\n",
2937 l_ptr->next_out_no - l_ptr->stats.sent_info,
2938 l_ptr->stats.sent_fragments,
2939 l_ptr->stats.sent_fragmented,
2940 l_ptr->stats.sent_bundles,
2941 l_ptr->stats.sent_bundled);
2942 profile_total = l_ptr->stats.msg_length_counts;
2943 if (!profile_total)
2944 profile_total = 1;
2945 tipc_printf(&pb, " TX profile sample:%u packets average:%u octets\n"
2946 " 0-64:%u%% -256:%u%% -1024:%u%% -4096:%u%% "
2947 "-16354:%u%% -32768:%u%% -66000:%u%%\n",
2948 l_ptr->stats.msg_length_counts,
2949 l_ptr->stats.msg_lengths_total / profile_total,
2950 percent(l_ptr->stats.msg_length_profile[0], profile_total),
2951 percent(l_ptr->stats.msg_length_profile[1], profile_total),
2952 percent(l_ptr->stats.msg_length_profile[2], profile_total),
2953 percent(l_ptr->stats.msg_length_profile[3], profile_total),
2954 percent(l_ptr->stats.msg_length_profile[4], profile_total),
2955 percent(l_ptr->stats.msg_length_profile[5], profile_total),
2956 percent(l_ptr->stats.msg_length_profile[6], profile_total));
2957 tipc_printf(&pb, " RX states:%u probes:%u naks:%u defs:%u dups:%u\n",
2958 l_ptr->stats.recv_states,
2959 l_ptr->stats.recv_probes,
2960 l_ptr->stats.recv_nacks,
2961 l_ptr->stats.deferred_recv,
2962 l_ptr->stats.duplicates);
2963 tipc_printf(&pb, " TX states:%u probes:%u naks:%u acks:%u dups:%u\n",
2964 l_ptr->stats.sent_states,
2965 l_ptr->stats.sent_probes,
2966 l_ptr->stats.sent_nacks,
2967 l_ptr->stats.sent_acks,
2968 l_ptr->stats.retransmitted);
2969 tipc_printf(&pb, " Congestion bearer:%u link:%u Send queue max:%u avg:%u\n",
2970 l_ptr->stats.bearer_congs,
2971 l_ptr->stats.link_congs,
2972 l_ptr->stats.max_queue_sz,
2973 l_ptr->stats.queue_sz_counts
2974 ? (l_ptr->stats.accu_queue_sz / l_ptr->stats.queue_sz_counts)
2975 : 0);
2976
2977 tipc_node_unlock(node);
2978 read_unlock_bh(&tipc_net_lock);
2979 return tipc_printbuf_validate(&pb);
2980}
2981
2982#define MAX_LINK_STATS_INFO 2000
2983
2984struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space)
2985{
2986 struct sk_buff *buf;
2987 struct tlv_desc *rep_tlv;
2988 int str_len;
2989
2990 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_LINK_NAME))
2991 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
2992
2993 buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_LINK_STATS_INFO));
2994 if (!buf)
2995 return NULL;
2996
2997 rep_tlv = (struct tlv_desc *)buf->data;
2998
2999 str_len = tipc_link_stats((char *)TLV_DATA(req_tlv_area),
3000 (char *)TLV_DATA(rep_tlv), MAX_LINK_STATS_INFO);
3001 if (!str_len) {
3002 buf_discard(buf);
3003 return tipc_cfg_reply_error_string("link not found");
3004 }
3005
3006 skb_put(buf, TLV_SPACE(str_len));
3007 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
3008
3009 return buf;
3010}
3011
3012#if 0
3013int link_control(const char *name, u32 op, u32 val)
3014{
3015 int res = -EINVAL;
3016 struct link *l_ptr;
3017 u32 bearer_id;
3018 struct node * node;
3019 u32 a;
3020
3021 a = link_name2addr(name, &bearer_id);
3022 read_lock_bh(&tipc_net_lock);
3023 node = tipc_node_find(a);
3024 if (node) {
3025 tipc_node_lock(node);
3026 l_ptr = node->links[bearer_id];
3027 if (l_ptr) {
3028 if (op == TIPC_REMOVE_LINK) {
3029 struct bearer *b_ptr = l_ptr->b_ptr;
3030 spin_lock_bh(&b_ptr->publ.lock);
3031 tipc_link_delete(l_ptr);
3032 spin_unlock_bh(&b_ptr->publ.lock);
3033 }
3034 if (op == TIPC_CMD_BLOCK_LINK) {
3035 tipc_link_reset(l_ptr);
3036 l_ptr->blocked = 1;
3037 }
3038 if (op == TIPC_CMD_UNBLOCK_LINK) {
3039 l_ptr->blocked = 0;
3040 }
3041 res = TIPC_OK;
3042 }
3043 tipc_node_unlock(node);
3044 }
3045 read_unlock_bh(&tipc_net_lock);
3046 return res;
3047}
3048#endif
3049
3050/**
3051 * tipc_link_get_max_pkt - get maximum packet size to use when sending to destination
3052 * @dest: network address of destination node
3053 * @selector: used to select from set of active links
3054 *
3055 * If no active link can be found, uses default maximum packet size.
3056 */
3057
3058u32 tipc_link_get_max_pkt(u32 dest, u32 selector)
3059{
3060 struct node *n_ptr;
3061 struct link *l_ptr;
3062 u32 res = MAX_PKT_DEFAULT;
3063
3064 if (dest == tipc_own_addr)
3065 return MAX_MSG_SIZE;
3066
3067 read_lock_bh(&tipc_net_lock);
3068 n_ptr = tipc_node_select(dest, selector);
3069 if (n_ptr) {
3070 tipc_node_lock(n_ptr);
3071 l_ptr = n_ptr->active_links[selector & 1];
3072 if (l_ptr)
3073 res = link_max_pkt(l_ptr);
3074 tipc_node_unlock(n_ptr);
3075 }
3076 read_unlock_bh(&tipc_net_lock);
3077 return res;
3078}
3079
3080#if 0
3081static void link_dump_rec_queue(struct link *l_ptr)
3082{
3083 struct sk_buff *crs;
3084
3085 if (!l_ptr->oldest_deferred_in) {
3086 info("Reception queue empty\n");
3087 return;
3088 }
3089 info("Contents of Reception queue:\n");
3090 crs = l_ptr->oldest_deferred_in;
3091 while (crs) {
3092 if (crs->data == (void *)0x0000a3a3) {
3093 info("buffer %x invalid\n", crs);
3094 return;
3095 }
3096 msg_dbg(buf_msg(crs), "In rec queue: \n");
3097 crs = crs->next;
3098 }
3099}
3100#endif
3101
3102static void link_dump_send_queue(struct link *l_ptr)
3103{
3104 if (l_ptr->next_out) {
3105 info("\nContents of unsent queue:\n");
3106 dbg_print_buf_chain(l_ptr->next_out);
3107 }
3108 info("\nContents of send queue:\n");
3109 if (l_ptr->first_out)
3110 dbg_print_buf_chain(l_ptr->first_out);
3111 else
3112 info("Empty send queue\n");
3113}
3114
3115static void link_print(struct link *l_ptr, struct print_buf *buf,
3116 const char *str)
3117{
3118 tipc_printf(buf, str);
3119 if (link_reset_reset(l_ptr) || link_reset_unknown(l_ptr))
3120 return;
3121 tipc_printf(buf, "Link %x<%s>:",
3122 l_ptr->addr, l_ptr->b_ptr->publ.name);
3123 tipc_printf(buf, ": NXO(%u):", mod(l_ptr->next_out_no));
3124 tipc_printf(buf, "NXI(%u):", mod(l_ptr->next_in_no));
3125 tipc_printf(buf, "SQUE");
3126 if (l_ptr->first_out) {
3127 tipc_printf(buf, "[%u..", msg_seqno(buf_msg(l_ptr->first_out)));
3128 if (l_ptr->next_out)
3129 tipc_printf(buf, "%u..",
3130 msg_seqno(buf_msg(l_ptr->next_out)));
3131 tipc_printf(buf, "%u]",
3132 msg_seqno(buf_msg
3133 (l_ptr->last_out)));
3134 if ((mod(msg_seqno(buf_msg(l_ptr->last_out)) -
3135 msg_seqno(buf_msg(l_ptr->first_out)))
3136 != (l_ptr->out_queue_size - 1))
3137 || (l_ptr->last_out->next != 0)) {
3138 tipc_printf(buf, "\nSend queue inconsistency\n");
3139 tipc_printf(buf, "first_out= %x ", l_ptr->first_out);
3140 tipc_printf(buf, "next_out= %x ", l_ptr->next_out);
3141 tipc_printf(buf, "last_out= %x ", l_ptr->last_out);
3142 link_dump_send_queue(l_ptr);
3143 }
3144 } else
3145 tipc_printf(buf, "[]");
3146 tipc_printf(buf, "SQSIZ(%u)", l_ptr->out_queue_size);
3147 if (l_ptr->oldest_deferred_in) {
3148 u32 o = msg_seqno(buf_msg(l_ptr->oldest_deferred_in));
3149 u32 n = msg_seqno(buf_msg(l_ptr->newest_deferred_in));
3150 tipc_printf(buf, ":RQUE[%u..%u]", o, n);
3151 if (l_ptr->deferred_inqueue_sz != mod((n + 1) - o)) {
3152 tipc_printf(buf, ":RQSIZ(%u)",
3153 l_ptr->deferred_inqueue_sz);
3154 }
3155 }
3156 if (link_working_unknown(l_ptr))
3157 tipc_printf(buf, ":WU");
3158 if (link_reset_reset(l_ptr))
3159 tipc_printf(buf, ":RR");
3160 if (link_reset_unknown(l_ptr))
3161 tipc_printf(buf, ":RU");
3162 if (link_working_working(l_ptr))
3163 tipc_printf(buf, ":WW");
3164 tipc_printf(buf, "\n");
3165}
3166
diff --git a/net/tipc/link.h b/net/tipc/link.h
new file mode 100644
index 000000000000..2d3c157f707d
--- /dev/null
+++ b/net/tipc/link.h
@@ -0,0 +1,295 @@
1/*
2 * net/tipc/link.h: Include file for TIPC link code
3 *
4 * Copyright (c) 1995-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_LINK_H
38#define _TIPC_LINK_H
39
40#include "dbg.h"
41#include "msg.h"
42#include "bearer.h"
43#include "node.h"
44
45#define PUSH_FAILED 1
46#define PUSH_FINISHED 2
47
48/*
49 * Link states
50 */
51
52#define WORKING_WORKING 560810u
53#define WORKING_UNKNOWN 560811u
54#define RESET_UNKNOWN 560812u
55#define RESET_RESET 560813u
56
57/*
58 * Starting value for maximum packet size negotiation on unicast links
59 * (unless bearer MTU is less)
60 */
61
62#define MAX_PKT_DEFAULT 1500
63
64/**
65 * struct link - TIPC link data structure
66 * @addr: network address of link's peer node
67 * @name: link name character string
68 * @media_addr: media address to use when sending messages over link
69 * @timer: link timer
70 * @owner: pointer to peer node
71 * @link_list: adjacent links in bearer's list of links
72 * @started: indicates if link has been started
73 * @checkpoint: reference point for triggering link continuity checking
74 * @peer_session: link session # being used by peer end of link
75 * @peer_bearer_id: bearer id used by link's peer endpoint
76 * @b_ptr: pointer to bearer used by link
77 * @tolerance: minimum link continuity loss needed to reset link [in ms]
78 * @continuity_interval: link continuity testing interval [in ms]
79 * @abort_limit: # of unacknowledged continuity probes needed to reset link
80 * @state: current state of link FSM
81 * @blocked: indicates if link has been administratively blocked
82 * @fsm_msg_cnt: # of protocol messages link FSM has sent in current state
83 * @proto_msg: template for control messages generated by link
84 * @pmsg: convenience pointer to "proto_msg" field
85 * @priority: current link priority
86 * @queue_limit: outbound message queue congestion thresholds (indexed by user)
87 * @exp_msg_count: # of tunnelled messages expected during link changeover
88 * @reset_checkpoint: seq # of last acknowledged message at time of link reset
89 * @max_pkt: current maximum packet size for this link
90 * @max_pkt_target: desired maximum packet size for this link
91 * @max_pkt_probes: # of probes based on current (max_pkt, max_pkt_target)
92 * @out_queue_size: # of messages in outbound message queue
93 * @first_out: ptr to first outbound message in queue
94 * @last_out: ptr to last outbound message in queue
95 * @next_out_no: next sequence number to use for outbound messages
96 * @last_retransmitted: sequence number of most recently retransmitted message
97 * @stale_count: # of identical retransmit requests made by peer
98 * @next_in_no: next sequence number to expect for inbound messages
99 * @deferred_inqueue_sz: # of messages in inbound message queue
100 * @oldest_deferred_in: ptr to first inbound message in queue
101 * @newest_deferred_in: ptr to last inbound message in queue
102 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
103 * @proto_msg_queue: ptr to (single) outbound control message
104 * @retransm_queue_size: number of messages to retransmit
105 * @retransm_queue_head: sequence number of first message to retransmit
106 * @next_out: ptr to first unsent outbound message in queue
107 * @waiting_ports: linked list of ports waiting for link congestion to abate
108 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
109 * @defragm_buf: list of partially reassembled inbound message fragments
110 * @stats: collects statistics regarding link activity
111 * @print_buf: print buffer used to log link activity
112 */
113
114struct link {
115 u32 addr;
116 char name[TIPC_MAX_LINK_NAME];
117 struct tipc_media_addr media_addr;
118 struct timer_list timer;
119 struct node *owner;
120 struct list_head link_list;
121
122 /* Management and link supervision data */
123 int started;
124 u32 checkpoint;
125 u32 peer_session;
126 u32 peer_bearer_id;
127 struct bearer *b_ptr;
128 u32 tolerance;
129 u32 continuity_interval;
130 u32 abort_limit;
131 int state;
132 int blocked;
133 u32 fsm_msg_cnt;
134 struct {
135 unchar hdr[INT_H_SIZE];
136 unchar body[TIPC_MAX_IF_NAME];
137 } proto_msg;
138 struct tipc_msg *pmsg;
139 u32 priority;
140 u32 queue_limit[15]; /* queue_limit[0]==window limit */
141
142 /* Changeover */
143 u32 exp_msg_count;
144 u32 reset_checkpoint;
145
146 /* Max packet negotiation */
147 u32 max_pkt;
148 u32 max_pkt_target;
149 u32 max_pkt_probes;
150
151 /* Sending */
152 u32 out_queue_size;
153 struct sk_buff *first_out;
154 struct sk_buff *last_out;
155 u32 next_out_no;
156 u32 last_retransmitted;
157 u32 stale_count;
158
159 /* Reception */
160 u32 next_in_no;
161 u32 deferred_inqueue_sz;
162 struct sk_buff *oldest_deferred_in;
163 struct sk_buff *newest_deferred_in;
164 u32 unacked_window;
165
166 /* Congestion handling */
167 struct sk_buff *proto_msg_queue;
168 u32 retransm_queue_size;
169 u32 retransm_queue_head;
170 struct sk_buff *next_out;
171 struct list_head waiting_ports;
172
173 /* Fragmentation/defragmentation */
174 u32 long_msg_seq_no;
175 struct sk_buff *defragm_buf;
176
177 /* Statistics */
178 struct {
179 u32 sent_info; /* used in counting # sent packets */
180 u32 recv_info; /* used in counting # recv'd packets */
181 u32 sent_states;
182 u32 recv_states;
183 u32 sent_probes;
184 u32 recv_probes;
185 u32 sent_nacks;
186 u32 recv_nacks;
187 u32 sent_acks;
188 u32 sent_bundled;
189 u32 sent_bundles;
190 u32 recv_bundled;
191 u32 recv_bundles;
192 u32 retransmitted;
193 u32 sent_fragmented;
194 u32 sent_fragments;
195 u32 recv_fragmented;
196 u32 recv_fragments;
197 u32 link_congs; /* # port sends blocked by congestion */
198 u32 bearer_congs;
199 u32 deferred_recv;
200 u32 duplicates;
201
202 /* for statistical profiling of send queue size */
203
204 u32 max_queue_sz;
205 u32 accu_queue_sz;
206 u32 queue_sz_counts;
207
208 /* for statistical profiling of message lengths */
209
210 u32 msg_length_counts;
211 u32 msg_lengths_total;
212 u32 msg_length_profile[7];
213#if 0
214 u32 sent_tunneled;
215 u32 recv_tunneled;
216#endif
217 } stats;
218
219 struct print_buf print_buf;
220};
221
222struct port;
223
224struct link *tipc_link_create(struct bearer *b_ptr, const u32 peer,
225 const struct tipc_media_addr *media_addr);
226void tipc_link_delete(struct link *l_ptr);
227void tipc_link_changeover(struct link *l_ptr);
228void tipc_link_send_duplicate(struct link *l_ptr, struct link *dest);
229void tipc_link_reset_fragments(struct link *l_ptr);
230int tipc_link_is_up(struct link *l_ptr);
231int tipc_link_is_active(struct link *l_ptr);
232void tipc_link_start(struct link *l_ptr);
233u32 tipc_link_push_packet(struct link *l_ptr);
234void tipc_link_stop(struct link *l_ptr);
235struct sk_buff *tipc_link_cmd_config(const void *req_tlv_area, int req_tlv_space, u16 cmd);
236struct sk_buff *tipc_link_cmd_show_stats(const void *req_tlv_area, int req_tlv_space);
237struct sk_buff *tipc_link_cmd_reset_stats(const void *req_tlv_area, int req_tlv_space);
238void tipc_link_reset(struct link *l_ptr);
239int tipc_link_send(struct sk_buff *buf, u32 dest, u32 selector);
240int tipc_link_send_buf(struct link *l_ptr, struct sk_buff *buf);
241u32 tipc_link_get_max_pkt(u32 dest,u32 selector);
242int tipc_link_send_sections_fast(struct port* sender,
243 struct iovec const *msg_sect,
244 const u32 num_sect,
245 u32 destnode);
246int tipc_link_send_long_buf(struct link *l_ptr, struct sk_buff *buf);
247void tipc_link_tunnel(struct link *l_ptr, struct tipc_msg *tnl_hdr,
248 struct tipc_msg *msg, u32 selector);
249void tipc_link_recv_bundle(struct sk_buff *buf);
250int tipc_link_recv_fragment(struct sk_buff **pending,
251 struct sk_buff **fb,
252 struct tipc_msg **msg);
253void tipc_link_send_proto_msg(struct link *l_ptr, u32 msg_typ, int prob, u32 gap,
254 u32 tolerance, u32 priority, u32 acked_mtu);
255void tipc_link_push_queue(struct link *l_ptr);
256u32 tipc_link_defer_pkt(struct sk_buff **head, struct sk_buff **tail,
257 struct sk_buff *buf);
258void tipc_link_wakeup_ports(struct link *l_ptr, int all);
259void tipc_link_set_queue_limits(struct link *l_ptr, u32 window);
260void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *start, u32 retransmits);
261
262/*
263 * Link sequence number manipulation routines (uses modulo 2**16 arithmetic)
264 */
265
266static inline u32 mod(u32 x)
267{
268 return x & 0xffffu;
269}
270
271static inline int between(u32 lower, u32 upper, u32 n)
272{
273 if ((lower < n) && (n < upper))
274 return 1;
275 if ((upper < lower) && ((n > lower) || (n < upper)))
276 return 1;
277 return 0;
278}
279
280static inline int less_eq(u32 left, u32 right)
281{
282 return (mod(right - left) < 32768u);
283}
284
285static inline int less(u32 left, u32 right)
286{
287 return (less_eq(left, right) && (mod(right) != mod(left)));
288}
289
290static inline u32 lesser(u32 left, u32 right)
291{
292 return less_eq(left, right) ? left : right;
293}
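/*
 * Wrap-around examples: less(65534, 2) is true since mod(2 - 65534) = 4,
 * whereas less(2, 65534) is false since mod(65534 - 2) = 65532 >= 32768.
 * Similarly between(65534, 3, 0) returns 1, i.e. seqno 0 falls inside the
 * window (65534, 3) even though the numbering wraps past 0xffff.
 */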
294
295#endif
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
new file mode 100644
index 000000000000..3bd345a344e5
--- /dev/null
+++ b/net/tipc/msg.c
@@ -0,0 +1,323 @@
1/*
2 * net/tipc/msg.c: TIPC message header routines
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "addr.h"
39#include "dbg.h"
40#include "msg.h"
41#include "bearer.h"
42
43
44void tipc_msg_print(struct print_buf *buf, struct tipc_msg *msg, const char *str)
45{
46 u32 usr = msg_user(msg);
47 tipc_printf(buf, str);
48
49 switch (usr) {
50 case MSG_BUNDLER:
51 tipc_printf(buf, "BNDL::");
52 tipc_printf(buf, "MSGS(%u):", msg_msgcnt(msg));
53 break;
54 case BCAST_PROTOCOL:
55 tipc_printf(buf, "BCASTP::");
56 break;
57 case MSG_FRAGMENTER:
58 tipc_printf(buf, "FRAGM::");
59 switch (msg_type(msg)) {
60 case FIRST_FRAGMENT:
61 tipc_printf(buf, "FIRST:");
62 break;
63 case FRAGMENT:
64 tipc_printf(buf, "BODY:");
65 break;
66 case LAST_FRAGMENT:
67 tipc_printf(buf, "LAST:");
68 break;
69 default:
70 tipc_printf(buf, "UNKNOWN:%x",msg_type(msg));
71
72 }
73 tipc_printf(buf, "NO(%u/%u):",msg_long_msgno(msg),
74 msg_fragm_no(msg));
75 break;
76 case DATA_LOW:
77 case DATA_MEDIUM:
78 case DATA_HIGH:
79 case DATA_CRITICAL:
80 tipc_printf(buf, "DAT%u:", msg_user(msg));
81 if (msg_short(msg)) {
82 tipc_printf(buf, "CON:");
83 break;
84 }
85 switch (msg_type(msg)) {
86 case TIPC_CONN_MSG:
87 tipc_printf(buf, "CON:");
88 break;
89 case TIPC_MCAST_MSG:
90 tipc_printf(buf, "MCST:");
91 break;
92 case TIPC_NAMED_MSG:
93 tipc_printf(buf, "NAM:");
94 break;
95 case TIPC_DIRECT_MSG:
96 tipc_printf(buf, "DIR:");
97 break;
98 default:
99 tipc_printf(buf, "UNKNOWN TYPE %u",msg_type(msg));
100 }
101 if (msg_routed(msg) && !msg_non_seq(msg))
102 tipc_printf(buf, "ROUT:");
103 if (msg_reroute_cnt(msg))
104 tipc_printf(buf, "REROUTED(%u):",
105 msg_reroute_cnt(msg));
106 break;
107 case NAME_DISTRIBUTOR:
108 tipc_printf(buf, "NMD::");
109 switch (msg_type(msg)) {
110 case PUBLICATION:
111 tipc_printf(buf, "PUBL(%u):", (msg_size(msg) - msg_hdr_sz(msg)) / 20); /* Items */
112 break;
113 case WITHDRAWAL:
114 tipc_printf(buf, "WDRW:");
115 break;
116 default:
117 tipc_printf(buf, "UNKNOWN:%x",msg_type(msg));
118 }
119 if (msg_routed(msg))
120 tipc_printf(buf, "ROUT:");
121 if (msg_reroute_cnt(msg))
122 tipc_printf(buf, "REROUTED(%u):",
123 msg_reroute_cnt(msg));
124 break;
125 case CONN_MANAGER:
126 tipc_printf(buf, "CONN_MNG:");
127 switch (msg_type(msg)) {
128 case CONN_PROBE:
129 tipc_printf(buf, "PROBE:");
130 break;
131 case CONN_PROBE_REPLY:
132 tipc_printf(buf, "PROBE_REPLY:");
133 break;
134 case CONN_ACK:
135 tipc_printf(buf, "CONN_ACK:");
136 tipc_printf(buf, "ACK(%u):",msg_msgcnt(msg));
137 break;
138 default:
139 tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
140 }
141 if (msg_routed(msg))
142 tipc_printf(buf, "ROUT:");
143 if (msg_reroute_cnt(msg))
144 tipc_printf(buf, "REROUTED(%u):",msg_reroute_cnt(msg));
145 break;
146 case LINK_PROTOCOL:
147 tipc_printf(buf, "PROT:TIM(%u):",msg_timestamp(msg));
148 switch (msg_type(msg)) {
149 case STATE_MSG:
150 tipc_printf(buf, "STATE:");
151 tipc_printf(buf, "%s:",msg_probe(msg) ? "PRB" :"");
152 tipc_printf(buf, "NXS(%u):",msg_next_sent(msg));
153 tipc_printf(buf, "GAP(%u):",msg_seq_gap(msg));
154 tipc_printf(buf, "LSTBC(%u):",msg_last_bcast(msg));
155 break;
156 case RESET_MSG:
157 tipc_printf(buf, "RESET:");
158 if (msg_size(msg) != msg_hdr_sz(msg))
159 tipc_printf(buf, "BEAR:%s:",msg_data(msg));
160 break;
161 case ACTIVATE_MSG:
162 tipc_printf(buf, "ACTIVATE:");
163 break;
164 default:
165 tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
166 }
167 tipc_printf(buf, "PLANE(%c):",msg_net_plane(msg));
168 tipc_printf(buf, "SESS(%u):",msg_session(msg));
169 break;
170 case CHANGEOVER_PROTOCOL:
171 tipc_printf(buf, "TUNL:");
172 switch (msg_type(msg)) {
173 case DUPLICATE_MSG:
174 tipc_printf(buf, "DUPL:");
175 break;
176 case ORIGINAL_MSG:
177 tipc_printf(buf, "ORIG:");
178 tipc_printf(buf, "EXP(%u)",msg_msgcnt(msg));
179 break;
180 default:
181 tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
182 }
183 break;
184 case ROUTE_DISTRIBUTOR:
185 tipc_printf(buf, "ROUTING_MNG:");
186 switch (msg_type(msg)) {
187 case EXT_ROUTING_TABLE:
188 tipc_printf(buf, "EXT_TBL:");
189 tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
190 break;
191 case LOCAL_ROUTING_TABLE:
192 tipc_printf(buf, "LOCAL_TBL:");
193 tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
194 break;
195 case SLAVE_ROUTING_TABLE:
196 tipc_printf(buf, "DP_TBL:");
197 tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
198 break;
199 case ROUTE_ADDITION:
200 tipc_printf(buf, "ADD:");
201 tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
202 break;
203 case ROUTE_REMOVAL:
204 tipc_printf(buf, "REMOVE:");
205 tipc_printf(buf, "TO:%x:",msg_remote_node(msg));
206 break;
207 default:
208 tipc_printf(buf, "UNKNOWN TYPE:%x",msg_type(msg));
209 }
210 break;
211 case LINK_CONFIG:
212 tipc_printf(buf, "CFG:");
213 switch (msg_type(msg)) {
214 case DSC_REQ_MSG:
215 tipc_printf(buf, "DSC_REQ:");
216 break;
217 case DSC_RESP_MSG:
218 tipc_printf(buf, "DSC_RESP:");
219 break;
220 default:
221 tipc_printf(buf, "UNKNOWN TYPE:%x:",msg_type(msg));
222 break;
223 }
224 break;
225 default:
226 tipc_printf(buf, "UNKNOWN USER:");
227 }
228
229 switch (usr) {
230 case CONN_MANAGER:
231 case NAME_DISTRIBUTOR:
232 case DATA_LOW:
233 case DATA_MEDIUM:
234 case DATA_HIGH:
235 case DATA_CRITICAL:
236 if (msg_short(msg))
237 break; /* No error */
238 switch (msg_errcode(msg)) {
239 case TIPC_OK:
240 break;
241 case TIPC_ERR_NO_NAME:
242 tipc_printf(buf, "NO_NAME:");
243 break;
244 case TIPC_ERR_NO_PORT:
245 tipc_printf(buf, "NO_PORT:");
246 break;
247 case TIPC_ERR_NO_NODE:
248 tipc_printf(buf, "NO_PROC:");
249 break;
250 case TIPC_ERR_OVERLOAD:
251 tipc_printf(buf, "OVERLOAD:");
252 break;
253 case TIPC_CONN_SHUTDOWN:
254 tipc_printf(buf, "SHUTDOWN:");
255 break;
256 default:
257 tipc_printf(buf, "UNKNOWN ERROR(%x):",
258 msg_errcode(msg));
259 }
260 default:{}
261 }
262
263 tipc_printf(buf, "HZ(%u):", msg_hdr_sz(msg));
264 tipc_printf(buf, "SZ(%u):", msg_size(msg));
265 tipc_printf(buf, "SQNO(%u):", msg_seqno(msg));
266
267 if (msg_non_seq(msg))
268 tipc_printf(buf, "NOSEQ:");
269 else {
270 tipc_printf(buf, "ACK(%u):", msg_ack(msg));
271 }
272 tipc_printf(buf, "BACK(%u):", msg_bcast_ack(msg));
273 tipc_printf(buf, "PRND(%x)", msg_prevnode(msg));
274
275 if (msg_isdata(msg)) {
276 if (msg_named(msg)) {
277 tipc_printf(buf, "NTYP(%u):", msg_nametype(msg));
278 tipc_printf(buf, "NINST(%u)", msg_nameinst(msg));
279 }
280 }
281
282 if ((usr != LINK_PROTOCOL) && (usr != LINK_CONFIG) &&
283 (usr != MSG_BUNDLER)) {
284 if (!msg_short(msg)) {
285 tipc_printf(buf, ":ORIG(%x:%u):",
286 msg_orignode(msg), msg_origport(msg));
287 tipc_printf(buf, ":DEST(%x:%u):",
288 msg_destnode(msg), msg_destport(msg));
289 } else {
290 tipc_printf(buf, ":OPRT(%u):", msg_origport(msg));
291 tipc_printf(buf, ":DPRT(%u):", msg_destport(msg));
292 }
293 if (msg_routed(msg) && !msg_non_seq(msg))
294 tipc_printf(buf, ":TSEQN(%u)", msg_transp_seqno(msg));
295 }
296 if (msg_user(msg) == NAME_DISTRIBUTOR) {
297 tipc_printf(buf, ":ONOD(%x):", msg_orignode(msg));
298 tipc_printf(buf, ":DNOD(%x):", msg_destnode(msg));
299 if (msg_routed(msg)) {
300 tipc_printf(buf, ":CSEQN(%u)", msg_transp_seqno(msg));
301 }
302 }
303
304 if (msg_user(msg) == LINK_CONFIG) {
305 u32* raw = (u32*)msg;
306 struct tipc_media_addr* orig = (struct tipc_media_addr*)&raw[5];
307 tipc_printf(buf, ":REQL(%u):", msg_req_links(msg));
308 tipc_printf(buf, ":DDOM(%x):", msg_dest_domain(msg));
309 tipc_printf(buf, ":NETID(%u):", msg_bc_netid(msg));
310 tipc_media_addr_printf(buf, orig);
311 }
312 if (msg_user(msg) == BCAST_PROTOCOL) {
313 tipc_printf(buf, "BCNACK:AFTER(%u):", msg_bcgap_after(msg));
314 tipc_printf(buf, "TO(%u):", msg_bcgap_to(msg));
315 }
316 tipc_printf(buf, "\n");
317 if ((usr == CHANGEOVER_PROTOCOL) && (msg_msgcnt(msg))) {
318 tipc_msg_print(buf,msg_get_wrapped(msg)," /");
319 }
320 if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT)) {
321 tipc_msg_print(buf,msg_get_wrapped(msg)," /");
322 }
323}
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
new file mode 100644
index 000000000000..6699aaf7bd4c
--- /dev/null
+++ b/net/tipc/msg.h
@@ -0,0 +1,818 @@
1/*
2 * net/tipc/msg.h: Include file for TIPC message header routines
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_MSG_H
38#define _TIPC_MSG_H
39
40#include "core.h"
41
42#define TIPC_VERSION 2
43#define DATA_LOW TIPC_LOW_IMPORTANCE
44#define DATA_MEDIUM TIPC_MEDIUM_IMPORTANCE
45#define DATA_HIGH TIPC_HIGH_IMPORTANCE
46#define DATA_CRITICAL TIPC_CRITICAL_IMPORTANCE
47#define SHORT_H_SIZE 24 /* Connected, in cluster */
48#define DIR_MSG_H_SIZE 32 /* Directly addressed messages */
49#define CONN_MSG_H_SIZE 36 /* Routed connected msgs */
50#define LONG_H_SIZE 40 /* Named messages */
51#define MCAST_H_SIZE 44 /* Multicast messages */
52#define MAX_H_SIZE 60 /* Inclusive of all options */
53#define MAX_MSG_SIZE (MAX_H_SIZE + TIPC_MAX_USER_MSG_SIZE)
54#define LINK_CONFIG 13
55
56
57/*
58 TIPC user data message header format, version 2
59
60 - Fundamental definitions available to privileged TIPC users
61 are located in tipc_msg.h.
62 - Remaining definitions available to TIPC internal users appear below.
63*/
64
65
66static inline void msg_set_word(struct tipc_msg *m, u32 w, u32 val)
67{
68 m->hdr[w] = htonl(val);
69}
70
71static inline void msg_set_bits(struct tipc_msg *m, u32 w,
72 u32 pos, u32 mask, u32 val)
73{
74 u32 word = msg_word(m,w) & ~(mask << pos);
75 msg_set_word(m, w, (word |= (val << pos)));
76}
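/*
 * Illustrative user-space sketch (not part of the TIPC sources): it mirrors
 * the msg_word()/msg_bits() accessors assumed above, using a simplified
 * 11-word header kept in network byte order, to show how a field such as
 * "msg usr" (word 0, bits 25-28) is packed into, and unpacked from, a
 * 32-bit header word.
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

struct demo_hdr { uint32_t hdr[11]; };

static uint32_t demo_bits(struct demo_hdr *m, int w, int pos, uint32_t mask)
{
	return (ntohl(m->hdr[w]) >> pos) & mask;
}

static void demo_set_bits(struct demo_hdr *m, int w, int pos,
			  uint32_t mask, uint32_t val)
{
	uint32_t word = ntohl(m->hdr[w]) & ~(mask << pos);

	m->hdr[w] = htonl(word | (val << pos));
}

int main(void)
{
	struct demo_hdr m = { { 0 } };

	demo_set_bits(&m, 0, 25, 0xf, 11);	/* e.g. user = NAME_DISTRIBUTOR */
	printf("user = %u\n", demo_bits(&m, 0, 25, 0xf));	/* prints 11 */
	return 0;
}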
77
78/*
79 * Word 0
80 */
81
82static inline u32 msg_version(struct tipc_msg *m)
83{
84 return msg_bits(m, 0, 29, 7);
85}
86
87static inline void msg_set_version(struct tipc_msg *m)
88{
89 msg_set_bits(m, 0, 29, 0xf, TIPC_VERSION);
90}
91
92static inline u32 msg_user(struct tipc_msg *m)
93{
94 return msg_bits(m, 0, 25, 0xf);
95}
96
97static inline u32 msg_isdata(struct tipc_msg *m)
98{
99 return (msg_user(m) <= DATA_CRITICAL);
100}
101
102static inline void msg_set_user(struct tipc_msg *m, u32 n)
103{
104 msg_set_bits(m, 0, 25, 0xf, n);
105}
106
107static inline void msg_set_importance(struct tipc_msg *m, u32 i)
108{
109 msg_set_user(m, i);
110}
111
112static inline void msg_set_hdr_sz(struct tipc_msg *m,u32 n)
113{
114 msg_set_bits(m, 0, 21, 0xf, n>>2);
115}
116
117static inline int msg_non_seq(struct tipc_msg *m)
118{
119 return msg_bits(m, 0, 20, 1);
120}
121
122static inline void msg_set_non_seq(struct tipc_msg *m)
123{
124 msg_set_bits(m, 0, 20, 1, 1);
125}
126
127static inline int msg_dest_droppable(struct tipc_msg *m)
128{
129 return msg_bits(m, 0, 19, 1);
130}
131
132static inline void msg_set_dest_droppable(struct tipc_msg *m, u32 d)
133{
134 msg_set_bits(m, 0, 19, 1, d);
135}
136
137static inline int msg_src_droppable(struct tipc_msg *m)
138{
139 return msg_bits(m, 0, 18, 1);
140}
141
142static inline void msg_set_src_droppable(struct tipc_msg *m, u32 d)
143{
144 msg_set_bits(m, 0, 18, 1, d);
145}
146
147static inline void msg_set_size(struct tipc_msg *m, u32 sz)
148{
149 m->hdr[0] = htonl((msg_word(m, 0) & ~0x1ffff) | sz);
150}
151
152
153/*
154 * Word 1
155 */
156
157static inline void msg_set_type(struct tipc_msg *m, u32 n)
158{
159 msg_set_bits(m, 1, 29, 0x7, n);
160}
161
162static inline void msg_set_errcode(struct tipc_msg *m, u32 err)
163{
164 msg_set_bits(m, 1, 25, 0xf, err);
165}
166
167static inline u32 msg_reroute_cnt(struct tipc_msg *m)
168{
169 return msg_bits(m, 1, 21, 0xf);
170}
171
172static inline void msg_incr_reroute_cnt(struct tipc_msg *m)
173{
174 msg_set_bits(m, 1, 21, 0xf, msg_reroute_cnt(m) + 1);
175}
176
177static inline void msg_reset_reroute_cnt(struct tipc_msg *m)
178{
179 msg_set_bits(m, 1, 21, 0xf, 0);
180}
181
182static inline u32 msg_lookup_scope(struct tipc_msg *m)
183{
184 return msg_bits(m, 1, 19, 0x3);
185}
186
187static inline void msg_set_lookup_scope(struct tipc_msg *m, u32 n)
188{
189 msg_set_bits(m, 1, 19, 0x3, n);
190}
191
192static inline void msg_set_options(struct tipc_msg *m, const char *opt, u32 sz)
193{
194 u32 hsz = msg_hdr_sz(m);
195 char *to = (char *)&m->hdr[hsz/4];
196
197 if ((hsz < DIR_MSG_H_SIZE) || ((hsz + sz) > MAX_H_SIZE))
198 return;
199 msg_set_bits(m, 1, 16, 0x7, (hsz - 28)/4);
200 msg_set_hdr_sz(m, hsz + sz);
201 memcpy(to, opt, sz);
202}
203
204static inline u32 msg_bcast_ack(struct tipc_msg *m)
205{
206 return msg_bits(m, 1, 0, 0xffff);
207}
208
209static inline void msg_set_bcast_ack(struct tipc_msg *m, u32 n)
210{
211 msg_set_bits(m, 1, 0, 0xffff, n);
212}
213
214
215/*
216 * Word 2
217 */
218
219static inline u32 msg_ack(struct tipc_msg *m)
220{
221 return msg_bits(m, 2, 16, 0xffff);
222}
223
224static inline void msg_set_ack(struct tipc_msg *m, u32 n)
225{
226 msg_set_bits(m, 2, 16, 0xffff, n);
227}
228
229static inline u32 msg_seqno(struct tipc_msg *m)
230{
231 return msg_bits(m, 2, 0, 0xffff);
232}
233
234static inline void msg_set_seqno(struct tipc_msg *m, u32 n)
235{
236 msg_set_bits(m, 2, 0, 0xffff, n);
237}
238
239
240/*
241 * Words 3-10
242 */
243
244
245static inline void msg_set_prevnode(struct tipc_msg *m, u32 a)
246{
247 msg_set_word(m, 3, a);
248}
249
250static inline void msg_set_origport(struct tipc_msg *m, u32 p)
251{
252 msg_set_word(m, 4, p);
253}
254
255static inline void msg_set_destport(struct tipc_msg *m, u32 p)
256{
257 msg_set_word(m, 5, p);
258}
259
260static inline void msg_set_mc_netid(struct tipc_msg *m, u32 p)
261{
262 msg_set_word(m, 5, p);
263}
264
265static inline void msg_set_orignode(struct tipc_msg *m, u32 a)
266{
267 msg_set_word(m, 6, a);
268}
269
270static inline void msg_set_destnode(struct tipc_msg *m, u32 a)
271{
272 msg_set_word(m, 7, a);
273}
274
275static inline int msg_is_dest(struct tipc_msg *m, u32 d)
276{
277 return(msg_short(m) || (msg_destnode(m) == d));
278}
279
280static inline u32 msg_routed(struct tipc_msg *m)
281{
282 if (likely(msg_short(m)))
283 return 0;
284 return(msg_destnode(m) ^ msg_orignode(m)) >> 11;
285}
286
287static inline void msg_set_nametype(struct tipc_msg *m, u32 n)
288{
289 msg_set_word(m, 8, n);
290}
291
292static inline u32 msg_transp_seqno(struct tipc_msg *m)
293{
294 return msg_word(m, 8);
295}
296
297static inline void msg_set_timestamp(struct tipc_msg *m, u32 n)
298{
299 msg_set_word(m, 8, n);
300}
301
302static inline u32 msg_timestamp(struct tipc_msg *m)
303{
304 return msg_word(m, 8);
305}
306
307static inline void msg_set_transp_seqno(struct tipc_msg *m, u32 n)
308{
309 msg_set_word(m, 8, n);
310}
311
312static inline void msg_set_namelower(struct tipc_msg *m, u32 n)
313{
314 msg_set_word(m, 9, n);
315}
316
317static inline void msg_set_nameinst(struct tipc_msg *m, u32 n)
318{
319 msg_set_namelower(m, n);
320}
321
322static inline void msg_set_nameupper(struct tipc_msg *m, u32 n)
323{
324 msg_set_word(m, 10, n);
325}
326
327static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
328{
329 return (struct tipc_msg *)msg_data(m);
330}
331
332static inline void msg_expand(struct tipc_msg *m, u32 destnode)
333{
334 if (!msg_short(m))
335 return;
336 msg_set_hdr_sz(m, LONG_H_SIZE);
337 msg_set_orignode(m, msg_prevnode(m));
338 msg_set_destnode(m, destnode);
339 memset(&m->hdr[8], 0, 12);
340}
341
342
343
344/*
345 TIPC internal message header format, version 2
346
347 1 0 9 8 7 6 5 4|3 2 1 0 9 8 7 6|5 4 3 2 1 0 9 8|7 6 5 4 3 2 1 0
348 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
349 w0:|vers |msg usr|hdr sz |n|resrv| packet size |
350 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
351 w1:|m typ|rsv=0| sequence gap | broadcast ack no |
352 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
353 w2:| link level ack no/bc_gap_from | seq no / bcast_gap_to |
354 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
355 w3:| previous node |
356 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
357 w4:| next sent broadcast/fragm no | next sent pkt/ fragm msg no |
358 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
359 w5:| session no |rsv=0|r|berid|link prio|netpl|p|
360 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
361 w6:| originating node |
362 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
363 w7:| destination node |
364 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
365 w8:| transport sequence number |
366 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
367 w9:| msg count / bcast tag | link tolerance |
368 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
369 \ \
370 / User Specific Data /
371 \ \
372 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
373
374 NB: CONN_MANAGER uses the data message format. LINK_CONFIG has its own format.
375*/
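/*
 * Illustrative worked example (values chosen for the example only): under the
 * layout above, a word 0 value of 0x4f400028 decodes as vers = 2,
 * msg usr = 7 (LINK_PROTOCOL), hdr sz field = 10 (a 40-byte header, since the
 * field is stored in units of four bytes), n = 0, and packet size = 40.
 */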
376
377/*
378 * Internal users
379 */
380
381#define BCAST_PROTOCOL 5
382#define MSG_BUNDLER 6
383#define LINK_PROTOCOL 7
384#define CONN_MANAGER 8
385#define ROUTE_DISTRIBUTOR 9
386#define CHANGEOVER_PROTOCOL 10
387#define NAME_DISTRIBUTOR 11
388#define MSG_FRAGMENTER 12
389#define LINK_CONFIG 13
390#define INT_H_SIZE 40
391#define DSC_H_SIZE 40
392
393/*
394 * Connection management protocol messages
395 */
396
397#define CONN_PROBE 0
398#define CONN_PROBE_REPLY 1
399#define CONN_ACK 2
400
401/*
402 * Name distributor messages
403 */
404
405#define PUBLICATION 0
406#define WITHDRAWAL 1
407
408
409/*
410 * Word 1
411 */
412
413static inline u32 msg_seq_gap(struct tipc_msg *m)
414{
415 return msg_bits(m, 1, 16, 0xff);
416}
417
418static inline void msg_set_seq_gap(struct tipc_msg *m, u32 n)
419{
420 msg_set_bits(m, 1, 16, 0xff, n);
421}
422
423static inline u32 msg_req_links(struct tipc_msg *m)
424{
425 return msg_bits(m, 1, 16, 0xfff);
426}
427
428static inline void msg_set_req_links(struct tipc_msg *m, u32 n)
429{
430 msg_set_bits(m, 1, 16, 0xfff, n);
431}
432
433
434/*
435 * Word 2
436 */
437
438static inline u32 msg_dest_domain(struct tipc_msg *m)
439{
440 return msg_word(m, 2);
441}
442
443static inline void msg_set_dest_domain(struct tipc_msg *m, u32 n)
444{
445 msg_set_word(m, 2, n);
446}
447
448static inline u32 msg_bcgap_after(struct tipc_msg *m)
449{
450 return msg_bits(m, 2, 16, 0xffff);
451}
452
453static inline void msg_set_bcgap_after(struct tipc_msg *m, u32 n)
454{
455 msg_set_bits(m, 2, 16, 0xffff, n);
456}
457
458static inline u32 msg_bcgap_to(struct tipc_msg *m)
459{
460 return msg_bits(m, 2, 0, 0xffff);
461}
462
463static inline void msg_set_bcgap_to(struct tipc_msg *m, u32 n)
464{
465 msg_set_bits(m, 2, 0, 0xffff, n);
466}
467
468
469/*
470 * Word 4
471 */
472
473static inline u32 msg_last_bcast(struct tipc_msg *m)
474{
475 return msg_bits(m, 4, 16, 0xffff);
476}
477
478static inline void msg_set_last_bcast(struct tipc_msg *m, u32 n)
479{
480 msg_set_bits(m, 4, 16, 0xffff, n);
481}
482
483
484static inline u32 msg_fragm_no(struct tipc_msg *m)
485{
486 return msg_bits(m, 4, 16, 0xffff);
487}
488
489static inline void msg_set_fragm_no(struct tipc_msg *m, u32 n)
490{
491 msg_set_bits(m, 4, 16, 0xffff, n);
492}
493
494
495static inline u32 msg_next_sent(struct tipc_msg *m)
496{
497 return msg_bits(m, 4, 0, 0xffff);
498}
499
500static inline void msg_set_next_sent(struct tipc_msg *m, u32 n)
501{
502 msg_set_bits(m, 4, 0, 0xffff, n);
503}
504
505
506static inline u32 msg_long_msgno(struct tipc_msg *m)
507{
508 return msg_bits(m, 4, 0, 0xffff);
509}
510
511static inline void msg_set_long_msgno(struct tipc_msg *m, u32 n)
512{
513 msg_set_bits(m, 4, 0, 0xffff, n);
514}
515
516static inline u32 msg_bc_netid(struct tipc_msg *m)
517{
518 return msg_word(m, 4);
519}
520
521static inline void msg_set_bc_netid(struct tipc_msg *m, u32 id)
522{
523 msg_set_word(m, 4, id);
524}
525
526static inline u32 msg_link_selector(struct tipc_msg *m)
527{
528 return msg_bits(m, 4, 0, 1);
529}
530
531static inline void msg_set_link_selector(struct tipc_msg *m, u32 n)
532{
533 msg_set_bits(m, 4, 0, 1, (n & 1));
534}
535
536/*
537 * Word 5
538 */
539
540static inline u32 msg_session(struct tipc_msg *m)
541{
542 return msg_bits(m, 5, 16, 0xffff);
543}
544
545static inline void msg_set_session(struct tipc_msg *m, u32 n)
546{
547 msg_set_bits(m, 5, 16, 0xffff, n);
548}
549
550static inline u32 msg_probe(struct tipc_msg *m)
551{
552 return msg_bits(m, 5, 0, 1);
553}
554
555static inline void msg_set_probe(struct tipc_msg *m, u32 val)
556{
557 msg_set_bits(m, 5, 0, 1, (val & 1));
558}
559
560static inline char msg_net_plane(struct tipc_msg *m)
561{
562 return msg_bits(m, 5, 1, 7) + 'A';
563}
564
565static inline void msg_set_net_plane(struct tipc_msg *m, char n)
566{
567 msg_set_bits(m, 5, 1, 7, (n - 'A'));
568}
569
570static inline u32 msg_linkprio(struct tipc_msg *m)
571{
572 return msg_bits(m, 5, 4, 0x1f);
573}
574
575static inline void msg_set_linkprio(struct tipc_msg *m, u32 n)
576{
577 msg_set_bits(m, 5, 4, 0x1f, n);
578}
579
580static inline u32 msg_bearer_id(struct tipc_msg *m)
581{
582 return msg_bits(m, 5, 9, 0x7);
583}
584
585static inline void msg_set_bearer_id(struct tipc_msg *m, u32 n)
586{
587 msg_set_bits(m, 5, 9, 0x7, n);
588}
589
590static inline u32 msg_redundant_link(struct tipc_msg *m)
591{
592 return msg_bits(m, 5, 12, 0x1);
593}
594
595static inline void msg_set_redundant_link(struct tipc_msg *m)
596{
597 msg_set_bits(m, 5, 12, 0x1, 1);
598}
599
600static inline void msg_clear_redundant_link(struct tipc_msg *m)
601{
602 msg_set_bits(m, 5, 12, 0x1, 0);
603}
604
605
606/*
607 * Word 9
608 */
609
610static inline u32 msg_msgcnt(struct tipc_msg *m)
611{
612 return msg_bits(m, 9, 16, 0xffff);
613}
614
615static inline void msg_set_msgcnt(struct tipc_msg *m, u32 n)
616{
617 msg_set_bits(m, 9, 16, 0xffff, n);
618}
619
620static inline u32 msg_bcast_tag(struct tipc_msg *m)
621{
622 return msg_bits(m, 9, 16, 0xffff);
623}
624
625static inline void msg_set_bcast_tag(struct tipc_msg *m, u32 n)
626{
627 msg_set_bits(m, 9, 16, 0xffff, n);
628}
629
630static inline u32 msg_max_pkt(struct tipc_msg *m)
631{
632 return (msg_bits(m, 9, 16, 0xffff) * 4);
633}
634
635static inline void msg_set_max_pkt(struct tipc_msg *m, u32 n)
636{
637 msg_set_bits(m, 9, 16, 0xffff, (n / 4));
638}
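/*
 * Illustrative note: the maximum packet value is carried in units of four
 * bytes, so msg_set_max_pkt(m, 1500) stores 375 in the header and
 * msg_max_pkt(m) then returns 1500; a value that is not a multiple of four
 * is rounded down (e.g. 1518 is stored as 379 and read back as 1516).
 */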
639
640static inline u32 msg_link_tolerance(struct tipc_msg *m)
641{
642 return msg_bits(m, 9, 0, 0xffff);
643}
644
645static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n)
646{
647 msg_set_bits(m, 9, 0, 0xffff, n);
648}
649
650/*
651 * Routing table message data
652 */
653
654
655static inline u32 msg_remote_node(struct tipc_msg *m)
656{
657 return msg_word(m, msg_hdr_sz(m)/4);
658}
659
660static inline void msg_set_remote_node(struct tipc_msg *m, u32 a)
661{
662 msg_set_word(m, msg_hdr_sz(m)/4, a);
663}
664
665static inline int msg_dataoctet(struct tipc_msg *m, u32 pos)
666{
667 return(msg_data(m)[pos + 4] != 0);
668}
669
670static inline void msg_set_dataoctet(struct tipc_msg *m, u32 pos)
671{
672 msg_data(m)[pos + 4] = 1;
673}
674
675/*
676 * Segmentation message types
677 */
678
679#define FIRST_FRAGMENT 0
680#define FRAGMENT 1
681#define LAST_FRAGMENT 2
682
683/*
684 * Link management protocol message types
685 */
686
687#define STATE_MSG 0
688#define RESET_MSG 1
689#define ACTIVATE_MSG 2
690
691/*
692 * Changeover tunnel message types
693 */
694#define DUPLICATE_MSG 0
695#define ORIGINAL_MSG 1
696
697/*
698 * Routing table message types
699 */
700#define EXT_ROUTING_TABLE 0
701#define LOCAL_ROUTING_TABLE 1
702#define SLAVE_ROUTING_TABLE 2
703#define ROUTE_ADDITION 3
704#define ROUTE_REMOVAL 4
705
706/*
707 * Config protocol message types
708 */
709
710#define DSC_REQ_MSG 0
711#define DSC_RESP_MSG 1
712
713static inline u32 msg_tot_importance(struct tipc_msg *m)
714{
715 if (likely(msg_isdata(m))) {
716 if (likely(msg_orignode(m) == tipc_own_addr))
717 return msg_importance(m);
718 return msg_importance(m) + 4;
719 }
720 if ((msg_user(m) == MSG_FRAGMENTER) &&
721 (msg_type(m) == FIRST_FRAGMENT))
722 return msg_importance(msg_get_wrapped(m));
723 return msg_importance(m);
724}
725
726
727static inline void msg_init(struct tipc_msg *m, u32 user, u32 type,
728 u32 err, u32 hsize, u32 destnode)
729{
730 memset(m, 0, hsize);
731 msg_set_version(m);
732 msg_set_user(m, user);
733 msg_set_hdr_sz(m, hsize);
734 msg_set_size(m, hsize);
735 msg_set_prevnode(m, tipc_own_addr);
736 msg_set_type(m, type);
737 msg_set_errcode(m, err);
738 if (!msg_short(m)) {
739 msg_set_orignode(m, tipc_own_addr);
740 msg_set_destnode(m, destnode);
741 }
742}
743
744/**
745 * msg_calc_data_size - determine total data size for message
746 */
747
748static inline int msg_calc_data_size(struct iovec const *msg_sect, u32 num_sect)
749{
750 int dsz = 0;
751 int i;
752
753 for (i = 0; i < num_sect; i++)
754 dsz += msg_sect[i].iov_len;
755 return dsz;
756}
757
758/**
759 * msg_build - create message using specified header and data
760 *
761 * Note: Caller must not hold any locks in case copy_from_user() is interrupted!
762 *
763 * Returns message data size or errno
764 */
765
766static inline int msg_build(struct tipc_msg *hdr,
767 struct iovec const *msg_sect, u32 num_sect,
768 int max_size, int usrmem, struct sk_buff** buf)
769{
770 int dsz, sz, hsz, pos, res, cnt;
771
772 dsz = msg_calc_data_size(msg_sect, num_sect);
773 if (unlikely(dsz > TIPC_MAX_USER_MSG_SIZE)) {
774 *buf = NULL;
775 return -EINVAL;
776 }
777
778 pos = hsz = msg_hdr_sz(hdr);
779 sz = hsz + dsz;
780 msg_set_size(hdr, sz);
781 if (unlikely(sz > max_size)) {
782 *buf = NULL;
783 return dsz;
784 }
785
786 *buf = buf_acquire(sz);
787 if (!(*buf))
788 return -ENOMEM;
789 memcpy((*buf)->data, (unchar *)hdr, hsz);
790 for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) {
791 if (likely(usrmem))
792 res = !copy_from_user((*buf)->data + pos,
793 msg_sect[cnt].iov_base,
794 msg_sect[cnt].iov_len);
795 else
796 memcpy((*buf)->data + pos, msg_sect[cnt].iov_base,
797 msg_sect[cnt].iov_len);
798 pos += msg_sect[cnt].iov_len;
799 }
800 if (likely(res))
801 return dsz;
802
803 buf_discard(*buf);
804 *buf = NULL;
805 return -EFAULT;
806}
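/*
 * Illustrative user-space sketch (not the kernel code): the same gather
 * pattern as msg_build() above, under simplifying assumptions -- all sections
 * already resident in ordinary memory and malloc() standing in for
 * buf_acquire(). Size the payload, allocate one contiguous buffer, copy the
 * header, then append each iovec section in order.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

void *demo_build_flat(const void *hdr, size_t hsz,
		      const struct iovec *sect, int num_sect, size_t *out_sz)
{
	size_t dsz = 0, pos = hsz;
	char *buf;
	int i;

	for (i = 0; i < num_sect; i++)
		dsz += sect[i].iov_len;	/* total data size, cf. msg_calc_data_size() */

	buf = malloc(hsz + dsz);
	if (!buf)
		return NULL;

	memcpy(buf, hdr, hsz);		/* header first */
	for (i = 0; i < num_sect; i++) {
		memcpy(buf + pos, sect[i].iov_base, sect[i].iov_len);
		pos += sect[i].iov_len;	/* then each section, back to back */
	}
	*out_sz = hsz + dsz;
	return buf;
}

int main(void)
{
	char hdr[24] = "HDR";
	struct iovec sect[2] = { { "hello ", 6 }, { "world", 5 } };
	size_t sz;
	char *m = demo_build_flat(hdr, sizeof(hdr), sect, 2, &sz);

	if (m) {
		printf("built %zu bytes\n", sz);	/* 24 + 11 = 35 */
		free(m);
	}
	return 0;
}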
807
808static inline void msg_set_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
809{
810 memcpy(&((int *)m)[5], a, sizeof(*a));
811}
812
813static inline void msg_get_media_addr(struct tipc_msg *m, struct tipc_media_addr *a)
814{
815 memcpy(a, &((int*)m)[5], sizeof(*a));
816}
817
818#endif
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
new file mode 100644
index 000000000000..830f90999041
--- /dev/null
+++ b/net/tipc/name_distr.c
@@ -0,0 +1,309 @@
1/*
2 * net/tipc/name_distr.c: TIPC name distribution code
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "cluster.h"
39#include "dbg.h"
40#include "link.h"
41#include "msg.h"
42#include "name_distr.h"
43
44#undef DBG_OUTPUT
45#define DBG_OUTPUT NULL
46
47#define ITEM_SIZE sizeof(struct distr_item)
48
49/**
50 * struct distr_item - publication info distributed to other nodes
51 * @type: name sequence type
52 * @lower: name sequence lower bound
53 * @upper: name sequence upper bound
54 * @ref: publishing port reference
55 * @key: publication key
56 *
57 * ===> All fields are stored in network byte order. <===
58 *
 59 * The first three fields identify the name (or name sequence) being
 60 * published. The reference field uniquely identifies the port that
 61 * published it. The key field distinguishes between multiple publications
 62 * of the same name sequence issued by the same port.
63 *
64 * Note: There is no field that identifies the publishing node because it is
65 * the same for all items contained within a publication message.
66 */
67
68struct distr_item {
69 u32 type;
70 u32 lower;
71 u32 upper;
72 u32 ref;
73 u32 key;
74};
75
76/**
77 * List of externally visible publications by this node --
 78 * that is, all publications with a scope wider than TIPC_NODE_SCOPE.
79 */
80
81static LIST_HEAD(publ_root);
82static u32 publ_cnt = 0;
83
84/**
85 * publ_to_item - add publication info to a publication message
86 */
87
88static void publ_to_item(struct distr_item *i, struct publication *p)
89{
90 i->type = htonl(p->type);
91 i->lower = htonl(p->lower);
92 i->upper = htonl(p->upper);
93 i->ref = htonl(p->ref);
94 i->key = htonl(p->key);
95 dbg("publ_to_item: %u, %u, %u\n", p->type, p->lower, p->upper);
96}
97
98/**
99 * named_prepare_buf - allocate & initialize a publication message
100 */
101
102static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
103{
104 struct sk_buff *buf = buf_acquire(LONG_H_SIZE + size);
105 struct tipc_msg *msg;
106
107 if (buf != NULL) {
108 msg = buf_msg(buf);
109 msg_init(msg, NAME_DISTRIBUTOR, type, TIPC_OK,
110 LONG_H_SIZE, dest);
111 msg_set_size(msg, LONG_H_SIZE + size);
112 }
113 return buf;
114}
115
116/**
117 * tipc_named_publish - tell other nodes about a new publication by this node
118 */
119
120void tipc_named_publish(struct publication *publ)
121{
122 struct sk_buff *buf;
123 struct distr_item *item;
124
125 list_add(&publ->local_list, &publ_root);
126 publ_cnt++;
127
128 buf = named_prepare_buf(PUBLICATION, ITEM_SIZE, 0);
129 if (!buf) {
130 warn("Memory squeeze; failed to distribute publication\n");
131 return;
132 }
133
134 item = (struct distr_item *)msg_data(buf_msg(buf));
135 publ_to_item(item, publ);
 136 dbg("tipc_named_publish: broadcasting publish msg\n");
137 tipc_cltr_broadcast(buf);
138}
139
140/**
141 * tipc_named_withdraw - tell other nodes about a withdrawn publication by this node
142 */
143
144void tipc_named_withdraw(struct publication *publ)
145{
146 struct sk_buff *buf;
147 struct distr_item *item;
148
149 list_del(&publ->local_list);
150 publ_cnt--;
151
152 buf = named_prepare_buf(WITHDRAWAL, ITEM_SIZE, 0);
153 if (!buf) {
154 warn("Memory squeeze; failed to distribute withdrawal\n");
155 return;
156 }
157
158 item = (struct distr_item *)msg_data(buf_msg(buf));
159 publ_to_item(item, publ);
160 dbg("tipc_named_withdraw: broadcasting withdraw msg\n");
161 tipc_cltr_broadcast(buf);
162}
163
164/**
165 * tipc_named_node_up - tell specified node about all publications by this node
166 */
167
168void tipc_named_node_up(unsigned long node)
169{
170 struct publication *publ;
171 struct distr_item *item = 0;
172 struct sk_buff *buf = 0;
173 u32 left = 0;
174 u32 rest;
175 u32 max_item_buf;
176
177 assert(in_own_cluster(node));
178 read_lock_bh(&tipc_nametbl_lock);
179 max_item_buf = TIPC_MAX_USER_MSG_SIZE / ITEM_SIZE;
180 max_item_buf *= ITEM_SIZE;
181 rest = publ_cnt * ITEM_SIZE;
182
183 list_for_each_entry(publ, &publ_root, local_list) {
184 if (!buf) {
185 left = (rest <= max_item_buf) ? rest : max_item_buf;
186 rest -= left;
187 buf = named_prepare_buf(PUBLICATION, left, node);
188 if (buf == NULL) {
 189 warn("Memory squeeze; could not send publication\n");
190 goto exit;
191 }
192 item = (struct distr_item *)msg_data(buf_msg(buf));
193 }
194 publ_to_item(item, publ);
195 item++;
196 left -= ITEM_SIZE;
197 if (!left) {
198 msg_set_link_selector(buf_msg(buf), node);
199 dbg("tipc_named_node_up: sending publish msg to "
200 "<%u.%u.%u>\n", tipc_zone(node),
201 tipc_cluster(node), tipc_node(node));
202 tipc_link_send(buf, node, node);
203 buf = 0;
204 }
205 }
206exit:
207 read_unlock_bh(&tipc_nametbl_lock);
208}
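/*
 * Illustrative sketch of the bulk-distribution arithmetic above (numbers are
 * assumptions for the example only; the real limit is TIPC_MAX_USER_MSG_SIZE
 * and the item size is sizeof(struct distr_item)): each bulk message carries
 * as many whole items as fit, and the final message carries the remainder.
 */
#include <stdio.h>

int main(void)
{
	unsigned int item_size = 5 * 4;		/* five u32 fields per item */
	unsigned int max_msg = 66000;		/* assumed payload limit */
	unsigned int max_item_buf = (max_msg / item_size) * item_size;
	unsigned int rest = 5000 * item_size;	/* e.g. 5000 publications */

	while (rest) {
		unsigned int left = (rest <= max_item_buf) ? rest : max_item_buf;

		rest -= left;
		printf("send %u items in one message\n", left / item_size);
	}
	return 0;
}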
209
210/**
211 * node_is_down - remove publication associated with a failed node
212 *
213 * Invoked for each publication issued by a newly failed node.
214 * Removes publication structure from name table & deletes it.
215 * In rare cases the link may have come back up again when this
216 * function is called, and we have two items representing the same
217 * publication. Nudge this item's key to distinguish it from the other.
218 * (Note: Publication's node subscription is already unsubscribed.)
219 */
220
221static void node_is_down(struct publication *publ)
222{
223 struct publication *p;
224 write_lock_bh(&tipc_nametbl_lock);
225 dbg("node_is_down: withdrawing %u, %u, %u\n",
226 publ->type, publ->lower, publ->upper);
227 publ->key += 1222345;
228 p = tipc_nametbl_remove_publ(publ->type, publ->lower,
229 publ->node, publ->ref, publ->key);
230 assert(p == publ);
231 write_unlock_bh(&tipc_nametbl_lock);
232 if (publ)
233 kfree(publ);
234}
235
236/**
237 * tipc_named_recv - process name table update message sent by another node
238 */
239
240void tipc_named_recv(struct sk_buff *buf)
241{
242 struct publication *publ;
243 struct tipc_msg *msg = buf_msg(buf);
244 struct distr_item *item = (struct distr_item *)msg_data(msg);
245 u32 count = msg_data_sz(msg) / ITEM_SIZE;
246
247 write_lock_bh(&tipc_nametbl_lock);
248 while (count--) {
249 if (msg_type(msg) == PUBLICATION) {
250 dbg("tipc_named_recv: got publication for %u, %u, %u\n",
251 ntohl(item->type), ntohl(item->lower),
252 ntohl(item->upper));
253 publ = tipc_nametbl_insert_publ(ntohl(item->type),
254 ntohl(item->lower),
255 ntohl(item->upper),
256 TIPC_CLUSTER_SCOPE,
257 msg_orignode(msg),
258 ntohl(item->ref),
259 ntohl(item->key));
260 if (publ) {
261 tipc_nodesub_subscribe(&publ->subscr,
262 msg_orignode(msg),
263 publ,
264 (net_ev_handler)node_is_down);
265 }
266 } else if (msg_type(msg) == WITHDRAWAL) {
 267 dbg("tipc_named_recv: got withdrawal for %u, %u, %u\n",
268 ntohl(item->type), ntohl(item->lower),
269 ntohl(item->upper));
270 publ = tipc_nametbl_remove_publ(ntohl(item->type),
271 ntohl(item->lower),
272 msg_orignode(msg),
273 ntohl(item->ref),
274 ntohl(item->key));
275
276 if (publ) {
277 tipc_nodesub_unsubscribe(&publ->subscr);
278 kfree(publ);
279 }
280 } else {
281 warn("tipc_named_recv: unknown msg\n");
282 }
283 item++;
284 }
285 write_unlock_bh(&tipc_nametbl_lock);
286 buf_discard(buf);
287}
288
289/**
290 * tipc_named_reinit - re-initialize local publication list
291 *
292 * This routine is called whenever TIPC networking is (re)enabled.
293 * All existing publications by this node that have "cluster" or "zone" scope
294 * are updated to reflect the node's current network address.
295 * (If the node's address is unchanged, the update loop terminates immediately.)
296 */
297
298void tipc_named_reinit(void)
299{
300 struct publication *publ;
301
302 write_lock_bh(&tipc_nametbl_lock);
303 list_for_each_entry(publ, &publ_root, local_list) {
304 if (publ->node == tipc_own_addr)
305 break;
306 publ->node = tipc_own_addr;
307 }
308 write_unlock_bh(&tipc_nametbl_lock);
309}
diff --git a/net/tipc/name_distr.h b/net/tipc/name_distr.h
new file mode 100644
index 000000000000..843da0172f4e
--- /dev/null
+++ b/net/tipc/name_distr.h
@@ -0,0 +1,48 @@
1/*
2 * net/tipc/name_distr.h: Include file for TIPC name distribution code
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_NAME_DISTR_H
38#define _TIPC_NAME_DISTR_H
39
40#include "name_table.h"
41
42void tipc_named_publish(struct publication *publ);
43void tipc_named_withdraw(struct publication *publ);
44void tipc_named_node_up(unsigned long node);
45void tipc_named_recv(struct sk_buff *buf);
46void tipc_named_reinit(void);
47
48#endif
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
new file mode 100644
index 000000000000..3f4b23bd08f7
--- /dev/null
+++ b/net/tipc/name_table.c
@@ -0,0 +1,1079 @@
1/*
2 * net/tipc/name_table.c: TIPC name table code
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "config.h"
39#include "dbg.h"
40#include "name_table.h"
41#include "name_distr.h"
42#include "addr.h"
43#include "node_subscr.h"
44#include "subscr.h"
45#include "port.h"
46#include "cluster.h"
47#include "bcast.h"
48
49int tipc_nametbl_size = 1024; /* must be a power of 2 */
50
51/**
52 * struct sub_seq - container for all published instances of a name sequence
53 * @lower: name sequence lower bound
54 * @upper: name sequence upper bound
55 * @node_list: circular list of matching publications with >= node scope
56 * @cluster_list: circular list of matching publications with >= cluster scope
57 * @zone_list: circular list of matching publications with >= zone scope
58 */
59
60struct sub_seq {
61 u32 lower;
62 u32 upper;
63 struct publication *node_list;
64 struct publication *cluster_list;
65 struct publication *zone_list;
66};
67
68/**
69 * struct name_seq - container for all published instances of a name type
70 * @type: 32 bit 'type' value for name sequence
71 * @sseq: pointer to dynamically-sized array of sub-sequences of this 'type';
72 * sub-sequences are sorted in ascending order
73 * @alloc: number of sub-sequences currently in array
74 * @first_free: upper bound of highest sub-sequence + 1
75 * @ns_list: links to adjacent name sequences in hash chain
76 * @subscriptions: list of subscriptions for this 'type'
77 * @lock: spinlock controlling access to name sequence structure
78 */
79
80struct name_seq {
81 u32 type;
82 struct sub_seq *sseqs;
83 u32 alloc;
84 u32 first_free;
85 struct hlist_node ns_list;
86 struct list_head subscriptions;
87 spinlock_t lock;
88};
89
90/**
91 * struct name_table - table containing all existing port name publications
92 * @types: pointer to fixed-sized array of name sequence lists,
93 * accessed via hashing on 'type'; name sequence lists are *not* sorted
94 * @local_publ_count: number of publications issued by this node
95 */
96
97struct name_table {
98 struct hlist_head *types;
99 u32 local_publ_count;
100};
101
102static struct name_table table = { NULL } ;
103static atomic_t rsv_publ_ok = ATOMIC_INIT(0);
104rwlock_t tipc_nametbl_lock = RW_LOCK_UNLOCKED;
105
106
107static inline int hash(int x)
108{
109 return(x & (tipc_nametbl_size - 1));
110}
111
112/**
113 * publ_create - create a publication structure
114 */
115
116static struct publication *publ_create(u32 type, u32 lower, u32 upper,
117 u32 scope, u32 node, u32 port_ref,
118 u32 key)
119{
120 struct publication *publ =
121 (struct publication *)kmalloc(sizeof(*publ), GFP_ATOMIC);
122 if (publ == NULL) {
123 warn("Memory squeeze; failed to create publication\n");
124 return 0;
125 }
126
127 memset(publ, 0, sizeof(*publ));
128 publ->type = type;
129 publ->lower = lower;
130 publ->upper = upper;
131 publ->scope = scope;
132 publ->node = node;
133 publ->ref = port_ref;
134 publ->key = key;
135 INIT_LIST_HEAD(&publ->local_list);
136 INIT_LIST_HEAD(&publ->pport_list);
137 INIT_LIST_HEAD(&publ->subscr.nodesub_list);
138 return publ;
139}
140
141/**
142 * tipc_subseq_alloc - allocate a specified number of sub-sequence structures
143 */
144
145struct sub_seq *tipc_subseq_alloc(u32 cnt)
146{
147 u32 sz = cnt * sizeof(struct sub_seq);
148 struct sub_seq *sseq = (struct sub_seq *)kmalloc(sz, GFP_ATOMIC);
149
150 if (sseq)
151 memset(sseq, 0, sz);
152 return sseq;
153}
154
155/**
156 * tipc_nameseq_create - create a name sequence structure for the specified 'type'
157 *
158 * Allocates a single sub-sequence structure and sets it to all 0's.
159 */
160
161struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head)
162{
163 struct name_seq *nseq =
164 (struct name_seq *)kmalloc(sizeof(*nseq), GFP_ATOMIC);
165 struct sub_seq *sseq = tipc_subseq_alloc(1);
166
167 if (!nseq || !sseq) {
168 warn("Memory squeeze; failed to create name sequence\n");
169 kfree(nseq);
170 kfree(sseq);
171 return 0;
172 }
173
174 memset(nseq, 0, sizeof(*nseq));
175 nseq->lock = SPIN_LOCK_UNLOCKED;
176 nseq->type = type;
177 nseq->sseqs = sseq;
 178 dbg("tipc_nameseq_create() nseq = %x type %u, sseqs %x, ff: %u\n",
179 nseq, type, nseq->sseqs, nseq->first_free);
180 nseq->alloc = 1;
181 INIT_HLIST_NODE(&nseq->ns_list);
182 INIT_LIST_HEAD(&nseq->subscriptions);
183 hlist_add_head(&nseq->ns_list, seq_head);
184 return nseq;
185}
186
187/**
188 * nameseq_find_subseq - find sub-sequence (if any) matching a name instance
189 *
 190 * Very time-critical, so it uses a binary search through the sub-sequence array.
191 */
192
193static inline struct sub_seq *nameseq_find_subseq(struct name_seq *nseq,
194 u32 instance)
195{
196 struct sub_seq *sseqs = nseq->sseqs;
197 int low = 0;
198 int high = nseq->first_free - 1;
199 int mid;
200
201 while (low <= high) {
202 mid = (low + high) / 2;
203 if (instance < sseqs[mid].lower)
204 high = mid - 1;
205 else if (instance > sseqs[mid].upper)
206 low = mid + 1;
207 else
208 return &sseqs[mid];
209 }
210 return 0;
211}
212
213/**
214 * nameseq_locate_subseq - determine position of name instance in sub-sequence
215 *
216 * Returns index in sub-sequence array of the entry that contains the specified
217 * instance value; if no entry contains that value, returns the position
218 * where a new entry for it would be inserted in the array.
219 *
220 * Note: Similar to binary search code for locating a sub-sequence.
221 */
222
223static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
224{
225 struct sub_seq *sseqs = nseq->sseqs;
226 int low = 0;
227 int high = nseq->first_free - 1;
228 int mid;
229
230 while (low <= high) {
231 mid = (low + high) / 2;
232 if (instance < sseqs[mid].lower)
233 high = mid - 1;
234 else if (instance > sseqs[mid].upper)
235 low = mid + 1;
236 else
237 return mid;
238 }
239 return low;
240}
241
242/**
 243 * tipc_nameseq_insert_publ - insert a publication into a name sequence
244 */
245
246struct publication *tipc_nameseq_insert_publ(struct name_seq *nseq,
247 u32 type, u32 lower, u32 upper,
248 u32 scope, u32 node, u32 port, u32 key)
249{
250 struct subscription *s;
251 struct subscription *st;
252 struct publication *publ;
253 struct sub_seq *sseq;
254 int created_subseq = 0;
255
256 assert(nseq->first_free <= nseq->alloc);
257 sseq = nameseq_find_subseq(nseq, lower);
258 dbg("nameseq_ins: for seq %x,<%u,%u>, found sseq %x\n",
259 nseq, type, lower, sseq);
260 if (sseq) {
261
262 /* Lower end overlaps existing entry => need an exact match */
263
264 if ((sseq->lower != lower) || (sseq->upper != upper)) {
265 warn("Overlapping publ <%u,%u,%u>\n", type, lower, upper);
266 return 0;
267 }
268 } else {
269 u32 inspos;
270 struct sub_seq *freesseq;
271
272 /* Find where lower end should be inserted */
273
274 inspos = nameseq_locate_subseq(nseq, lower);
275
276 /* Fail if upper end overlaps into an existing entry */
277
278 if ((inspos < nseq->first_free) &&
279 (upper >= nseq->sseqs[inspos].lower)) {
280 warn("Overlapping publ <%u,%u,%u>\n", type, lower, upper);
281 return 0;
282 }
283
284 /* Ensure there is space for new sub-sequence */
285
286 if (nseq->first_free == nseq->alloc) {
287 struct sub_seq *sseqs = nseq->sseqs;
288 nseq->sseqs = tipc_subseq_alloc(nseq->alloc * 2);
289 if (nseq->sseqs != NULL) {
290 memcpy(nseq->sseqs, sseqs,
291 nseq->alloc * sizeof (struct sub_seq));
292 kfree(sseqs);
293 dbg("Allocated %u sseqs\n", nseq->alloc);
294 nseq->alloc *= 2;
295 } else {
296 warn("Memory squeeze; failed to create sub-sequence\n");
297 return 0;
298 }
299 }
300 dbg("Have %u sseqs for type %u\n", nseq->alloc, type);
301
302 /* Insert new sub-sequence */
303
304 dbg("ins in pos %u, ff = %u\n", inspos, nseq->first_free);
305 sseq = &nseq->sseqs[inspos];
306 freesseq = &nseq->sseqs[nseq->first_free];
307 memmove(sseq + 1, sseq, (freesseq - sseq) * sizeof (*sseq));
308 memset(sseq, 0, sizeof (*sseq));
309 nseq->first_free++;
310 sseq->lower = lower;
311 sseq->upper = upper;
312 created_subseq = 1;
313 }
314 dbg("inserting (%u %u %u) from %x:%u into sseq %x(%u,%u) of seq %x\n",
315 type, lower, upper, node, port, sseq,
316 sseq->lower, sseq->upper, nseq);
317
318 /* Insert a publication: */
319
320 publ = publ_create(type, lower, upper, scope, node, port, key);
321 if (!publ)
322 return 0;
323 dbg("inserting publ %x, node=%x publ->node=%x, subscr->node=%x\n",
324 publ, node, publ->node, publ->subscr.node);
325
326 if (!sseq->zone_list)
327 sseq->zone_list = publ->zone_list_next = publ;
328 else {
329 publ->zone_list_next = sseq->zone_list->zone_list_next;
330 sseq->zone_list->zone_list_next = publ;
331 }
332
333 if (in_own_cluster(node)) {
334 if (!sseq->cluster_list)
335 sseq->cluster_list = publ->cluster_list_next = publ;
336 else {
337 publ->cluster_list_next =
338 sseq->cluster_list->cluster_list_next;
339 sseq->cluster_list->cluster_list_next = publ;
340 }
341 }
342
343 if (node == tipc_own_addr) {
344 if (!sseq->node_list)
345 sseq->node_list = publ->node_list_next = publ;
346 else {
347 publ->node_list_next = sseq->node_list->node_list_next;
348 sseq->node_list->node_list_next = publ;
349 }
350 }
351
352 /*
353 * Any subscriptions waiting for notification?
354 */
355 list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
356 dbg("calling report_overlap()\n");
357 tipc_subscr_report_overlap(s,
358 publ->lower,
359 publ->upper,
360 TIPC_PUBLISHED,
361 publ->ref,
362 publ->node,
363 created_subseq);
364 }
365 return publ;
366}
367
368/**
 369 * tipc_nameseq_remove_publ - remove a publication from a name sequence
370 */
371
372struct publication *tipc_nameseq_remove_publ(struct name_seq *nseq, u32 inst,
373 u32 node, u32 ref, u32 key)
374{
375 struct publication *publ;
376 struct publication *prev;
377 struct sub_seq *sseq = nameseq_find_subseq(nseq, inst);
378 struct sub_seq *free;
379 struct subscription *s, *st;
380 int removed_subseq = 0;
381
382 assert(nseq);
383
384 if (!sseq) {
385 int i;
386
387 warn("Withdraw unknown <%u,%u>?\n", nseq->type, inst);
388 assert(nseq->sseqs);
389 dbg("Dumping subseqs %x for %x, alloc = %u,ff=%u\n",
390 nseq->sseqs, nseq, nseq->alloc,
391 nseq->first_free);
392 for (i = 0; i < nseq->first_free; i++) {
393 dbg("Subseq %u(%x): lower = %u,upper = %u\n",
394 i, &nseq->sseqs[i], nseq->sseqs[i].lower,
395 nseq->sseqs[i].upper);
396 }
397 return 0;
398 }
399 dbg("nameseq_remove: seq: %x, sseq %x, <%u,%u> key %u\n",
400 nseq, sseq, nseq->type, inst, key);
401
402 prev = sseq->zone_list;
403 publ = sseq->zone_list->zone_list_next;
404 while ((publ->key != key) || (publ->ref != ref) ||
405 (publ->node && (publ->node != node))) {
406 prev = publ;
407 publ = publ->zone_list_next;
408 assert(prev != sseq->zone_list);
409 }
410 if (publ != sseq->zone_list)
411 prev->zone_list_next = publ->zone_list_next;
412 else if (publ->zone_list_next != publ) {
413 prev->zone_list_next = publ->zone_list_next;
414 sseq->zone_list = publ->zone_list_next;
415 } else {
416 sseq->zone_list = 0;
417 }
418
419 if (in_own_cluster(node)) {
420 prev = sseq->cluster_list;
421 publ = sseq->cluster_list->cluster_list_next;
422 while ((publ->key != key) || (publ->ref != ref) ||
423 (publ->node && (publ->node != node))) {
424 prev = publ;
425 publ = publ->cluster_list_next;
426 assert(prev != sseq->cluster_list);
427 }
428 if (publ != sseq->cluster_list)
429 prev->cluster_list_next = publ->cluster_list_next;
430 else if (publ->cluster_list_next != publ) {
431 prev->cluster_list_next = publ->cluster_list_next;
432 sseq->cluster_list = publ->cluster_list_next;
433 } else {
434 sseq->cluster_list = 0;
435 }
436 }
437
438 if (node == tipc_own_addr) {
439 prev = sseq->node_list;
440 publ = sseq->node_list->node_list_next;
441 while ((publ->key != key) || (publ->ref != ref) ||
442 (publ->node && (publ->node != node))) {
443 prev = publ;
444 publ = publ->node_list_next;
445 assert(prev != sseq->node_list);
446 }
447 if (publ != sseq->node_list)
448 prev->node_list_next = publ->node_list_next;
449 else if (publ->node_list_next != publ) {
450 prev->node_list_next = publ->node_list_next;
451 sseq->node_list = publ->node_list_next;
452 } else {
453 sseq->node_list = 0;
454 }
455 }
456 assert(!publ->node || (publ->node == node));
457 assert(publ->ref == ref);
458 assert(publ->key == key);
459
460 /*
461 * Contract subseq list if no more publications:
462 */
463 if (!sseq->node_list && !sseq->cluster_list && !sseq->zone_list) {
464 free = &nseq->sseqs[nseq->first_free--];
465 memmove(sseq, sseq + 1, (free - (sseq + 1)) * sizeof (*sseq));
466 removed_subseq = 1;
467 }
468
469 /*
470 * Any subscriptions waiting ?
471 */
472 list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
473 tipc_subscr_report_overlap(s,
474 publ->lower,
475 publ->upper,
476 TIPC_WITHDRAWN,
477 publ->ref,
478 publ->node,
479 removed_subseq);
480 }
481 return publ;
482}
483
484/**
 485 * tipc_nameseq_subscribe - attach a subscription, and issue
 486 * the prescribed number of events if there is any sub-sequence
 487 * overlapping with the requested sequence
488 */
489
490void tipc_nameseq_subscribe(struct name_seq *nseq, struct subscription *s)
491{
492 struct sub_seq *sseq = nseq->sseqs;
493
494 list_add(&s->nameseq_list, &nseq->subscriptions);
495
496 if (!sseq)
497 return;
498
499 while (sseq != &nseq->sseqs[nseq->first_free]) {
500 struct publication *zl = sseq->zone_list;
501 if (zl && tipc_subscr_overlap(s,sseq->lower,sseq->upper)) {
502 struct publication *crs = zl;
503 int must_report = 1;
504
505 do {
506 tipc_subscr_report_overlap(s,
507 sseq->lower,
508 sseq->upper,
509 TIPC_PUBLISHED,
510 crs->ref,
511 crs->node,
512 must_report);
513 must_report = 0;
514 crs = crs->zone_list_next;
515 } while (crs != zl);
516 }
517 sseq++;
518 }
519}
520
521static struct name_seq *nametbl_find_seq(u32 type)
522{
523 struct hlist_head *seq_head;
524 struct hlist_node *seq_node;
525 struct name_seq *ns;
526
527 dbg("find_seq %u,(%u,0x%x) table = %p, hash[type] = %u\n",
528 type, ntohl(type), type, table.types, hash(type));
529
530 seq_head = &table.types[hash(type)];
531 hlist_for_each_entry(ns, seq_node, seq_head, ns_list) {
532 if (ns->type == type) {
533 dbg("found %x\n", ns);
534 return ns;
535 }
536 }
537
538 return 0;
539};
540
541struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
542 u32 scope, u32 node, u32 port, u32 key)
543{
544 struct name_seq *seq = nametbl_find_seq(type);
545
546 dbg("ins_publ: <%u,%x,%x> found %x\n", type, lower, upper, seq);
547 if (lower > upper) {
548 warn("Failed to publish illegal <%u,%u,%u>\n",
549 type, lower, upper);
550 return 0;
551 }
552
553 dbg("Publishing <%u,%u,%u> from %x\n", type, lower, upper, node);
554 if (!seq) {
555 seq = tipc_nameseq_create(type, &table.types[hash(type)]);
556 dbg("tipc_nametbl_insert_publ: created %x\n", seq);
557 }
558 if (!seq)
559 return 0;
560
561 assert(seq->type == type);
562 return tipc_nameseq_insert_publ(seq, type, lower, upper,
563 scope, node, port, key);
564}
565
566struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
567 u32 node, u32 ref, u32 key)
568{
569 struct publication *publ;
570 struct name_seq *seq = nametbl_find_seq(type);
571
572 if (!seq)
573 return 0;
574
575 dbg("Withdrawing <%u,%u> from %x\n", type, lower, node);
576 publ = tipc_nameseq_remove_publ(seq, lower, node, ref, key);
577
578 if (!seq->first_free && list_empty(&seq->subscriptions)) {
579 hlist_del_init(&seq->ns_list);
580 kfree(seq->sseqs);
581 kfree(seq);
582 }
583 return publ;
584}
585
586/*
587 * tipc_nametbl_translate(): Translate tipc_name -> tipc_portid.
588 * Very time-critical.
589 *
590 * Note: on entry 'destnode' is the search domain used during translation;
591 * on exit it passes back the node address of the matching port (if any)
592 */
593
594u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *destnode)
595{
596 struct sub_seq *sseq;
597 struct publication *publ = 0;
598 struct name_seq *seq;
599 u32 ref;
600
601 if (!in_scope(*destnode, tipc_own_addr))
602 return 0;
603
604 read_lock_bh(&tipc_nametbl_lock);
605 seq = nametbl_find_seq(type);
606 if (unlikely(!seq))
607 goto not_found;
608 sseq = nameseq_find_subseq(seq, instance);
609 if (unlikely(!sseq))
610 goto not_found;
611 spin_lock_bh(&seq->lock);
612
613 /* Closest-First Algorithm: */
614 if (likely(!*destnode)) {
615 publ = sseq->node_list;
616 if (publ) {
617 sseq->node_list = publ->node_list_next;
618found:
619 ref = publ->ref;
620 *destnode = publ->node;
621 spin_unlock_bh(&seq->lock);
622 read_unlock_bh(&tipc_nametbl_lock);
623 return ref;
624 }
625 publ = sseq->cluster_list;
626 if (publ) {
627 sseq->cluster_list = publ->cluster_list_next;
628 goto found;
629 }
630 publ = sseq->zone_list;
631 if (publ) {
632 sseq->zone_list = publ->zone_list_next;
633 goto found;
634 }
635 }
636
637 /* Round-Robin Algorithm: */
638 else if (*destnode == tipc_own_addr) {
639 publ = sseq->node_list;
640 if (publ) {
641 sseq->node_list = publ->node_list_next;
642 goto found;
643 }
644 } else if (in_own_cluster(*destnode)) {
645 publ = sseq->cluster_list;
646 if (publ) {
647 sseq->cluster_list = publ->cluster_list_next;
648 goto found;
649 }
650 } else {
651 publ = sseq->zone_list;
652 if (publ) {
653 sseq->zone_list = publ->zone_list_next;
654 goto found;
655 }
656 }
657 spin_unlock_bh(&seq->lock);
658not_found:
659 *destnode = 0;
660 read_unlock_bh(&tipc_nametbl_lock);
661 return 0;
662}
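/*
 * Illustrative user-space sketch (simplified types; struct publ stands in for
 * struct publication) of the round-robin selection used above: each
 * sub-sequence keeps a circular singly-linked list of publications, and every
 * successful lookup advances the list head so the next lookup returns the
 * next publication.
 */
#include <stdio.h>

struct publ { unsigned int ref; struct publ *next; };

static unsigned int pick(struct publ **head)
{
	struct publ *p = *head;

	if (!p)
		return 0;
	*head = p->next;	/* rotate for the next caller */
	return p->ref;
}

int main(void)
{
	struct publ a = { 1 }, b = { 2 }, c = { 3 };
	struct publ *head = &a;
	int i;

	a.next = &b; b.next = &c; c.next = &a;	/* circular list */
	for (i = 0; i < 5; i++)
		printf("%u ", pick(&head));	/* prints 1 2 3 1 2 */
	printf("\n");
	return 0;
}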
663
664/**
665 * tipc_nametbl_mc_translate - find multicast destinations
666 *
667 * Creates list of all local ports that overlap the given multicast address;
668 * also determines if any off-node ports overlap.
669 *
670 * Note: Publications with a scope narrower than 'limit' are ignored.
 671 * (i.e. local node-scope publications must not receive messages arriving
 672 * from another node, even if the multicast link brought them here)
673 *
674 * Returns non-zero if any off-node ports overlap
675 */
676
677int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
678 struct port_list *dports)
679{
680 struct name_seq *seq;
681 struct sub_seq *sseq;
682 struct sub_seq *sseq_stop;
683 int res = 0;
684
685 read_lock_bh(&tipc_nametbl_lock);
686 seq = nametbl_find_seq(type);
687 if (!seq)
688 goto exit;
689
690 spin_lock_bh(&seq->lock);
691
692 sseq = seq->sseqs + nameseq_locate_subseq(seq, lower);
693 sseq_stop = seq->sseqs + seq->first_free;
694 for (; sseq != sseq_stop; sseq++) {
695 struct publication *publ;
696
697 if (sseq->lower > upper)
698 break;
699 publ = sseq->cluster_list;
700 if (publ && (publ->scope <= limit))
701 do {
702 if (publ->node == tipc_own_addr)
703 tipc_port_list_add(dports, publ->ref);
704 else
705 res = 1;
706 publ = publ->cluster_list_next;
707 } while (publ != sseq->cluster_list);
708 }
709
710 spin_unlock_bh(&seq->lock);
711exit:
712 read_unlock_bh(&tipc_nametbl_lock);
713 return res;
714}
715
716/**
717 * tipc_nametbl_publish_rsv - publish port name using a reserved name type
718 */
719
720int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope,
721 struct tipc_name_seq const *seq)
722{
723 int res;
724
725 atomic_inc(&rsv_publ_ok);
726 res = tipc_publish(ref, scope, seq);
727 atomic_dec(&rsv_publ_ok);
728 return res;
729}
730
731/**
732 * tipc_nametbl_publish - add name publication to network name tables
733 */
734
735struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
736 u32 scope, u32 port_ref, u32 key)
737{
738 struct publication *publ;
739
740 if (table.local_publ_count >= tipc_max_publications) {
 741 warn("Failed to publish: max %u local publications\n",
742 tipc_max_publications);
743 return 0;
744 }
745 if ((type < TIPC_RESERVED_TYPES) && !atomic_read(&rsv_publ_ok)) {
746 warn("Failed to publish reserved name <%u,%u,%u>\n",
747 type, lower, upper);
748 return 0;
749 }
750
751 write_lock_bh(&tipc_nametbl_lock);
752 table.local_publ_count++;
753 publ = tipc_nametbl_insert_publ(type, lower, upper, scope,
754 tipc_own_addr, port_ref, key);
755 if (publ && (scope != TIPC_NODE_SCOPE)) {
756 tipc_named_publish(publ);
757 }
758 write_unlock_bh(&tipc_nametbl_lock);
759 return publ;
760}
761
762/**
763 * tipc_nametbl_withdraw - withdraw name publication from network name tables
764 */
765
766int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key)
767{
768 struct publication *publ;
769
770 dbg("tipc_nametbl_withdraw:<%d,%d,%d>\n", type, lower, key);
771 write_lock_bh(&tipc_nametbl_lock);
772 publ = tipc_nametbl_remove_publ(type, lower, tipc_own_addr, ref, key);
773 if (publ) {
774 table.local_publ_count--;
775 if (publ->scope != TIPC_NODE_SCOPE)
776 tipc_named_withdraw(publ);
777 write_unlock_bh(&tipc_nametbl_lock);
778 list_del_init(&publ->pport_list);
779 kfree(publ);
780 return 1;
781 }
782 write_unlock_bh(&tipc_nametbl_lock);
783 return 0;
784}
785
786/**
787 * tipc_nametbl_subscribe - add a subscription object to the name table
788 */
789
790void
791tipc_nametbl_subscribe(struct subscription *s)
792{
793 u32 type = s->seq.type;
794 struct name_seq *seq;
795
796 write_lock_bh(&tipc_nametbl_lock);
797 seq = nametbl_find_seq(type);
798 if (!seq) {
799 seq = tipc_nameseq_create(type, &table.types[hash(type)]);
800 }
801 if (seq){
802 spin_lock_bh(&seq->lock);
803 dbg("tipc_nametbl_subscribe:found %x for <%u,%u,%u>\n",
804 seq, type, s->seq.lower, s->seq.upper);
805 assert(seq->type == type);
806 tipc_nameseq_subscribe(seq, s);
807 spin_unlock_bh(&seq->lock);
808 }
809 write_unlock_bh(&tipc_nametbl_lock);
810}
811
812/**
813 * tipc_nametbl_unsubscribe - remove a subscription object from name table
814 */
815
816void
817tipc_nametbl_unsubscribe(struct subscription *s)
818{
819 struct name_seq *seq;
820
821 write_lock_bh(&tipc_nametbl_lock);
822 seq = nametbl_find_seq(s->seq.type);
823 if (seq != NULL){
824 spin_lock_bh(&seq->lock);
825 list_del_init(&s->nameseq_list);
826 spin_unlock_bh(&seq->lock);
827 if ((seq->first_free == 0) && list_empty(&seq->subscriptions)) {
828 hlist_del_init(&seq->ns_list);
829 kfree(seq->sseqs);
830 kfree(seq);
831 }
832 }
833 write_unlock_bh(&tipc_nametbl_lock);
834}
835
836
837/**
838 * subseq_list: print specified sub-sequence contents into the given buffer
839 */
840
841static void subseq_list(struct sub_seq *sseq, struct print_buf *buf, u32 depth,
842 u32 index)
843{
844 char portIdStr[27];
845 char *scopeStr;
846 struct publication *publ = sseq->zone_list;
847
848 tipc_printf(buf, "%-10u %-10u ", sseq->lower, sseq->upper);
849
850 if (depth == 2 || !publ) {
851 tipc_printf(buf, "\n");
852 return;
853 }
854
855 do {
856 sprintf (portIdStr, "<%u.%u.%u:%u>",
857 tipc_zone(publ->node), tipc_cluster(publ->node),
858 tipc_node(publ->node), publ->ref);
859 tipc_printf(buf, "%-26s ", portIdStr);
860 if (depth > 3) {
861 if (publ->node != tipc_own_addr)
862 scopeStr = "";
863 else if (publ->scope == TIPC_NODE_SCOPE)
864 scopeStr = "node";
865 else if (publ->scope == TIPC_CLUSTER_SCOPE)
866 scopeStr = "cluster";
867 else
868 scopeStr = "zone";
869 tipc_printf(buf, "%-10u %s", publ->key, scopeStr);
870 }
871
872 publ = publ->zone_list_next;
873 if (publ == sseq->zone_list)
874 break;
875
876 tipc_printf(buf, "\n%33s", " ");
877 } while (1);
878
879 tipc_printf(buf, "\n");
880}
881
882/**
883 * nameseq_list: print specified name sequence contents into the given buffer
884 */
885
886static void nameseq_list(struct name_seq *seq, struct print_buf *buf, u32 depth,
887 u32 type, u32 lowbound, u32 upbound, u32 index)
888{
889 struct sub_seq *sseq;
890 char typearea[11];
891
892 sprintf(typearea, "%-10u", seq->type);
893
894 if (depth == 1) {
895 tipc_printf(buf, "%s\n", typearea);
896 return;
897 }
898
899 for (sseq = seq->sseqs; sseq != &seq->sseqs[seq->first_free]; sseq++) {
900 if ((lowbound <= sseq->upper) && (upbound >= sseq->lower)) {
901 tipc_printf(buf, "%s ", typearea);
902 subseq_list(sseq, buf, depth, index);
903 sprintf(typearea, "%10s", " ");
904 }
905 }
906}
907
908/**
909 * nametbl_header - print name table header into the given buffer
910 */
911
912static void nametbl_header(struct print_buf *buf, u32 depth)
913{
914 tipc_printf(buf, "Type ");
915
916 if (depth > 1)
917 tipc_printf(buf, "Lower Upper ");
918 if (depth > 2)
919 tipc_printf(buf, "Port Identity ");
920 if (depth > 3)
921 tipc_printf(buf, "Publication");
922
923 tipc_printf(buf, "\n-----------");
924
925 if (depth > 1)
926 tipc_printf(buf, "--------------------- ");
927 if (depth > 2)
928 tipc_printf(buf, "-------------------------- ");
929 if (depth > 3)
930 tipc_printf(buf, "------------------");
931
932 tipc_printf(buf, "\n");
933}
934
935/**
936 * nametbl_list - print specified name table contents into the given buffer
937 */
938
939static void nametbl_list(struct print_buf *buf, u32 depth_info,
940 u32 type, u32 lowbound, u32 upbound)
941{
942 struct hlist_head *seq_head;
943 struct hlist_node *seq_node;
944 struct name_seq *seq;
945 int all_types;
946 u32 depth;
947 u32 i;
948
949 all_types = (depth_info & TIPC_NTQ_ALLTYPES);
950 depth = (depth_info & ~TIPC_NTQ_ALLTYPES);
951
952 if (depth == 0)
953 return;
954
955 if (all_types) {
956 /* display all entries in name table to specified depth */
957 nametbl_header(buf, depth);
958 lowbound = 0;
959 upbound = ~0;
960 for (i = 0; i < tipc_nametbl_size; i++) {
961 seq_head = &table.types[i];
962 hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
963 nameseq_list(seq, buf, depth, seq->type,
964 lowbound, upbound, i);
965 }
966 }
967 } else {
968 /* display only the sequence that matches the specified type */
969 if (upbound < lowbound) {
970 tipc_printf(buf, "invalid name sequence specified\n");
971 return;
972 }
973 nametbl_header(buf, depth);
974 i = hash(type);
975 seq_head = &table.types[i];
976 hlist_for_each_entry(seq, seq_node, seq_head, ns_list) {
977 if (seq->type == type) {
978 nameseq_list(seq, buf, depth, type,
979 lowbound, upbound, i);
980 break;
981 }
982 }
983 }
984}
985
986void tipc_nametbl_print(struct print_buf *buf, const char *str)
987{
988 tipc_printf(buf, str);
989 read_lock_bh(&tipc_nametbl_lock);
990 nametbl_list(buf, 0, 0, 0, 0);
991 read_unlock_bh(&tipc_nametbl_lock);
992}
993
994#define MAX_NAME_TBL_QUERY 32768
995
996struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space)
997{
998 struct sk_buff *buf;
999 struct tipc_name_table_query *argv;
1000 struct tlv_desc *rep_tlv;
1001 struct print_buf b;
1002 int str_len;
1003
1004 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NAME_TBL_QUERY))
1005 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
1006
1007 buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_NAME_TBL_QUERY));
1008 if (!buf)
1009 return NULL;
1010
1011 rep_tlv = (struct tlv_desc *)buf->data;
1012 tipc_printbuf_init(&b, TLV_DATA(rep_tlv), MAX_NAME_TBL_QUERY);
1013 argv = (struct tipc_name_table_query *)TLV_DATA(req_tlv_area);
1014 read_lock_bh(&tipc_nametbl_lock);
1015 nametbl_list(&b, ntohl(argv->depth), ntohl(argv->type),
1016 ntohl(argv->lowbound), ntohl(argv->upbound));
1017 read_unlock_bh(&tipc_nametbl_lock);
1018 str_len = tipc_printbuf_validate(&b);
1019
1020 skb_put(buf, TLV_SPACE(str_len));
1021 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
1022
1023 return buf;
1024}
1025
1026void tipc_nametbl_dump(void)
1027{
1028 nametbl_list(TIPC_CONS, 0, 0, 0, 0);
1029}
1030
1031int tipc_nametbl_init(void)
1032{
1033 int array_size = sizeof(struct hlist_head) * tipc_nametbl_size;
1034
1035	table.types = kmalloc(array_size, GFP_ATOMIC);
1036 if (!table.types)
1037 return -ENOMEM;
1038
1039 write_lock_bh(&tipc_nametbl_lock);
1040 memset(table.types, 0, array_size);
1041 table.local_publ_count = 0;
1042 write_unlock_bh(&tipc_nametbl_lock);
1043 return 0;
1044}
1045
1046void tipc_nametbl_stop(void)
1047{
1048 struct hlist_head *seq_head;
1049 struct hlist_node *seq_node;
1050 struct hlist_node *tmp;
1051 struct name_seq *seq;
1052 u32 i;
1053
1054 if (!table.types)
1055 return;
1056
1057 write_lock_bh(&tipc_nametbl_lock);
1058 for (i = 0; i < tipc_nametbl_size; i++) {
1059 seq_head = &table.types[i];
1060 hlist_for_each_entry_safe(seq, seq_node, tmp, seq_head, ns_list) {
1061 struct sub_seq *sseq = seq->sseqs;
1062
1063 for (; sseq != &seq->sseqs[seq->first_free]; sseq++) {
1064 struct publication *publ = sseq->zone_list;
1065 assert(publ);
1066 do {
1067 struct publication *next =
1068 publ->zone_list_next;
1069 kfree(publ);
1070 publ = next;
1071 }
1072 while (publ != sseq->zone_list);
1073 }
1074 }
1075 }
1076 kfree(table.types);
1077 table.types = NULL;
1078 write_unlock_bh(&tipc_nametbl_lock);
1079}
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
new file mode 100644
index 000000000000..e8a3d71763ce
--- /dev/null
+++ b/net/tipc/name_table.h
@@ -0,0 +1,108 @@
1/*
2 * net/tipc/name_table.h: Include file for TIPC name table code
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_NAME_TABLE_H
38#define _TIPC_NAME_TABLE_H
39
40#include "node_subscr.h"
41
42struct subscription;
43struct port_list;
44
45/*
46 * TIPC name types reserved for internal TIPC use (both current and planned)
47 */
48
49#define TIPC_ZM_SRV 3 /* zone master service name type */
50
51
52/**
53 * struct publication - info about a published (name or) name sequence
54 * @type: name sequence type
55 * @lower: name sequence lower bound
56 * @upper: name sequence upper bound
57 * @scope: scope of publication
58 * @node: network address of publishing port's node
59 * @ref: publishing port
60 * @key: publication key
61 * @subscr: subscription to "node down" event (for off-node publications only)
62 * @local_list: adjacent entries in list of publications made by this node
63 * @pport_list: adjacent entries in list of publications made by this port
64 * @node_list: next matching name seq publication with >= node scope
65 * @cluster_list: next matching name seq publication with >= cluster scope
66 * @zone_list: next matching name seq publication with >= zone scope
67 *
68 * Note that the node list, cluster list, and zone list are circular lists;
69 * a traversal sketch follows the structure declaration below.
69 */
70
71struct publication {
72 u32 type;
73 u32 lower;
74 u32 upper;
75 u32 scope;
76 u32 node;
77 u32 ref;
78 u32 key;
79 struct node_subscr subscr;
80 struct list_head local_list;
81 struct list_head pport_list;
82 struct publication *node_list_next;
83 struct publication *cluster_list_next;
84 struct publication *zone_list_next;
85};
86
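/*
 * Minimal traversal sketch for the circular zone list (the cluster and
 * node lists are walked the same way).  This mirrors the do/while pattern
 * used in name_table.c and is not a new interface; 'sseq' is assumed to
 * point to a struct sub_seq as defined in name_table.c:
 *
 *	struct publication *publ = sseq->zone_list;
 *
 *	if (publ)
 *		do {
 *			... inspect publ ...
 *			publ = publ->zone_list_next;
 *		} while (publ != sseq->zone_list);
 */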
87
88extern rwlock_t tipc_nametbl_lock;
89
90struct sk_buff *tipc_nametbl_get(const void *req_tlv_area, int req_tlv_space);
91u32 tipc_nametbl_translate(u32 type, u32 instance, u32 *node);
92int tipc_nametbl_mc_translate(u32 type, u32 lower, u32 upper, u32 limit,
93 struct port_list *dports);
94int tipc_nametbl_publish_rsv(u32 ref, unsigned int scope,
95 struct tipc_name_seq const *seq);
96struct publication *tipc_nametbl_publish(u32 type, u32 lower, u32 upper,
97 u32 scope, u32 port_ref, u32 key);
98int tipc_nametbl_withdraw(u32 type, u32 lower, u32 ref, u32 key);
99struct publication *tipc_nametbl_insert_publ(u32 type, u32 lower, u32 upper,
100 u32 scope, u32 node, u32 ref, u32 key);
101struct publication *tipc_nametbl_remove_publ(u32 type, u32 lower,
102 u32 node, u32 ref, u32 key);
103void tipc_nametbl_subscribe(struct subscription *s);
104void tipc_nametbl_unsubscribe(struct subscription *s);
105int tipc_nametbl_init(void);
106void tipc_nametbl_stop(void);
107
108#endif
diff --git a/net/tipc/net.c b/net/tipc/net.c
new file mode 100644
index 000000000000..074891ad4f09
--- /dev/null
+++ b/net/tipc/net.c
@@ -0,0 +1,311 @@
1/*
2 * net/tipc/net.c: TIPC network routing code
3 *
4 * Copyright (c) 1995-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "bearer.h"
39#include "net.h"
40#include "zone.h"
41#include "addr.h"
42#include "name_table.h"
43#include "name_distr.h"
44#include "subscr.h"
45#include "link.h"
46#include "msg.h"
47#include "port.h"
48#include "bcast.h"
49#include "discover.h"
50#include "config.h"
51
52/*
53 * The TIPC locking policy is designed to ensure a very fine locking
54 * granularity, permitting complete parallel access to individual
55 * port and node/link instances. The code consists of three major
56 * locking domains, each protected by its own disjoint set of locks.
57 *
58 * 1: The routing hierarchy.
59 * Comprises the structures 'zone', 'cluster', 'node', 'link'
60 * and 'bearer'. The whole hierarchy is protected by a big
61 * read/write lock, tipc_net_lock, to ensure that nothing is added
62 * or removed while code is accessing any of these structures.
63 * This layer must not be called from the two others while they
64 * hold any of their own locks.
65 * Neither must it itself do any upcalls to the other two before
66 * it has released tipc_net_lock and other protective locks.
67 *
68 * Within the tipc_net_lock domain there are two sub-domains: 'node' and
69 * 'bearer', where local write operations are permitted,
70 * provided that those are protected by individual spin_locks
71 * per instance. Code holding tipc_net_lock(read) and a node spin_lock
72 * is permitted to poke around in both the node itself and its
73 * subordinate links. I.e., it can update link counters and queues,
74 * change link state, send protocol messages, and alter the
75 * "active_links" array in the node; but it can _not_ remove a link
76 * or a node from the overall structure.
77 * Correspondingly, individual bearers may change status within a
78 * tipc_net_lock(read), protected by an individual spin_lock per bearer
79 * instance, but it needs tipc_net_lock(write) to remove/add any bearers.
80 *
81 *
82 * 2: The transport level of the protocol.
83 * This consists of the structures port (and its user level
84 * representations, such as user_port and tipc_sock), reference and
85 * tipc_user (port.c, reg.c, socket.c).
86 *
87 * This layer has four different locks:
88 * - The tipc_port spin_lock. This is protecting each port instance
89 * from parallel data access and removal. Since we can not place
90 * this lock in the port itself, it has been placed in the
91 * corresponding reference table entry, which has the same life
92 * cycle as the module. This entry is difficult to access from
93 * outside the TIPC core, however, so a pointer to the lock has
94 * been added in the port instance, to be used for unlocking
95 * only.
96 * - A read/write lock to protect the reference table itself (ref.c).
97 * (Nobody is using read-only access to this, so it can just as
98 * well be changed to a spin_lock)
99 * - A spin lock to protect the registry of kernel/driver users (reg.c)
100 * - A global spin_lock (tipc_port_lock), whose only task is to ensure
101 * consistency where more than one port is involved in an operation,
102 * i.e., when a port is part of a linked list of ports.
103 * There are two such lists; 'port_list', which is used for management,
104 * and 'wait_list', which is used to queue ports during congestion.
105 *
106 * 3: The name table (name_table.c, name_distr.c, subscription.c)
107 * - There is one big read/write-lock (tipc_nametbl_lock) protecting the
108 * overall name table structure. Nothing must be added/removed to
109 * this structure without holding write access to it.
110 * - There is one local spin_lock per sub_sequence, which can be seen
111 * as a sub-domain to the tipc_nametbl_lock domain. It is used only
112 * for translation operations, and is needed because a translation
113 * steps the root of the 'publication' linked list between each lookup.
114 * This is always used within the scope of a tipc_nametbl_lock(read).
115 * - A local spin_lock protecting the queue of subscriber events.
116*/
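/*
 * Illustrative sketch (not a verbatim excerpt from any caller) of the
 * typical lock nesting in domain 1, assuming the caller only needs to
 * inspect or update a single node and its links; tipc_node_find(),
 * tipc_node_lock() and tipc_node_unlock() are the helpers declared in
 * node.h, and 'addr' is assumed to be a valid node address:
 *
 *	read_lock_bh(&tipc_net_lock);
 *	n_ptr = tipc_node_find(addr);
 *	if (n_ptr) {
 *		tipc_node_lock(n_ptr);
 *		... update n_ptr->links[], counters, active_links ...
 *		tipc_node_unlock(n_ptr);
 *	}
 *	read_unlock_bh(&tipc_net_lock);
 *
 * Adding or removing a node, link or bearer instead requires holding
 * tipc_net_lock in write mode, as described above.
 */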
117
118rwlock_t tipc_net_lock = RW_LOCK_UNLOCKED;
119struct network tipc_net = { 0 };
120
121struct node *tipc_net_select_remote_node(u32 addr, u32 ref)
122{
123 return tipc_zone_select_remote_node(tipc_net.zones[tipc_zone(addr)], addr, ref);
124}
125
126u32 tipc_net_select_router(u32 addr, u32 ref)
127{
128 return tipc_zone_select_router(tipc_net.zones[tipc_zone(addr)], addr, ref);
129}
130
131
132u32 tipc_net_next_node(u32 a)
133{
134 if (tipc_net.zones[tipc_zone(a)])
135 return tipc_zone_next_node(a);
136 return 0;
137}
138
139void tipc_net_remove_as_router(u32 router)
140{
141 u32 z_num;
142
143 for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
144 if (!tipc_net.zones[z_num])
145 continue;
146 tipc_zone_remove_as_router(tipc_net.zones[z_num], router);
147 }
148}
149
150void tipc_net_send_external_routes(u32 dest)
151{
152 u32 z_num;
153
154 for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
155 if (tipc_net.zones[z_num])
156 tipc_zone_send_external_routes(tipc_net.zones[z_num], dest);
157 }
158}
159
160static int net_init(void)
161{
162 u32 sz = sizeof(struct _zone *) * (tipc_max_zones + 1);
163
164 memset(&tipc_net, 0, sizeof(tipc_net));
165	tipc_net.zones = kmalloc(sz, GFP_ATOMIC);
166 if (!tipc_net.zones) {
167 return -ENOMEM;
168 }
169 memset(tipc_net.zones, 0, sz);
170 return TIPC_OK;
171}
172
173static void net_stop(void)
174{
175 u32 z_num;
176
177 if (!tipc_net.zones)
178 return;
179
180 for (z_num = 1; z_num <= tipc_max_zones; z_num++) {
181 tipc_zone_delete(tipc_net.zones[z_num]);
182 }
183 kfree(tipc_net.zones);
184 tipc_net.zones = 0;
185}
186
187static void net_route_named_msg(struct sk_buff *buf)
188{
189 struct tipc_msg *msg = buf_msg(buf);
190 u32 dnode;
191 u32 dport;
192
193 if (!msg_named(msg)) {
194 msg_dbg(msg, "tipc_net->drop_nam:");
195 buf_discard(buf);
196 return;
197 }
198
199 dnode = addr_domain(msg_lookup_scope(msg));
200 dport = tipc_nametbl_translate(msg_nametype(msg), msg_nameinst(msg), &dnode);
201 dbg("tipc_net->lookup<%u,%u>-><%u,%x>\n",
202 msg_nametype(msg), msg_nameinst(msg), dport, dnode);
203 if (dport) {
204 msg_set_destnode(msg, dnode);
205 msg_set_destport(msg, dport);
206 tipc_net_route_msg(buf);
207 return;
208 }
209 msg_dbg(msg, "tipc_net->rej:NO NAME: ");
210 tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
211}
212
213void tipc_net_route_msg(struct sk_buff *buf)
214{
215 struct tipc_msg *msg;
216 u32 dnode;
217
218 if (!buf)
219 return;
220 msg = buf_msg(buf);
221
222 msg_incr_reroute_cnt(msg);
223 if (msg_reroute_cnt(msg) > 6) {
224 if (msg_errcode(msg)) {
225 msg_dbg(msg, "NET>DISC>:");
226 buf_discard(buf);
227 } else {
228 msg_dbg(msg, "NET>REJ>:");
229 tipc_reject_msg(buf, msg_destport(msg) ?
230 TIPC_ERR_NO_PORT : TIPC_ERR_NO_NAME);
231 }
232 return;
233 }
234
235 msg_dbg(msg, "tipc_net->rout: ");
236
237 /* Handle message for this node */
238 dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
239 if (in_scope(dnode, tipc_own_addr)) {
240 if (msg_isdata(msg)) {
241 if (msg_mcast(msg))
242 tipc_port_recv_mcast(buf, NULL);
243 else if (msg_destport(msg))
244 tipc_port_recv_msg(buf);
245 else
246 net_route_named_msg(buf);
247 return;
248 }
249 switch (msg_user(msg)) {
250 case ROUTE_DISTRIBUTOR:
251 tipc_cltr_recv_routing_table(buf);
252 break;
253 case NAME_DISTRIBUTOR:
254 tipc_named_recv(buf);
255 break;
256 case CONN_MANAGER:
257 tipc_port_recv_proto_msg(buf);
258 break;
259 default:
260 msg_dbg(msg,"DROP/NET/<REC<");
261 buf_discard(buf);
262 }
263 return;
264 }
265
266 /* Handle message for another node */
267 msg_dbg(msg, "NET>SEND>: ");
268 tipc_link_send(buf, dnode, msg_link_selector(msg));
269}
270
271int tipc_net_start(void)
272{
273 char addr_string[16];
274 int res;
275
276 if (tipc_mode != TIPC_NODE_MODE)
277 return -ENOPROTOOPT;
278
279 tipc_mode = TIPC_NET_MODE;
280 tipc_named_reinit();
281 tipc_port_reinit();
282
283 if ((res = tipc_bearer_init()) ||
284 (res = net_init()) ||
285 (res = tipc_cltr_init()) ||
286 (res = tipc_bclink_init())) {
287 return res;
288 }
289 tipc_subscr_stop();
290 tipc_cfg_stop();
291 tipc_k_signal((Handler)tipc_subscr_start, 0);
292 tipc_k_signal((Handler)tipc_cfg_init, 0);
293 info("Started in network mode\n");
294 info("Own node address %s, network identity %u\n",
295 addr_string_fill(addr_string, tipc_own_addr), tipc_net_id);
296 return TIPC_OK;
297}
298
299void tipc_net_stop(void)
300{
301 if (tipc_mode != TIPC_NET_MODE)
302 return;
303 write_lock_bh(&tipc_net_lock);
304 tipc_bearer_stop();
305 tipc_mode = TIPC_NODE_MODE;
306 tipc_bclink_stop();
307 net_stop();
308 write_unlock_bh(&tipc_net_lock);
309	info("Left network mode\n");
310}
311
diff --git a/net/tipc/net.h b/net/tipc/net.h
new file mode 100644
index 000000000000..f3e0b85e6475
--- /dev/null
+++ b/net/tipc/net.h
@@ -0,0 +1,64 @@
1/*
2 * net/tipc/net.h: Include file for TIPC network routing code
3 *
4 * Copyright (c) 1995-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_NET_H
38#define _TIPC_NET_H
39
40struct _zone;
41
42/**
43 * struct network - TIPC network structure
44 * @zones: array of pointers to all zones within network
45 */
46
47struct network {
48 struct _zone **zones;
49};
50
51
52extern struct network tipc_net;
53extern rwlock_t tipc_net_lock;
54
55void tipc_net_remove_as_router(u32 router);
56void tipc_net_send_external_routes(u32 dest);
57void tipc_net_route_msg(struct sk_buff *buf);
58struct node *tipc_net_select_remote_node(u32 addr, u32 ref);
59u32 tipc_net_select_router(u32 addr, u32 ref);
60
61int tipc_net_start(void);
62void tipc_net_stop(void);
63
64#endif
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
new file mode 100644
index 000000000000..eb1bb4dce7af
--- /dev/null
+++ b/net/tipc/netlink.c
@@ -0,0 +1,112 @@
1/*
2 * net/tipc/netlink.c: TIPC configuration handling
3 *
4 * Copyright (c) 2005-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "config.h"
39#include <net/genetlink.h>
40
41static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
42{
43 struct sk_buff *rep_buf;
44 struct nlmsghdr *rep_nlh;
45 struct nlmsghdr *req_nlh = info->nlhdr;
46 struct tipc_genlmsghdr *req_userhdr = info->userhdr;
47 int hdr_space = NLMSG_SPACE(GENL_HDRLEN + TIPC_GENL_HDRLEN);
48
49 if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN)))
50 rep_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_NET_ADMIN);
51 else
52 rep_buf = tipc_cfg_do_cmd(req_userhdr->dest,
53 req_userhdr->cmd,
54 NLMSG_DATA(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN,
55 NLMSG_PAYLOAD(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN),
56 hdr_space);
57
58 if (rep_buf) {
59 skb_push(rep_buf, hdr_space);
60 rep_nlh = (struct nlmsghdr *)rep_buf->data;
61 memcpy(rep_nlh, req_nlh, hdr_space);
62 rep_nlh->nlmsg_len = rep_buf->len;
63 genlmsg_unicast(rep_buf, req_nlh->nlmsg_pid);
64 }
65
66 return 0;
67}
68
69static struct genl_family family = {
70 .id = GENL_ID_GENERATE,
71 .name = TIPC_GENL_NAME,
72 .version = TIPC_GENL_VERSION,
73 .hdrsize = TIPC_GENL_HDRLEN,
74 .maxattr = 0,
75};
76
77static struct genl_ops ops = {
78 .cmd = TIPC_GENL_CMD,
79 .doit = handle_cmd,
80};
81
82static int family_registered = 0;
83
84int tipc_netlink_start(void)
85{
86
87
88 if (genl_register_family(&family))
89 goto err;
90
91 family_registered = 1;
92
93 if (genl_register_ops(&family, &ops))
94 goto err_unregister;
95
96 return 0;
97
98 err_unregister:
99 genl_unregister_family(&family);
100 family_registered = 0;
101 err:
102 err("Failed to register netlink interface\n");
103 return -EFAULT;
104}
105
106void tipc_netlink_stop(void)
107{
108 if (family_registered) {
109 genl_unregister_family(&family);
110 family_registered = 0;
111 }
112}
diff --git a/net/tipc/node.c b/net/tipc/node.c
new file mode 100644
index 000000000000..6d65010e5fa1
--- /dev/null
+++ b/net/tipc/node.c
@@ -0,0 +1,678 @@
1/*
2 * net/tipc/node.c: TIPC node management routines
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "config.h"
39#include "node.h"
40#include "cluster.h"
41#include "net.h"
42#include "addr.h"
43#include "node_subscr.h"
44#include "link.h"
45#include "port.h"
46#include "bearer.h"
47#include "name_distr.h"
48
49void node_print(struct print_buf *buf, struct node *n_ptr, char *str);
50static void node_lost_contact(struct node *n_ptr);
51static void node_established_contact(struct node *n_ptr);
52
53struct node *tipc_nodes = NULL; /* sorted list of nodes within cluster */
54
55u32 tipc_own_tag = 0;
56
57struct node *tipc_node_create(u32 addr)
58{
59 struct cluster *c_ptr;
60 struct node *n_ptr;
61 struct node **curr_node;
62
63 n_ptr = kmalloc(sizeof(*n_ptr),GFP_ATOMIC);
64 if (n_ptr != NULL) {
65 memset(n_ptr, 0, sizeof(*n_ptr));
66 n_ptr->addr = addr;
67 n_ptr->lock = SPIN_LOCK_UNLOCKED;
68 INIT_LIST_HEAD(&n_ptr->nsub);
69
70 c_ptr = tipc_cltr_find(addr);
71 if (c_ptr == NULL)
72 c_ptr = tipc_cltr_create(addr);
73 if (c_ptr != NULL) {
74 n_ptr->owner = c_ptr;
75 tipc_cltr_attach_node(c_ptr, n_ptr);
76 n_ptr->last_router = -1;
77
78 /* Insert node into ordered list */
79 for (curr_node = &tipc_nodes; *curr_node;
80 curr_node = &(*curr_node)->next) {
81 if (addr < (*curr_node)->addr) {
82 n_ptr->next = *curr_node;
83 break;
84 }
85 }
86 (*curr_node) = n_ptr;
87 } else {
88 kfree(n_ptr);
89 n_ptr = NULL;
90 }
91 }
92 return n_ptr;
93}
94
95void tipc_node_delete(struct node *n_ptr)
96{
97 if (!n_ptr)
98 return;
99
100#if 0
101 /* Not needed because links are already deleted via tipc_bearer_stop() */
102
103 u32 l_num;
104
105 for (l_num = 0; l_num < MAX_BEARERS; l_num++) {
106 link_delete(n_ptr->links[l_num]);
107 }
108#endif
109
110 dbg("node %x deleted\n", n_ptr->addr);
111 kfree(n_ptr);
112}
113
114
115/**
116 * tipc_node_link_up - handle addition of link
117 *
118 * Link becomes active (alone or shared) or standby, depending on its priority.
119 */
120
121void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr)
122{
123 struct link **active = &n_ptr->active_links[0];
124
125 info("Established link <%s> on network plane %c\n",
126 l_ptr->name, l_ptr->b_ptr->net_plane);
127
128 if (!active[0]) {
129 dbg(" link %x into %x/%x\n", l_ptr, &active[0], &active[1]);
130 active[0] = active[1] = l_ptr;
131 node_established_contact(n_ptr);
132 return;
133 }
134 if (l_ptr->priority < active[0]->priority) {
135 info("Link is standby\n");
136 return;
137 }
138 tipc_link_send_duplicate(active[0], l_ptr);
139 if (l_ptr->priority == active[0]->priority) {
140 active[0] = l_ptr;
141 return;
142 }
143 info("Link <%s> on network plane %c becomes standby\n",
144 active[0]->name, active[0]->b_ptr->net_plane);
145 active[0] = active[1] = l_ptr;
146}
147
148/**
149 * node_select_active_links - select active link
150 */
151
152static void node_select_active_links(struct node *n_ptr)
153{
154 struct link **active = &n_ptr->active_links[0];
155 u32 i;
156 u32 highest_prio = 0;
157
158 active[0] = active[1] = 0;
159
160 for (i = 0; i < MAX_BEARERS; i++) {
161 struct link *l_ptr = n_ptr->links[i];
162
163 if (!l_ptr || !tipc_link_is_up(l_ptr) ||
164 (l_ptr->priority < highest_prio))
165 continue;
166
167 if (l_ptr->priority > highest_prio) {
168 highest_prio = l_ptr->priority;
169 active[0] = active[1] = l_ptr;
170 } else {
171 active[1] = l_ptr;
172 }
173 }
174}
175
176/**
177 * tipc_node_link_down - handle loss of link
178 */
179
180void tipc_node_link_down(struct node *n_ptr, struct link *l_ptr)
181{
182 struct link **active;
183
184 if (!tipc_link_is_active(l_ptr)) {
185 info("Lost standby link <%s> on network plane %c\n",
186 l_ptr->name, l_ptr->b_ptr->net_plane);
187 return;
188 }
189 info("Lost link <%s> on network plane %c\n",
190 l_ptr->name, l_ptr->b_ptr->net_plane);
191
192 active = &n_ptr->active_links[0];
193 if (active[0] == l_ptr)
194 active[0] = active[1];
195 if (active[1] == l_ptr)
196 active[1] = active[0];
197 if (active[0] == l_ptr)
198 node_select_active_links(n_ptr);
199 if (tipc_node_is_up(n_ptr))
200 tipc_link_changeover(l_ptr);
201 else
202 node_lost_contact(n_ptr);
203}
204
205int tipc_node_has_active_links(struct node *n_ptr)
206{
207 return (n_ptr &&
208 ((n_ptr->active_links[0]) || (n_ptr->active_links[1])));
209}
210
211int tipc_node_has_redundant_links(struct node *n_ptr)
212{
213 return (tipc_node_has_active_links(n_ptr) &&
214 (n_ptr->active_links[0] != n_ptr->active_links[1]));
215}
216
217int tipc_node_has_active_routes(struct node *n_ptr)
218{
219 return (n_ptr && (n_ptr->last_router >= 0));
220}
221
222int tipc_node_is_up(struct node *n_ptr)
223{
224 return (tipc_node_has_active_links(n_ptr) || tipc_node_has_active_routes(n_ptr));
225}
226
227struct node *tipc_node_attach_link(struct link *l_ptr)
228{
229 struct node *n_ptr = tipc_node_find(l_ptr->addr);
230
231 if (!n_ptr)
232 n_ptr = tipc_node_create(l_ptr->addr);
233 if (n_ptr) {
234 u32 bearer_id = l_ptr->b_ptr->identity;
235 char addr_string[16];
236
237 assert(bearer_id < MAX_BEARERS);
238 if (n_ptr->link_cnt >= 2) {
239 char addr_string[16];
240
241 err("Attempt to create third link to %s\n",
242 addr_string_fill(addr_string, n_ptr->addr));
243 return 0;
244 }
245
246 if (!n_ptr->links[bearer_id]) {
247 n_ptr->links[bearer_id] = l_ptr;
248 tipc_net.zones[tipc_zone(l_ptr->addr)]->links++;
249 n_ptr->link_cnt++;
250 return n_ptr;
251 }
252 err("Attempt to establish second link on <%s> to <%s> \n",
253 l_ptr->b_ptr->publ.name,
254 addr_string_fill(addr_string, l_ptr->addr));
255 }
256 return 0;
257}
258
259void tipc_node_detach_link(struct node *n_ptr, struct link *l_ptr)
260{
261 n_ptr->links[l_ptr->b_ptr->identity] = 0;
262 tipc_net.zones[tipc_zone(l_ptr->addr)]->links--;
263 n_ptr->link_cnt--;
264}
265
266/*
267 * Routing table management - five cases to handle:
268 *
269 * 1: A link towards a zone/cluster external node comes up.
270 * => Send a multicast message updating routing tables of all
271 * system nodes within own cluster that the new destination
272 * can be reached via this node.
273 * (node.establishedContact()=>cluster.multicastNewRoute())
274 *
275 * 2: A link towards a slave node comes up.
276 * => Send a multicast message updating routing tables of all
277 * system nodes within own cluster that the new destination
278 * can be reached via this node.
279 * (node.establishedContact()=>cluster.multicastNewRoute())
280 * => Send a message to the slave node about existence
281 * of all system nodes within cluster:
282 * (node.establishedContact()=>cluster.sendLocalRoutes())
283 *
284 * 3: A new cluster local system node becomes available.
285 * => Send message(s) to this particular node containing
286 * information about all cluster external and slave
287 * nodes which can be reached via this node.
288 * (node.establishedContact()==>network.sendExternalRoutes())
289 * (node.establishedContact()==>network.sendSlaveRoutes())
290 * => Send messages to all directly connected slave nodes
291 * containing information about the existence of the new node
292 * (node.establishedContact()=>cluster.multicastNewRoute())
293 *
294 * 4: The link towards a zone/cluster external node or slave
295 * node goes down.
296 * => Send a multicast message updating routing tables of all
297 * nodes within cluster that the destination can no longer
298 * be reached via this node.
299 * (node.lostAllLinks()=>cluster.bcastLostRoute())
300 *
301 * 5: A cluster local system node becomes unavailable.
302 * => Remove all references to this node from the local
303 * routing tables. Note: This is a completely node
304 * local operation.
305 * (node.lostAllLinks()=>network.removeAsRouter())
306 * => Send messages to all directly connected slave nodes
307 * containing information about loss of the node
308 * (node.establishedContact()=>cluster.multicastLostRoute())
309 *
310 */
311
312static void node_established_contact(struct node *n_ptr)
313{
314 struct cluster *c_ptr;
315
316 dbg("node_established_contact:-> %x\n", n_ptr->addr);
317 if (!tipc_node_has_active_routes(n_ptr)) {
318 tipc_k_signal((Handler)tipc_named_node_up, n_ptr->addr);
319 }
320
321	/* Synchronize broadcast acks */
322 n_ptr->bclink.acked = tipc_bclink_get_last_sent();
323
324 if (is_slave(tipc_own_addr))
325 return;
326 if (!in_own_cluster(n_ptr->addr)) {
327 /* Usage case 1 (see above) */
328 c_ptr = tipc_cltr_find(tipc_own_addr);
329 if (!c_ptr)
330 c_ptr = tipc_cltr_create(tipc_own_addr);
331 if (c_ptr)
332 tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, 1,
333 tipc_max_nodes);
334 return;
335 }
336
337 c_ptr = n_ptr->owner;
338 if (is_slave(n_ptr->addr)) {
339 /* Usage case 2 (see above) */
340 tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, 1, tipc_max_nodes);
341 tipc_cltr_send_local_routes(c_ptr, n_ptr->addr);
342 return;
343 }
344
345 if (n_ptr->bclink.supported) {
346 tipc_nmap_add(&tipc_cltr_bcast_nodes, n_ptr->addr);
347 if (n_ptr->addr < tipc_own_addr)
348 tipc_own_tag++;
349 }
350
351 /* Case 3 (see above) */
352 tipc_net_send_external_routes(n_ptr->addr);
353 tipc_cltr_send_slave_routes(c_ptr, n_ptr->addr);
354 tipc_cltr_bcast_new_route(c_ptr, n_ptr->addr, LOWEST_SLAVE,
355 tipc_highest_allowed_slave);
356}
357
358static void node_lost_contact(struct node *n_ptr)
359{
360 struct cluster *c_ptr;
361 struct node_subscr *ns, *tns;
362 char addr_string[16];
363 u32 i;
364
365 /* Clean up broadcast reception remains */
366 n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
367 while (n_ptr->bclink.deferred_head) {
368 struct sk_buff* buf = n_ptr->bclink.deferred_head;
369 n_ptr->bclink.deferred_head = buf->next;
370 buf_discard(buf);
371 }
372 if (n_ptr->bclink.defragm) {
373 buf_discard(n_ptr->bclink.defragm);
374 n_ptr->bclink.defragm = NULL;
375 }
376 if (in_own_cluster(n_ptr->addr) && n_ptr->bclink.supported) {
377 tipc_bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000));
378 }
379
380 /* Update routing tables */
381 if (is_slave(tipc_own_addr)) {
382 tipc_net_remove_as_router(n_ptr->addr);
383 } else {
384 if (!in_own_cluster(n_ptr->addr)) {
385 /* Case 4 (see above) */
386 c_ptr = tipc_cltr_find(tipc_own_addr);
387 tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr, 1,
388 tipc_max_nodes);
389 } else {
390 /* Case 5 (see above) */
391 c_ptr = tipc_cltr_find(n_ptr->addr);
392 if (is_slave(n_ptr->addr)) {
393 tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr, 1,
394 tipc_max_nodes);
395 } else {
396 if (n_ptr->bclink.supported) {
397 tipc_nmap_remove(&tipc_cltr_bcast_nodes,
398 n_ptr->addr);
399 if (n_ptr->addr < tipc_own_addr)
400 tipc_own_tag--;
401 }
402 tipc_net_remove_as_router(n_ptr->addr);
403 tipc_cltr_bcast_lost_route(c_ptr, n_ptr->addr,
404 LOWEST_SLAVE,
405 tipc_highest_allowed_slave);
406 }
407 }
408 }
409 if (tipc_node_has_active_routes(n_ptr))
410 return;
411
412 info("Lost contact with %s\n",
413 addr_string_fill(addr_string, n_ptr->addr));
414
415 /* Abort link changeover */
416 for (i = 0; i < MAX_BEARERS; i++) {
417 struct link *l_ptr = n_ptr->links[i];
418 if (!l_ptr)
419 continue;
420 l_ptr->reset_checkpoint = l_ptr->next_in_no;
421 l_ptr->exp_msg_count = 0;
422 tipc_link_reset_fragments(l_ptr);
423 }
424
425 /* Notify subscribers */
426 list_for_each_entry_safe(ns, tns, &n_ptr->nsub, nodesub_list) {
427 ns->node = 0;
428 list_del_init(&ns->nodesub_list);
429 tipc_k_signal((Handler)ns->handle_node_down,
430 (unsigned long)ns->usr_handle);
431 }
432}
433
434/**
435 * tipc_node_select_next_hop - find the next-hop node for a message
436 *
437 * Called when cluster local lookup has failed.
438 */
439
440struct node *tipc_node_select_next_hop(u32 addr, u32 selector)
441{
442 struct node *n_ptr;
443 u32 router_addr;
444
445 if (!tipc_addr_domain_valid(addr))
446 return 0;
447
448	/* Look for direct link to destination processor */
449 n_ptr = tipc_node_find(addr);
450 if (n_ptr && tipc_node_has_active_links(n_ptr))
451 return n_ptr;
452
453 /* Cluster local system nodes *must* have direct links */
454 if (!is_slave(addr) && in_own_cluster(addr))
455 return 0;
456
457 /* Look for cluster local router with direct link to node */
458 router_addr = tipc_node_select_router(n_ptr, selector);
459 if (router_addr)
460 return tipc_node_select(router_addr, selector);
461
462 /* Slave nodes can only be accessed within own cluster via a
463	   known router with direct link -- if no router was found, give up */
464 if (is_slave(addr))
465 return 0;
466
467 /* Inter zone/cluster -- find any direct link to remote cluster */
468 addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
469 n_ptr = tipc_net_select_remote_node(addr, selector);
470 if (n_ptr && tipc_node_has_active_links(n_ptr))
471 return n_ptr;
472
473 /* Last resort -- look for any router to anywhere in remote zone */
474 router_addr = tipc_net_select_router(addr, selector);
475 if (router_addr)
476 return tipc_node_select(router_addr, selector);
477
478 return 0;
479}
480
481/**
482 * tipc_node_select_router - select router to reach specified node
483 *
484 * Uses a deterministic and fair algorithm for selecting router node.
485 */
486
487u32 tipc_node_select_router(struct node *n_ptr, u32 ref)
488{
489 u32 ulim;
490 u32 mask;
491 u32 start;
492 u32 r;
493
494 if (!n_ptr)
495 return 0;
496
497 if (n_ptr->last_router < 0)
498 return 0;
499 ulim = ((n_ptr->last_router + 1) * 32) - 1;
500
501 /* Start entry must be random */
502 mask = tipc_max_nodes;
503 while (mask > ulim)
504 mask >>= 1;
505 start = ref & mask;
506 r = start;
507
508 /* Lookup upwards with wrap-around */
509 do {
510 if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1)
511 break;
512 } while (++r <= ulim);
513 if (r > ulim) {
514 r = 1;
515 do {
516 if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1)
517 break;
518 } while (++r < start);
519 assert(r != start);
520 }
521 assert(r && (r <= ulim));
522 return tipc_addr(own_zone(), own_cluster(), r);
523}
524
525void tipc_node_add_router(struct node *n_ptr, u32 router)
526{
527 u32 r_num = tipc_node(router);
528
529 n_ptr->routers[r_num / 32] =
530 ((1 << (r_num % 32)) | n_ptr->routers[r_num / 32]);
531 n_ptr->last_router = tipc_max_nodes / 32;
532 while ((--n_ptr->last_router >= 0) &&
533 !n_ptr->routers[n_ptr->last_router]);
534}
535
536void tipc_node_remove_router(struct node *n_ptr, u32 router)
537{
538 u32 r_num = tipc_node(router);
539
540 if (n_ptr->last_router < 0)
541 return; /* No routes */
542
543 n_ptr->routers[r_num / 32] =
544 ((~(1 << (r_num % 32))) & (n_ptr->routers[r_num / 32]));
545 n_ptr->last_router = tipc_max_nodes / 32;
546 while ((--n_ptr->last_router >= 0) &&
547 !n_ptr->routers[n_ptr->last_router]);
548
549 if (!tipc_node_is_up(n_ptr))
550 node_lost_contact(n_ptr);
551}
552
553#if 0
554void node_print(struct print_buf *buf, struct node *n_ptr, char *str)
555{
556 u32 i;
557
558 tipc_printf(buf, "\n\n%s", str);
559 for (i = 0; i < MAX_BEARERS; i++) {
560 if (!n_ptr->links[i])
561 continue;
562 tipc_printf(buf, "Links[%u]: %x, ", i, n_ptr->links[i]);
563 }
564 tipc_printf(buf, "Active links: [%x,%x]\n",
565 n_ptr->active_links[0], n_ptr->active_links[1]);
566}
567#endif
568
569u32 tipc_available_nodes(const u32 domain)
570{
571 struct node *n_ptr;
572 u32 cnt = 0;
573
574 for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
575 if (!in_scope(domain, n_ptr->addr))
576 continue;
577 if (tipc_node_is_up(n_ptr))
578 cnt++;
579 }
580 return cnt;
581}
582
583struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space)
584{
585 u32 domain;
586 struct sk_buff *buf;
587 struct node *n_ptr;
588 struct tipc_node_info node_info;
589
590 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
591 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
592
593 domain = *(u32 *)TLV_DATA(req_tlv_area);
594 domain = ntohl(domain);
595 if (!tipc_addr_domain_valid(domain))
596 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
597 " (network address)");
598
599 if (!tipc_nodes)
600 return tipc_cfg_reply_none();
601
602 /* For now, get space for all other nodes
603	   (will need to modify this when slave nodes are supported) */
604
605 buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(node_info)) *
606 (tipc_max_nodes - 1));
607 if (!buf)
608 return NULL;
609
610 /* Add TLVs for all nodes in scope */
611
612 for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
613 if (!in_scope(domain, n_ptr->addr))
614 continue;
615 node_info.addr = htonl(n_ptr->addr);
616 node_info.up = htonl(tipc_node_is_up(n_ptr));
617 tipc_cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
618 &node_info, sizeof(node_info));
619 }
620
621 return buf;
622}
623
624struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space)
625{
626 u32 domain;
627 struct sk_buff *buf;
628 struct node *n_ptr;
629 struct tipc_link_info link_info;
630
631 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
632 return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
633
634 domain = *(u32 *)TLV_DATA(req_tlv_area);
635 domain = ntohl(domain);
636 if (!tipc_addr_domain_valid(domain))
637 return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
638 " (network address)");
639
640 if (!tipc_nodes)
641 return tipc_cfg_reply_none();
642
643 /* For now, get space for 2 links to all other nodes + bcast link
644	   (will need to modify this when slave nodes are supported) */
645
646 buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(link_info)) *
647 (2 * (tipc_max_nodes - 1) + 1));
648 if (!buf)
649 return NULL;
650
651 /* Add TLV for broadcast link */
652
653 link_info.dest = tipc_own_addr & 0xfffff00;
654 link_info.dest = htonl(link_info.dest);
655 link_info.up = htonl(1);
656	strcpy(link_info.str, tipc_bclink_name);
657 tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info));
658
659 /* Add TLVs for any other links in scope */
660
661 for (n_ptr = tipc_nodes; n_ptr; n_ptr = n_ptr->next) {
662 u32 i;
663
664 if (!in_scope(domain, n_ptr->addr))
665 continue;
666 for (i = 0; i < MAX_BEARERS; i++) {
667 if (!n_ptr->links[i])
668 continue;
669 link_info.dest = htonl(n_ptr->addr);
670 link_info.up = htonl(tipc_link_is_up(n_ptr->links[i]));
671 strcpy(link_info.str, n_ptr->links[i]->name);
672 tipc_cfg_append_tlv(buf, TIPC_TLV_LINK_INFO,
673 &link_info, sizeof(link_info));
674 }
675 }
676
677 return buf;
678}
diff --git a/net/tipc/node.h b/net/tipc/node.h
new file mode 100644
index 000000000000..29f7ae6992d4
--- /dev/null
+++ b/net/tipc/node.h
@@ -0,0 +1,144 @@
1/*
2 * net/tipc/node.h: Include file for TIPC node management routines
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_NODE_H
38#define _TIPC_NODE_H
39
40#include "node_subscr.h"
41#include "addr.h"
42#include "cluster.h"
43#include "bearer.h"
44
45/**
46 * struct node - TIPC node structure
47 * @addr: network address of node
48 * @lock: spinlock governing access to structure
49 * @owner: pointer to cluster that node belongs to
50 * @next: pointer to next node in sorted list of cluster's nodes
51 * @nsub: list of "node down" subscriptions monitoring node
52 * @active_links: pointers to active links to node
53 * @links: pointers to all links to node
54 * @link_cnt: number of links to node
55 * @permit_changeover: non-zero if node has redundant links to this system
56 * @routers: bitmap of cluster local routers towards this node (multicluster use; see the sketch after the struct)
57 * @last_router: index of last non-zero word in @routers, or -1 (multicluster use)
58 * @bclink: broadcast-related info
59 * @supported: non-zero if node supports TIPC b'cast capability
60 * @acked: sequence # of last outbound b'cast message acknowledged by node
61 * @last_in: sequence # of last in-sequence b'cast message received from node
62 * @gap_after: sequence # of last message not requiring a NAK request
63 * @gap_to: sequence # of last message requiring a NAK request
64 * @nack_sync: counter that determines when NAK requests should be sent
65 * @deferred_head: oldest OOS b'cast message received from node
66 * @deferred_tail: newest OOS b'cast message received from node
67 * @defragm: list of partially reassembled b'cast message fragments from node
68 */
69
70struct node {
71 u32 addr;
72 spinlock_t lock;
73 struct cluster *owner;
74 struct node *next;
75 struct list_head nsub;
76 struct link *active_links[2];
77 struct link *links[MAX_BEARERS];
78 int link_cnt;
79 int permit_changeover;
80 u32 routers[512/32];
81 int last_router;
82 struct {
83 int supported;
84 u32 acked;
85 u32 last_in;
86 u32 gap_after;
87 u32 gap_to;
88 u32 nack_sync;
89 struct sk_buff *deferred_head;
90 struct sk_buff *deferred_tail;
91 struct sk_buff *defragm;
92 } bclink;
93};
94
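/*
 * Sketch of how the routers bitmap above is interpreted (this mirrors
 * tipc_node_add_router() and tipc_node_select_router() in node.c, and is
 * not a new interface): cluster local node r can act as a router towards
 * this node iff
 *
 *	(n_ptr->routers[r / 32] >> (r % 32)) & 1
 *
 * is non-zero, while last_router holds the index of the highest 32-bit
 * word with any bit set (or -1 when no routes exist).
 */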
95extern struct node *tipc_nodes;
96extern u32 tipc_own_tag;
97
98struct node *tipc_node_create(u32 addr);
99void tipc_node_delete(struct node *n_ptr);
100struct node *tipc_node_attach_link(struct link *l_ptr);
101void tipc_node_detach_link(struct node *n_ptr, struct link *l_ptr);
102void tipc_node_link_down(struct node *n_ptr, struct link *l_ptr);
103void tipc_node_link_up(struct node *n_ptr, struct link *l_ptr);
104int tipc_node_has_active_links(struct node *n_ptr);
105int tipc_node_has_redundant_links(struct node *n_ptr);
106u32 tipc_node_select_router(struct node *n_ptr, u32 ref);
107struct node *tipc_node_select_next_hop(u32 addr, u32 selector);
108int tipc_node_is_up(struct node *n_ptr);
109void tipc_node_add_router(struct node *n_ptr, u32 router);
110void tipc_node_remove_router(struct node *n_ptr, u32 router);
111struct sk_buff *tipc_node_get_links(const void *req_tlv_area, int req_tlv_space);
112struct sk_buff *tipc_node_get_nodes(const void *req_tlv_area, int req_tlv_space);
113
114static inline struct node *tipc_node_find(u32 addr)
115{
116 if (likely(in_own_cluster(addr)))
117 return tipc_local_nodes[tipc_node(addr)];
118 else if (tipc_addr_domain_valid(addr)) {
119 struct cluster *c_ptr = tipc_cltr_find(addr);
120
121 if (c_ptr)
122 return c_ptr->nodes[tipc_node(addr)];
123 }
124 return 0;
125}
126
127static inline struct node *tipc_node_select(u32 addr, u32 selector)
128{
129 if (likely(in_own_cluster(addr)))
130 return tipc_local_nodes[tipc_node(addr)];
131 return tipc_node_select_next_hop(addr, selector);
132}
133
134static inline void tipc_node_lock(struct node *n_ptr)
135{
136 spin_lock_bh(&n_ptr->lock);
137}
138
139static inline void tipc_node_unlock(struct node *n_ptr)
140{
141 spin_unlock_bh(&n_ptr->lock);
142}
143
144#endif
diff --git a/net/tipc/node_subscr.c b/net/tipc/node_subscr.c
new file mode 100644
index 000000000000..afeea121d8be
--- /dev/null
+++ b/net/tipc/node_subscr.c
@@ -0,0 +1,79 @@
1/*
2 * net/tipc/node_subscr.c: TIPC "node down" subscription handling
3 *
4 * Copyright (c) 1995-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "dbg.h"
39#include "node_subscr.h"
40#include "node.h"
41#include "addr.h"
42
43/**
44 * tipc_nodesub_subscribe - create "node down" subscription for specified node
45 */
46
47void tipc_nodesub_subscribe(struct node_subscr *node_sub, u32 addr,
48 void *usr_handle, net_ev_handler handle_down)
49{
50 node_sub->node = 0;
51 if (addr == tipc_own_addr)
52 return;
53 if (!tipc_addr_node_valid(addr)) {
54 warn("node_subscr with illegal %x\n", addr);
55 return;
56 }
57
58 node_sub->handle_node_down = handle_down;
59 node_sub->usr_handle = usr_handle;
60 node_sub->node = tipc_node_find(addr);
61 assert(node_sub->node);
62 tipc_node_lock(node_sub->node);
63 list_add_tail(&node_sub->nodesub_list, &node_sub->node->nsub);
64 tipc_node_unlock(node_sub->node);
65}
66
67/**
68 * tipc_nodesub_unsubscribe - cancel "node down" subscription (if any)
69 */
70
71void tipc_nodesub_unsubscribe(struct node_subscr *node_sub)
72{
73 if (!node_sub->node)
74 return;
75
76 tipc_node_lock(node_sub->node);
77 list_del_init(&node_sub->nodesub_list);
78 tipc_node_unlock(node_sub->node);
79}
diff --git a/net/tipc/node_subscr.h b/net/tipc/node_subscr.h
new file mode 100644
index 000000000000..01751c4fbb43
--- /dev/null
+++ b/net/tipc/node_subscr.h
@@ -0,0 +1,63 @@
1/*
2 * net/tipc/node_subscr.h: Include file for TIPC "node down" subscription handling
3 *
4 * Copyright (c) 1995-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_NODE_SUBSCR_H
38#define _TIPC_NODE_SUBSCR_H
39
40#include "addr.h"
41
42typedef void (*net_ev_handler) (void *usr_handle);
43
44/**
45 * struct node_subscr - "node down" subscription entry
46 * @node: ptr to node structure of interest (or NULL, if none)
47 * @handle_node_down: routine to invoke when node fails
48 * @usr_handle: argument to pass to routine when node fails
49 * @nodesub_list: adjacent entries in list of subscriptions for the node
50 */
51
52struct node_subscr {
53 struct node *node;
54 net_ev_handler handle_node_down;
55 void *usr_handle;
56 struct list_head nodesub_list;
57};
58
59void tipc_nodesub_subscribe(struct node_subscr *node_sub, u32 addr,
60 void *usr_handle, net_ev_handler handle_down);
61void tipc_nodesub_unsubscribe(struct node_subscr *node_sub);
62
63#endif
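Taken together, node_subscr.c and node_subscr.h form a small callback registry: a subscriber embeds a struct node_subscr, registers a handler against a peer node address, and is called back when that node is declared down (the port code later in this patch uses exactly this to abort connections). A minimal sketch under those assumptions; my_conn and its functions are hypothetical, not part of the patch:

#include "node_subscr.h"

/* Hypothetical connection object that wants to learn about peer failure */
struct my_conn {
	struct node_subscr nsub;	/* embedded "node down" subscription */
	u32 peer_node;			/* TIPC address of the peer's node */
};

/* Runs when the subscribed node is declared down */
static void my_conn_node_down(void *usr_handle)
{
	struct my_conn *conn = usr_handle;

	conn->peer_node = 0;	/* placeholder: mark the peer as gone */
}

static void my_conn_watch_peer(struct my_conn *conn)
{
	tipc_nodesub_subscribe(&conn->nsub, conn->peer_node,
			       conn, my_conn_node_down);
}

static void my_conn_unwatch_peer(struct my_conn *conn)
{
	tipc_nodesub_unsubscribe(&conn->nsub);
}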
diff --git a/net/tipc/port.c b/net/tipc/port.c
new file mode 100644
index 000000000000..72aae52bfec1
--- /dev/null
+++ b/net/tipc/port.c
@@ -0,0 +1,1708 @@
1/*
2 * net/tipc/port.c: TIPC port code
3 *
4 * Copyright (c) 1992-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "config.h"
39#include "dbg.h"
40#include "port.h"
41#include "addr.h"
42#include "link.h"
43#include "node.h"
44#include "port.h"
45#include "name_table.h"
46#include "user_reg.h"
47#include "msg.h"
48#include "bcast.h"
49
50/* Connection management: */
51#define PROBING_INTERVAL 3600000 /* [ms] => 1 h */
52#define CONFIRMED 0
53#define PROBING 1
54
55#define MAX_REJECT_SIZE 1024
56
57static struct sk_buff *msg_queue_head = 0;
58static struct sk_buff *msg_queue_tail = 0;
59
60spinlock_t tipc_port_list_lock = SPIN_LOCK_UNLOCKED;
61static spinlock_t queue_lock = SPIN_LOCK_UNLOCKED;
62
63static LIST_HEAD(ports);
64static void port_handle_node_down(unsigned long ref);
65static struct sk_buff* port_build_self_abort_msg(struct port *,u32 err);
66static struct sk_buff* port_build_peer_abort_msg(struct port *,u32 err);
67static void port_timeout(unsigned long ref);
68
69
70static inline u32 port_peernode(struct port *p_ptr)
71{
72 return msg_destnode(&p_ptr->publ.phdr);
73}
74
75static inline u32 port_peerport(struct port *p_ptr)
76{
77 return msg_destport(&p_ptr->publ.phdr);
78}
79
80static inline u32 port_out_seqno(struct port *p_ptr)
81{
82 return msg_transp_seqno(&p_ptr->publ.phdr);
83}
84
85static inline void port_set_out_seqno(struct port *p_ptr, u32 seqno)
86{
87 msg_set_transp_seqno(&p_ptr->publ.phdr,seqno);
88}
89
90static inline void port_incr_out_seqno(struct port *p_ptr)
91{
92 struct tipc_msg *m = &p_ptr->publ.phdr;
93
94 if (likely(!msg_routed(m)))
95 return;
96 msg_set_transp_seqno(m, (msg_transp_seqno(m) + 1));
97}
98
99/**
100 * tipc_multicast - send a multicast message to local and remote destinations
101 */
102
103int tipc_multicast(u32 ref, struct tipc_name_seq const *seq, u32 domain,
104 u32 num_sect, struct iovec const *msg_sect)
105{
106 struct tipc_msg *hdr;
107 struct sk_buff *buf;
108 struct sk_buff *ibuf = NULL;
109 struct port_list dports = {0, NULL, };
110 struct port *oport = tipc_port_deref(ref);
111 int ext_targets;
112 int res;
113
114 if (unlikely(!oport))
115 return -EINVAL;
116
117 /* Create multicast message */
118
119 hdr = &oport->publ.phdr;
120 msg_set_type(hdr, TIPC_MCAST_MSG);
121 msg_set_nametype(hdr, seq->type);
122 msg_set_namelower(hdr, seq->lower);
123 msg_set_nameupper(hdr, seq->upper);
124 msg_set_hdr_sz(hdr, MCAST_H_SIZE);
125 res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE,
126 !oport->user_port, &buf);
127 if (unlikely(!buf))
128 return res;
129
130 /* Figure out where to send multicast message */
131
132 ext_targets = tipc_nametbl_mc_translate(seq->type, seq->lower, seq->upper,
133 TIPC_NODE_SCOPE, &dports);
134
135 /* Send message to destinations (duplicate it only if necessary) */
136
137 if (ext_targets) {
138 if (dports.count != 0) {
139 ibuf = skb_copy(buf, GFP_ATOMIC);
140 if (ibuf == NULL) {
141 tipc_port_list_free(&dports);
142 buf_discard(buf);
143 return -ENOMEM;
144 }
145 }
146 res = tipc_bclink_send_msg(buf);
147 if ((res < 0) && (dports.count != 0)) {
148 buf_discard(ibuf);
149 }
150 } else {
151 ibuf = buf;
152 }
153
154 if (res >= 0) {
155 if (ibuf)
156 tipc_port_recv_mcast(ibuf, &dports);
157 } else {
158 tipc_port_list_free(&dports);
159 }
160 return res;
161}
162
163/**
164 * tipc_port_recv_mcast - deliver multicast message to all destination ports
165 *
166 * If there is no port list, perform a lookup to create one
167 */
168
169void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp)
170{
171 struct tipc_msg* msg;
172 struct port_list dports = {0, NULL, };
173 struct port_list *item = dp;
174 int cnt = 0;
175
176 assert(buf);
177 msg = buf_msg(buf);
178
179 /* Create destination port list, if one wasn't supplied */
180
181 if (dp == NULL) {
182 tipc_nametbl_mc_translate(msg_nametype(msg),
183 msg_namelower(msg),
184 msg_nameupper(msg),
185 TIPC_CLUSTER_SCOPE,
186 &dports);
187 item = dp = &dports;
188 }
189
190 /* Deliver a copy of message to each destination port */
191
192 if (dp->count != 0) {
193 if (dp->count == 1) {
194 msg_set_destport(msg, dp->ports[0]);
195 tipc_port_recv_msg(buf);
196 tipc_port_list_free(dp);
197 return;
198 }
199 for (; cnt < dp->count; cnt++) {
200 int index = cnt % PLSIZE;
201 struct sk_buff *b = skb_clone(buf, GFP_ATOMIC);
202
203 if (b == NULL) {
204 warn("Buffer allocation failure\n");
205 msg_dbg(msg, "LOST:");
206 goto exit;
207 }
208 if ((index == 0) && (cnt != 0)) {
209 item = item->next;
210 }
211 msg_set_destport(buf_msg(b),item->ports[index]);
212 tipc_port_recv_msg(b);
213 }
214 }
215exit:
216 buf_discard(buf);
217 tipc_port_list_free(dp);
218}
219
220/**
221 * tipc_createport_raw - create a native TIPC port
222 *
223 * Returns local port reference
224 */
225
226u32 tipc_createport_raw(void *usr_handle,
227 u32 (*dispatcher)(struct tipc_port *, struct sk_buff *),
228 void (*wakeup)(struct tipc_port *),
229 const u32 importance)
230{
231 struct port *p_ptr;
232 struct tipc_msg *msg;
233 u32 ref;
234
235 p_ptr = kmalloc(sizeof(*p_ptr), GFP_ATOMIC);
236 if (p_ptr == NULL) {
237 warn("Memory squeeze; failed to create port\n");
238 return 0;
239 }
240 memset(p_ptr, 0, sizeof(*p_ptr));
241 ref = tipc_ref_acquire(p_ptr, &p_ptr->publ.lock);
242 if (!ref) {
243 warn("Reference Table Exhausted\n");
244 kfree(p_ptr);
245 return 0;
246 }
247
248 tipc_port_lock(ref);
249 p_ptr->publ.ref = ref;
250 msg = &p_ptr->publ.phdr;
251 msg_init(msg, DATA_LOW, TIPC_NAMED_MSG, TIPC_OK, LONG_H_SIZE, 0);
252 msg_set_orignode(msg, tipc_own_addr);
253 msg_set_prevnode(msg, tipc_own_addr);
254 msg_set_origport(msg, ref);
255 msg_set_importance(msg,importance);
256 p_ptr->last_in_seqno = 41;
257 p_ptr->sent = 1;
258 p_ptr->publ.usr_handle = usr_handle;
259 INIT_LIST_HEAD(&p_ptr->wait_list);
260 INIT_LIST_HEAD(&p_ptr->subscription.nodesub_list);
261 p_ptr->congested_link = 0;
262 p_ptr->max_pkt = MAX_PKT_DEFAULT;
263 p_ptr->dispatcher = dispatcher;
264 p_ptr->wakeup = wakeup;
265 p_ptr->user_port = 0;
266 k_init_timer(&p_ptr->timer, (Handler)port_timeout, ref);
267 spin_lock_bh(&tipc_port_list_lock);
268 INIT_LIST_HEAD(&p_ptr->publications);
269 INIT_LIST_HEAD(&p_ptr->port_list);
270 list_add_tail(&p_ptr->port_list, &ports);
271 spin_unlock_bh(&tipc_port_list_lock);
272 tipc_port_unlock(p_ptr);
273 return ref;
274}
275
276int tipc_deleteport(u32 ref)
277{
278 struct port *p_ptr;
279 struct sk_buff *buf = 0;
280
281 tipc_withdraw(ref, 0, 0);
282 p_ptr = tipc_port_lock(ref);
283 if (!p_ptr)
284 return -EINVAL;
285
286 tipc_ref_discard(ref);
287 tipc_port_unlock(p_ptr);
288
289 k_cancel_timer(&p_ptr->timer);
290 if (p_ptr->publ.connected) {
291 buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
292 tipc_nodesub_unsubscribe(&p_ptr->subscription);
293 }
294 if (p_ptr->user_port) {
295 tipc_reg_remove_port(p_ptr->user_port);
296 kfree(p_ptr->user_port);
297 }
298
299 spin_lock_bh(&tipc_port_list_lock);
300 list_del(&p_ptr->port_list);
301 list_del(&p_ptr->wait_list);
302 spin_unlock_bh(&tipc_port_list_lock);
303 k_term_timer(&p_ptr->timer);
304 kfree(p_ptr);
305 dbg("Deleted port %u\n", ref);
306 tipc_net_route_msg(buf);
307 return TIPC_OK;
308}
309
310/**
311 * tipc_get_port() - return port associated with 'ref'
312 *
313 * Note: Port is not locked.
314 */
315
316struct tipc_port *tipc_get_port(const u32 ref)
317{
318 return (struct tipc_port *)tipc_ref_deref(ref);
319}
320
321/**
322 * tipc_get_handle - return user handle associated with port 'ref'
323 */
324
325void *tipc_get_handle(const u32 ref)
326{
327 struct port *p_ptr;
328 void * handle;
329
330 p_ptr = tipc_port_lock(ref);
331 if (!p_ptr)
332 return 0;
333 handle = p_ptr->publ.usr_handle;
334 tipc_port_unlock(p_ptr);
335 return handle;
336}
337
338static inline int port_unreliable(struct port *p_ptr)
339{
340 return msg_src_droppable(&p_ptr->publ.phdr);
341}
342
343int tipc_portunreliable(u32 ref, unsigned int *isunreliable)
344{
345 struct port *p_ptr;
346
347 p_ptr = tipc_port_lock(ref);
348 if (!p_ptr)
349 return -EINVAL;
350 *isunreliable = port_unreliable(p_ptr);
351 spin_unlock_bh(p_ptr->publ.lock);
352 return TIPC_OK;
353}
354
355int tipc_set_portunreliable(u32 ref, unsigned int isunreliable)
356{
357 struct port *p_ptr;
358
359 p_ptr = tipc_port_lock(ref);
360 if (!p_ptr)
361 return -EINVAL;
362 msg_set_src_droppable(&p_ptr->publ.phdr, (isunreliable != 0));
363 tipc_port_unlock(p_ptr);
364 return TIPC_OK;
365}
366
367static inline int port_unreturnable(struct port *p_ptr)
368{
369 return msg_dest_droppable(&p_ptr->publ.phdr);
370}
371
372int tipc_portunreturnable(u32 ref, unsigned int *isunrejectable)
373{
374 struct port *p_ptr;
375
376 p_ptr = tipc_port_lock(ref);
377 if (!p_ptr)
378 return -EINVAL;
379 *isunrejectable = port_unreturnable(p_ptr);
380 spin_unlock_bh(p_ptr->publ.lock);
381 return TIPC_OK;
382}
383
384int tipc_set_portunreturnable(u32 ref, unsigned int isunrejectable)
385{
386 struct port *p_ptr;
387
388 p_ptr = tipc_port_lock(ref);
389 if (!p_ptr)
390 return -EINVAL;
391 msg_set_dest_droppable(&p_ptr->publ.phdr, (isunrejectable != 0));
392 tipc_port_unlock(p_ptr);
393 return TIPC_OK;
394}
395
396/*
397 * port_build_proto_msg(): build a port-level protocol
398 * or connection abort message. Called with the
399 * tipc_port lock held.
400 */
401static struct sk_buff *port_build_proto_msg(u32 destport, u32 destnode,
402 u32 origport, u32 orignode,
403 u32 usr, u32 type, u32 err,
404 u32 seqno, u32 ack)
405{
406 struct sk_buff *buf;
407 struct tipc_msg *msg;
408
409 buf = buf_acquire(LONG_H_SIZE);
410 if (buf) {
411 msg = buf_msg(buf);
412 msg_init(msg, usr, type, err, LONG_H_SIZE, destnode);
413 msg_set_destport(msg, destport);
414 msg_set_origport(msg, origport);
415 msg_set_destnode(msg, destnode);
416 msg_set_orignode(msg, orignode);
417 msg_set_transp_seqno(msg, seqno);
418 msg_set_msgcnt(msg, ack);
419 msg_dbg(msg, "PORT>SEND>:");
420 }
421 return buf;
422}
423
424int tipc_set_msg_option(struct tipc_port *tp_ptr, const char *opt, const u32 sz)
425{
426 msg_expand(&tp_ptr->phdr, msg_destnode(&tp_ptr->phdr));
427 msg_set_options(&tp_ptr->phdr, opt, sz);
428 return TIPC_OK;
429}
430
431int tipc_reject_msg(struct sk_buff *buf, u32 err)
432{
433 struct tipc_msg *msg = buf_msg(buf);
434 struct sk_buff *rbuf;
435 struct tipc_msg *rmsg;
436 int hdr_sz;
437 u32 imp = msg_importance(msg);
438 u32 data_sz = msg_data_sz(msg);
439
440 if (data_sz > MAX_REJECT_SIZE)
441 data_sz = MAX_REJECT_SIZE;
442 if (msg_connected(msg) && (imp < TIPC_CRITICAL_IMPORTANCE))
443 imp++;
444 msg_dbg(msg, "port->rej: ");
445
446 /* discard rejected message if it shouldn't be returned to sender */
447 if (msg_errcode(msg) || msg_dest_droppable(msg)) {
448 buf_discard(buf);
449 return data_sz;
450 }
451
452 /* construct rejected message */
453 if (msg_mcast(msg))
454 hdr_sz = MCAST_H_SIZE;
455 else
456 hdr_sz = LONG_H_SIZE;
457 rbuf = buf_acquire(data_sz + hdr_sz);
458 if (rbuf == NULL) {
459 buf_discard(buf);
460 return data_sz;
461 }
462 rmsg = buf_msg(rbuf);
463 msg_init(rmsg, imp, msg_type(msg), err, hdr_sz, msg_orignode(msg));
464 msg_set_destport(rmsg, msg_origport(msg));
465 msg_set_prevnode(rmsg, tipc_own_addr);
466 msg_set_origport(rmsg, msg_destport(msg));
467 if (msg_short(msg))
468 msg_set_orignode(rmsg, tipc_own_addr);
469 else
470 msg_set_orignode(rmsg, msg_destnode(msg));
471 msg_set_size(rmsg, data_sz + hdr_sz);
472 msg_set_nametype(rmsg, msg_nametype(msg));
473 msg_set_nameinst(rmsg, msg_nameinst(msg));
474 memcpy(rbuf->data + hdr_sz, msg_data(msg), data_sz);
475
476 /* send self-abort message when rejecting on a connected port */
477 if (msg_connected(msg)) {
478 struct sk_buff *abuf = 0;
479 struct port *p_ptr = tipc_port_lock(msg_destport(msg));
480
481 if (p_ptr) {
482 if (p_ptr->publ.connected)
483 abuf = port_build_self_abort_msg(p_ptr, err);
484 tipc_port_unlock(p_ptr);
485 }
486 tipc_net_route_msg(abuf);
487 }
488
489 /* send rejected message */
490 buf_discard(buf);
491 tipc_net_route_msg(rbuf);
492 return data_sz;
493}
494
495int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
496 struct iovec const *msg_sect, u32 num_sect,
497 int err)
498{
499 struct sk_buff *buf;
500 int res;
501
502 res = msg_build(hdr, msg_sect, num_sect, MAX_MSG_SIZE,
503 !p_ptr->user_port, &buf);
504 if (!buf)
505 return res;
506
507 return tipc_reject_msg(buf, err);
508}
509
510static void port_timeout(unsigned long ref)
511{
512 struct port *p_ptr = tipc_port_lock(ref);
513 struct sk_buff *buf = 0;
514
515 if (!p_ptr || !p_ptr->publ.connected)
516 return;
517
518 /* Last probe answered ? */
519 if (p_ptr->probing_state == PROBING) {
520 buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
521 } else {
522 buf = port_build_proto_msg(port_peerport(p_ptr),
523 port_peernode(p_ptr),
524 p_ptr->publ.ref,
525 tipc_own_addr,
526 CONN_MANAGER,
527 CONN_PROBE,
528 TIPC_OK,
529 port_out_seqno(p_ptr),
530 0);
531 port_incr_out_seqno(p_ptr);
532 p_ptr->probing_state = PROBING;
533 k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
534 }
535 tipc_port_unlock(p_ptr);
536 tipc_net_route_msg(buf);
537}
538
539
540static void port_handle_node_down(unsigned long ref)
541{
542 struct port *p_ptr = tipc_port_lock(ref);
543 struct sk_buff* buf = 0;
544
545 if (!p_ptr)
546 return;
547 buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_NODE);
548 tipc_port_unlock(p_ptr);
549 tipc_net_route_msg(buf);
550}
551
552
553static struct sk_buff *port_build_self_abort_msg(struct port *p_ptr, u32 err)
554{
555 u32 imp = msg_importance(&p_ptr->publ.phdr);
556
557 if (!p_ptr->publ.connected)
558 return 0;
559 if (imp < TIPC_CRITICAL_IMPORTANCE)
560 imp++;
561 return port_build_proto_msg(p_ptr->publ.ref,
562 tipc_own_addr,
563 port_peerport(p_ptr),
564 port_peernode(p_ptr),
565 imp,
566 TIPC_CONN_MSG,
567 err,
568 p_ptr->last_in_seqno + 1,
569 0);
570}
571
572
573static struct sk_buff *port_build_peer_abort_msg(struct port *p_ptr, u32 err)
574{
575 u32 imp = msg_importance(&p_ptr->publ.phdr);
576
577 if (!p_ptr->publ.connected)
578 return 0;
579 if (imp < TIPC_CRITICAL_IMPORTANCE)
580 imp++;
581 return port_build_proto_msg(port_peerport(p_ptr),
582 port_peernode(p_ptr),
583 p_ptr->publ.ref,
584 tipc_own_addr,
585 imp,
586 TIPC_CONN_MSG,
587 err,
588 port_out_seqno(p_ptr),
589 0);
590}
591
592void tipc_port_recv_proto_msg(struct sk_buff *buf)
593{
594 struct tipc_msg *msg = buf_msg(buf);
595 struct port *p_ptr = tipc_port_lock(msg_destport(msg));
596 u32 err = TIPC_OK;
597 struct sk_buff *r_buf = 0;
598 struct sk_buff *abort_buf = 0;
599
600 msg_dbg(msg, "PORT<RECV<:");
601
602 if (!p_ptr) {
603 err = TIPC_ERR_NO_PORT;
604 } else if (p_ptr->publ.connected) {
605 if (port_peernode(p_ptr) != msg_orignode(msg))
606 err = TIPC_ERR_NO_PORT;
607 if (port_peerport(p_ptr) != msg_origport(msg))
608 err = TIPC_ERR_NO_PORT;
609 if (!err && msg_routed(msg)) {
610 u32 seqno = msg_transp_seqno(msg);
611 u32 myno = ++p_ptr->last_in_seqno;
612 if (seqno != myno) {
613 err = TIPC_ERR_NO_PORT;
614 abort_buf = port_build_self_abort_msg(p_ptr, err);
615 }
616 }
617 if (msg_type(msg) == CONN_ACK) {
618 int wakeup = tipc_port_congested(p_ptr) &&
619 p_ptr->publ.congested &&
620 p_ptr->wakeup;
621 p_ptr->acked += msg_msgcnt(msg);
622 if (tipc_port_congested(p_ptr))
623 goto exit;
624 p_ptr->publ.congested = 0;
625 if (!wakeup)
626 goto exit;
627 p_ptr->wakeup(&p_ptr->publ);
628 goto exit;
629 }
630 } else if (p_ptr->publ.published) {
631 err = TIPC_ERR_NO_PORT;
632 }
633 if (err) {
634 r_buf = port_build_proto_msg(msg_origport(msg),
635 msg_orignode(msg),
636 msg_destport(msg),
637 tipc_own_addr,
638 DATA_HIGH,
639 TIPC_CONN_MSG,
640 err,
641 0,
642 0);
643 goto exit;
644 }
645
646 /* All is fine */
647 if (msg_type(msg) == CONN_PROBE) {
648 r_buf = port_build_proto_msg(msg_origport(msg),
649 msg_orignode(msg),
650 msg_destport(msg),
651 tipc_own_addr,
652 CONN_MANAGER,
653 CONN_PROBE_REPLY,
654 TIPC_OK,
655 port_out_seqno(p_ptr),
656 0);
657 }
658 p_ptr->probing_state = CONFIRMED;
659 port_incr_out_seqno(p_ptr);
660exit:
661 if (p_ptr)
662 tipc_port_unlock(p_ptr);
663 tipc_net_route_msg(r_buf);
664 tipc_net_route_msg(abort_buf);
665 buf_discard(buf);
666}
667
668static void port_print(struct port *p_ptr, struct print_buf *buf, int full_id)
669{
670 struct publication *publ;
671
672 if (full_id)
673 tipc_printf(buf, "<%u.%u.%u:%u>:",
674 tipc_zone(tipc_own_addr), tipc_cluster(tipc_own_addr),
675 tipc_node(tipc_own_addr), p_ptr->publ.ref);
676 else
677 tipc_printf(buf, "%-10u:", p_ptr->publ.ref);
678
679 if (p_ptr->publ.connected) {
680 u32 dport = port_peerport(p_ptr);
681 u32 destnode = port_peernode(p_ptr);
682
683 tipc_printf(buf, " connected to <%u.%u.%u:%u>",
684 tipc_zone(destnode), tipc_cluster(destnode),
685 tipc_node(destnode), dport);
686 if (p_ptr->publ.conn_type != 0)
687 tipc_printf(buf, " via {%u,%u}",
688 p_ptr->publ.conn_type,
689 p_ptr->publ.conn_instance);
690 }
691 else if (p_ptr->publ.published) {
692 tipc_printf(buf, " bound to");
693 list_for_each_entry(publ, &p_ptr->publications, pport_list) {
694 if (publ->lower == publ->upper)
695 tipc_printf(buf, " {%u,%u}", publ->type,
696 publ->lower);
697 else
698 tipc_printf(buf, " {%u,%u,%u}", publ->type,
699 publ->lower, publ->upper);
700 }
701 }
702 tipc_printf(buf, "\n");
703}
704
705#define MAX_PORT_QUERY 32768
706
707struct sk_buff *tipc_port_get_ports(void)
708{
709 struct sk_buff *buf;
710 struct tlv_desc *rep_tlv;
711 struct print_buf pb;
712 struct port *p_ptr;
713 int str_len;
714
715 buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_PORT_QUERY));
716 if (!buf)
717 return NULL;
718 rep_tlv = (struct tlv_desc *)buf->data;
719
720 tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_QUERY);
721 spin_lock_bh(&tipc_port_list_lock);
722 list_for_each_entry(p_ptr, &ports, port_list) {
723 spin_lock_bh(p_ptr->publ.lock);
724 port_print(p_ptr, &pb, 0);
725 spin_unlock_bh(p_ptr->publ.lock);
726 }
727 spin_unlock_bh(&tipc_port_list_lock);
728 str_len = tipc_printbuf_validate(&pb);
729
730 skb_put(buf, TLV_SPACE(str_len));
731 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
732
733 return buf;
734}
735
736#if 0
737
738#define MAX_PORT_STATS 2000
739
740struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space)
741{
742 u32 ref;
743 struct port *p_ptr;
744 struct sk_buff *buf;
745 struct tlv_desc *rep_tlv;
746 struct print_buf pb;
747 int str_len;
748
749 if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_PORT_REF))
750 return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
751
752 ref = *(u32 *)TLV_DATA(req_tlv_area);
753 ref = ntohl(ref);
754
755 p_ptr = tipc_port_lock(ref);
756 if (!p_ptr)
757 return cfg_reply_error_string("port not found");
758
759 buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_PORT_STATS));
760 if (!buf) {
761 tipc_port_unlock(p_ptr);
762 return NULL;
763 }
764 rep_tlv = (struct tlv_desc *)buf->data;
765
766 tipc_printbuf_init(&pb, TLV_DATA(rep_tlv), MAX_PORT_STATS);
767 port_print(p_ptr, &pb, 1);
768 /* NEED TO FILL IN ADDITIONAL PORT STATISTICS HERE */
769 tipc_port_unlock(p_ptr);
770 str_len = tipc_printbuf_validate(&pb);
771
772 skb_put(buf, TLV_SPACE(str_len));
773 TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);
774
775 return buf;
776}
777
778#endif
779
780void tipc_port_reinit(void)
781{
782 struct port *p_ptr;
783 struct tipc_msg *msg;
784
785 spin_lock_bh(&tipc_port_list_lock);
786 list_for_each_entry(p_ptr, &ports, port_list) {
787 msg = &p_ptr->publ.phdr;
788 if (msg_orignode(msg) == tipc_own_addr)
789 break;
790 msg_set_orignode(msg, tipc_own_addr);
791 }
792 spin_unlock_bh(&tipc_port_list_lock);
793}
794
795
796/*
797 * port_dispatcher_sigh(): Signal handler for messages destined
798 * for the tipc_port interface.
799 */
800
801static void port_dispatcher_sigh(void *dummy)
802{
803 struct sk_buff *buf;
804
805 spin_lock_bh(&queue_lock);
806 buf = msg_queue_head;
807 msg_queue_head = 0;
808 spin_unlock_bh(&queue_lock);
809
810 while (buf) {
811 struct port *p_ptr;
812 struct user_port *up_ptr;
813 struct tipc_portid orig;
814 struct tipc_name_seq dseq;
815 void *usr_handle;
816 int connected;
817 int published;
818
819 struct sk_buff *next = buf->next;
820 struct tipc_msg *msg = buf_msg(buf);
821 u32 dref = msg_destport(msg);
822
823 p_ptr = tipc_port_lock(dref);
824 if (!p_ptr) {
825 /* Port deleted while msg in queue */
826 tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
827 buf = next;
828 continue;
829 }
830 orig.ref = msg_origport(msg);
831 orig.node = msg_orignode(msg);
832 up_ptr = p_ptr->user_port;
833 usr_handle = up_ptr->usr_handle;
834 connected = p_ptr->publ.connected;
835 published = p_ptr->publ.published;
836
837 if (unlikely(msg_errcode(msg)))
838 goto err;
839
840 switch (msg_type(msg)) {
841
842 case TIPC_CONN_MSG:{
843 tipc_conn_msg_event cb = up_ptr->conn_msg_cb;
844 u32 peer_port = port_peerport(p_ptr);
845 u32 peer_node = port_peernode(p_ptr);
846
847 spin_unlock_bh(p_ptr->publ.lock);
848 if (unlikely(!connected)) {
849 if (unlikely(published))
850 goto reject;
851 tipc_connect2port(dref,&orig);
852 }
853 if (unlikely(msg_origport(msg) != peer_port))
854 goto reject;
855 if (unlikely(msg_orignode(msg) != peer_node))
856 goto reject;
857 if (unlikely(!cb))
858 goto reject;
859 if (unlikely(++p_ptr->publ.conn_unacked >=
860 TIPC_FLOW_CONTROL_WIN))
861 tipc_acknowledge(dref,
862 p_ptr->publ.conn_unacked);
863 skb_pull(buf, msg_hdr_sz(msg));
864 cb(usr_handle, dref, &buf, msg_data(msg),
865 msg_data_sz(msg));
866 break;
867 }
868 case TIPC_DIRECT_MSG:{
869 tipc_msg_event cb = up_ptr->msg_cb;
870
871 spin_unlock_bh(p_ptr->publ.lock);
872 if (unlikely(connected))
873 goto reject;
874 if (unlikely(!cb))
875 goto reject;
876 skb_pull(buf, msg_hdr_sz(msg));
877 cb(usr_handle, dref, &buf, msg_data(msg),
878 msg_data_sz(msg), msg_importance(msg),
879 &orig);
880 break;
881 }
882 case TIPC_NAMED_MSG:{
883 tipc_named_msg_event cb = up_ptr->named_msg_cb;
884
885 spin_unlock_bh(p_ptr->publ.lock);
886 if (unlikely(connected))
887 goto reject;
888 if (unlikely(!cb))
889 goto reject;
890 if (unlikely(!published))
891 goto reject;
892 dseq.type = msg_nametype(msg);
893 dseq.lower = msg_nameinst(msg);
894 dseq.upper = dseq.lower;
895 skb_pull(buf, msg_hdr_sz(msg));
896 cb(usr_handle, dref, &buf, msg_data(msg),
897 msg_data_sz(msg), msg_importance(msg),
898 &orig, &dseq);
899 break;
900 }
901 }
902 if (buf)
903 buf_discard(buf);
904 buf = next;
905 continue;
906err:
907 switch (msg_type(msg)) {
908
909 case TIPC_CONN_MSG:{
910 tipc_conn_shutdown_event cb =
911 up_ptr->conn_err_cb;
912 u32 peer_port = port_peerport(p_ptr);
913 u32 peer_node = port_peernode(p_ptr);
914
915 spin_unlock_bh(p_ptr->publ.lock);
916 if (!connected || !cb)
917 break;
918 if (msg_origport(msg) != peer_port)
919 break;
920 if (msg_orignode(msg) != peer_node)
921 break;
922 tipc_disconnect(dref);
923 skb_pull(buf, msg_hdr_sz(msg));
924 cb(usr_handle, dref, &buf, msg_data(msg),
925 msg_data_sz(msg), msg_errcode(msg));
926 break;
927 }
928 case TIPC_DIRECT_MSG:{
929 tipc_msg_err_event cb = up_ptr->err_cb;
930
931 spin_unlock_bh(p_ptr->publ.lock);
932 if (connected || !cb)
933 break;
934 skb_pull(buf, msg_hdr_sz(msg));
935 cb(usr_handle, dref, &buf, msg_data(msg),
936 msg_data_sz(msg), msg_errcode(msg), &orig);
937 break;
938 }
939 case TIPC_NAMED_MSG:{
940 tipc_named_msg_err_event cb =
941 up_ptr->named_err_cb;
942
943 spin_unlock_bh(p_ptr->publ.lock);
944 if (connected || !cb)
945 break;
946 dseq.type = msg_nametype(msg);
947 dseq.lower = msg_nameinst(msg);
948 dseq.upper = dseq.lower;
949 skb_pull(buf, msg_hdr_sz(msg));
950 cb(usr_handle, dref, &buf, msg_data(msg),
951 msg_data_sz(msg), msg_errcode(msg), &dseq);
952 break;
953 }
954 }
955 if (buf)
956 buf_discard(buf);
957 buf = next;
958 continue;
959reject:
960 tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
961 buf = next;
962 }
963}
964
965/*
966 * port_dispatcher(): Dispatcher for messages destined
967 * for the tipc_port interface. Called with port locked.
968 */
969
970static u32 port_dispatcher(struct tipc_port *dummy, struct sk_buff *buf)
971{
972 buf->next = NULL;
973 spin_lock_bh(&queue_lock);
974 if (msg_queue_head) {
975 msg_queue_tail->next = buf;
976 msg_queue_tail = buf;
977 } else {
978 msg_queue_tail = msg_queue_head = buf;
979 tipc_k_signal((Handler)port_dispatcher_sigh, 0);
980 }
981 spin_unlock_bh(&queue_lock);
982 return TIPC_OK;
983}
984
985/*
986 * Wake up port after congestion. Called with port locked.
987 *
988 */
989
990static void port_wakeup_sh(unsigned long ref)
991{
992 struct port *p_ptr;
993 struct user_port *up_ptr;
994 tipc_continue_event cb = 0;
995 void *uh = 0;
996
997 p_ptr = tipc_port_lock(ref);
998 if (p_ptr) {
999 up_ptr = p_ptr->user_port;
1000 if (up_ptr) {
1001 cb = up_ptr->continue_event_cb;
1002 uh = up_ptr->usr_handle;
1003 }
1004 tipc_port_unlock(p_ptr);
1005 }
1006 if (cb)
1007 cb(uh, ref);
1008}
1009
1010
1011static void port_wakeup(struct tipc_port *p_ptr)
1012{
1013 tipc_k_signal((Handler)port_wakeup_sh, p_ptr->ref);
1014}
1015
1016void tipc_acknowledge(u32 ref, u32 ack)
1017{
1018 struct port *p_ptr;
1019 struct sk_buff *buf = 0;
1020
1021 p_ptr = tipc_port_lock(ref);
1022 if (!p_ptr)
1023 return;
1024 if (p_ptr->publ.connected) {
1025 p_ptr->publ.conn_unacked -= ack;
1026 buf = port_build_proto_msg(port_peerport(p_ptr),
1027 port_peernode(p_ptr),
1028 ref,
1029 tipc_own_addr,
1030 CONN_MANAGER,
1031 CONN_ACK,
1032 TIPC_OK,
1033 port_out_seqno(p_ptr),
1034 ack);
1035 }
1036 tipc_port_unlock(p_ptr);
1037 tipc_net_route_msg(buf);
1038}
1039
1040/*
1041 * tipc_createport(): user-level call. Will add the port to the
1042 * registry if user_ref is non-zero.
1043 */
1044
1045int tipc_createport(u32 user_ref,
1046 void *usr_handle,
1047 unsigned int importance,
1048 tipc_msg_err_event error_cb,
1049 tipc_named_msg_err_event named_error_cb,
1050 tipc_conn_shutdown_event conn_error_cb,
1051 tipc_msg_event msg_cb,
1052 tipc_named_msg_event named_msg_cb,
1053 tipc_conn_msg_event conn_msg_cb,
1054 tipc_continue_event continue_event_cb,/* May be zero */
1055 u32 *portref)
1056{
1057 struct user_port *up_ptr;
1058 struct port *p_ptr;
1059 u32 ref;
1060
1061 up_ptr = (struct user_port *)kmalloc(sizeof(*up_ptr), GFP_ATOMIC);
1062 if (up_ptr == NULL) {
1063 return -ENOMEM;
1064 }
1065 ref = tipc_createport_raw(0, port_dispatcher, port_wakeup, importance);
1066 p_ptr = tipc_port_lock(ref);
1067 if (!p_ptr) {
1068 kfree(up_ptr);
1069 return -ENOMEM;
1070 }
1071
1072 p_ptr->user_port = up_ptr;
1073 up_ptr->user_ref = user_ref;
1074 up_ptr->usr_handle = usr_handle;
1075 up_ptr->ref = p_ptr->publ.ref;
1076 up_ptr->err_cb = error_cb;
1077 up_ptr->named_err_cb = named_error_cb;
1078 up_ptr->conn_err_cb = conn_error_cb;
1079 up_ptr->msg_cb = msg_cb;
1080 up_ptr->named_msg_cb = named_msg_cb;
1081 up_ptr->conn_msg_cb = conn_msg_cb;
1082 up_ptr->continue_event_cb = continue_event_cb;
1083 INIT_LIST_HEAD(&up_ptr->uport_list);
1084 tipc_reg_add_port(up_ptr);
1085 *portref = p_ptr->publ.ref;
1086 dbg(" tipc_createport: %x with ref %u\n", p_ptr, p_ptr->publ.ref);
1087 tipc_port_unlock(p_ptr);
1088 return TIPC_OK;
1089}
1090
1091int tipc_ownidentity(u32 ref, struct tipc_portid *id)
1092{
1093 id->ref = ref;
1094 id->node = tipc_own_addr;
1095 return TIPC_OK;
1096}
1097
1098int tipc_portimportance(u32 ref, unsigned int *importance)
1099{
1100 struct port *p_ptr;
1101
1102 p_ptr = tipc_port_lock(ref);
1103 if (!p_ptr)
1104 return -EINVAL;
1105 *importance = (unsigned int)msg_importance(&p_ptr->publ.phdr);
1106 spin_unlock_bh(p_ptr->publ.lock);
1107 return TIPC_OK;
1108}
1109
1110int tipc_set_portimportance(u32 ref, unsigned int imp)
1111{
1112 struct port *p_ptr;
1113
1114 if (imp > TIPC_CRITICAL_IMPORTANCE)
1115 return -EINVAL;
1116
1117 p_ptr = tipc_port_lock(ref);
1118 if (!p_ptr)
1119 return -EINVAL;
1120 msg_set_importance(&p_ptr->publ.phdr, (u32)imp);
1121 spin_unlock_bh(p_ptr->publ.lock);
1122 return TIPC_OK;
1123}
1124
1125
1126int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1127{
1128 struct port *p_ptr;
1129 struct publication *publ;
1130 u32 key;
1131 int res = -EINVAL;
1132
1133 p_ptr = tipc_port_lock(ref);
1134 dbg("tipc_publ %u, p_ptr = %x, conn = %x, scope = %x, "
1135 "lower = %u, upper = %u\n",
1136 ref, p_ptr, p_ptr->publ.connected, scope, seq->lower, seq->upper);
1137 if (!p_ptr)
1138 return -EINVAL;
1139 if (p_ptr->publ.connected)
1140 goto exit;
1141 if (seq->lower > seq->upper)
1142 goto exit;
1143 if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE))
1144 goto exit;
1145 key = ref + p_ptr->pub_count + 1;
1146 if (key == ref) {
1147 res = -EADDRINUSE;
1148 goto exit;
1149 }
1150 publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
1151 scope, p_ptr->publ.ref, key);
1152 if (publ) {
1153 list_add(&publ->pport_list, &p_ptr->publications);
1154 p_ptr->pub_count++;
1155 p_ptr->publ.published = 1;
1156 res = TIPC_OK;
1157 }
1158exit:
1159 tipc_port_unlock(p_ptr);
1160 return res;
1161}
1162
1163int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
1164{
1165 struct port *p_ptr;
1166 struct publication *publ;
1167 struct publication *tpubl;
1168 int res = -EINVAL;
1169
1170 p_ptr = tipc_port_lock(ref);
1171 if (!p_ptr)
1172 return -EINVAL;
1173 if (!p_ptr->publ.published)
1174 goto exit;
1175 if (!seq) {
1176 list_for_each_entry_safe(publ, tpubl,
1177 &p_ptr->publications, pport_list) {
1178 tipc_nametbl_withdraw(publ->type, publ->lower,
1179 publ->ref, publ->key);
1180 }
1181 res = TIPC_OK;
1182 } else {
1183 list_for_each_entry_safe(publ, tpubl,
1184 &p_ptr->publications, pport_list) {
1185 if (publ->scope != scope)
1186 continue;
1187 if (publ->type != seq->type)
1188 continue;
1189 if (publ->lower != seq->lower)
1190 continue;
1191 if (publ->upper != seq->upper)
1192 break;
1193 tipc_nametbl_withdraw(publ->type, publ->lower,
1194 publ->ref, publ->key);
1195 res = TIPC_OK;
1196 break;
1197 }
1198 }
1199 if (list_empty(&p_ptr->publications))
1200 p_ptr->publ.published = 0;
1201exit:
1202 tipc_port_unlock(p_ptr);
1203 return res;
1204}
1205
1206int tipc_connect2port(u32 ref, struct tipc_portid const *peer)
1207{
1208 struct port *p_ptr;
1209 struct tipc_msg *msg;
1210 int res = -EINVAL;
1211
1212 p_ptr = tipc_port_lock(ref);
1213 if (!p_ptr)
1214 return -EINVAL;
1215 if (p_ptr->publ.published || p_ptr->publ.connected)
1216 goto exit;
1217 if (!peer->ref)
1218 goto exit;
1219
1220 msg = &p_ptr->publ.phdr;
1221 msg_set_destnode(msg, peer->node);
1222 msg_set_destport(msg, peer->ref);
1223 msg_set_orignode(msg, tipc_own_addr);
1224 msg_set_origport(msg, p_ptr->publ.ref);
1225 msg_set_transp_seqno(msg, 42);
1226 msg_set_type(msg, TIPC_CONN_MSG);
1227 if (!may_route(peer->node))
1228 msg_set_hdr_sz(msg, SHORT_H_SIZE);
1229 else
1230 msg_set_hdr_sz(msg, LONG_H_SIZE);
1231
1232 p_ptr->probing_interval = PROBING_INTERVAL;
1233 p_ptr->probing_state = CONFIRMED;
1234 p_ptr->publ.connected = 1;
1235 k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
1236
1237 tipc_nodesub_subscribe(&p_ptr->subscription,peer->node,
1238 (void *)(unsigned long)ref,
1239 (net_ev_handler)port_handle_node_down);
1240 res = TIPC_OK;
1241exit:
1242 tipc_port_unlock(p_ptr);
1243 p_ptr->max_pkt = tipc_link_get_max_pkt(peer->node, ref);
1244 return res;
1245}
1246
1247/*
1248 * tipc_disconnect(): Disconnect port from peer.
1249 * This is a node-local operation.
1250 */
1251
1252int tipc_disconnect(u32 ref)
1253{
1254 struct port *p_ptr;
1255 int res = -ENOTCONN;
1256
1257 p_ptr = tipc_port_lock(ref);
1258 if (!p_ptr)
1259 return -EINVAL;
1260 if (p_ptr->publ.connected) {
1261 p_ptr->publ.connected = 0;
1262		/* let timer expire on its own to avoid deadlock! */
1263 tipc_nodesub_unsubscribe(&p_ptr->subscription);
1264 res = TIPC_OK;
1265 }
1266 tipc_port_unlock(p_ptr);
1267 return res;
1268}
1269
1270/*
1271 * tipc_shutdown(): Send a SHUTDOWN msg to peer and disconnect
1272 */
1273int tipc_shutdown(u32 ref)
1274{
1275 struct port *p_ptr;
1276 struct sk_buff *buf = 0;
1277
1278 p_ptr = tipc_port_lock(ref);
1279 if (!p_ptr)
1280 return -EINVAL;
1281
1282 if (p_ptr->publ.connected) {
1283 u32 imp = msg_importance(&p_ptr->publ.phdr);
1284 if (imp < TIPC_CRITICAL_IMPORTANCE)
1285 imp++;
1286 buf = port_build_proto_msg(port_peerport(p_ptr),
1287 port_peernode(p_ptr),
1288 ref,
1289 tipc_own_addr,
1290 imp,
1291 TIPC_CONN_MSG,
1292 TIPC_CONN_SHUTDOWN,
1293 port_out_seqno(p_ptr),
1294 0);
1295 }
1296 tipc_port_unlock(p_ptr);
1297 tipc_net_route_msg(buf);
1298 return tipc_disconnect(ref);
1299}
1300
1301int tipc_isconnected(u32 ref, int *isconnected)
1302{
1303 struct port *p_ptr;
1304
1305 p_ptr = tipc_port_lock(ref);
1306 if (!p_ptr)
1307 return -EINVAL;
1308 *isconnected = p_ptr->publ.connected;
1309 tipc_port_unlock(p_ptr);
1310 return TIPC_OK;
1311}
1312
1313int tipc_peer(u32 ref, struct tipc_portid *peer)
1314{
1315 struct port *p_ptr;
1316 int res;
1317
1318 p_ptr = tipc_port_lock(ref);
1319 if (!p_ptr)
1320 return -EINVAL;
1321 if (p_ptr->publ.connected) {
1322 peer->ref = port_peerport(p_ptr);
1323 peer->node = port_peernode(p_ptr);
1324 res = TIPC_OK;
1325 } else
1326 res = -ENOTCONN;
1327 tipc_port_unlock(p_ptr);
1328 return res;
1329}
1330
1331int tipc_ref_valid(u32 ref)
1332{
1333 /* Works irrespective of type */
1334 return !!tipc_ref_deref(ref);
1335}
1336
1337
1338/*
1339 * tipc_port_recv_sections(): Concatenate and deliver sectioned
1340 * message for this node.
1341 */
1342
1343int tipc_port_recv_sections(struct port *sender, unsigned int num_sect,
1344 struct iovec const *msg_sect)
1345{
1346 struct sk_buff *buf;
1347 int res;
1348
1349 res = msg_build(&sender->publ.phdr, msg_sect, num_sect,
1350 MAX_MSG_SIZE, !sender->user_port, &buf);
1351 if (likely(buf))
1352 tipc_port_recv_msg(buf);
1353 return res;
1354}
1355
1356/**
1357 * tipc_send - send message sections on connection
1358 */
1359
1360int tipc_send(u32 ref, unsigned int num_sect, struct iovec const *msg_sect)
1361{
1362 struct port *p_ptr;
1363 u32 destnode;
1364 int res;
1365
1366 p_ptr = tipc_port_deref(ref);
1367 if (!p_ptr || !p_ptr->publ.connected)
1368 return -EINVAL;
1369
1370 p_ptr->publ.congested = 1;
1371 if (!tipc_port_congested(p_ptr)) {
1372 destnode = port_peernode(p_ptr);
1373 if (likely(destnode != tipc_own_addr))
1374 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
1375 destnode);
1376 else
1377 res = tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
1378
1379 if (likely(res != -ELINKCONG)) {
1380 port_incr_out_seqno(p_ptr);
1381 p_ptr->publ.congested = 0;
1382 p_ptr->sent++;
1383 return res;
1384 }
1385 }
1386 if (port_unreliable(p_ptr)) {
1387 p_ptr->publ.congested = 0;
1388 /* Just calculate msg length and return */
1389 return msg_calc_data_size(msg_sect, num_sect);
1390 }
1391 return -ELINKCONG;
1392}
1393
1394/**
1395 * tipc_send_buf - send message buffer on connection
1396 */
1397
1398int tipc_send_buf(u32 ref, struct sk_buff *buf, unsigned int dsz)
1399{
1400 struct port *p_ptr;
1401 struct tipc_msg *msg;
1402 u32 destnode;
1403 u32 hsz;
1404 u32 sz;
1405 u32 res;
1406
1407 p_ptr = tipc_port_deref(ref);
1408 if (!p_ptr || !p_ptr->publ.connected)
1409 return -EINVAL;
1410
1411 msg = &p_ptr->publ.phdr;
1412 hsz = msg_hdr_sz(msg);
1413 sz = hsz + dsz;
1414 msg_set_size(msg, sz);
1415 if (skb_cow(buf, hsz))
1416 return -ENOMEM;
1417
1418 skb_push(buf, hsz);
1419 memcpy(buf->data, (unchar *)msg, hsz);
1420 destnode = msg_destnode(msg);
1421 p_ptr->publ.congested = 1;
1422 if (!tipc_port_congested(p_ptr)) {
1423 if (likely(destnode != tipc_own_addr))
1424 res = tipc_send_buf_fast(buf, destnode);
1425 else {
1426 tipc_port_recv_msg(buf);
1427 res = sz;
1428 }
1429 if (likely(res != -ELINKCONG)) {
1430 port_incr_out_seqno(p_ptr);
1431 p_ptr->sent++;
1432 p_ptr->publ.congested = 0;
1433 return res;
1434 }
1435 }
1436 if (port_unreliable(p_ptr)) {
1437 p_ptr->publ.congested = 0;
1438 return dsz;
1439 }
1440 return -ELINKCONG;
1441}
1442
1443/**
1444 * tipc_forward2name - forward message sections to port name
1445 */
1446
1447int tipc_forward2name(u32 ref,
1448 struct tipc_name const *name,
1449 u32 domain,
1450 u32 num_sect,
1451 struct iovec const *msg_sect,
1452 struct tipc_portid const *orig,
1453 unsigned int importance)
1454{
1455 struct port *p_ptr;
1456 struct tipc_msg *msg;
1457 u32 destnode = domain;
1458 u32 destport = 0;
1459 int res;
1460
1461 p_ptr = tipc_port_deref(ref);
1462 if (!p_ptr || p_ptr->publ.connected)
1463 return -EINVAL;
1464
1465 msg = &p_ptr->publ.phdr;
1466 msg_set_type(msg, TIPC_NAMED_MSG);
1467 msg_set_orignode(msg, orig->node);
1468 msg_set_origport(msg, orig->ref);
1469 msg_set_hdr_sz(msg, LONG_H_SIZE);
1470 msg_set_nametype(msg, name->type);
1471 msg_set_nameinst(msg, name->instance);
1472 msg_set_lookup_scope(msg, addr_scope(domain));
1473 if (importance <= TIPC_CRITICAL_IMPORTANCE)
1474 msg_set_importance(msg,importance);
1475 destport = tipc_nametbl_translate(name->type, name->instance, &destnode);
1476 msg_set_destnode(msg, destnode);
1477 msg_set_destport(msg, destport);
1478
1479 if (likely(destport || destnode)) {
1480 p_ptr->sent++;
1481 if (likely(destnode == tipc_own_addr))
1482 return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
1483 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect,
1484 destnode);
1485 if (likely(res != -ELINKCONG))
1486 return res;
1487 if (port_unreliable(p_ptr)) {
1488 /* Just calculate msg length and return */
1489 return msg_calc_data_size(msg_sect, num_sect);
1490 }
1491 return -ELINKCONG;
1492 }
1493 return tipc_port_reject_sections(p_ptr, msg, msg_sect, num_sect,
1494 TIPC_ERR_NO_NAME);
1495}
1496
1497/**
1498 * tipc_send2name - send message sections to port name
1499 */
1500
1501int tipc_send2name(u32 ref,
1502 struct tipc_name const *name,
1503 unsigned int domain,
1504 unsigned int num_sect,
1505 struct iovec const *msg_sect)
1506{
1507 struct tipc_portid orig;
1508
1509 orig.ref = ref;
1510 orig.node = tipc_own_addr;
1511 return tipc_forward2name(ref, name, domain, num_sect, msg_sect, &orig,
1512 TIPC_PORT_IMPORTANCE);
1513}
1514
1515/**
1516 * tipc_forward_buf2name - forward message buffer to port name
1517 */
1518
1519int tipc_forward_buf2name(u32 ref,
1520 struct tipc_name const *name,
1521 u32 domain,
1522 struct sk_buff *buf,
1523 unsigned int dsz,
1524 struct tipc_portid const *orig,
1525 unsigned int importance)
1526{
1527 struct port *p_ptr;
1528 struct tipc_msg *msg;
1529 u32 destnode = domain;
1530 u32 destport = 0;
1531 int res;
1532
1533 p_ptr = (struct port *)tipc_ref_deref(ref);
1534 if (!p_ptr || p_ptr->publ.connected)
1535 return -EINVAL;
1536
1537 msg = &p_ptr->publ.phdr;
1538 if (importance <= TIPC_CRITICAL_IMPORTANCE)
1539 msg_set_importance(msg, importance);
1540 msg_set_type(msg, TIPC_NAMED_MSG);
1541 msg_set_orignode(msg, orig->node);
1542 msg_set_origport(msg, orig->ref);
1543 msg_set_nametype(msg, name->type);
1544 msg_set_nameinst(msg, name->instance);
1545 msg_set_lookup_scope(msg, addr_scope(domain));
1546 msg_set_hdr_sz(msg, LONG_H_SIZE);
1547 msg_set_size(msg, LONG_H_SIZE + dsz);
1548 destport = tipc_nametbl_translate(name->type, name->instance, &destnode);
1549 msg_set_destnode(msg, destnode);
1550 msg_set_destport(msg, destport);
1551 msg_dbg(msg, "forw2name ==> ");
1552 if (skb_cow(buf, LONG_H_SIZE))
1553 return -ENOMEM;
1554 skb_push(buf, LONG_H_SIZE);
1555 memcpy(buf->data, (unchar *)msg, LONG_H_SIZE);
1556 msg_dbg(buf_msg(buf),"PREP:");
1557 if (likely(destport || destnode)) {
1558 p_ptr->sent++;
1559 if (destnode == tipc_own_addr)
1560 return tipc_port_recv_msg(buf);
1561 res = tipc_send_buf_fast(buf, destnode);
1562 if (likely(res != -ELINKCONG))
1563 return res;
1564 if (port_unreliable(p_ptr))
1565 return dsz;
1566 return -ELINKCONG;
1567 }
1568 return tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
1569}
1570
1571/**
1572 * tipc_send_buf2name - send message buffer to port name
1573 */
1574
1575int tipc_send_buf2name(u32 ref,
1576 struct tipc_name const *dest,
1577 u32 domain,
1578 struct sk_buff *buf,
1579 unsigned int dsz)
1580{
1581 struct tipc_portid orig;
1582
1583 orig.ref = ref;
1584 orig.node = tipc_own_addr;
1585 return tipc_forward_buf2name(ref, dest, domain, buf, dsz, &orig,
1586 TIPC_PORT_IMPORTANCE);
1587}
1588
1589/**
1590 * tipc_forward2port - forward message sections to port identity
1591 */
1592
1593int tipc_forward2port(u32 ref,
1594 struct tipc_portid const *dest,
1595 unsigned int num_sect,
1596 struct iovec const *msg_sect,
1597 struct tipc_portid const *orig,
1598 unsigned int importance)
1599{
1600 struct port *p_ptr;
1601 struct tipc_msg *msg;
1602 int res;
1603
1604 p_ptr = tipc_port_deref(ref);
1605 if (!p_ptr || p_ptr->publ.connected)
1606 return -EINVAL;
1607
1608 msg = &p_ptr->publ.phdr;
1609 msg_set_type(msg, TIPC_DIRECT_MSG);
1610 msg_set_orignode(msg, orig->node);
1611 msg_set_origport(msg, orig->ref);
1612 msg_set_destnode(msg, dest->node);
1613 msg_set_destport(msg, dest->ref);
1614 msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
1615 if (importance <= TIPC_CRITICAL_IMPORTANCE)
1616 msg_set_importance(msg, importance);
1617 p_ptr->sent++;
1618 if (dest->node == tipc_own_addr)
1619 return tipc_port_recv_sections(p_ptr, num_sect, msg_sect);
1620 res = tipc_link_send_sections_fast(p_ptr, msg_sect, num_sect, dest->node);
1621 if (likely(res != -ELINKCONG))
1622 return res;
1623 if (port_unreliable(p_ptr)) {
1624 /* Just calculate msg length and return */
1625 return msg_calc_data_size(msg_sect, num_sect);
1626 }
1627 return -ELINKCONG;
1628}
1629
1630/**
1631 * tipc_send2port - send message sections to port identity
1632 */
1633
1634int tipc_send2port(u32 ref,
1635 struct tipc_portid const *dest,
1636 unsigned int num_sect,
1637 struct iovec const *msg_sect)
1638{
1639 struct tipc_portid orig;
1640
1641 orig.ref = ref;
1642 orig.node = tipc_own_addr;
1643 return tipc_forward2port(ref, dest, num_sect, msg_sect, &orig,
1644 TIPC_PORT_IMPORTANCE);
1645}
1646
1647/**
1648 * tipc_forward_buf2port - forward message buffer to port identity
1649 */
1650int tipc_forward_buf2port(u32 ref,
1651 struct tipc_portid const *dest,
1652 struct sk_buff *buf,
1653 unsigned int dsz,
1654 struct tipc_portid const *orig,
1655 unsigned int importance)
1656{
1657 struct port *p_ptr;
1658 struct tipc_msg *msg;
1659 int res;
1660
1661 p_ptr = (struct port *)tipc_ref_deref(ref);
1662 if (!p_ptr || p_ptr->publ.connected)
1663 return -EINVAL;
1664
1665 msg = &p_ptr->publ.phdr;
1666 msg_set_type(msg, TIPC_DIRECT_MSG);
1667 msg_set_orignode(msg, orig->node);
1668 msg_set_origport(msg, orig->ref);
1669 msg_set_destnode(msg, dest->node);
1670 msg_set_destport(msg, dest->ref);
1671 msg_set_hdr_sz(msg, DIR_MSG_H_SIZE);
1672 if (importance <= TIPC_CRITICAL_IMPORTANCE)
1673 msg_set_importance(msg, importance);
1674 msg_set_size(msg, DIR_MSG_H_SIZE + dsz);
1675 if (skb_cow(buf, DIR_MSG_H_SIZE))
1676 return -ENOMEM;
1677
1678 skb_push(buf, DIR_MSG_H_SIZE);
1679 memcpy(buf->data, (unchar *)msg, DIR_MSG_H_SIZE);
1680 msg_dbg(msg, "buf2port: ");
1681 p_ptr->sent++;
1682 if (dest->node == tipc_own_addr)
1683 return tipc_port_recv_msg(buf);
1684 res = tipc_send_buf_fast(buf, dest->node);
1685 if (likely(res != -ELINKCONG))
1686 return res;
1687 if (port_unreliable(p_ptr))
1688 return dsz;
1689 return -ELINKCONG;
1690}
1691
1692/**
1693 * tipc_send_buf2port - send message buffer to port identity
1694 */
1695
1696int tipc_send_buf2port(u32 ref,
1697 struct tipc_portid const *dest,
1698 struct sk_buff *buf,
1699 unsigned int dsz)
1700{
1701 struct tipc_portid orig;
1702
1703 orig.ref = ref;
1704 orig.node = tipc_own_addr;
1705 return tipc_forward_buf2port(ref, dest, buf, dsz, &orig,
1706 TIPC_PORT_IMPORTANCE);
1707}
1708
diff --git a/net/tipc/port.h b/net/tipc/port.h
new file mode 100644
index 000000000000..839f100da646
--- /dev/null
+++ b/net/tipc/port.h
@@ -0,0 +1,209 @@
1/*
2 * net/tipc/port.h: Include file for TIPC port code
3 *
4 * Copyright (c) 1994-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_PORT_H
38#define _TIPC_PORT_H
39
40#include "core.h"
41#include "ref.h"
42#include "net.h"
43#include "msg.h"
44#include "dbg.h"
45#include "node_subscr.h"
46
47/**
48 * struct user_port - TIPC user port (used with native API)
49 * @user_ref: id of user who created user port
50 * @usr_handle: user-specified field
51 * @ref: object reference to associated TIPC port
52 * <various callback routines>
53 * @uport_list: adjacent user ports in list of ports held by user
54 */
55
56struct user_port {
57 u32 user_ref;
58 void *usr_handle;
59 u32 ref;
60 tipc_msg_err_event err_cb;
61 tipc_named_msg_err_event named_err_cb;
62 tipc_conn_shutdown_event conn_err_cb;
63 tipc_msg_event msg_cb;
64 tipc_named_msg_event named_msg_cb;
65 tipc_conn_msg_event conn_msg_cb;
66 tipc_continue_event continue_event_cb;
67 struct list_head uport_list;
68};
69
70/**
71 * struct port - TIPC port structure
72 * @publ: TIPC port info available to privileged users
73 * @port_list: adjacent ports in TIPC's global list of ports
74 * @dispatcher: ptr to routine which handles received messages
75 * @wakeup: ptr to routine to call when port is no longer congested
76 * @user_port: ptr to user port associated with port (if any)
77 * @wait_list: adjacent ports in list of ports waiting on link congestion
78 * @congested_link: ptr to congested link port is waiting on
79 * @waiting_pkts:
80 * @sent:
81 * @acked:
82 * @publications: list of publications for port
83 * @pub_count: total # of publications port has made during its lifetime
84 * @max_pkt: maximum packet size "hint" used when building messages sent by port
85 * @probing_state:
86 * @probing_interval:
87 * @last_in_seqno:
88 * @timer_ref:
89 * @subscription: "node down" subscription used to terminate failed connections
90 */
91
92struct port {
93 struct tipc_port publ;
94 struct list_head port_list;
95 u32 (*dispatcher)(struct tipc_port *, struct sk_buff *);
96 void (*wakeup)(struct tipc_port *);
97 struct user_port *user_port;
98 struct list_head wait_list;
99 struct link *congested_link;
100 u32 waiting_pkts;
101 u32 sent;
102 u32 acked;
103 struct list_head publications;
104 u32 pub_count;
105 u32 max_pkt;
106 u32 probing_state;
107 u32 probing_interval;
108 u32 last_in_seqno;
109 struct timer_list timer;
110 struct node_subscr subscription;
111};
112
113extern spinlock_t tipc_port_list_lock;
114struct port_list;
115
116int tipc_port_recv_sections(struct port *p_ptr, u32 num_sect,
117 struct iovec const *msg_sect);
118int tipc_port_reject_sections(struct port *p_ptr, struct tipc_msg *hdr,
119 struct iovec const *msg_sect, u32 num_sect,
120 int err);
121struct sk_buff *tipc_port_get_ports(void);
122struct sk_buff *port_show_stats(const void *req_tlv_area, int req_tlv_space);
123void tipc_port_recv_proto_msg(struct sk_buff *buf);
124void tipc_port_recv_mcast(struct sk_buff *buf, struct port_list *dp);
125void tipc_port_reinit(void);
126
127/**
128 * tipc_port_lock - lock port instance referred to and return its pointer
129 */
130
131static inline struct port *tipc_port_lock(u32 ref)
132{
133 return (struct port *)tipc_ref_lock(ref);
134}
135
136/**
137 * tipc_port_unlock - unlock a port instance
138 *
139 * Can use pointer instead of tipc_ref_unlock() since port is already locked.
140 */
141
142static inline void tipc_port_unlock(struct port *p_ptr)
143{
144 spin_unlock_bh(p_ptr->publ.lock);
145}
146
147static inline struct port* tipc_port_deref(u32 ref)
148{
149 return (struct port *)tipc_ref_deref(ref);
150}
151
152static inline u32 tipc_peer_port(struct port *p_ptr)
153{
154 return msg_destport(&p_ptr->publ.phdr);
155}
156
157static inline u32 tipc_peer_node(struct port *p_ptr)
158{
159 return msg_destnode(&p_ptr->publ.phdr);
160}
161
162static inline int tipc_port_congested(struct port *p_ptr)
163{
164 return((p_ptr->sent - p_ptr->acked) >= (TIPC_FLOW_CONTROL_WIN * 2));
165}
166
167/**
168 * tipc_port_recv_msg - receive message from lower layer and deliver to port user
169 */
170
171static inline int tipc_port_recv_msg(struct sk_buff *buf)
172{
173 struct port *p_ptr;
174 struct tipc_msg *msg = buf_msg(buf);
175 u32 destport = msg_destport(msg);
176 u32 dsz = msg_data_sz(msg);
177 u32 err;
178
179 /* forward unresolved named message */
180 if (unlikely(!destport)) {
181 tipc_net_route_msg(buf);
182 return dsz;
183 }
184
185 /* validate destination & pass to port, otherwise reject message */
186 p_ptr = tipc_port_lock(destport);
187 if (likely(p_ptr)) {
188 if (likely(p_ptr->publ.connected)) {
189 if ((unlikely(msg_origport(msg) != tipc_peer_port(p_ptr))) ||
190 (unlikely(msg_orignode(msg) != tipc_peer_node(p_ptr))) ||
191 (unlikely(!msg_connected(msg)))) {
192 err = TIPC_ERR_NO_PORT;
193 tipc_port_unlock(p_ptr);
194 goto reject;
195 }
196 }
197 err = p_ptr->dispatcher(&p_ptr->publ, buf);
198 tipc_port_unlock(p_ptr);
199 if (likely(!err))
200 return dsz;
201 } else {
202 err = TIPC_ERR_NO_PORT;
203 }
204reject:
205 dbg("port->rejecting, err = %x..\n",err);
206 return tipc_reject_msg(buf, err);
207}
208
209#endif
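Between tipc_createport(), tipc_publish(), tipc_send2name() and tipc_deleteport() above, the native API already supports a complete create/bind/send/delete cycle without going through the socket layer. A minimal, send-only sketch under the following assumptions: my_demo_send is hypothetical, TIPC_LOW_IMPORTANCE is taken from the public TIPC header (not part of this hunk), and all callbacks are omitted, so inbound messages to the port would be rejected by the dispatcher:

/* Hypothetical send-only user of the native port API: create a port,
 * bind it to {name_type, name_inst}, fire one message at a peer name,
 * then clean up.
 */
static int my_demo_send(u32 user_ref, u32 name_type, u32 name_inst,
			struct tipc_name const *peer,
			u32 num_sect, struct iovec const *msg_sect)
{
	struct tipc_name_seq seq;
	u32 ref;
	int res;

	res = tipc_createport(user_ref, NULL, TIPC_LOW_IMPORTANCE,
			      NULL, NULL, NULL,		/* error callbacks */
			      NULL, NULL, NULL,		/* message callbacks */
			      NULL,			/* congestion wakeup */
			      &ref);
	if (res != TIPC_OK)
		return res;

	seq.type = name_type;
	seq.lower = name_inst;
	seq.upper = name_inst;
	res = tipc_publish(ref, TIPC_CLUSTER_SCOPE, &seq);
	if (res == TIPC_OK)
		res = tipc_send2name(ref, peer, 0 /* domain: no restriction */,
				     num_sect, msg_sect);

	tipc_deleteport(ref);	/* also withdraws the publication */
	return res;
}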
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
new file mode 100644
index 000000000000..5a13c2defe4a
--- /dev/null
+++ b/net/tipc/ref.c
@@ -0,0 +1,189 @@
1/*
2 * net/tipc/ref.c: TIPC object registry code
3 *
4 * Copyright (c) 1991-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "ref.h"
39#include "port.h"
40#include "subscr.h"
41#include "name_distr.h"
42#include "name_table.h"
43#include "config.h"
44#include "discover.h"
45#include "bearer.h"
46#include "node.h"
47#include "bcast.h"
48
49/*
50 * Object reference table consists of 2**N entries.
51 *
52 * A used entry has object ptr != 0, reference == XXXX|own index
53 * (XXXX changes each time entry is acquired)
54 * A free entry has object ptr == 0, reference == YYYY|next free index
55 * (YYYY is one more than last used XXXX)
56 *
57 * Free list is initially chained from entry (2**N)-1 to entry 1.
58 * Entry 0 is not used to allow index 0 to indicate the end of the free list.
59 *
60 * Note: Any accidental reference of the form XXXX|0--0 won't match entry 0
61 * because entry 0's reference field has the form XXXX|1--1.
62 */
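/*
 * Worked example (hypothetical values): with 2**8 = 256 entries the
 * index_mask is 0xFF, so reference 0x1A2B45 resolves to entry 0x45.
 * Discarding that entry bumps its upper bits by 0x100 (index_mask + 1),
 * so the next reference issued for it becomes 0x1A2C45, and a lookup
 * using the stale value 0x1A2B45 no longer matches data.reference.
 */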
63
64struct ref_table tipc_ref_table = { 0 };
65
66static rwlock_t ref_table_lock = RW_LOCK_UNLOCKED;
67
68/**
69 * tipc_ref_table_init - create reference table for objects
70 */
71
72int tipc_ref_table_init(u32 requested_size, u32 start)
73{
74 struct reference *table;
75 u32 sz = 1 << 4;
76 u32 index_mask;
77 int i;
78
79 while (sz < requested_size) {
80 sz <<= 1;
81 }
82 table = (struct reference *)vmalloc(sz * sizeof(struct reference));
83 if (table == NULL)
84 return -ENOMEM;
85
86 write_lock_bh(&ref_table_lock);
87 index_mask = sz - 1;
88 for (i = sz - 1; i >= 0; i--) {
89 table[i].object = 0;
90 table[i].lock = SPIN_LOCK_UNLOCKED;
91 table[i].data.next_plus_upper = (start & ~index_mask) + i - 1;
92 }
93 tipc_ref_table.entries = table;
94 tipc_ref_table.index_mask = index_mask;
95 tipc_ref_table.first_free = sz - 1;
96 tipc_ref_table.last_free = 1;
97 write_unlock_bh(&ref_table_lock);
98 return TIPC_OK;
99}
100
101/**
102 * tipc_ref_table_stop - destroy reference table for objects
103 */
104
105void tipc_ref_table_stop(void)
106{
107 if (!tipc_ref_table.entries)
108 return;
109
110 vfree(tipc_ref_table.entries);
111 tipc_ref_table.entries = 0;
112}
113
114/**
115 * tipc_ref_acquire - create reference to an object
116 *
117 * Return a unique reference value which can be translated back to the pointer
118 * 'object' at a later time. Also, pass back a pointer to the lock protecting
119 * the object, but without locking it.
120 */
121
122u32 tipc_ref_acquire(void *object, spinlock_t **lock)
123{
124 struct reference *entry;
125 u32 index;
126 u32 index_mask;
127 u32 next_plus_upper;
128 u32 reference = 0;
129
130 assert(tipc_ref_table.entries && object);
131
132 write_lock_bh(&ref_table_lock);
133 if (tipc_ref_table.first_free) {
134 index = tipc_ref_table.first_free;
135 entry = &(tipc_ref_table.entries[index]);
136 index_mask = tipc_ref_table.index_mask;
137 /* take lock in case a previous user of entry still holds it */
138 spin_lock_bh(&entry->lock);
139 next_plus_upper = entry->data.next_plus_upper;
140 tipc_ref_table.first_free = next_plus_upper & index_mask;
141 reference = (next_plus_upper & ~index_mask) + index;
142 entry->data.reference = reference;
143 entry->object = object;
144 if (lock != 0)
145 *lock = &entry->lock;
146 spin_unlock_bh(&entry->lock);
147 }
148 write_unlock_bh(&ref_table_lock);
149 return reference;
150}
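/*
 * Typical lifecycle (illustrative sketch only):
 *
 *	spinlock_t *lock;
 *	u32 ref = tipc_ref_acquire(obj, &lock);
 *	...
 *	obj = tipc_ref_lock(ref);	-- returns 0 if ref has gone stale
 *	if (obj) {
 *		... use object while holding its lock ...
 *		tipc_ref_unlock(ref);
 *	}
 *	...
 *	tipc_ref_discard(ref);		-- further lookups of ref now fail
 */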
151
152/**
153 * tipc_ref_discard - invalidate references to an object
154 *
155 * Disallow future references to an object and free up the entry for re-use.
156 * Note: The entry's spin_lock may still be busy after discard
157 */
158
159void tipc_ref_discard(u32 ref)
160{
161 struct reference *entry;
162 u32 index;
163 u32 index_mask;
164
165 assert(tipc_ref_table.entries);
166 assert(ref != 0);
167
168 write_lock_bh(&ref_table_lock);
169 index_mask = tipc_ref_table.index_mask;
170 index = ref & index_mask;
171 entry = &(tipc_ref_table.entries[index]);
172 assert(entry->object != 0);
173 assert(entry->data.reference == ref);
174
175 /* mark entry as unused */
176 entry->object = 0;
177 if (tipc_ref_table.first_free == 0)
178 tipc_ref_table.first_free = index;
179 else
180 /* next_plus_upper is always XXXX|0--0 for last free entry */
181 tipc_ref_table.entries[tipc_ref_table.last_free].data.next_plus_upper
182 |= index;
183 tipc_ref_table.last_free = index;
184
185 /* increment upper bits of entry to invalidate subsequent references */
186 entry->data.next_plus_upper = (ref & ~index_mask) + (index_mask + 1);
187 write_unlock_bh(&ref_table_lock);
188}
189
diff --git a/net/tipc/ref.h b/net/tipc/ref.h
new file mode 100644
index 000000000000..4f8f9f40dcac
--- /dev/null
+++ b/net/tipc/ref.h
@@ -0,0 +1,131 @@
1/*
2 * net/tipc/ref.h: Include file for TIPC object registry code
3 *
4 * Copyright (c) 1991-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_REF_H
38#define _TIPC_REF_H
39
40/**
41 * struct reference - TIPC object reference entry
42 * @object: pointer to object associated with reference entry
43 * @lock: spinlock controlling access to object
44 * @data: reference value associated with object (or link to next unused entry)
45 */
46
47struct reference {
48 void *object;
49 spinlock_t lock;
50 union {
51 u32 next_plus_upper;
52 u32 reference;
53 } data;
54};
55
56/**
57 * struct tipc_ref_table - table of TIPC object reference entries
58 * @entries: pointer to array of reference entries
59 * @index_mask: bitmask for array index portion of reference values
60 * @first_free: array index of first unused object reference entry
61 * @last_free: array index of last unused object reference entry
62 */
63
64struct ref_table {
65 struct reference *entries;
66 u32 index_mask;
67 u32 first_free;
68 u32 last_free;
69};
70
71extern struct ref_table tipc_ref_table;
72
73int tipc_ref_table_init(u32 requested_size, u32 start);
74void tipc_ref_table_stop(void);
75
76u32 tipc_ref_acquire(void *object, spinlock_t **lock);
77void tipc_ref_discard(u32 ref);
78
79
80/**
81 * tipc_ref_lock - lock referenced object and return pointer to it
82 */
83
84static inline void *tipc_ref_lock(u32 ref)
85{
86 if (likely(tipc_ref_table.entries)) {
87 struct reference *r =
88 &tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
89
90 spin_lock_bh(&r->lock);
91 if (likely(r->data.reference == ref))
92 return r->object;
93 spin_unlock_bh(&r->lock);
94 }
95 return 0;
96}
97
98/**
99 * tipc_ref_unlock - unlock referenced object
100 */
101
102static inline void tipc_ref_unlock(u32 ref)
103{
104 if (likely(tipc_ref_table.entries)) {
105 struct reference *r =
106 &tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
107
108 if (likely(r->data.reference == ref))
109 spin_unlock_bh(&r->lock);
110 else
111 err("tipc_ref_unlock() invoked using obsolete reference\n");
112 }
113}
114
115/**
116 * tipc_ref_deref - return pointer to referenced object (without locking it)
117 */
118
119static inline void *tipc_ref_deref(u32 ref)
120{
121 if (likely(tipc_ref_table.entries)) {
122 struct reference *r =
123 &tipc_ref_table.entries[ref & tipc_ref_table.index_mask];
124
125 if (likely(r->data.reference == ref))
126 return r->object;
127 }
128 return 0;
129}
130
131#endif
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
new file mode 100644
index 000000000000..67253bfcd702
--- /dev/null
+++ b/net/tipc/socket.c
@@ -0,0 +1,1724 @@
1/*
2 * net/tipc/socket.c: TIPC socket API
3 *
4 * Copyright (c) 2001-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include <linux/module.h>
38#include <linux/types.h>
39#include <linux/net.h>
40#include <linux/socket.h>
41#include <linux/errno.h>
42#include <linux/mm.h>
43#include <linux/slab.h>
44#include <linux/poll.h>
45#include <linux/fcntl.h>
46#include <asm/semaphore.h>
47#include <asm/string.h>
48#include <asm/atomic.h>
49#include <net/sock.h>
50
51#include <linux/tipc.h>
52#include <linux/tipc_config.h>
53#include <net/tipc/tipc_msg.h>
54#include <net/tipc/tipc_port.h>
55
56#include "core.h"
57
58#define SS_LISTENING -1 /* socket is listening */
59#define SS_READY -2 /* socket is connectionless */
60
61#define OVERLOAD_LIMIT_BASE 5000
62
63struct tipc_sock {
64 struct sock sk;
65 struct tipc_port *p;
66 struct semaphore sem;
67};
68
69#define tipc_sk(sk) ((struct tipc_sock*)sk)
70
71static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf);
72static void wakeupdispatch(struct tipc_port *tport);
73
74static struct proto_ops packet_ops;
75static struct proto_ops stream_ops;
76static struct proto_ops msg_ops;
77
78static struct proto tipc_proto;
79
80static int sockets_enabled = 0;
81
82static atomic_t tipc_queue_size = ATOMIC_INIT(0);
83
84
85/*
86 * sock_lock(): Lock a port/socket pair. lock_sock() can
87 * not be used here, since the same lock must protect ports
88 * with non-socket interfaces.
89 * See net.c for description of locking policy.
90 */
91static inline void sock_lock(struct tipc_sock* tsock)
92{
93 spin_lock_bh(tsock->p->lock);
94}
95
96/*
97 * sock_unlock(): Unlock a port/socket pair
98 */
99static inline void sock_unlock(struct tipc_sock* tsock)
100{
101 spin_unlock_bh(tsock->p->lock);
102}
103
104/**
105 * pollmask - determine the current set of poll() events for a socket
106 * @sock: socket structure
107 *
108 * TIPC sets the returned events as follows:
109 * a) POLLRDNORM and POLLIN are set if the socket's receive queue is non-empty
110 * or if a connection-oriented socket does not have an active connection
111 * (i.e. a read operation will not block).
112 * b) POLLOUT is set except when a socket's connection has been terminated
113 * (i.e. a write operation will not block).
114 * c) POLLHUP is set when a socket's connection has been terminated.
115 *
116 * IMPORTANT: The fact that a read or write operation will not block does NOT
117 * imply that the operation will succeed!
118 *
119 * Returns pollmask value
120 */
121
122static inline u32 pollmask(struct socket *sock)
123{
124 u32 mask;
125
126 if ((skb_queue_len(&sock->sk->sk_receive_queue) != 0) ||
127 (sock->state == SS_UNCONNECTED) ||
128 (sock->state == SS_DISCONNECTING))
129 mask = (POLLRDNORM | POLLIN);
130 else
131 mask = 0;
132
133 if (sock->state == SS_DISCONNECTING)
134 mask |= POLLHUP;
135 else
136 mask |= POLLOUT;
137
138 return mask;
139}
140
141
142/**
143 * advance_queue - discard first buffer in queue
144 * @tsock: TIPC socket
145 */
146
147static inline void advance_queue(struct tipc_sock *tsock)
148{
149 sock_lock(tsock);
150 buf_discard(skb_dequeue(&tsock->sk.sk_receive_queue));
151 sock_unlock(tsock);
152 atomic_dec(&tipc_queue_size);
153}
154
155/**
156 * tipc_create - create a TIPC socket
157 * @sock: pre-allocated socket structure
158 * @protocol: protocol indicator (must be 0)
159 *
160 * This routine creates and attaches a 'struct sock' to the 'struct socket',
161 * then creates and attaches a TIPC port to the 'struct sock' part.
162 *
163 * Returns 0 on success, errno otherwise
164 */
165static int tipc_create(struct socket *sock, int protocol)
166{
167 struct tipc_sock *tsock;
168 struct tipc_port *port;
169 struct sock *sk;
170 u32 ref;
171
172 if ((sock->type != SOCK_STREAM) &&
173 (sock->type != SOCK_SEQPACKET) &&
174 (sock->type != SOCK_DGRAM) &&
175 (sock->type != SOCK_RDM))
176 return -EPROTOTYPE;
177
178 if (unlikely(protocol != 0))
179 return -EPROTONOSUPPORT;
180
181 ref = tipc_createport_raw(0, &dispatch, &wakeupdispatch, TIPC_LOW_IMPORTANCE);
182 if (unlikely(!ref))
183 return -ENOMEM;
184
185 sock->state = SS_UNCONNECTED;
186
187 switch (sock->type) {
188 case SOCK_STREAM:
189 sock->ops = &stream_ops;
190 break;
191 case SOCK_SEQPACKET:
192 sock->ops = &packet_ops;
193 break;
194 case SOCK_DGRAM:
195 tipc_set_portunreliable(ref, 1);
196 /* fall through */
197 case SOCK_RDM:
198 tipc_set_portunreturnable(ref, 1);
199 sock->ops = &msg_ops;
200 sock->state = SS_READY;
201 break;
202 }
203
204 sk = sk_alloc(AF_TIPC, GFP_KERNEL, &tipc_proto, 1);
205 if (!sk) {
206 tipc_deleteport(ref);
207 return -ENOMEM;
208 }
209
210 sock_init_data(sock, sk);
211 init_waitqueue_head(sk->sk_sleep);
212 sk->sk_rcvtimeo = 8 * HZ; /* default connect timeout = 8s */
213
214 tsock = tipc_sk(sk);
215 port = tipc_get_port(ref);
216
217 tsock->p = port;
218 port->usr_handle = tsock;
219
220 init_MUTEX(&tsock->sem);
221
222 dbg("sock_create: %x\n",tsock);
223
224 atomic_inc(&tipc_user_count);
225
226 return 0;
227}
228
229/**
230 * release - destroy a TIPC socket
231 * @sock: socket to destroy
232 *
233 * This routine cleans up any messages that are still queued on the socket.
234 * For DGRAM and RDM socket types, all queued messages are rejected.
235 * For SEQPACKET and STREAM socket types, the first message is rejected
236 * and any others are discarded. (If the first message on a STREAM socket
237 * is partially-read, it is discarded and the next one is rejected instead.)
238 *
239 * NOTE: Rejected messages are not necessarily returned to the sender! They
240 * are returned or discarded according to the "destination droppable" setting
241 * specified for the message by the sender.
242 *
243 * Returns 0 on success, errno otherwise
244 */
245
246static int release(struct socket *sock)
247{
248 struct tipc_sock *tsock = tipc_sk(sock->sk);
249 struct sock *sk = sock->sk;
250 int res = TIPC_OK;
251 struct sk_buff *buf;
252
253 dbg("sock_delete: %x\n",tsock);
254 if (!tsock)
255 return 0;
256 down_interruptible(&tsock->sem);
257 if (!sock->sk) {
258 up(&tsock->sem);
259 return 0;
260 }
261
262 /* Reject unreceived messages, unless no longer connected */
263
264 while (sock->state != SS_DISCONNECTING) {
265 sock_lock(tsock);
266 buf = skb_dequeue(&sk->sk_receive_queue);
267 if (!buf)
268 tsock->p->usr_handle = 0;
269 sock_unlock(tsock);
270 if (!buf)
271 break;
272 if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf)))
273 buf_discard(buf);
274 else
275 tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
276 atomic_dec(&tipc_queue_size);
277 }
278
279 /* Delete TIPC port */
280
281 res = tipc_deleteport(tsock->p->ref);
282 sock->sk = NULL;
283
284 /* Discard any remaining messages */
285
286 while ((buf = skb_dequeue(&sk->sk_receive_queue))) {
287 buf_discard(buf);
288 atomic_dec(&tipc_queue_size);
289 }
290
291 up(&tsock->sem);
292
293 sock_put(sk);
294
295 atomic_dec(&tipc_user_count);
296 return res;
297}
298
299/**
300 * bind - associate or disassociate TIPC name(s) with a socket
301 * @sock: socket structure
302 * @uaddr: socket address describing name(s) and desired operation
303 * @uaddr_len: size of socket address data structure
304 *
305 * Name and name sequence binding is indicated using a positive scope value;
306 * a negative scope value unbinds the specified name. Specifying no name
307 * (i.e. a socket address length of 0) unbinds all names from the socket.
308 *
309 * Returns 0 on success, errno otherwise
310 */
311
312static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
313{
314 struct tipc_sock *tsock = tipc_sk(sock->sk);
315 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
316 int res;
317
318 if (down_interruptible(&tsock->sem))
319 return -ERESTARTSYS;
320
321 if (unlikely(!uaddr_len)) {
322 res = tipc_withdraw(tsock->p->ref, 0, 0);
323 goto exit;
324 }
325
326 if (uaddr_len < sizeof(struct sockaddr_tipc)) {
327 res = -EINVAL;
328 goto exit;
329 }
330
331 if (addr->family != AF_TIPC) {
332 res = -EAFNOSUPPORT;
333 goto exit;
334 }
335 if (addr->addrtype == TIPC_ADDR_NAME)
336 addr->addr.nameseq.upper = addr->addr.nameseq.lower;
337 else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
338 res = -EAFNOSUPPORT;
339 goto exit;
340 }
341
342 if (addr->scope > 0)
343 res = tipc_publish(tsock->p->ref, addr->scope,
344 &addr->addr.nameseq);
345 else
346 res = tipc_withdraw(tsock->p->ref, -addr->scope,
347 &addr->addr.nameseq);
348exit:
349 up(&tsock->sem);
350 return res;
351}
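An illustrative user-space sketch of publishing a name sequence through bind(); the service type and instance range are hypothetical values:

#include <string.h>
#include <sys/socket.h>
#include <linux/tipc.h>

/* Publish instances 0..99 of service type 18888 with cluster scope
 * on an already created AF_TIPC socket 'sd'. */
int publish_example(int sd)
{
	struct sockaddr_tipc addr;

	memset(&addr, 0, sizeof(addr));
	addr.family = AF_TIPC;
	addr.addrtype = TIPC_ADDR_NAMESEQ;
	addr.scope = TIPC_CLUSTER_SCOPE;	/* positive scope => publish */
	addr.addr.nameseq.type = 18888;
	addr.addr.nameseq.lower = 0;
	addr.addr.nameseq.upper = 99;

	return bind(sd, (struct sockaddr *)&addr, sizeof(addr));
}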
352
353/**
354 * get_name - get port ID of socket or peer socket
355 * @sock: socket structure
356 * @uaddr: area for returned socket address
357 * @uaddr_len: area for returned length of socket address
358 * @peer: 0 to obtain socket name, 1 to obtain peer socket name
359 *
360 * Returns 0 on success, errno otherwise
361 */
362
363static int get_name(struct socket *sock, struct sockaddr *uaddr,
364 int *uaddr_len, int peer)
365{
366 struct tipc_sock *tsock = tipc_sk(sock->sk);
367 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
368 u32 res;
369
370 if (down_interruptible(&tsock->sem))
371 return -ERESTARTSYS;
372
373 *uaddr_len = sizeof(*addr);
374 addr->addrtype = TIPC_ADDR_ID;
375 addr->family = AF_TIPC;
376 addr->scope = 0;
377 if (peer)
378 res = tipc_peer(tsock->p->ref, &addr->addr.id);
379 else
380 res = tipc_ownidentity(tsock->p->ref, &addr->addr.id);
381 addr->addr.name.domain = 0;
382
383 up(&tsock->sem);
384 return res;
385}
386
387/**
388 * poll - read and possibly block on pollmask
389 * @file: file structure associated with the socket
390 * @sock: socket for which to calculate the poll bits
391 * @wait: poll table passed to poll_wait()
392 *
393 * Returns the pollmask
394 */
395
396static unsigned int poll(struct file *file, struct socket *sock,
397 poll_table *wait)
398{
399 poll_wait(file, sock->sk->sk_sleep, wait);
400 /* NEED LOCK HERE? */
401 return pollmask(sock);
402}
403
404/**
405 * dest_name_check - verify user is permitted to send to specified port name
406 * @dest: destination address
407 * @m: descriptor for message to be sent
408 *
409 * Prevents restricted configuration commands from being issued by
410 * unauthorized users.
411 *
412 * Returns 0 if permission is granted, otherwise errno
413 */
414
415static inline int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
416{
417 struct tipc_cfg_msg_hdr hdr;
418
419 if (likely(dest->addr.name.name.type >= TIPC_RESERVED_TYPES))
420 return 0;
421 if (likely(dest->addr.name.name.type == TIPC_TOP_SRV))
422 return 0;
423
424 if (likely(dest->addr.name.name.type != TIPC_CFG_SRV))
425 return -EACCES;
426
427 if (copy_from_user(&hdr, m->msg_iov[0].iov_base, sizeof(hdr)))
428 return -EFAULT;
429	if ((ntohs(hdr.tcm_type) & 0xC000) && !capable(CAP_NET_ADMIN))
430 return -EACCES;
431
432 return 0;
433}
434
435/**
436 * send_msg - send message in connectionless manner
437 * @iocb: (unused)
438 * @sock: socket structure
439 * @m: message to send
440 * @total_len: (unused)
441 *
442 * Message must have a destination specified explicitly.
443 * Used for SOCK_RDM and SOCK_DGRAM messages,
444 * and for 'SYN' messages on SOCK_SEQPACKET and SOCK_STREAM connections.
445 * (Note: 'SYN+' is prohibited on SOCK_STREAM.)
446 *
447 * Returns the number of bytes sent on success, or errno otherwise
448 */
449
450static int send_msg(struct kiocb *iocb, struct socket *sock,
451 struct msghdr *m, size_t total_len)
452{
453 struct tipc_sock *tsock = tipc_sk(sock->sk);
454 struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
455 struct sk_buff *buf;
456 int needs_conn;
457 int res = -EINVAL;
458
459 if (unlikely(!dest))
460 return -EDESTADDRREQ;
461 if (unlikely(dest->family != AF_TIPC))
462 return -EINVAL;
463
464 needs_conn = (sock->state != SS_READY);
465 if (unlikely(needs_conn)) {
466 if (sock->state == SS_LISTENING)
467 return -EPIPE;
468 if (sock->state != SS_UNCONNECTED)
469 return -EISCONN;
470 if ((tsock->p->published) ||
471 ((sock->type == SOCK_STREAM) && (total_len != 0)))
472 return -EOPNOTSUPP;
473 }
474
475 if (down_interruptible(&tsock->sem))
476 return -ERESTARTSYS;
477
478 if (needs_conn) {
479
480 /* Abort any pending connection attempts (very unlikely) */
481
482 while ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) {
483 tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
484 atomic_dec(&tipc_queue_size);
485 }
486
487 sock->state = SS_CONNECTING;
488 }
489
490 do {
491 if (dest->addrtype == TIPC_ADDR_NAME) {
492 if ((res = dest_name_check(dest, m)))
493 goto exit;
494 res = tipc_send2name(tsock->p->ref,
495 &dest->addr.name.name,
496 dest->addr.name.domain,
497 m->msg_iovlen,
498 m->msg_iov);
499 }
500 else if (dest->addrtype == TIPC_ADDR_ID) {
501 res = tipc_send2port(tsock->p->ref,
502 &dest->addr.id,
503 m->msg_iovlen,
504 m->msg_iov);
505 }
506 else if (dest->addrtype == TIPC_ADDR_MCAST) {
507 if (needs_conn) {
508 res = -EOPNOTSUPP;
509 goto exit;
510 }
511 if ((res = dest_name_check(dest, m)))
512 goto exit;
513 res = tipc_multicast(tsock->p->ref,
514 &dest->addr.nameseq,
515 0,
516 m->msg_iovlen,
517 m->msg_iov);
518 }
519 if (likely(res != -ELINKCONG)) {
520exit:
521 up(&tsock->sem);
522 return res;
523 }
524 if (m->msg_flags & MSG_DONTWAIT) {
525 res = -EWOULDBLOCK;
526 goto exit;
527 }
528 if (wait_event_interruptible(*sock->sk->sk_sleep,
529 !tsock->p->congested)) {
530 res = -ERESTARTSYS;
531 goto exit;
532 }
533 } while (1);
534}
535
536/**
537 * send_packet - send a connection-oriented message
538 * @iocb: (unused)
539 * @sock: socket structure
540 * @m: message to send
541 * @total_len: (unused)
542 *
543 * Used for SOCK_SEQPACKET messages and SOCK_STREAM data.
544 *
545 * Returns the number of bytes sent on success, or errno otherwise
546 */
547
548static int send_packet(struct kiocb *iocb, struct socket *sock,
549 struct msghdr *m, size_t total_len)
550{
551 struct tipc_sock *tsock = tipc_sk(sock->sk);
552 struct sockaddr_tipc *dest = (struct sockaddr_tipc *)m->msg_name;
553 int res;
554
555 /* Handle implied connection establishment */
556
557 if (unlikely(dest))
558 return send_msg(iocb, sock, m, total_len);
559
560 if (down_interruptible(&tsock->sem)) {
561 return -ERESTARTSYS;
562 }
563
564 if (unlikely(sock->state != SS_CONNECTED)) {
565 if (sock->state == SS_DISCONNECTING)
566 res = -EPIPE;
567 else
568 res = -ENOTCONN;
569 goto exit;
570 }
571
572 do {
573 res = tipc_send(tsock->p->ref, m->msg_iovlen, m->msg_iov);
574 if (likely(res != -ELINKCONG)) {
575exit:
576 up(&tsock->sem);
577 return res;
578 }
579 if (m->msg_flags & MSG_DONTWAIT) {
580 res = -EWOULDBLOCK;
581 goto exit;
582 }
583 if (wait_event_interruptible(*sock->sk->sk_sleep,
584 !tsock->p->congested)) {
585 res = -ERESTARTSYS;
586 goto exit;
587 }
588 } while (1);
589}
590
591/**
592 * send_stream - send stream-oriented data
593 * @iocb: (unused)
594 * @sock: socket structure
595 * @m: data to send
596 * @total_len: total length of data to be sent
597 *
598 * Used for SOCK_STREAM data.
599 *
600 * Returns the number of bytes sent on success, or errno otherwise
601 */
602
603
604static int send_stream(struct kiocb *iocb, struct socket *sock,
605 struct msghdr *m, size_t total_len)
606{
607 struct msghdr my_msg;
608 struct iovec my_iov;
609 struct iovec *curr_iov;
610 int curr_iovlen;
611 char __user *curr_start;
612 int curr_left;
613 int bytes_to_send;
614 int res;
615
616 if (likely(total_len <= TIPC_MAX_USER_MSG_SIZE))
617 return send_packet(iocb, sock, m, total_len);
618
619 /* Can only send large data streams if already connected */
620
621 if (unlikely(sock->state != SS_CONNECTED)) {
622 if (sock->state == SS_DISCONNECTING)
623 return -EPIPE;
624 else
625 return -ENOTCONN;
626 }
627
628 /*
629 * Send each iovec entry using one or more messages
630 *
631 * Note: This algorithm is good for the most likely case
632 * (i.e. one large iovec entry), but could be improved to pass sets
633 * of small iovec entries into send_packet().
634 */
635
636 my_msg = *m;
637 curr_iov = my_msg.msg_iov;
638 curr_iovlen = my_msg.msg_iovlen;
639 my_msg.msg_iov = &my_iov;
640 my_msg.msg_iovlen = 1;
641
642 while (curr_iovlen--) {
643 curr_start = curr_iov->iov_base;
644 curr_left = curr_iov->iov_len;
645
646 while (curr_left) {
647 bytes_to_send = (curr_left < TIPC_MAX_USER_MSG_SIZE)
648 ? curr_left : TIPC_MAX_USER_MSG_SIZE;
649 my_iov.iov_base = curr_start;
650 my_iov.iov_len = bytes_to_send;
651 if ((res = send_packet(iocb, sock, &my_msg, 0)) < 0)
652 return res;
653 curr_left -= bytes_to_send;
654 curr_start += bytes_to_send;
655 }
656
657 curr_iov++;
658 }
659
660 return total_len;
661}
662
663/**
664 * auto_connect - complete connection setup to a remote port
665 * @sock: socket structure
666 * @tsock: TIPC-specific socket structure
667 * @msg: peer's response message
668 *
669 * Returns 0 on success, errno otherwise
670 */
671
672static int auto_connect(struct socket *sock, struct tipc_sock *tsock,
673 struct tipc_msg *msg)
674{
675 struct tipc_portid peer;
676
677 if (msg_errcode(msg)) {
678 sock->state = SS_DISCONNECTING;
679 return -ECONNREFUSED;
680 }
681
682 peer.ref = msg_origport(msg);
683 peer.node = msg_orignode(msg);
684 tipc_connect2port(tsock->p->ref, &peer);
685 tipc_set_portimportance(tsock->p->ref, msg_importance(msg));
686 sock->state = SS_CONNECTED;
687 return 0;
688}
689
690/**
691 * set_orig_addr - capture sender's address for received message
692 * @m: descriptor for message info
693 * @msg: received message header
694 *
695 * Note: Address is not captured if not requested by receiver.
696 */
697
698static inline void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)
699{
700 struct sockaddr_tipc *addr = (struct sockaddr_tipc *)m->msg_name;
701
702 if (addr) {
703 addr->family = AF_TIPC;
704 addr->addrtype = TIPC_ADDR_ID;
705 addr->addr.id.ref = msg_origport(msg);
706 addr->addr.id.node = msg_orignode(msg);
707 addr->addr.name.domain = 0; /* could leave uninitialized */
708 addr->scope = 0; /* could leave uninitialized */
709 m->msg_namelen = sizeof(struct sockaddr_tipc);
710 }
711}
712
713/**
714 * anc_data_recv - optionally capture ancillary data for received message
715 * @m: descriptor for message info
716 * @msg: received message header
717 * @tport: TIPC port associated with message
718 *
719 * Note: Ancillary data is not captured if not requested by receiver.
720 *
721 * Returns 0 if successful, otherwise errno
722 */
723
724static inline int anc_data_recv(struct msghdr *m, struct tipc_msg *msg,
725 struct tipc_port *tport)
726{
727 u32 anc_data[3];
728 u32 err;
729 u32 dest_type;
730 int res;
731
732 if (likely(m->msg_controllen == 0))
733 return 0;
734
735 /* Optionally capture errored message object(s) */
736
737 err = msg ? msg_errcode(msg) : 0;
738 if (unlikely(err)) {
739 anc_data[0] = err;
740 anc_data[1] = msg_data_sz(msg);
741 if ((res = put_cmsg(m, SOL_SOCKET, TIPC_ERRINFO, 8, anc_data)))
742 return res;
743 if (anc_data[1] &&
744 (res = put_cmsg(m, SOL_SOCKET, TIPC_RETDATA, anc_data[1],
745 msg_data(msg))))
746 return res;
747 }
748
749 /* Optionally capture message destination object */
750
751 dest_type = msg ? msg_type(msg) : TIPC_DIRECT_MSG;
752 switch (dest_type) {
753 case TIPC_NAMED_MSG:
754 anc_data[0] = msg_nametype(msg);
755 anc_data[1] = msg_namelower(msg);
756 anc_data[2] = msg_namelower(msg);
757 break;
758 case TIPC_MCAST_MSG:
759 anc_data[0] = msg_nametype(msg);
760 anc_data[1] = msg_namelower(msg);
761 anc_data[2] = msg_nameupper(msg);
762 break;
763 case TIPC_CONN_MSG:
764 anc_data[0] = tport->conn_type;
765 anc_data[1] = tport->conn_instance;
766 anc_data[2] = tport->conn_instance;
767 break;
768 default:
769 anc_data[0] = 0;
770 }
771 if (anc_data[0] &&
772 (res = put_cmsg(m, SOL_SOCKET, TIPC_DESTNAME, 12, anc_data)))
773 return res;
774
775 return 0;
776}
777
778/**
779 * recv_msg - receive packet-oriented message
780 * @iocb: (unused)
781 * @m: descriptor for message info
782 * @buf_len: total size of user buffer area
783 * @flags: receive flags
784 *
785 * Used for SOCK_DGRAM, SOCK_RDM, and SOCK_SEQPACKET messages.
786 * If the complete message doesn't fit in user area, truncate it.
787 *
788 * Returns size of returned message data, errno otherwise
789 */
790
791static int recv_msg(struct kiocb *iocb, struct socket *sock,
792 struct msghdr *m, size_t buf_len, int flags)
793{
794 struct tipc_sock *tsock = tipc_sk(sock->sk);
795 struct sk_buff *buf;
796 struct tipc_msg *msg;
797 unsigned int q_len;
798 unsigned int sz;
799 u32 err;
800 int res;
801
802 /* Currently doesn't support receiving into multiple iovec entries */
803
804 if (m->msg_iovlen != 1)
805 return -EOPNOTSUPP;
806
807 /* Catch invalid receive attempts */
808
809 if (unlikely(!buf_len))
810 return -EINVAL;
811
812 if (sock->type == SOCK_SEQPACKET) {
813 if (unlikely(sock->state == SS_UNCONNECTED))
814 return -ENOTCONN;
815 if (unlikely((sock->state == SS_DISCONNECTING) &&
816 (skb_queue_len(&sock->sk->sk_receive_queue) == 0)))
817 return -ENOTCONN;
818 }
819
820 /* Look for a message in receive queue; wait if necessary */
821
822 if (unlikely(down_interruptible(&tsock->sem)))
823 return -ERESTARTSYS;
824
825restart:
826 if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
827 (flags & MSG_DONTWAIT))) {
828 res = -EWOULDBLOCK;
829 goto exit;
830 }
831
832 if ((res = wait_event_interruptible(
833 *sock->sk->sk_sleep,
834 ((q_len = skb_queue_len(&sock->sk->sk_receive_queue)) ||
835 (sock->state == SS_DISCONNECTING))) )) {
836 goto exit;
837 }
838
839 /* Catch attempt to receive on an already terminated connection */
840 /* [THIS CHECK MAY OVERLAP WITH AN EARLIER CHECK] */
841
842 if (!q_len) {
843 res = -ENOTCONN;
844 goto exit;
845 }
846
847 /* Get access to first message in receive queue */
848
849 buf = skb_peek(&sock->sk->sk_receive_queue);
850 msg = buf_msg(buf);
851 sz = msg_data_sz(msg);
852 err = msg_errcode(msg);
853
854 /* Complete connection setup for an implied connect */
855
856 if (unlikely(sock->state == SS_CONNECTING)) {
857 if ((res = auto_connect(sock, tsock, msg)))
858 goto exit;
859 }
860
861 /* Discard an empty non-errored message & try again */
862
863 if ((!sz) && (!err)) {
864 advance_queue(tsock);
865 goto restart;
866 }
867
868 /* Capture sender's address (optional) */
869
870 set_orig_addr(m, msg);
871
872 /* Capture ancillary data (optional) */
873
874 if ((res = anc_data_recv(m, msg, tsock->p)))
875 goto exit;
876
877 /* Capture message data (if valid) & compute return value (always) */
878
879 if (!err) {
880 if (unlikely(buf_len < sz)) {
881 sz = buf_len;
882 m->msg_flags |= MSG_TRUNC;
883 }
884 if (unlikely(copy_to_user(m->msg_iov->iov_base, msg_data(msg),
885 sz))) {
886 res = -EFAULT;
887 goto exit;
888 }
889 res = sz;
890 } else {
891 if ((sock->state == SS_READY) ||
892 ((err == TIPC_CONN_SHUTDOWN) || m->msg_control))
893 res = 0;
894 else
895 res = -ECONNRESET;
896 }
897
898 /* Consume received message (optional) */
899
900 if (likely(!(flags & MSG_PEEK))) {
901 if (unlikely(++tsock->p->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
902 tipc_acknowledge(tsock->p->ref, tsock->p->conn_unacked);
903 advance_queue(tsock);
904 }
905exit:
906 up(&tsock->sem);
907 return res;
908}
909
910/**
911 * recv_stream - receive stream-oriented data
912 * @iocb: (unused)
913 * @m: descriptor for message info
914 * @buf_len: total size of user buffer area
915 * @flags: receive flags
916 *
917 * Used for SOCK_STREAM messages only. If not enough data is available,
918 * it will optionally wait for more; data is never truncated.
919 *
920 * Returns size of returned message data, errno otherwise
921 */
922
923static int recv_stream(struct kiocb *iocb, struct socket *sock,
924 struct msghdr *m, size_t buf_len, int flags)
925{
926 struct tipc_sock *tsock = tipc_sk(sock->sk);
927 struct sk_buff *buf;
928 struct tipc_msg *msg;
929 unsigned int q_len;
930 unsigned int sz;
931 int sz_to_copy;
932 int sz_copied = 0;
933 int needed;
934 char *crs = m->msg_iov->iov_base;
935 unsigned char *buf_crs;
936 u32 err;
937 int res;
938
939 /* Currently doesn't support receiving into multiple iovec entries */
940
941 if (m->msg_iovlen != 1)
942 return -EOPNOTSUPP;
943
944 /* Catch invalid receive attempts */
945
946 if (unlikely(!buf_len))
947 return -EINVAL;
948
949 if (unlikely(sock->state == SS_DISCONNECTING)) {
950 if (skb_queue_len(&sock->sk->sk_receive_queue) == 0)
951 return -ENOTCONN;
952 } else if (unlikely(sock->state != SS_CONNECTED))
953 return -ENOTCONN;
954
955 /* Look for a message in receive queue; wait if necessary */
956
957 if (unlikely(down_interruptible(&tsock->sem)))
958 return -ERESTARTSYS;
959
960restart:
961 if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
962 (flags & MSG_DONTWAIT))) {
963 res = (sz_copied == 0) ? -EWOULDBLOCK : 0;
964 goto exit;
965 }
966
967 if ((res = wait_event_interruptible(
968 *sock->sk->sk_sleep,
969 ((q_len = skb_queue_len(&sock->sk->sk_receive_queue)) ||
970 (sock->state == SS_DISCONNECTING))) )) {
971 goto exit;
972 }
973
974 /* Catch attempt to receive on an already terminated connection */
975 /* [THIS CHECK MAY OVERLAP WITH AN EARLIER CHECK] */
976
977 if (!q_len) {
978 res = -ENOTCONN;
979 goto exit;
980 }
981
982 /* Get access to first message in receive queue */
983
984 buf = skb_peek(&sock->sk->sk_receive_queue);
985 msg = buf_msg(buf);
986 sz = msg_data_sz(msg);
987 err = msg_errcode(msg);
988
989 /* Discard an empty non-errored message & try again */
990
991 if ((!sz) && (!err)) {
992 advance_queue(tsock);
993 goto restart;
994 }
995
996 /* Optionally capture sender's address & ancillary data of first msg */
997
998 if (sz_copied == 0) {
999 set_orig_addr(m, msg);
1000 if ((res = anc_data_recv(m, msg, tsock->p)))
1001 goto exit;
1002 }
1003
1004 /* Capture message data (if valid) & compute return value (always) */
1005
1006 if (!err) {
1007 buf_crs = (unsigned char *)(TIPC_SKB_CB(buf)->handle);
1008 sz = buf->tail - buf_crs;
1009
1010 needed = (buf_len - sz_copied);
1011 sz_to_copy = (sz <= needed) ? sz : needed;
1012 if (unlikely(copy_to_user(crs, buf_crs, sz_to_copy))) {
1013 res = -EFAULT;
1014 goto exit;
1015 }
1016 sz_copied += sz_to_copy;
1017
1018 if (sz_to_copy < sz) {
1019 if (!(flags & MSG_PEEK))
1020 TIPC_SKB_CB(buf)->handle = buf_crs + sz_to_copy;
1021 goto exit;
1022 }
1023
1024 crs += sz_to_copy;
1025 } else {
1026 if (sz_copied != 0)
1027 goto exit; /* can't add error msg to valid data */
1028
1029 if ((err == TIPC_CONN_SHUTDOWN) || m->msg_control)
1030 res = 0;
1031 else
1032 res = -ECONNRESET;
1033 }
1034
1035 /* Consume received message (optional) */
1036
1037 if (likely(!(flags & MSG_PEEK))) {
1038 if (unlikely(++tsock->p->conn_unacked >= TIPC_FLOW_CONTROL_WIN))
1039 tipc_acknowledge(tsock->p->ref, tsock->p->conn_unacked);
1040 advance_queue(tsock);
1041 }
1042
1043 /* Loop around if more data is required */
1044
1045 if ((sz_copied < buf_len) /* didn't get all requested data */
1046 && (flags & MSG_WAITALL) /* ... and need to wait for more */
1047 && (!(flags & MSG_PEEK)) /* ... and aren't just peeking at data */
1048 && (!err) /* ... and haven't reached a FIN */
1049 )
1050 goto restart;
1051
1052exit:
1053 up(&tsock->sem);
1054 return res ? res : sz_copied;
1055}
1056
1057/**
1058 * queue_overloaded - test if queue overload condition exists
1059 * @queue_size: current size of queue
1060 * @base: nominal maximum size of queue
1061 * @msg: message to be added to queue
1062 *
1063 * Returns 1 if queue is currently overloaded, 0 otherwise
1064 */
1065
1066static int queue_overloaded(u32 queue_size, u32 base, struct tipc_msg *msg)
1067{
1068 u32 threshold;
1069 u32 imp = msg_importance(msg);
1070
1071 if (imp == TIPC_LOW_IMPORTANCE)
1072 threshold = base;
1073 else if (imp == TIPC_MEDIUM_IMPORTANCE)
1074 threshold = base * 2;
1075 else if (imp == TIPC_HIGH_IMPORTANCE)
1076 threshold = base * 100;
1077 else
1078 return 0;
1079
1080 if (msg_connected(msg))
1081 threshold *= 4;
1082
1083 return (queue_size > threshold);
1084}
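/*
 * Worked example: with OVERLOAD_LIMIT_BASE of 5000, a connectionless
 * message of medium importance is rejected once the queue holds more
 * than 10000 buffers (base * 2); the same message on an established
 * connection tolerates 40000 (4x), while critical-importance messages
 * are never rejected by this test.
 */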
1085
1086/**
1087 * async_disconnect - wrapper function used to disconnect port
1088 * @portref: TIPC port reference (passed as pointer-sized value)
1089 */
1090
1091static void async_disconnect(unsigned long portref)
1092{
1093 tipc_disconnect((u32)portref);
1094}
1095
1096/**
1097 * dispatch - handle arriving message
1098 * @tport: TIPC port that received message
1099 * @buf: message
1100 *
1101 * Called with port locked. Must not take socket lock to avoid deadlock risk.
1102 *
1103 * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
1104 */
1105
1106static u32 dispatch(struct tipc_port *tport, struct sk_buff *buf)
1107{
1108 struct tipc_msg *msg = buf_msg(buf);
1109 struct tipc_sock *tsock = (struct tipc_sock *)tport->usr_handle;
1110 struct socket *sock;
1111 u32 recv_q_len;
1112
1113 /* Reject message if socket is closing */
1114
1115 if (!tsock)
1116 return TIPC_ERR_NO_PORT;
1117
1118 /* Reject message if it is wrong sort of message for socket */
1119
1120 /*
1121 * WOULD IT BE BETTER TO JUST DISCARD THESE MESSAGES INSTEAD?
1122 * "NO PORT" ISN'T REALLY THE RIGHT ERROR CODE, AND THERE MAY
1123 * BE SECURITY IMPLICATIONS INHERENT IN REJECTING INVALID TRAFFIC
1124 */
1125 sock = tsock->sk.sk_socket;
1126 if (sock->state == SS_READY) {
1127 if (msg_connected(msg)) {
1128 msg_dbg(msg, "dispatch filter 1\n");
1129 return TIPC_ERR_NO_PORT;
1130 }
1131 } else {
1132 if (msg_mcast(msg)) {
1133 msg_dbg(msg, "dispatch filter 2\n");
1134 return TIPC_ERR_NO_PORT;
1135 }
1136 if (sock->state == SS_CONNECTED) {
1137 if (!msg_connected(msg)) {
1138 msg_dbg(msg, "dispatch filter 3\n");
1139 return TIPC_ERR_NO_PORT;
1140 }
1141 }
1142 else if (sock->state == SS_CONNECTING) {
1143 if (!msg_connected(msg) && (msg_errcode(msg) == 0)) {
1144 msg_dbg(msg, "dispatch filter 4\n");
1145 return TIPC_ERR_NO_PORT;
1146 }
1147 }
1148 else if (sock->state == SS_LISTENING) {
1149 if (msg_connected(msg) || msg_errcode(msg)) {
1150 msg_dbg(msg, "dispatch filter 5\n");
1151 return TIPC_ERR_NO_PORT;
1152 }
1153 }
1154 else if (sock->state == SS_DISCONNECTING) {
1155 msg_dbg(msg, "dispatch filter 6\n");
1156 return TIPC_ERR_NO_PORT;
1157 }
1158 else /* (sock->state == SS_UNCONNECTED) */ {
1159 if (msg_connected(msg) || msg_errcode(msg)) {
1160 msg_dbg(msg, "dispatch filter 7\n");
1161 return TIPC_ERR_NO_PORT;
1162 }
1163 }
1164 }
1165
1166 /* Reject message if there isn't room to queue it */
1167
1168 if (unlikely((u32)atomic_read(&tipc_queue_size) >
1169 OVERLOAD_LIMIT_BASE)) {
1170 if (queue_overloaded(atomic_read(&tipc_queue_size),
1171 OVERLOAD_LIMIT_BASE, msg))
1172 return TIPC_ERR_OVERLOAD;
1173 }
1174 recv_q_len = skb_queue_len(&tsock->sk.sk_receive_queue);
1175 if (unlikely(recv_q_len > (OVERLOAD_LIMIT_BASE / 2))) {
1176 if (queue_overloaded(recv_q_len,
1177 OVERLOAD_LIMIT_BASE / 2, msg))
1178 return TIPC_ERR_OVERLOAD;
1179 }
1180
1181 /* Initiate connection termination for an incoming 'FIN' */
1182
1183 if (unlikely(msg_errcode(msg) && (sock->state == SS_CONNECTED))) {
1184 sock->state = SS_DISCONNECTING;
1185 /* Note: Use signal since port lock is already taken! */
1186 tipc_k_signal((Handler)async_disconnect, tport->ref);
1187 }
1188
1189 /* Enqueue message (finally!) */
1190
1191 msg_dbg(msg,"<DISP<: ");
1192 TIPC_SKB_CB(buf)->handle = msg_data(msg);
1193 atomic_inc(&tipc_queue_size);
1194 skb_queue_tail(&sock->sk->sk_receive_queue, buf);
1195
1196 wake_up_interruptible(sock->sk->sk_sleep);
1197 return TIPC_OK;
1198}
1199
1200/**
1201 * wakeupdispatch - wake up port after congestion
1202 * @tport: port to wake up
1203 *
1204 * Called with port lock on.
1205 */
1206
1207static void wakeupdispatch(struct tipc_port *tport)
1208{
1209 struct tipc_sock *tsock = (struct tipc_sock *)tport->usr_handle;
1210
1211 wake_up_interruptible(tsock->sk.sk_sleep);
1212}
1213
1214/**
1215 * connect - establish a connection to another TIPC port
1216 * @sock: socket structure
1217 * @dest: socket address for destination port
1218 * @destlen: size of socket address data structure
1219 * @flags: (unused)
1220 *
1221 * Returns 0 on success, errno otherwise
1222 */
1223
1224static int connect(struct socket *sock, struct sockaddr *dest, int destlen,
1225 int flags)
1226{
1227 struct tipc_sock *tsock = tipc_sk(sock->sk);
1228 struct sockaddr_tipc *dst = (struct sockaddr_tipc *)dest;
1229 struct msghdr m = {0,};
1230 struct sk_buff *buf;
1231 struct tipc_msg *msg;
1232 int res;
1233
1234 /* For now, TIPC does not allow use of connect() with DGRAM or RDM types */
1235
1236 if (sock->state == SS_READY)
1237 return -EOPNOTSUPP;
1238
1239 /* MOVE THE REST OF THIS ERROR CHECKING TO send_msg()? */
1240 if (sock->state == SS_LISTENING)
1241 return -EOPNOTSUPP;
1242 if (sock->state == SS_CONNECTING)
1243 return -EALREADY;
1244 if (sock->state != SS_UNCONNECTED)
1245 return -EISCONN;
1246
1247 if ((dst->family != AF_TIPC) ||
1248 ((dst->addrtype != TIPC_ADDR_NAME) && (dst->addrtype != TIPC_ADDR_ID)))
1249 return -EINVAL;
1250
1251 /* Send a 'SYN-' to destination */
1252
1253 m.msg_name = dest;
1254 if ((res = send_msg(0, sock, &m, 0)) < 0) {
1255 sock->state = SS_DISCONNECTING;
1256 return res;
1257 }
1258
1259 if (down_interruptible(&tsock->sem))
1260 return -ERESTARTSYS;
1261
1262 /* Wait for destination's 'ACK' response */
1263
1264 res = wait_event_interruptible_timeout(*sock->sk->sk_sleep,
1265 skb_queue_len(&sock->sk->sk_receive_queue),
1266 sock->sk->sk_rcvtimeo);
1267 buf = skb_peek(&sock->sk->sk_receive_queue);
1268 if (res > 0) {
1269 msg = buf_msg(buf);
1270 res = auto_connect(sock, tsock, msg);
1271 if (!res) {
1272 if (dst->addrtype == TIPC_ADDR_NAME) {
1273 tsock->p->conn_type = dst->addr.name.name.type;
1274 tsock->p->conn_instance = dst->addr.name.name.instance;
1275 }
1276 if (!msg_data_sz(msg))
1277 advance_queue(tsock);
1278 }
1279 } else {
1280 if (res == 0) {
1281 res = -ETIMEDOUT;
1282 } else
1283 { /* leave "res" unchanged */ }
1284 sock->state = SS_DISCONNECTING;
1285 }
1286
1287 up(&tsock->sem);
1288 return res;
1289}
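An illustrative user-space sketch of connecting a SOCK_SEQPACKET socket to a named service; the type and instance values are again hypothetical:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/tipc.h>

int connect_example(void)
{
	struct sockaddr_tipc dst;
	int sd = socket(AF_TIPC, SOCK_SEQPACKET, 0);

	if (sd < 0)
		return -1;
	memset(&dst, 0, sizeof(dst));
	dst.family = AF_TIPC;
	dst.addrtype = TIPC_ADDR_NAME;
	dst.addr.name.name.type = 18888;
	dst.addr.name.name.instance = 17;
	dst.addr.name.domain = 0;		/* default lookup domain */
	if (connect(sd, (struct sockaddr *)&dst, sizeof(dst)) < 0) {
		/* e.g. ETIMEDOUT after the default 8 s if no response */
		close(sd);
		return -1;
	}
	return sd;
}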
1290
1291/**
1292 * listen - allow socket to listen for incoming connections
1293 * @sock: socket structure
1294 * @len: (unused)
1295 *
1296 * Returns 0 on success, errno otherwise
1297 */
1298
1299static int listen(struct socket *sock, int len)
1300{
1301 /* REQUIRES SOCKET LOCKING OF SOME SORT? */
1302
1303 if (sock->state == SS_READY)
1304 return -EOPNOTSUPP;
1305 if (sock->state != SS_UNCONNECTED)
1306 return -EINVAL;
1307 sock->state = SS_LISTENING;
1308 return 0;
1309}
1310
1311/**
1312 * accept - wait for connection request
1313 * @sock: listening socket
1314 * @newsock: new socket that is to be connected
1315 * @flags: file-related flags associated with socket
1316 *
1317 * Returns 0 on success, errno otherwise
1318 */
1319
1320static int accept(struct socket *sock, struct socket *newsock, int flags)
1321{
1322 struct tipc_sock *tsock = tipc_sk(sock->sk);
1323 struct sk_buff *buf;
1324 int res = -EFAULT;
1325
1326 if (sock->state == SS_READY)
1327 return -EOPNOTSUPP;
1328 if (sock->state != SS_LISTENING)
1329 return -EINVAL;
1330
1331 if (unlikely((skb_queue_len(&sock->sk->sk_receive_queue) == 0) &&
1332 (flags & O_NONBLOCK)))
1333 return -EWOULDBLOCK;
1334
1335 if (down_interruptible(&tsock->sem))
1336 return -ERESTARTSYS;
1337
1338 if (wait_event_interruptible(*sock->sk->sk_sleep,
1339 skb_queue_len(&sock->sk->sk_receive_queue))) {
1340 res = -ERESTARTSYS;
1341 goto exit;
1342 }
1343 buf = skb_peek(&sock->sk->sk_receive_queue);
1344
1345 res = tipc_create(newsock, 0);
1346 if (!res) {
1347 struct tipc_sock *new_tsock = tipc_sk(newsock->sk);
1348 struct tipc_portid id;
1349 struct tipc_msg *msg = buf_msg(buf);
1350 u32 new_ref = new_tsock->p->ref;
1351
1352 id.ref = msg_origport(msg);
1353 id.node = msg_orignode(msg);
1354 tipc_connect2port(new_ref, &id);
1355 newsock->state = SS_CONNECTED;
1356
1357 tipc_set_portimportance(new_ref, msg_importance(msg));
1358 if (msg_named(msg)) {
1359 new_tsock->p->conn_type = msg_nametype(msg);
1360 new_tsock->p->conn_instance = msg_nameinst(msg);
1361 }
1362
1363 /*
1364 * Respond to 'SYN-' by discarding it & returning 'ACK'-.
1365 * Respond to 'SYN+' by queuing it on new socket.
1366 */
1367
1368 msg_dbg(msg,"<ACC<: ");
1369 if (!msg_data_sz(msg)) {
1370 struct msghdr m = {0,};
1371
1372 send_packet(0, newsock, &m, 0);
1373 advance_queue(tsock);
1374 } else {
1375 sock_lock(tsock);
1376 skb_dequeue(&sock->sk->sk_receive_queue);
1377 sock_unlock(tsock);
1378 skb_queue_head(&newsock->sk->sk_receive_queue, buf);
1379 }
1380 }
1381exit:
1382 up(&tsock->sem);
1383 return res;
1384}
1385
1386/**
1387 * shutdown - shutdown socket connection
1388 * @sock: socket structure
1389 * @how: direction to close (always treated as read + write)
1390 *
1391 * Terminates connection (if necessary), then purges socket's receive queue.
1392 *
1393 * Returns 0 on success, errno otherwise
1394 */
1395
1396static int shutdown(struct socket *sock, int how)
1397{
1398 struct tipc_sock* tsock = tipc_sk(sock->sk);
1399 struct sk_buff *buf;
1400 int res;
1401
1402 /* Could return -EINVAL for an invalid "how", but why bother? */
1403
1404 if (down_interruptible(&tsock->sem))
1405 return -ERESTARTSYS;
1406
1407 sock_lock(tsock);
1408
1409 switch (sock->state) {
1410 case SS_CONNECTED:
1411
1412 /* Send 'FIN+' or 'FIN-' message to peer */
1413
1414 sock_unlock(tsock);
1415restart:
1416 if ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) {
1417 atomic_dec(&tipc_queue_size);
1418 if (TIPC_SKB_CB(buf)->handle != msg_data(buf_msg(buf))) {
1419 buf_discard(buf);
1420 goto restart;
1421 }
1422 tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
1423 }
1424 else {
1425 tipc_shutdown(tsock->p->ref);
1426 }
1427 sock_lock(tsock);
1428
1429 /* fall through */
1430
1431 case SS_DISCONNECTING:
1432
1433 /* Discard any unreceived messages */
1434
1435 while ((buf = skb_dequeue(&sock->sk->sk_receive_queue))) {
1436 atomic_dec(&tipc_queue_size);
1437 buf_discard(buf);
1438 }
1439 tsock->p->conn_unacked = 0;
1440
1441 /* fall through */
1442
1443 case SS_CONNECTING:
1444 sock->state = SS_DISCONNECTING;
1445 res = 0;
1446 break;
1447
1448 default:
1449 res = -ENOTCONN;
1450 }
1451
1452 sock_unlock(tsock);
1453
1454 up(&tsock->sem);
1455 return res;
1456}
1457
1458/**
1459 * setsockopt - set socket option
1460 * @sock: socket structure
1461 * @lvl: option level
1462 * @opt: option identifier
1463 * @ov: pointer to new option value
1464 * @ol: length of option value
1465 *
1466 * For stream sockets only, accepts and ignores all IPPROTO_TCP options
1467 * (to ease compatibility).
1468 *
1469 * Returns 0 on success, errno otherwise
1470 */
1471
1472static int setsockopt(struct socket *sock, int lvl, int opt, char *ov, int ol)
1473{
1474 struct tipc_sock *tsock = tipc_sk(sock->sk);
1475 u32 value;
1476 int res;
1477
1478 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
1479 return 0;
1480 if (lvl != SOL_TIPC)
1481 return -ENOPROTOOPT;
1482 if (ol < sizeof(value))
1483 return -EINVAL;
1484 if ((res = get_user(value, (u32 *)ov)))
1485 return res;
1486
1487 if (down_interruptible(&tsock->sem))
1488 return -ERESTARTSYS;
1489
1490 switch (opt) {
1491 case TIPC_IMPORTANCE:
1492 res = tipc_set_portimportance(tsock->p->ref, value);
1493 break;
1494 case TIPC_SRC_DROPPABLE:
1495 if (sock->type != SOCK_STREAM)
1496 res = tipc_set_portunreliable(tsock->p->ref, value);
1497 else
1498 res = -ENOPROTOOPT;
1499 break;
1500 case TIPC_DEST_DROPPABLE:
1501 res = tipc_set_portunreturnable(tsock->p->ref, value);
1502 break;
1503 case TIPC_CONN_TIMEOUT:
1504 sock->sk->sk_rcvtimeo = (value * HZ / 1000);
1505 break;
1506 default:
1507 res = -EINVAL;
1508 }
1509
1510 up(&tsock->sem);
1511 return res;
1512}
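As a usage sketch, the connection timeout can be adjusted from user space before connect(); the value is given in milliseconds and converted to jiffies as shown above:

#include <sys/socket.h>
#include <linux/tipc.h>

static int set_connect_timeout(int sd, unsigned int timeout_ms)
{
	return setsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT,
			  &timeout_ms, sizeof(timeout_ms));
}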
1513
1514/**
1515 * getsockopt - get socket option
1516 * @sock: socket structure
1517 * @lvl: option level
1518 * @opt: option identifier
1519 * @ov: receptacle for option value
1520 * @ol: receptacle for length of option value
1521 *
1522 * For stream sockets only, returns 0 length result for all IPPROTO_TCP options
1523 * (to ease compatibility).
1524 *
1525 * Returns 0 on success, errno otherwise
1526 */
1527
1528static int getsockopt(struct socket *sock, int lvl, int opt, char *ov, int *ol)
1529{
1530 struct tipc_sock *tsock = tipc_sk(sock->sk);
1531 int len;
1532 u32 value;
1533 int res;
1534
1535 if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
1536 return put_user(0, ol);
1537 if (lvl != SOL_TIPC)
1538 return -ENOPROTOOPT;
1539 if ((res = get_user(len, ol)))
1540 return res;
1541
1542 if (down_interruptible(&tsock->sem))
1543 return -ERESTARTSYS;
1544
1545 switch (opt) {
1546 case TIPC_IMPORTANCE:
1547 res = tipc_portimportance(tsock->p->ref, &value);
1548 break;
1549 case TIPC_SRC_DROPPABLE:
1550 res = tipc_portunreliable(tsock->p->ref, &value);
1551 break;
1552 case TIPC_DEST_DROPPABLE:
1553 res = tipc_portunreturnable(tsock->p->ref, &value);
1554 break;
1555 case TIPC_CONN_TIMEOUT:
1556 value = (sock->sk->sk_rcvtimeo * 1000) / HZ;
1557 break;
1558 default:
1559 res = -EINVAL;
1560 }
1561
1562 if (res) {
1563 /* "get" failed */
1564 }
1565 else if (len < sizeof(value)) {
1566 res = -EINVAL;
1567 }
1568 else if ((res = copy_to_user(ov, &value, sizeof(value)))) {
1569 /* couldn't return value */
1570 }
1571 else {
1572 res = put_user(sizeof(value), ol);
1573 }
1574
1575 up(&tsock->sem);
1576 return res;
1577}
1578
1579/**
1580 * Placeholders for non-implemented functionality
1581 *
1582 * Returns error code (POSIX-compliant where defined)
1583 */
1584
1585static int ioctl(struct socket *s, u32 cmd, unsigned long arg)
1586{
1587 return -EINVAL;
1588}
1589
1590static int no_mmap(struct file *file, struct socket *sock,
1591 struct vm_area_struct *vma)
1592{
1593 return -EINVAL;
1594}
1595static ssize_t no_sendpage(struct socket *sock, struct page *page,
1596 int offset, size_t size, int flags)
1597{
1598 return -EINVAL;
1599}
1600
1601static int no_skpair(struct socket *s1, struct socket *s2)
1602{
1603 return -EOPNOTSUPP;
1604}
1605
1606/**
1607 * Protocol switches for the various types of TIPC sockets
1608 */
1609
1610static struct proto_ops msg_ops = {
1611 .owner = THIS_MODULE,
1612 .family = AF_TIPC,
1613 .release = release,
1614 .bind = bind,
1615 .connect = connect,
1616 .socketpair = no_skpair,
1617 .accept = accept,
1618 .getname = get_name,
1619 .poll = poll,
1620 .ioctl = ioctl,
1621 .listen = listen,
1622 .shutdown = shutdown,
1623 .setsockopt = setsockopt,
1624 .getsockopt = getsockopt,
1625 .sendmsg = send_msg,
1626 .recvmsg = recv_msg,
1627 .mmap = no_mmap,
1628 .sendpage = no_sendpage
1629};
1630
1631static struct proto_ops packet_ops = {
1632 .owner = THIS_MODULE,
1633 .family = AF_TIPC,
1634 .release = release,
1635 .bind = bind,
1636 .connect = connect,
1637 .socketpair = no_skpair,
1638 .accept = accept,
1639 .getname = get_name,
1640 .poll = poll,
1641 .ioctl = ioctl,
1642 .listen = listen,
1643 .shutdown = shutdown,
1644 .setsockopt = setsockopt,
1645 .getsockopt = getsockopt,
1646 .sendmsg = send_packet,
1647 .recvmsg = recv_msg,
1648 .mmap = no_mmap,
1649 .sendpage = no_sendpage
1650};
1651
1652static struct proto_ops stream_ops = {
1653 .owner = THIS_MODULE,
1654 .family = AF_TIPC,
1655 .release = release,
1656 .bind = bind,
1657 .connect = connect,
1658 .socketpair = no_skpair,
1659 .accept = accept,
1660 .getname = get_name,
1661 .poll = poll,
1662 .ioctl = ioctl,
1663 .listen = listen,
1664 .shutdown = shutdown,
1665 .setsockopt = setsockopt,
1666 .getsockopt = getsockopt,
1667 .sendmsg = send_stream,
1668 .recvmsg = recv_stream,
1669 .mmap = no_mmap,
1670 .sendpage = no_sendpage
1671};
1672
1673static struct net_proto_family tipc_family_ops = {
1674 .owner = THIS_MODULE,
1675 .family = AF_TIPC,
1676 .create = tipc_create
1677};
1678
1679static struct proto tipc_proto = {
1680 .name = "TIPC",
1681 .owner = THIS_MODULE,
1682 .obj_size = sizeof(struct tipc_sock)
1683};
1684
1685/**
1686 * tipc_socket_init - initialize TIPC socket interface
1687 *
1688 * Returns 0 on success, errno otherwise
1689 */
1690int tipc_socket_init(void)
1691{
1692 int res;
1693
1694 res = proto_register(&tipc_proto, 1);
1695 if (res) {
1696 err("Failed to register TIPC protocol type\n");
1697 goto out;
1698 }
1699
1700 res = sock_register(&tipc_family_ops);
1701 if (res) {
1702 err("Failed to register TIPC socket type\n");
1703 proto_unregister(&tipc_proto);
1704 goto out;
1705 }
1706
1707 sockets_enabled = 1;
1708 out:
1709 return res;
1710}
1711
1712/**
1713 * tipc_socket_stop - stop TIPC socket interface
1714 */
1715void tipc_socket_stop(void)
1716{
1717 if (!sockets_enabled)
1718 return;
1719
1720 sockets_enabled = 0;
1721 sock_unregister(tipc_family_ops.family);
1722 proto_unregister(&tipc_proto);
1723}
1724
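The jiffies-to-milliseconds conversion in the final getsockopt() case above is consistent with the connection-timeout option. A minimal user-space sketch of reading that option is given below; it is illustrative only, and the AF_TIPC / SOL_TIPC / TIPC_CONN_TIMEOUT values are assumed to match the definitions exported by <linux/tipc.h> and <linux/socket.h> of this kernel generation rather than taken from this patch.

/* Hypothetical user-space sketch: read a TIPC socket's connection timeout. */
#include <stdio.h>
#include <sys/socket.h>
#include <linux/tipc.h>

#ifndef AF_TIPC
#define AF_TIPC		30	/* assumption: TIPC address family number */
#endif
#ifndef SOL_TIPC
#define SOL_TIPC	271	/* assumption: TIPC socket option level */
#endif

int main(void)
{
	unsigned int timeout_ms;
	socklen_t len = sizeof(timeout_ms);
	int sd = socket(AF_TIPC, SOCK_SEQPACKET, 0);

	if (sd < 0) {
		perror("socket");
		return 1;
	}
	/* value is returned in milliseconds (converted from jiffies in-kernel) */
	if (getsockopt(sd, SOL_TIPC, TIPC_CONN_TIMEOUT, &timeout_ms, &len) == 0)
		printf("connection timeout: %u ms\n", timeout_ms);
	else
		perror("getsockopt");
	return 0;
}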
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
new file mode 100644
index 000000000000..5ff38b9f3194
--- /dev/null
+++ b/net/tipc/subscr.c
@@ -0,0 +1,527 @@
1/*
2 * net/tipc/subscr.c: TIPC subscription service
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "dbg.h"
39#include "subscr.h"
40#include "name_table.h"
41#include "ref.h"
42
43/**
44 * struct subscriber - TIPC network topology subscriber
45 * @ref: object reference to subscriber object itself
46 * @lock: pointer to spinlock controlling access to subscriber object
47 * @subscriber_list: adjacent subscribers in topology server's list of subscribers
48 * @subscription_list: list of subscription objects for this subscriber
49 * @port_ref: object reference to port used to communicate with subscriber
50 * @swap: indicates if subscriber uses opposite endianness in its messages
51 */
52
53struct subscriber {
54 u32 ref;
55 spinlock_t *lock;
56 struct list_head subscriber_list;
57 struct list_head subscription_list;
58 u32 port_ref;
59 int swap;
60};
61
62/**
63 * struct top_srv - TIPC network topology subscription service
64 * @user_ref: TIPC userid of subscription service
65 * @setup_port: reference to TIPC port that handles subscription requests
66 * @subscription_count: number of active subscriptions (not subscribers!)
67 * @subscriber_list: list of ports subscribing to service
68 * @lock: spinlock governing access to subscriber list
69 */
70
71struct top_srv {
72 u32 user_ref;
73 u32 setup_port;
74 atomic_t subscription_count;
75 struct list_head subscriber_list;
76 spinlock_t lock;
77};
78
79static struct top_srv topsrv = { 0 };
80
81/**
82 * htohl - convert value to endianness used by destination
83 * @in: value to convert
84 * @swap: non-zero if endianness must be reversed
85 *
86 * Returns converted value
87 */
88
89static inline u32 htohl(u32 in, int swap)
90{
91	/* reverse the byte order when the subscriber's endianness differs */
92
93	return swap ? swab32(in) : in;
94}
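/*
 * Worked example (illustrative): with swap set, htohl(0x01020304, 1)
 * returns 0x04030201; with swap clear the value is passed through as-is.
 */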
95
96/**
97 * subscr_send_event - send a message containing a tipc_event to the subscriber
98 */
99
100static void subscr_send_event(struct subscription *sub,
101 u32 found_lower,
102 u32 found_upper,
103 u32 event,
104 u32 port_ref,
105 u32 node)
106{
107 struct iovec msg_sect;
108
109 msg_sect.iov_base = (void *)&sub->evt;
110 msg_sect.iov_len = sizeof(struct tipc_event);
111
112 sub->evt.event = htohl(event, sub->owner->swap);
113 sub->evt.found_lower = htohl(found_lower, sub->owner->swap);
114 sub->evt.found_upper = htohl(found_upper, sub->owner->swap);
115 sub->evt.port.ref = htohl(port_ref, sub->owner->swap);
116 sub->evt.port.node = htohl(node, sub->owner->swap);
117 tipc_send(sub->owner->port_ref, 1, &msg_sect);
118}
119
120/**
121 * tipc_subscr_overlap - test for subscription overlap with the given values
122 *
123 * Returns 1 if there is overlap, otherwise 0.
124 */
125
126int tipc_subscr_overlap(struct subscription *sub,
127 u32 found_lower,
128 u32 found_upper)
129
130{
131 if (found_lower < sub->seq.lower)
132 found_lower = sub->seq.lower;
133 if (found_upper > sub->seq.upper)
134 found_upper = sub->seq.upper;
135 if (found_lower > found_upper)
136 return 0;
137 return 1;
138}
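/*
 * Worked example (illustrative): a subscription covering instances 100-200
 * overlaps a published range 150-250 (the range is clamped to 150-200 and
 * 1 is returned), but not a range 201-250 (clamping leaves lower 201 >
 * upper 200, so 0 is returned).
 */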
139
140/**
141 * tipc_subscr_report_overlap - issue event if there is subscription overlap
142 *
143 * Protected by nameseq.lock in name_table.c
144 */
145
146void tipc_subscr_report_overlap(struct subscription *sub,
147 u32 found_lower,
148 u32 found_upper,
149 u32 event,
150 u32 port_ref,
151 u32 node,
152 int must)
153{
154 dbg("Rep overlap %u:%u,%u<->%u,%u\n", sub->seq.type, sub->seq.lower,
155 sub->seq.upper, found_lower, found_upper);
156 if (!tipc_subscr_overlap(sub, found_lower, found_upper))
157 return;
158 if (!must && (sub->filter != TIPC_SUB_PORTS))
159 return;
160 subscr_send_event(sub, found_lower, found_upper, event, port_ref, node);
161}
162
163/**
164 * subscr_timeout - subscription timeout has occurred
165 */
166
167static void subscr_timeout(struct subscription *sub)
168{
169 struct subscriber *subscriber;
170 u32 subscriber_ref;
171
172 /* Validate subscriber reference (in case subscriber is terminating) */
173
174 subscriber_ref = sub->owner->ref;
175 subscriber = (struct subscriber *)tipc_ref_lock(subscriber_ref);
176 if (subscriber == NULL)
177 return;
178
179 /* Unlink subscription from name table */
180
181 tipc_nametbl_unsubscribe(sub);
182
183 /* Notify subscriber of timeout, then unlink subscription */
184
185 subscr_send_event(sub,
186 sub->evt.s.seq.lower,
187 sub->evt.s.seq.upper,
188 TIPC_SUBSCR_TIMEOUT,
189 0,
190 0);
191 list_del(&sub->subscription_list);
192
193 /* Now destroy subscription */
194
195 tipc_ref_unlock(subscriber_ref);
196 k_term_timer(&sub->timer);
197 kfree(sub);
198 atomic_dec(&topsrv.subscription_count);
199}
200
201/**
202 * subscr_terminate - terminate communication with a subscriber
203 *
204 * Called with subscriber locked. Routine must temporarily release this lock
205 * to enable subscription timeout routine(s) to finish without deadlocking;
206 * the lock is then reclaimed to allow caller to release it upon return.
207 * (This should work even in the unlikely event some other thread creates
208 * a new object reference in the interim that uses this lock; this routine will
209 * simply wait for it to be released, then claim it.)
210 */
211
212static void subscr_terminate(struct subscriber *subscriber)
213{
214 struct subscription *sub;
215 struct subscription *sub_temp;
216
217 /* Invalidate subscriber reference */
218
219 tipc_ref_discard(subscriber->ref);
220 spin_unlock_bh(subscriber->lock);
221
222 /* Destroy any existing subscriptions for subscriber */
223
224 list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
225 subscription_list) {
226 if (sub->timeout != TIPC_WAIT_FOREVER) {
227 k_cancel_timer(&sub->timer);
228 k_term_timer(&sub->timer);
229 }
230 tipc_nametbl_unsubscribe(sub);
231 list_del(&sub->subscription_list);
232		dbg("Term: Removed sub %u,%u,%u from subscriber %p list\n",
233 sub->seq.type, sub->seq.lower, sub->seq.upper, subscriber);
234 kfree(sub);
235 atomic_dec(&topsrv.subscription_count);
236 }
237
238 /* Sever connection to subscriber */
239
240 tipc_shutdown(subscriber->port_ref);
241 tipc_deleteport(subscriber->port_ref);
242
243 /* Remove subscriber from topology server's subscriber list */
244
245 spin_lock_bh(&topsrv.lock);
246 list_del(&subscriber->subscriber_list);
247 spin_unlock_bh(&topsrv.lock);
248
249 /* Now destroy subscriber */
250
251 spin_lock_bh(subscriber->lock);
252 kfree(subscriber);
253}
254
255/**
256 * subscr_subscribe - create subscription for subscriber
257 *
258 * Called with subscriber locked
259 */
260
261static void subscr_subscribe(struct tipc_subscr *s,
262 struct subscriber *subscriber)
263{
264 struct subscription *sub;
265
266 /* Refuse subscription if global limit exceeded */
267
268 if (atomic_read(&topsrv.subscription_count) >= tipc_max_subscriptions) {
269 warn("Failed: max %u subscriptions\n", tipc_max_subscriptions);
270 subscr_terminate(subscriber);
271 return;
272 }
273
274 /* Allocate subscription object */
275
276 sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
277 if (sub == NULL) {
278 warn("Memory squeeze; ignoring subscription\n");
279 subscr_terminate(subscriber);
280 return;
281 }
282
283 /* Determine/update subscriber's endianness */
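	/*
	 * (Sketch of the reasoning, assuming the TIPC_SUB_* filter constants
	 * fit in the low-order byte: a subscriber of opposite endianness
	 * presents a byte-swapped filter value that matches neither constant,
	 * so failing the test below flags its fields for htohl() conversion.)
	 */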
284
285 if ((s->filter == TIPC_SUB_PORTS) || (s->filter == TIPC_SUB_SERVICE))
286 subscriber->swap = 0;
287 else
288 subscriber->swap = 1;
289
290 /* Initialize subscription object */
291
292 memset(sub, 0, sizeof(*sub));
293 sub->seq.type = htohl(s->seq.type, subscriber->swap);
294 sub->seq.lower = htohl(s->seq.lower, subscriber->swap);
295 sub->seq.upper = htohl(s->seq.upper, subscriber->swap);
296 sub->timeout = htohl(s->timeout, subscriber->swap);
297 sub->filter = htohl(s->filter, subscriber->swap);
298	if (((sub->filter != TIPC_SUB_PORTS) &&
299	     (sub->filter != TIPC_SUB_SERVICE)) ||
300	    (sub->seq.lower > sub->seq.upper)) {
301 warn("Rejecting illegal subscription %u,%u,%u\n",
302 sub->seq.type, sub->seq.lower, sub->seq.upper);
303 kfree(sub);
304 subscr_terminate(subscriber);
305 return;
306 }
307 memcpy(&sub->evt.s, s, sizeof(struct tipc_subscr));
308 INIT_LIST_HEAD(&sub->subscription_list);
309 INIT_LIST_HEAD(&sub->nameseq_list);
310 list_add(&sub->subscription_list, &subscriber->subscription_list);
311 atomic_inc(&topsrv.subscription_count);
312 if (sub->timeout != TIPC_WAIT_FOREVER) {
313 k_init_timer(&sub->timer,
314 (Handler)subscr_timeout, (unsigned long)sub);
315 k_start_timer(&sub->timer, sub->timeout);
316 }
317 sub->owner = subscriber;
318 tipc_nametbl_subscribe(sub);
319}
320
321/**
322 * subscr_conn_shutdown_event - handle termination request from subscriber
323 */
324
325static void subscr_conn_shutdown_event(void *usr_handle,
326 u32 portref,
327 struct sk_buff **buf,
328 unsigned char const *data,
329 unsigned int size,
330 int reason)
331{
332 struct subscriber *subscriber;
333 spinlock_t *subscriber_lock;
334
335 subscriber = tipc_ref_lock((u32)(unsigned long)usr_handle);
336 if (subscriber == NULL)
337 return;
338
339 subscriber_lock = subscriber->lock;
340 subscr_terminate(subscriber);
341 spin_unlock_bh(subscriber_lock);
342}
343
344/**
345 * subscr_conn_msg_event - handle new subscription request from subscriber
346 */
347
348static void subscr_conn_msg_event(void *usr_handle,
349 u32 port_ref,
350 struct sk_buff **buf,
351 const unchar *data,
352 u32 size)
353{
354 struct subscriber *subscriber;
355 spinlock_t *subscriber_lock;
356
357 subscriber = tipc_ref_lock((u32)(unsigned long)usr_handle);
358 if (subscriber == NULL)
359 return;
360
361 subscriber_lock = subscriber->lock;
362 if (size != sizeof(struct tipc_subscr))
363 subscr_terminate(subscriber);
364 else
365 subscr_subscribe((struct tipc_subscr *)data, subscriber);
366
367 spin_unlock_bh(subscriber_lock);
368}
369
370/**
371 * subscr_named_msg_event - handle request to establish a new subscriber
372 */
373
374static void subscr_named_msg_event(void *usr_handle,
375 u32 port_ref,
376 struct sk_buff **buf,
377 const unchar *data,
378 u32 size,
379 u32 importance,
380 struct tipc_portid const *orig,
381 struct tipc_name_seq const *dest)
382{
383 struct subscriber *subscriber;
384 struct iovec msg_sect = {0, 0};
385 spinlock_t *subscriber_lock;
386
387 dbg("subscr_named_msg_event: orig = %x own = %x,\n",
388 orig->node, tipc_own_addr);
389 if (size && (size != sizeof(struct tipc_subscr))) {
390 warn("Received tipc_subscr of invalid size\n");
391 return;
392 }
393
394 /* Create subscriber object */
395
396 subscriber = kmalloc(sizeof(struct subscriber), GFP_ATOMIC);
397 if (subscriber == NULL) {
398 warn("Memory squeeze; ignoring subscriber setup\n");
399 return;
400 }
401 memset(subscriber, 0, sizeof(struct subscriber));
402 INIT_LIST_HEAD(&subscriber->subscription_list);
403 INIT_LIST_HEAD(&subscriber->subscriber_list);
404 subscriber->ref = tipc_ref_acquire(subscriber, &subscriber->lock);
405 if (subscriber->ref == 0) {
406 warn("Failed to acquire subscriber reference\n");
407 kfree(subscriber);
408 return;
409 }
410
411 /* Establish a connection to subscriber */
412
413 tipc_createport(topsrv.user_ref,
414 (void *)(unsigned long)subscriber->ref,
415 importance,
416 0,
417 0,
418 subscr_conn_shutdown_event,
419 0,
420 0,
421 subscr_conn_msg_event,
422 0,
423 &subscriber->port_ref);
424 if (subscriber->port_ref == 0) {
425 warn("Memory squeeze; failed to create subscription port\n");
426 tipc_ref_discard(subscriber->ref);
427 kfree(subscriber);
428 return;
429 }
430 tipc_connect2port(subscriber->port_ref, orig);
431
432
433 /* Add subscriber to topology server's subscriber list */
434
435 tipc_ref_lock(subscriber->ref);
436 spin_lock_bh(&topsrv.lock);
437 list_add(&subscriber->subscriber_list, &topsrv.subscriber_list);
438 spin_unlock_bh(&topsrv.lock);
439
440 /*
441 * Subscribe now if message contains a subscription,
442 * otherwise send an empty response to complete connection handshaking
443 */
444
445 subscriber_lock = subscriber->lock;
446 if (size)
447 subscr_subscribe((struct tipc_subscr *)data, subscriber);
448 else
449 tipc_send(subscriber->port_ref, 1, &msg_sect);
450
451 spin_unlock_bh(subscriber_lock);
452}
453
454int tipc_subscr_start(void)
455{
456 struct tipc_name_seq seq = {TIPC_TOP_SRV, TIPC_TOP_SRV, TIPC_TOP_SRV};
457 int res = -1;
458
459 memset(&topsrv, 0, sizeof (topsrv));
460	spin_lock_init(&topsrv.lock);
461 INIT_LIST_HEAD(&topsrv.subscriber_list);
462
463 spin_lock_bh(&topsrv.lock);
464 res = tipc_attach(&topsrv.user_ref, 0, 0);
465 if (res) {
466 spin_unlock_bh(&topsrv.lock);
467 return res;
468 }
469
470 res = tipc_createport(topsrv.user_ref,
471 0,
472 TIPC_CRITICAL_IMPORTANCE,
473 0,
474 0,
475 0,
476 0,
477 subscr_named_msg_event,
478 0,
479 0,
480 &topsrv.setup_port);
481 if (res)
482 goto failed;
483
484 res = tipc_nametbl_publish_rsv(topsrv.setup_port, TIPC_NODE_SCOPE, &seq);
485 if (res)
486 goto failed;
487
488 spin_unlock_bh(&topsrv.lock);
489 return 0;
490
491failed:
492 err("Failed to create subscription service\n");
493 tipc_detach(topsrv.user_ref);
494 topsrv.user_ref = 0;
495 spin_unlock_bh(&topsrv.lock);
496 return res;
497}
498
499void tipc_subscr_stop(void)
500{
501 struct subscriber *subscriber;
502 struct subscriber *subscriber_temp;
503 spinlock_t *subscriber_lock;
504
505 if (topsrv.user_ref) {
506 tipc_deleteport(topsrv.setup_port);
507 list_for_each_entry_safe(subscriber, subscriber_temp,
508 &topsrv.subscriber_list,
509 subscriber_list) {
510 tipc_ref_lock(subscriber->ref);
511 subscriber_lock = subscriber->lock;
512 subscr_terminate(subscriber);
513 spin_unlock_bh(subscriber_lock);
514 }
515 tipc_detach(topsrv.user_ref);
516 topsrv.user_ref = 0;
517 }
518}
519
520
521int tipc_ispublished(struct tipc_name const *name)
522{
523 u32 domain = 0;
524
525	return (tipc_nametbl_translate(name->type, name->instance, &domain) != 0);
526}
527
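subscr_named_msg_event() above accepts connection requests addressed to the reserved name {TIPC_TOP_SRV, TIPC_TOP_SRV} and expects each subsequent message to carry exactly one struct tipc_subscr; events are returned as struct tipc_event. The sketch below shows a hypothetical user-space subscriber; it is illustrative only, assumes the structure layouts and constants exported by <linux/tipc.h> of this era, and the name type 1000 is an arbitrary example.

/* Hypothetical user-space sketch: subscribe to a name range and print events. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/tipc.h>

#ifndef AF_TIPC
#define AF_TIPC		30	/* assumption: TIPC address family number */
#endif

int main(void)
{
	struct sockaddr_tipc topsrv;
	struct tipc_subscr subscr;
	struct tipc_event event;
	int sd = socket(AF_TIPC, SOCK_SEQPACKET, 0);

	if (sd < 0)
		return 1;

	/* Connect to the topology server's reserved name (TIPC_TOP_SRV) */
	memset(&topsrv, 0, sizeof(topsrv));
	topsrv.family = AF_TIPC;
	topsrv.addrtype = TIPC_ADDR_NAME;
	topsrv.addr.name.name.type = TIPC_TOP_SRV;
	topsrv.addr.name.name.instance = TIPC_TOP_SRV;
	if (connect(sd, (struct sockaddr *)&topsrv, sizeof(topsrv)) < 0)
		return 1;

	/* One subscription per message, exactly sizeof(struct tipc_subscr) */
	memset(&subscr, 0, sizeof(subscr));
	subscr.seq.type = 1000;			/* example name type */
	subscr.seq.lower = 0;
	subscr.seq.upper = ~0;
	subscr.timeout = TIPC_WAIT_FOREVER;
	subscr.filter = TIPC_SUB_SERVICE;
	if (send(sd, &subscr, sizeof(subscr), 0) != sizeof(subscr))
		return 1;

	/* Each event reports a published/withdrawn/timed-out name range */
	while (recv(sd, &event, sizeof(event), 0) == sizeof(event))
		printf("event %u: range %u-%u\n",
		       event.event, event.found_lower, event.found_upper);

	close(sd);
	return 0;
}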
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
new file mode 100644
index 000000000000..1e5090465d2e
--- /dev/null
+++ b/net/tipc/subscr.h
@@ -0,0 +1,80 @@
1/*
2 * net/tipc/subscr.h: Include file for TIPC subscription service
3 *
4 * Copyright (c) 2003-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_SUBSCR_H
38#define _TIPC_SUBSCR_H
39
40/**
41 * struct subscription - TIPC network topology subscription object
42 * @seq: name sequence associated with subscription
43 * @timeout: duration of subscription (in ms)
44 * @filter: event filtering to be done for subscription
45 * @evt: template for events generated by subscription
46 * @subscription_list: adjacent subscriptions in subscriber's subscription list
47 * @nameseq_list: adjacent subscriptions in name sequence's subscription list
48 * @timer: timer governing duration of subscription (unused if timeout is TIPC_WAIT_FOREVER)
49 * @owner: pointer to subscriber object associated with this subscription
50 */
51
52struct subscription {
53 struct tipc_name_seq seq;
54 u32 timeout;
55 u32 filter;
56 struct tipc_event evt;
57 struct list_head subscription_list;
58 struct list_head nameseq_list;
59 struct timer_list timer;
60 struct subscriber *owner;
61};
62
63int tipc_subscr_overlap(struct subscription * sub,
64 u32 found_lower,
65 u32 found_upper);
66
67void tipc_subscr_report_overlap(struct subscription * sub,
68 u32 found_lower,
69 u32 found_upper,
70 u32 event,
71 u32 port_ref,
72 u32 node,
73 int must_report);
74
75int tipc_subscr_start(void);
76
77void tipc_subscr_stop(void);
78
79
80#endif
diff --git a/net/tipc/user_reg.c b/net/tipc/user_reg.c
new file mode 100644
index 000000000000..106200d76587
--- /dev/null
+++ b/net/tipc/user_reg.c
@@ -0,0 +1,265 @@
1/*
2 * net/tipc/user_reg.c: TIPC user registry code
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2004-2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "user_reg.h"
39
40/*
41 * TIPC user registry keeps track of users of the tipc_port interface.
42 *
43 * The registry utilizes an array of "TIPC user" entries;
44 * a user's ID is the index of their associated array entry.
45 * Array entry 0 is not used, so userid 0 never denotes a registered user;
46 * TIPC uses this value to identify an anonymous (unregistered) user.
47 * The list of free entries is initially chained from last entry to entry 1.
48 */
49
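/*
 * Worked example (illustrative): after reg_init() with MAX_USERID == 64,
 * next_free_user == 64 and users[i].next == i - 1 for i = 1..64, so the
 * free list runs 64 -> 63 -> ... -> 1 and terminates at the unused entry 0.
 * tipc_attach() pops the head and marks it allocated (next = -1), while
 * tipc_detach() pushes the freed entry back onto the front of the list.
 */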
50/**
51 * struct tipc_user - registered TIPC user info
52 * @next: index of next free registry entry (or -1 for an allocated entry)
53 * @callback: ptr to routine to call when TIPC mode changes (NULL if none)
54 * @usr_handle: user-defined value passed to callback routine
55 * @ports: list of user ports owned by the user
56 */
57
58struct tipc_user {
59 int next;
60 tipc_mode_event callback;
61 void *usr_handle;
62 struct list_head ports;
63};
64
65#define MAX_USERID 64
66#define USER_LIST_SIZE ((MAX_USERID + 1) * sizeof(struct tipc_user))
67
68static struct tipc_user *users = 0;
69static u32 next_free_user = MAX_USERID + 1;
70static spinlock_t reg_lock = SPIN_LOCK_UNLOCKED;
71
72/**
73 * reg_init - create TIPC user registry (but don't activate it)
74 *
75 * If registry has been pre-initialized it is left "as is".
76 * NOTE: This routine may be called when TIPC is inactive.
77 */
78
79static int reg_init(void)
80{
81 u32 i;
82
83 spin_lock_bh(&reg_lock);
84 if (!users) {
85 users = (struct tipc_user *)kmalloc(USER_LIST_SIZE, GFP_ATOMIC);
86 if (users) {
87 memset(users, 0, USER_LIST_SIZE);
88 for (i = 1; i <= MAX_USERID; i++) {
89 users[i].next = i - 1;
90 }
91 next_free_user = MAX_USERID;
92 }
93 }
94 spin_unlock_bh(&reg_lock);
95 return users ? TIPC_OK : -ENOMEM;
96}
97
98/**
99 * reg_callback - inform TIPC user about current operating mode
100 */
101
102static void reg_callback(struct tipc_user *user_ptr)
103{
104 tipc_mode_event cb;
105 void *arg;
106
107 spin_lock_bh(&reg_lock);
108 cb = user_ptr->callback;
109 arg = user_ptr->usr_handle;
110 spin_unlock_bh(&reg_lock);
111
112 if (cb)
113 cb(arg, tipc_mode, tipc_own_addr);
114}
115
116/**
117 * tipc_reg_start - activate TIPC user registry
118 */
119
120int tipc_reg_start(void)
121{
122 u32 u;
123 int res;
124
125 if ((res = reg_init()))
126 return res;
127
128 for (u = 1; u <= MAX_USERID; u++) {
129 if (users[u].callback)
130 tipc_k_signal((Handler)reg_callback,
131 (unsigned long)&users[u]);
132 }
133 return TIPC_OK;
134}
135
136/**
137 * tipc_reg_stop - shut down & delete TIPC user registry
138 */
139
140void tipc_reg_stop(void)
141{
142 int id;
143
144 if (!users)
145 return;
146
147 for (id = 1; id <= MAX_USERID; id++) {
148 if (users[id].callback)
149 reg_callback(&users[id]);
150 }
151 kfree(users);
152 users = 0;
153}
154
155/**
156 * tipc_attach - register a TIPC user
157 *
158 * NOTE: This routine may be called when TIPC is inactive.
159 */
160
161int tipc_attach(u32 *userid, tipc_mode_event cb, void *usr_handle)
162{
163 struct tipc_user *user_ptr;
164
165 if ((tipc_mode == TIPC_NOT_RUNNING) && !cb)
166 return -ENOPROTOOPT;
167 if (!users)
168 reg_init();
169
170 spin_lock_bh(&reg_lock);
171 if (!next_free_user) {
172 spin_unlock_bh(&reg_lock);
173 return -EBUSY;
174 }
175 user_ptr = &users[next_free_user];
176 *userid = next_free_user;
177 next_free_user = user_ptr->next;
178 user_ptr->next = -1;
179 spin_unlock_bh(&reg_lock);
180
181 user_ptr->callback = cb;
182 user_ptr->usr_handle = usr_handle;
183 INIT_LIST_HEAD(&user_ptr->ports);
184 atomic_inc(&tipc_user_count);
185
186 if (cb && (tipc_mode != TIPC_NOT_RUNNING))
187 tipc_k_signal((Handler)reg_callback, (unsigned long)user_ptr);
188 return TIPC_OK;
189}
190
191/**
192 * tipc_detach - deregister a TIPC user
193 */
194
195void tipc_detach(u32 userid)
196{
197 struct tipc_user *user_ptr;
198 struct list_head ports_temp;
199 struct user_port *up_ptr, *temp_up_ptr;
200
201 if ((userid == 0) || (userid > MAX_USERID))
202 return;
203
204 spin_lock_bh(&reg_lock);
205 if ((!users) || (users[userid].next >= 0)) {
206 spin_unlock_bh(&reg_lock);
207 return;
208 }
209
210 user_ptr = &users[userid];
211 user_ptr->callback = NULL;
212 INIT_LIST_HEAD(&ports_temp);
213 list_splice(&user_ptr->ports, &ports_temp);
214 user_ptr->next = next_free_user;
215 next_free_user = userid;
216 spin_unlock_bh(&reg_lock);
217
218 atomic_dec(&tipc_user_count);
219
220 list_for_each_entry_safe(up_ptr, temp_up_ptr, &ports_temp, uport_list) {
221 tipc_deleteport(up_ptr->ref);
222 }
223}
224
225/**
226 * tipc_reg_add_port - register a user's driver port
227 */
228
229int tipc_reg_add_port(struct user_port *up_ptr)
230{
231 struct tipc_user *user_ptr;
232
233 if (up_ptr->user_ref == 0)
234 return TIPC_OK;
235 if (up_ptr->user_ref > MAX_USERID)
236 return -EINVAL;
237	if ((tipc_mode == TIPC_NOT_RUNNING) || !users)
238 return -ENOPROTOOPT;
239
240 spin_lock_bh(&reg_lock);
241 user_ptr = &users[up_ptr->user_ref];
242 list_add(&up_ptr->uport_list, &user_ptr->ports);
243 spin_unlock_bh(&reg_lock);
244 return TIPC_OK;
245}
246
247/**
248 * tipc_reg_remove_port - deregister a user's driver port
249 */
250
251int tipc_reg_remove_port(struct user_port *up_ptr)
252{
253 if (up_ptr->user_ref == 0)
254 return TIPC_OK;
255 if (up_ptr->user_ref > MAX_USERID)
256 return -EINVAL;
257	if (!users)
258 return -ENOPROTOOPT;
259
260 spin_lock_bh(&reg_lock);
261 list_del_init(&up_ptr->uport_list);
262 spin_unlock_bh(&reg_lock);
263 return TIPC_OK;
264}
265
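tipc_attach() hands out a userid and optionally registers a mode-change callback, which reg_callback() invokes as cb(usr_handle, tipc_mode, tipc_own_addr). The sketch below shows a hypothetical kernel-side user of this registry; the callback signature is inferred from that call, the header path is assumed to be TIPC's exported kernel API header, and the my_* names are illustrative.

/* Hypothetical kernel-side sketch of registering with the TIPC user registry. */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <net/tipc/tipc.h>	/* assumption: exports tipc_attach()/tipc_detach() */

static u32 my_user_ref;		/* illustrative name */

static void my_mode_event(void *usr_handle, int mode, u32 addr)
{
	/* Invoked (via tipc_k_signal) whenever TIPC changes operating mode */
	printk(KERN_INFO "TIPC mode %d, own address %u\n", mode, addr);
}

static int __init my_module_init(void)
{
	int res = tipc_attach(&my_user_ref, my_mode_event, NULL);

	if (res)
		return res;	/* e.g. -EBUSY when all MAX_USERID slots are taken */
	/* ... ports created via tipc_createport(my_user_ref, ...) are now
	 *     tracked, and are deleted automatically when the user detaches */
	return 0;
}

static void __exit my_module_exit(void)
{
	tipc_detach(my_user_ref);
}

module_init(my_module_init);
module_exit(my_module_exit);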
diff --git a/net/tipc/user_reg.h b/net/tipc/user_reg.h
new file mode 100644
index 000000000000..d0e88794ed1b
--- /dev/null
+++ b/net/tipc/user_reg.h
@@ -0,0 +1,48 @@
1/*
2 * net/tipc/user_reg.h: Include file for TIPC user registry code
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_USER_REG_H
38#define _TIPC_USER_REG_H
39
40#include "port.h"
41
42int tipc_reg_start(void);
43void tipc_reg_stop(void);
44
45int tipc_reg_add_port(struct user_port *up_ptr);
46int tipc_reg_remove_port(struct user_port *up_ptr);
47
48#endif
diff --git a/net/tipc/zone.c b/net/tipc/zone.c
new file mode 100644
index 000000000000..7c11f7f83a21
--- /dev/null
+++ b/net/tipc/zone.c
@@ -0,0 +1,169 @@
1/*
2 * net/tipc/zone.c: TIPC zone management routines
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#include "core.h"
38#include "zone.h"
39#include "net.h"
40#include "addr.h"
41#include "node_subscr.h"
42#include "cluster.h"
43#include "node.h"
44
45struct _zone *tipc_zone_create(u32 addr)
46{
47 struct _zone *z_ptr = 0;
48 u32 z_num;
49
50 if (!tipc_addr_domain_valid(addr))
51 return 0;
52
53 z_ptr = (struct _zone *)kmalloc(sizeof(*z_ptr), GFP_ATOMIC);
54 if (z_ptr != NULL) {
55 memset(z_ptr, 0, sizeof(*z_ptr));
56 z_num = tipc_zone(addr);
57 z_ptr->addr = tipc_addr(z_num, 0, 0);
58 tipc_net.zones[z_num] = z_ptr;
59 }
60 return z_ptr;
61}
62
63void tipc_zone_delete(struct _zone *z_ptr)
64{
65 u32 c_num;
66
67 if (!z_ptr)
68 return;
69 for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
70 tipc_cltr_delete(z_ptr->clusters[c_num]);
71 }
72 kfree(z_ptr);
73}
74
75void tipc_zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr)
76{
77 u32 c_num = tipc_cluster(c_ptr->addr);
78
79 assert(c_ptr->addr);
80 assert(c_num <= tipc_max_clusters);
81 assert(z_ptr->clusters[c_num] == 0);
82 z_ptr->clusters[c_num] = c_ptr;
83}
84
85void tipc_zone_remove_as_router(struct _zone *z_ptr, u32 router)
86{
87 u32 c_num;
88
89 for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
90 if (z_ptr->clusters[c_num]) {
91 tipc_cltr_remove_as_router(z_ptr->clusters[c_num],
92 router);
93 }
94 }
95}
96
97void tipc_zone_send_external_routes(struct _zone *z_ptr, u32 dest)
98{
99 u32 c_num;
100
101 for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
102 if (z_ptr->clusters[c_num]) {
103 if (in_own_cluster(z_ptr->addr))
104 continue;
105 tipc_cltr_send_ext_routes(z_ptr->clusters[c_num], dest);
106 }
107 }
108}
109
110struct node *tipc_zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref)
111{
112 struct cluster *c_ptr;
113 struct node *n_ptr;
114 u32 c_num;
115
116 if (!z_ptr)
117 return 0;
118 c_ptr = z_ptr->clusters[tipc_cluster(addr)];
119 if (!c_ptr)
120 return 0;
121 n_ptr = tipc_cltr_select_node(c_ptr, ref);
122 if (n_ptr)
123 return n_ptr;
124
125	/* Links to any other clusters within this zone? */
126 for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
127 c_ptr = z_ptr->clusters[c_num];
128 if (!c_ptr)
129 return 0;
130 n_ptr = tipc_cltr_select_node(c_ptr, ref);
131 if (n_ptr)
132 return n_ptr;
133 }
134 return 0;
135}
136
137u32 tipc_zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref)
138{
139 struct cluster *c_ptr;
140 u32 c_num;
141 u32 router;
142
143 if (!z_ptr)
144 return 0;
145 c_ptr = z_ptr->clusters[tipc_cluster(addr)];
146 router = c_ptr ? tipc_cltr_select_router(c_ptr, ref) : 0;
147 if (router)
148 return router;
149
150 /* Links to any other clusters within the zone? */
151 for (c_num = 1; c_num <= tipc_max_clusters; c_num++) {
152 c_ptr = z_ptr->clusters[c_num];
153 router = c_ptr ? tipc_cltr_select_router(c_ptr, ref) : 0;
154 if (router)
155 return router;
156 }
157 return 0;
158}
159
160
161u32 tipc_zone_next_node(u32 addr)
162{
163 struct cluster *c_ptr = tipc_cltr_find(addr);
164
165 if (c_ptr)
166 return tipc_cltr_next_node(c_ptr, addr);
167 return 0;
168}
169
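tipc_zone_create() above derives the zone number with tipc_zone(addr) and rebuilds a zone-level address with tipc_addr(z_num, 0, 0). The sketch below illustrates the <zone.cluster.node> packing those helpers imply; the bit layout is assumed from TIPC's address helpers (not part of this file) and the example_* names are illustrative.

/* Illustrative only: assumed <Z.C.N> layout with zone in bits 31-24,
 * cluster in bits 23-12 and node in bits 11-0 (u32 from <linux/types.h>). */
#include <linux/types.h>

static inline u32 example_tipc_addr(unsigned int zone, unsigned int cluster,
				    unsigned int node)
{
	return (zone << 24) | (cluster << 12) | node;
}

static inline unsigned int example_tipc_zone(u32 addr)
{
	return addr >> 24;
}

static inline unsigned int example_tipc_cluster(u32 addr)
{
	return (addr >> 12) & 0xfff;
}

/* e.g. example_tipc_addr(1, 1, 10) == 0x0100100a, and example_tipc_zone()
 * and example_tipc_cluster() both recover 1 from that address. */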
diff --git a/net/tipc/zone.h b/net/tipc/zone.h
new file mode 100644
index 000000000000..267999c5a240
--- /dev/null
+++ b/net/tipc/zone.h
@@ -0,0 +1,71 @@
1/*
2 * net/tipc/zone.h: Include file for TIPC zone management routines
3 *
4 * Copyright (c) 2000-2006, Ericsson AB
5 * Copyright (c) 2005, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37#ifndef _TIPC_ZONE_H
38#define _TIPC_ZONE_H
39
40#include "node_subscr.h"
41#include "net.h"
42
43
44/**
45 * struct _zone - TIPC zone structure
46 * @addr: network address of zone
47 * @clusters: array of pointers to all clusters within zone
48 * @links: (used for inter-zone communication)
49 */
50
51struct _zone {
52 u32 addr;
53 struct cluster *clusters[2]; /* currently limited to just 1 cluster */
54 u32 links;
55};
56
57struct node *tipc_zone_select_remote_node(struct _zone *z_ptr, u32 addr, u32 ref);
58u32 tipc_zone_select_router(struct _zone *z_ptr, u32 addr, u32 ref);
59void tipc_zone_remove_as_router(struct _zone *z_ptr, u32 router);
60void tipc_zone_send_external_routes(struct _zone *z_ptr, u32 dest);
61struct _zone *tipc_zone_create(u32 addr);
62void tipc_zone_delete(struct _zone *z_ptr);
63void tipc_zone_attach_cluster(struct _zone *z_ptr, struct cluster *c_ptr);
64u32 tipc_zone_next_node(u32 addr);
65
66static inline struct _zone *tipc_zone_find(u32 addr)
67{
68 return tipc_net.zones[tipc_zone(addr)];
69}
70
71#endif