Diffstat (limited to 'net/sctp')
 -rw-r--r--  net/sctp/Kconfig          |   89
 -rw-r--r--  net/sctp/Makefile         |   17
 -rw-r--r--  net/sctp/associola.c      | 1205
 -rw-r--r--  net/sctp/bind_addr.c      |  417
 -rw-r--r--  net/sctp/chunk.c          |  309
 -rw-r--r--  net/sctp/command.c        |   81
 -rw-r--r--  net/sctp/crc32c.c         |  220
 -rw-r--r--  net/sctp/debug.c          |  191
 -rw-r--r--  net/sctp/endpointola.c    |  389
 -rw-r--r--  net/sctp/input.c          |  913
 -rw-r--r--  net/sctp/inqueue.c        |  204
 -rw-r--r--  net/sctp/ipv6.c           | 1013
 -rw-r--r--  net/sctp/objcnt.c         |  140
 -rw-r--r--  net/sctp/output.c         |  646
 -rw-r--r--  net/sctp/outqueue.c       | 1734
 -rw-r--r--  net/sctp/primitive.c      |  219
 -rw-r--r--  net/sctp/proc.c           |  288
 -rw-r--r--  net/sctp/protocol.c       | 1240
 -rw-r--r--  net/sctp/sm_make_chunk.c  | 2766
 -rw-r--r--  net/sctp/sm_sideeffect.c  | 1395
 -rw-r--r--  net/sctp/sm_statefuns.c   | 5238
 -rw-r--r--  net/sctp/sm_statetable.c  | 1004
 -rw-r--r--  net/sctp/socket.c         | 4797
 -rw-r--r--  net/sctp/ssnmap.c         |  131
 -rw-r--r--  net/sctp/sysctl.c         |  251
 -rw-r--r--  net/sctp/transport.c      |  514
 -rw-r--r--  net/sctp/tsnmap.c         |  417
 -rw-r--r--  net/sctp/ulpevent.c       |  942
 -rw-r--r--  net/sctp/ulpqueue.c       |  864
29 files changed, 27634 insertions, 0 deletions
diff --git a/net/sctp/Kconfig b/net/sctp/Kconfig
new file mode 100644
index 000000000000..9cba49e2ad43
--- /dev/null
+++ b/net/sctp/Kconfig
@@ -0,0 +1,89 @@
1 | # | ||
2 | # SCTP configuration | ||
3 | # | ||
4 | |||
5 | menu "SCTP Configuration (EXPERIMENTAL)" | ||
6 | depends on INET && EXPERIMENTAL | ||
7 | |||
8 | config IP_SCTP | ||
9 | tristate "The SCTP Protocol (EXPERIMENTAL)" | ||
10 | depends on IPV6 || IPV6=n | ||
11 | select CRYPTO if SCTP_HMAC_SHA1 || SCTP_HMAC_MD5 | ||
12 | select CRYPTO_HMAC if SCTP_HMAC_SHA1 || SCTP_HMAC_MD5 | ||
13 | select CRYPTO_SHA1 if SCTP_HMAC_SHA1 | ||
14 | select CRYPTO_MD5 if SCTP_HMAC_MD5 | ||
15 | ---help--- | ||
16 | Stream Control Transmission Protocol | ||
17 | |||
18 | From RFC 2960 <http://www.ietf.org/rfc/rfc2960.txt>. | ||
19 | |||
20 | "SCTP is a reliable transport protocol operating on top of a | ||
21 | connectionless packet network such as IP. It offers the following | ||
22 | services to its users: | ||
23 | |||
24 | -- acknowledged error-free non-duplicated transfer of user data, | ||
25 | -- data fragmentation to conform to discovered path MTU size, | ||
26 | -- sequenced delivery of user messages within multiple streams, | ||
27 | with an option for order-of-arrival delivery of individual user | ||
28 | messages, | ||
29 | -- optional bundling of multiple user messages into a single SCTP | ||
30 | packet, and | ||
31 | -- network-level fault tolerance through supporting of multi- | ||
32 | homing at either or both ends of an association." | ||
33 | |||
34 | To compile this protocol support as a module, choose M here: the | ||
35 | module will be called sctp. | ||
36 | |||
37 | If in doubt, say N. | ||
38 | |||
39 | config SCTP_DBG_MSG | ||
40 | bool "SCTP: Debug messages" | ||
41 | depends on IP_SCTP | ||
42 | help | ||
43 | If you say Y, this will enable verbose debugging messages. | ||
44 | |||
45 | If unsure, say N. However, if you are running into problems, use | ||
46 | this option to gather detailed trace information. | ||
47 | |||
48 | config SCTP_DBG_OBJCNT | ||
49 | bool "SCTP: Debug object counts" | ||
50 | depends on IP_SCTP | ||
51 | help | ||
52 | If you say Y, this will enable debugging support for counting the | ||
53 | type of objects that are currently allocated. This is useful for | ||
54 | identifying memory leaks. If the /proc filesystem is enabled this | ||
55 | debug information can be viewed by | ||
56 | 'cat /proc/net/sctp/sctp_dbg_objcnt' | ||
57 | |||
58 | If unsure, say N. | ||
59 | |||
60 | choice | ||
61 | prompt "SCTP: Cookie HMAC Algorithm" | ||
62 | depends on IP_SCTP | ||
63 | default SCTP_HMAC_MD5 | ||
64 | help | ||
65 | HMAC algorithm to be used during association initialization. It | ||
66 | is strongly recommended to use HMAC-SHA1 or HMAC-MD5. See | ||
67 | the Cryptographic API configuration and enable those algorithms | ||
68 | to make them usable by SCTP. | ||
69 | |||
70 | config SCTP_HMAC_NONE | ||
71 | bool "None" | ||
72 | help | ||
73 | Choosing this disables the use of an HMAC during association | ||
74 | establishment. It is advised to use either HMAC-MD5 or HMAC-SHA1. | ||
75 | |||
76 | config SCTP_HMAC_SHA1 | ||
77 | bool "HMAC-SHA1" | ||
78 | help | ||
79 | Enable the use of HMAC-SHA1 during association establishment. It | ||
80 | is advised to use either HMAC-MD5 or HMAC-SHA1. | ||
81 | |||
82 | config SCTP_HMAC_MD5 | ||
83 | bool "HMAC-MD5" | ||
84 | help | ||
85 | Enable the use of HMAC-MD5 during association establishment. It is | ||
86 | advised to use either HMAC-MD5 or HMAC-SHA1. | ||
87 | |||
88 | endchoice | ||
89 | endmenu | ||
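The three cookie HMAC options above form a mutually exclusive choice, and the selected symbol is visible to C code at build time as a CONFIG_* preprocessor define. A minimal sketch of how such a choice is typically consumed, assuming an SCTP_COOKIE_HMAC_ALG macro along the lines of the one used by the SCTP headers (the macro name is illustrative, not taken from this patch):

    #include <stdio.h>

    /* The chosen Kconfig symbol becomes a CONFIG_* define; it selects the
     * digest name handed to the crypto layer when the state cookie is signed.
     */
    #if defined(CONFIG_SCTP_HMAC_MD5)
    #define SCTP_COOKIE_HMAC_ALG "md5"
    #elif defined(CONFIG_SCTP_HMAC_SHA1)
    #define SCTP_COOKIE_HMAC_ALG "sha1"
    #else
    #define SCTP_COOKIE_HMAC_ALG NULL      /* CONFIG_SCTP_HMAC_NONE */
    #endif

    int main(void)
    {
            printf("cookie HMAC: %s\n",
                   SCTP_COOKIE_HMAC_ALG ? SCTP_COOKIE_HMAC_ALG : "none");
            return 0;
    }

Building this sketch with, say, cc -DCONFIG_SCTP_HMAC_SHA1 prints "sha1", mirroring a .config that sets CONFIG_SCTP_HMAC_SHA1=y.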
diff --git a/net/sctp/Makefile b/net/sctp/Makefile
new file mode 100644
index 000000000000..70c828bbe444
--- /dev/null
+++ b/net/sctp/Makefile
@@ -0,0 +1,17 @@
1 | # | ||
2 | # Makefile for SCTP support code. | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_IP_SCTP) += sctp.o | ||
6 | |||
7 | sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \ | ||
8 | protocol.o endpointola.o associola.o \ | ||
9 | transport.o chunk.o sm_make_chunk.o ulpevent.o \ | ||
10 | inqueue.o outqueue.o ulpqueue.o command.o \ | ||
11 | tsnmap.o bind_addr.o socket.o primitive.o \ | ||
12 | output.o input.o debug.o ssnmap.o proc.o crc32c.o | ||
13 | |||
14 | sctp-$(CONFIG_SCTP_DBG_OBJCNT) += objcnt.o | ||
15 | sctp-$(CONFIG_SYSCTL) += sysctl.o | ||
16 | |||
17 | sctp-$(subst m,y,$(CONFIG_IPV6)) += ipv6.o | ||
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
new file mode 100644
index 000000000000..663843d97a92
--- /dev/null
+++ b/net/sctp/associola.c
@@ -0,0 +1,1205 @@
1 | /* SCTP kernel reference Implementation | ||
2 | * (C) Copyright IBM Corp. 2001, 2004 | ||
3 | * Copyright (c) 1999-2000 Cisco, Inc. | ||
4 | * Copyright (c) 1999-2001 Motorola, Inc. | ||
5 | * Copyright (c) 2001 Intel Corp. | ||
6 | * Copyright (c) 2001 La Monte H.P. Yarroll | ||
7 | * | ||
8 | * This file is part of the SCTP kernel reference Implementation | ||
9 | * | ||
10 | * This module provides the abstraction for an SCTP association. | ||
11 | * | ||
12 | * The SCTP reference implementation is free software; | ||
13 | * you can redistribute it and/or modify it under the terms of | ||
14 | * the GNU General Public License as published by | ||
15 | * the Free Software Foundation; either version 2, or (at your option) | ||
16 | * any later version. | ||
17 | * | ||
18 | * The SCTP reference implementation is distributed in the hope that it | ||
19 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
20 | * ************************ | ||
21 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
22 | * See the GNU General Public License for more details. | ||
23 | * | ||
24 | * You should have received a copy of the GNU General Public License | ||
25 | * along with GNU CC; see the file COPYING. If not, write to | ||
26 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
27 | * Boston, MA 02111-1307, USA. | ||
28 | * | ||
29 | * Please send any bug reports or fixes you make to the | ||
30 | * email address(es): | ||
31 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
32 | * | ||
33 | * Or submit a bug report through the following website: | ||
34 | * http://www.sf.net/projects/lksctp | ||
35 | * | ||
36 | * Written or modified by: | ||
37 | * La Monte H.P. Yarroll <piggy@acm.org> | ||
38 | * Karl Knutson <karl@athena.chicago.il.us> | ||
39 | * Jon Grimm <jgrimm@us.ibm.com> | ||
40 | * Xingang Guo <xingang.guo@intel.com> | ||
41 | * Hui Huang <hui.huang@nokia.com> | ||
42 | * Sridhar Samudrala <sri@us.ibm.com> | ||
43 | * Daisy Chang <daisyc@us.ibm.com> | ||
44 | * Ryan Layer <rmlayer@us.ibm.com> | ||
45 | * Kevin Gao <kevin.gao@intel.com> | ||
46 | * | ||
47 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
48 | * be incorporated into the next SCTP release. | ||
49 | */ | ||
50 | |||
51 | #include <linux/types.h> | ||
52 | #include <linux/fcntl.h> | ||
53 | #include <linux/poll.h> | ||
54 | #include <linux/init.h> | ||
55 | #include <linux/sched.h> | ||
56 | |||
57 | #include <linux/slab.h> | ||
58 | #include <linux/in.h> | ||
59 | #include <net/ipv6.h> | ||
60 | #include <net/sctp/sctp.h> | ||
61 | #include <net/sctp/sm.h> | ||
62 | |||
63 | /* Forward declarations for internal functions. */ | ||
64 | static void sctp_assoc_bh_rcv(struct sctp_association *asoc); | ||
65 | |||
66 | |||
67 | /* 1st Level Abstractions. */ | ||
68 | |||
69 | /* Initialize a new association from provided memory. */ | ||
70 | static struct sctp_association *sctp_association_init(struct sctp_association *asoc, | ||
71 | const struct sctp_endpoint *ep, | ||
72 | const struct sock *sk, | ||
73 | sctp_scope_t scope, | ||
74 | int gfp) | ||
75 | { | ||
76 | struct sctp_sock *sp; | ||
77 | int i; | ||
78 | |||
79 | /* Retrieve the SCTP per socket area. */ | ||
80 | sp = sctp_sk((struct sock *)sk); | ||
81 | |||
82 | /* Init all variables to a known value. */ | ||
83 | memset(asoc, 0, sizeof(struct sctp_association)); | ||
84 | |||
85 | /* Discarding const is appropriate here. */ | ||
86 | asoc->ep = (struct sctp_endpoint *)ep; | ||
87 | sctp_endpoint_hold(asoc->ep); | ||
88 | |||
89 | /* Hold the sock. */ | ||
90 | asoc->base.sk = (struct sock *)sk; | ||
91 | sock_hold(asoc->base.sk); | ||
92 | |||
93 | /* Initialize the common base substructure. */ | ||
94 | asoc->base.type = SCTP_EP_TYPE_ASSOCIATION; | ||
95 | |||
96 | /* Initialize the object handling fields. */ | ||
97 | atomic_set(&asoc->base.refcnt, 1); | ||
98 | asoc->base.dead = 0; | ||
99 | asoc->base.malloced = 0; | ||
100 | |||
101 | /* Initialize the bind addr area. */ | ||
102 | sctp_bind_addr_init(&asoc->base.bind_addr, ep->base.bind_addr.port); | ||
103 | rwlock_init(&asoc->base.addr_lock); | ||
104 | |||
105 | asoc->state = SCTP_STATE_CLOSED; | ||
106 | |||
107 | /* Set these values from the socket values; a conversion from | ||
108 | * milliseconds to seconds/microseconds must also be done. | ||
109 | */ | ||
110 | asoc->cookie_life.tv_sec = sp->assocparams.sasoc_cookie_life / 1000; | ||
111 | asoc->cookie_life.tv_usec = (sp->assocparams.sasoc_cookie_life % 1000) | ||
112 | * 1000; | ||
113 | asoc->pmtu = 0; | ||
114 | asoc->frag_point = 0; | ||
115 | |||
116 | /* Set the association max_retrans and RTO values from the | ||
117 | * socket values. | ||
118 | */ | ||
119 | asoc->max_retrans = sp->assocparams.sasoc_asocmaxrxt; | ||
120 | asoc->rto_initial = msecs_to_jiffies(sp->rtoinfo.srto_initial); | ||
121 | asoc->rto_max = msecs_to_jiffies(sp->rtoinfo.srto_max); | ||
122 | asoc->rto_min = msecs_to_jiffies(sp->rtoinfo.srto_min); | ||
123 | |||
124 | asoc->overall_error_count = 0; | ||
125 | |||
126 | /* Initialize the maximum number of new data packets that can be sent | ||
127 | * in a burst. | ||
128 | */ | ||
129 | asoc->max_burst = sctp_max_burst; | ||
130 | |||
131 | /* Copy things from the endpoint. */ | ||
132 | for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) { | ||
133 | asoc->timeouts[i] = ep->timeouts[i]; | ||
134 | init_timer(&asoc->timers[i]); | ||
135 | asoc->timers[i].function = sctp_timer_events[i]; | ||
136 | asoc->timers[i].data = (unsigned long) asoc; | ||
137 | } | ||
138 | |||
139 | /* Pull default initialization values from the sock options. | ||
140 | * Note: This assumes that the values have already been | ||
141 | * validated in the sock. | ||
142 | */ | ||
143 | asoc->c.sinit_max_instreams = sp->initmsg.sinit_max_instreams; | ||
144 | asoc->c.sinit_num_ostreams = sp->initmsg.sinit_num_ostreams; | ||
145 | asoc->max_init_attempts = sp->initmsg.sinit_max_attempts; | ||
146 | |||
147 | asoc->max_init_timeo = | ||
148 | msecs_to_jiffies(sp->initmsg.sinit_max_init_timeo); | ||
149 | |||
150 | /* Allocate storage for the ssnmap after the inbound and outbound | ||
151 | * streams have been negotiated during Init. | ||
152 | */ | ||
153 | asoc->ssnmap = NULL; | ||
154 | |||
155 | /* Set the local window size for receive. | ||
156 | * This is also the rcvbuf space per association. | ||
157 | * RFC 2960 - A SCTP receiver MUST be able to receive a minimum of | ||
158 | * 1500 bytes in one SCTP packet. | ||
159 | */ | ||
160 | if (sk->sk_rcvbuf < SCTP_DEFAULT_MINWINDOW) | ||
161 | asoc->rwnd = SCTP_DEFAULT_MINWINDOW; | ||
162 | else | ||
163 | asoc->rwnd = sk->sk_rcvbuf; | ||
164 | |||
165 | asoc->a_rwnd = asoc->rwnd; | ||
166 | |||
167 | asoc->rwnd_over = 0; | ||
168 | |||
169 | /* Use my own max window until I learn something better. */ | ||
170 | asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW; | ||
171 | |||
172 | /* Set the sndbuf size for transmit. */ | ||
173 | asoc->sndbuf_used = 0; | ||
174 | |||
175 | init_waitqueue_head(&asoc->wait); | ||
176 | |||
177 | asoc->c.my_vtag = sctp_generate_tag(ep); | ||
178 | asoc->peer.i.init_tag = 0; /* INIT needs a vtag of 0. */ | ||
179 | asoc->c.peer_vtag = 0; | ||
180 | asoc->c.my_ttag = 0; | ||
181 | asoc->c.peer_ttag = 0; | ||
182 | asoc->c.my_port = ep->base.bind_addr.port; | ||
183 | |||
184 | asoc->c.initial_tsn = sctp_generate_tsn(ep); | ||
185 | |||
186 | asoc->next_tsn = asoc->c.initial_tsn; | ||
187 | |||
188 | asoc->ctsn_ack_point = asoc->next_tsn - 1; | ||
189 | asoc->adv_peer_ack_point = asoc->ctsn_ack_point; | ||
190 | asoc->highest_sacked = asoc->ctsn_ack_point; | ||
191 | asoc->last_cwr_tsn = asoc->ctsn_ack_point; | ||
192 | asoc->unack_data = 0; | ||
193 | |||
194 | SCTP_DEBUG_PRINTK("myctsnap for %s INIT as 0x%x.\n", | ||
195 | asoc->ep->debug_name, | ||
196 | asoc->ctsn_ack_point); | ||
197 | |||
198 | /* ADDIP Section 4.1 Asconf Chunk Procedures | ||
199 | * | ||
200 | * When an endpoint has an ASCONF signaled change to be sent to the | ||
201 | * remote endpoint it should do the following: | ||
202 | * ... | ||
203 | * A2) a serial number should be assigned to the chunk. The serial | ||
204 | * number SHOULD be a monotonically increasing number. The serial | ||
205 | * numbers SHOULD be initialized at the start of the | ||
206 | * association to the same value as the initial TSN. | ||
207 | */ | ||
208 | asoc->addip_serial = asoc->c.initial_tsn; | ||
209 | |||
210 | skb_queue_head_init(&asoc->addip_chunks); | ||
211 | |||
212 | /* Make an empty list of remote transport addresses. */ | ||
213 | INIT_LIST_HEAD(&asoc->peer.transport_addr_list); | ||
214 | |||
215 | /* RFC 2960 5.1 Normal Establishment of an Association | ||
216 | * | ||
217 | * After the reception of the first data chunk in an | ||
218 | * association the endpoint must immediately respond with a | ||
219 | * sack to acknowledge the data chunk. Subsequent | ||
220 | * acknowledgements should be done as described in Section | ||
221 | * 6.2. | ||
222 | * | ||
223 | * [We implement this by telling a new association that it | ||
224 | * already received one packet.] | ||
225 | */ | ||
226 | asoc->peer.sack_needed = 1; | ||
227 | |||
228 | /* Assume that the peer recognizes ASCONF until reported otherwise | ||
229 | * via an ERROR chunk. | ||
230 | */ | ||
231 | asoc->peer.asconf_capable = 1; | ||
232 | |||
233 | /* Create an input queue. */ | ||
234 | sctp_inq_init(&asoc->base.inqueue); | ||
235 | sctp_inq_set_th_handler(&asoc->base.inqueue, | ||
236 | (void (*)(void *))sctp_assoc_bh_rcv, | ||
237 | asoc); | ||
238 | |||
239 | /* Create an output queue. */ | ||
240 | sctp_outq_init(asoc, &asoc->outqueue); | ||
241 | |||
242 | if (!sctp_ulpq_init(&asoc->ulpq, asoc)) | ||
243 | goto fail_init; | ||
244 | |||
245 | /* Set up the tsn tracking. */ | ||
246 | sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_SIZE, 0); | ||
247 | |||
248 | asoc->need_ecne = 0; | ||
249 | |||
250 | asoc->assoc_id = 0; | ||
251 | |||
252 | /* Assume that peer would support both address types unless we are | ||
253 | * told otherwise. | ||
254 | */ | ||
255 | asoc->peer.ipv4_address = 1; | ||
256 | asoc->peer.ipv6_address = 1; | ||
257 | INIT_LIST_HEAD(&asoc->asocs); | ||
258 | |||
259 | asoc->autoclose = sp->autoclose; | ||
260 | |||
261 | asoc->default_stream = sp->default_stream; | ||
262 | asoc->default_ppid = sp->default_ppid; | ||
263 | asoc->default_flags = sp->default_flags; | ||
264 | asoc->default_context = sp->default_context; | ||
265 | asoc->default_timetolive = sp->default_timetolive; | ||
266 | |||
267 | return asoc; | ||
268 | |||
269 | fail_init: | ||
270 | sctp_endpoint_put(asoc->ep); | ||
271 | sock_put(asoc->base.sk); | ||
272 | return NULL; | ||
273 | } | ||
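The cookie_life assignment near the top of sctp_association_init() splits sasoc_cookie_life, given in milliseconds, into the seconds/microseconds pair of a timeval, while the RTO values go through msecs_to_jiffies(). A standalone sketch of the millisecond split (ms_to_sec_usec is a hypothetical helper, not part of the patch):

    #include <stdio.h>

    /* Split a millisecond count the same way asoc->cookie_life is filled in:
     * whole seconds first, then the remainder scaled to microseconds.
     */
    static void ms_to_sec_usec(unsigned long ms, long *sec, long *usec)
    {
            *sec  = ms / 1000;
            *usec = (ms % 1000) * 1000;
    }

    int main(void)
    {
            long sec, usec;

            ms_to_sec_usec(60000, &sec, &usec);
            printf("60000 ms -> %ld s, %ld us\n", sec, usec);  /* 60 s, 0 us */

            ms_to_sec_usec(2750, &sec, &usec);
            printf("2750 ms  -> %ld s, %ld us\n", sec, usec);  /* 2 s, 750000 us */
            return 0;
    }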
274 | |||
275 | /* Allocate and initialize a new association */ | ||
276 | struct sctp_association *sctp_association_new(const struct sctp_endpoint *ep, | ||
277 | const struct sock *sk, | ||
278 | sctp_scope_t scope, int gfp) | ||
279 | { | ||
280 | struct sctp_association *asoc; | ||
281 | |||
282 | asoc = t_new(struct sctp_association, gfp); | ||
283 | if (!asoc) | ||
284 | goto fail; | ||
285 | |||
286 | if (!sctp_association_init(asoc, ep, sk, scope, gfp)) | ||
287 | goto fail_init; | ||
288 | |||
289 | asoc->base.malloced = 1; | ||
290 | SCTP_DBG_OBJCNT_INC(assoc); | ||
291 | |||
292 | return asoc; | ||
293 | |||
294 | fail_init: | ||
295 | kfree(asoc); | ||
296 | fail: | ||
297 | return NULL; | ||
298 | } | ||
299 | |||
300 | /* Free this association if possible. There may still be users, so | ||
301 | * the actual deallocation may be delayed. | ||
302 | */ | ||
303 | void sctp_association_free(struct sctp_association *asoc) | ||
304 | { | ||
305 | struct sock *sk = asoc->base.sk; | ||
306 | struct sctp_transport *transport; | ||
307 | struct list_head *pos, *temp; | ||
308 | int i; | ||
309 | |||
310 | list_del(&asoc->asocs); | ||
311 | |||
312 | /* Decrement the backlog value for a TCP-style listening socket. */ | ||
313 | if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) | ||
314 | sk->sk_ack_backlog--; | ||
315 | |||
316 | /* Mark as dead, so other users can know this structure is | ||
317 | * going away. | ||
318 | */ | ||
319 | asoc->base.dead = 1; | ||
320 | |||
321 | /* Dispose of any data lying around in the outqueue. */ | ||
322 | sctp_outq_free(&asoc->outqueue); | ||
323 | |||
324 | /* Dispose of any pending messages for the upper layer. */ | ||
325 | sctp_ulpq_free(&asoc->ulpq); | ||
326 | |||
327 | /* Dispose of any pending chunks on the inqueue. */ | ||
328 | sctp_inq_free(&asoc->base.inqueue); | ||
329 | |||
330 | /* Free ssnmap storage. */ | ||
331 | sctp_ssnmap_free(asoc->ssnmap); | ||
332 | |||
333 | /* Clean up the bound address list. */ | ||
334 | sctp_bind_addr_free(&asoc->base.bind_addr); | ||
335 | |||
336 | /* Do we need to go through all of our timers and | ||
337 | * delete them? To be safe we will try to delete all, but we | ||
338 | * should be able to go through and make a guess based | ||
339 | * on our state. | ||
340 | */ | ||
341 | for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i) { | ||
342 | if (timer_pending(&asoc->timers[i]) && | ||
343 | del_timer(&asoc->timers[i])) | ||
344 | sctp_association_put(asoc); | ||
345 | } | ||
346 | |||
347 | /* Free peer's cached cookie. */ | ||
348 | if (asoc->peer.cookie) { | ||
349 | kfree(asoc->peer.cookie); | ||
350 | } | ||
351 | |||
352 | /* Release the transport structures. */ | ||
353 | list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { | ||
354 | transport = list_entry(pos, struct sctp_transport, transports); | ||
355 | list_del(pos); | ||
356 | sctp_transport_free(transport); | ||
357 | } | ||
358 | |||
359 | /* Free any cached ASCONF_ACK chunk. */ | ||
360 | if (asoc->addip_last_asconf_ack) | ||
361 | sctp_chunk_free(asoc->addip_last_asconf_ack); | ||
362 | |||
363 | /* Free any cached ASCONF chunk. */ | ||
364 | if (asoc->addip_last_asconf) | ||
365 | sctp_chunk_free(asoc->addip_last_asconf); | ||
366 | |||
367 | sctp_association_put(asoc); | ||
368 | } | ||
369 | |||
370 | /* Cleanup and free up an association. */ | ||
371 | static void sctp_association_destroy(struct sctp_association *asoc) | ||
372 | { | ||
373 | SCTP_ASSERT(asoc->base.dead, "Assoc is not dead", return); | ||
374 | |||
375 | sctp_endpoint_put(asoc->ep); | ||
376 | sock_put(asoc->base.sk); | ||
377 | |||
378 | if (asoc->assoc_id != 0) { | ||
379 | spin_lock_bh(&sctp_assocs_id_lock); | ||
380 | idr_remove(&sctp_assocs_id, asoc->assoc_id); | ||
381 | spin_unlock_bh(&sctp_assocs_id_lock); | ||
382 | } | ||
383 | |||
384 | if (asoc->base.malloced) { | ||
385 | kfree(asoc); | ||
386 | SCTP_DBG_OBJCNT_DEC(assoc); | ||
387 | } | ||
388 | } | ||
389 | |||
390 | /* Change the primary destination address for the peer. */ | ||
391 | void sctp_assoc_set_primary(struct sctp_association *asoc, | ||
392 | struct sctp_transport *transport) | ||
393 | { | ||
394 | asoc->peer.primary_path = transport; | ||
395 | |||
396 | /* Set a default msg_name for events. */ | ||
397 | memcpy(&asoc->peer.primary_addr, &transport->ipaddr, | ||
398 | sizeof(union sctp_addr)); | ||
399 | |||
400 | /* If the primary path is changing, assume that the | ||
401 | * user wants to use this new path. | ||
402 | */ | ||
403 | if (transport->active) | ||
404 | asoc->peer.active_path = transport; | ||
405 | |||
406 | /* | ||
407 | * SFR-CACC algorithm: | ||
408 | * Upon the receipt of a request to change the primary | ||
409 | * destination address, on the data structure for the new | ||
410 | * primary destination, the sender MUST do the following: | ||
411 | * | ||
412 | * 1) If CHANGEOVER_ACTIVE is set, then there was a switch | ||
413 | * to this destination address earlier. The sender MUST set | ||
414 | * CYCLING_CHANGEOVER to indicate that this switch is a | ||
415 | * double switch to the same destination address. | ||
416 | */ | ||
417 | if (transport->cacc.changeover_active) | ||
418 | transport->cacc.cycling_changeover = 1; | ||
419 | |||
420 | /* 2) The sender MUST set CHANGEOVER_ACTIVE to indicate that | ||
421 | * a changeover has occurred. | ||
422 | */ | ||
423 | transport->cacc.changeover_active = 1; | ||
424 | |||
425 | /* 3) The sender MUST store the next TSN to be sent in | ||
426 | * next_tsn_at_change. | ||
427 | */ | ||
428 | transport->cacc.next_tsn_at_change = asoc->next_tsn; | ||
429 | } | ||
430 | |||
431 | /* Add a transport address to an association. */ | ||
432 | struct sctp_transport *sctp_assoc_add_peer(struct sctp_association *asoc, | ||
433 | const union sctp_addr *addr, | ||
434 | int gfp) | ||
435 | { | ||
436 | struct sctp_transport *peer; | ||
437 | struct sctp_sock *sp; | ||
438 | unsigned short port; | ||
439 | |||
440 | sp = sctp_sk(asoc->base.sk); | ||
441 | |||
442 | /* AF_INET and AF_INET6 share a common port field. */ | ||
443 | port = addr->v4.sin_port; | ||
444 | |||
445 | /* Set the port if it has not been set yet. */ | ||
446 | if (0 == asoc->peer.port) | ||
447 | asoc->peer.port = port; | ||
448 | |||
449 | /* Check to see if this is a duplicate. */ | ||
450 | peer = sctp_assoc_lookup_paddr(asoc, addr); | ||
451 | if (peer) | ||
452 | return peer; | ||
453 | |||
454 | peer = sctp_transport_new(addr, gfp); | ||
455 | if (!peer) | ||
456 | return NULL; | ||
457 | |||
458 | sctp_transport_set_owner(peer, asoc); | ||
459 | |||
460 | /* Initialize the pmtu of the transport. */ | ||
461 | sctp_transport_pmtu(peer); | ||
462 | |||
463 | /* If this is the first transport addr on this association, | ||
464 | * initialize the association PMTU to the peer's PMTU. | ||
465 | * If not and the current association PMTU is higher than the new | ||
466 | * peer's PMTU, reset the association PMTU to the new peer's PMTU. | ||
467 | */ | ||
468 | if (asoc->pmtu) | ||
469 | asoc->pmtu = min_t(int, peer->pmtu, asoc->pmtu); | ||
470 | else | ||
471 | asoc->pmtu = peer->pmtu; | ||
472 | |||
473 | SCTP_DEBUG_PRINTK("sctp_assoc_add_peer:association %p PMTU set to " | ||
474 | "%d\n", asoc, asoc->pmtu); | ||
475 | |||
476 | asoc->frag_point = sctp_frag_point(sp, asoc->pmtu); | ||
477 | |||
478 | /* The asoc->peer.port might not be meaningful yet, but | ||
479 | * initialize the packet structure anyway. | ||
480 | */ | ||
481 | sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port, | ||
482 | asoc->peer.port); | ||
483 | |||
484 | /* 7.2.1 Slow-Start | ||
485 | * | ||
486 | * o The initial cwnd before DATA transmission or after a sufficiently | ||
487 | * long idle period MUST be set to | ||
488 | * min(4*MTU, max(2*MTU, 4380 bytes)) | ||
489 | * | ||
490 | * o The initial value of ssthresh MAY be arbitrarily high | ||
491 | * (for example, implementations MAY use the size of the | ||
492 | * receiver advertised window). | ||
493 | */ | ||
494 | peer->cwnd = min(4*asoc->pmtu, max_t(__u32, 2*asoc->pmtu, 4380)); | ||
495 | |||
496 | /* At this point, we may not have the receiver's advertised window, | ||
497 | * so initialize ssthresh to the default value and it will be set | ||
498 | * later when we process the INIT. | ||
499 | */ | ||
500 | peer->ssthresh = SCTP_DEFAULT_MAXWINDOW; | ||
501 | |||
502 | peer->partial_bytes_acked = 0; | ||
503 | peer->flight_size = 0; | ||
504 | |||
505 | /* By default, enable heartbeat for peer address. */ | ||
506 | peer->hb_allowed = 1; | ||
507 | |||
508 | /* Initialize the peer's heartbeat interval based on the | ||
509 | * sock configured value. | ||
510 | */ | ||
511 | peer->hb_interval = msecs_to_jiffies(sp->paddrparam.spp_hbinterval); | ||
512 | |||
513 | /* Set the path max_retrans. */ | ||
514 | peer->max_retrans = sp->paddrparam.spp_pathmaxrxt; | ||
515 | |||
516 | /* Set the transport's RTO.initial value */ | ||
517 | peer->rto = asoc->rto_initial; | ||
518 | |||
519 | /* Attach the remote transport to our asoc. */ | ||
520 | list_add_tail(&peer->transports, &asoc->peer.transport_addr_list); | ||
521 | |||
522 | /* If we do not yet have a primary path, set one. */ | ||
523 | if (!asoc->peer.primary_path) { | ||
524 | sctp_assoc_set_primary(asoc, peer); | ||
525 | asoc->peer.retran_path = peer; | ||
526 | } | ||
527 | |||
528 | if (asoc->peer.active_path == asoc->peer.retran_path) | ||
529 | asoc->peer.retran_path = peer; | ||
530 | |||
531 | return peer; | ||
532 | } | ||
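The slow-start block in sctp_assoc_add_peer() applies the RFC 2960 7.2.1 rule quoted in its comment: cwnd starts at min(4*MTU, max(2*MTU, 4380 bytes)), while ssthresh may start arbitrarily high. A self-contained version of that formula with two worked PMTU values (initial_cwnd is an illustrative name):

    #include <stdio.h>

    /* RFC 2960 7.2.1: initial cwnd = min(4*MTU, max(2*MTU, 4380 bytes)). */
    static unsigned int initial_cwnd(unsigned int pmtu)
    {
            unsigned int lower = 2 * pmtu > 4380 ? 2 * pmtu : 4380;
            unsigned int upper = 4 * pmtu;

            return upper < lower ? upper : lower;
    }

    int main(void)
    {
            /* 1500-byte PMTU: max(3000, 4380) = 4380, below the 6000 cap. */
            printf("pmtu 1500 -> cwnd %u\n", initial_cwnd(1500));
            /* 9000-byte PMTU: max(18000, 4380) = 18000, below the 36000 cap. */
            printf("pmtu 9000 -> cwnd %u\n", initial_cwnd(9000));
            return 0;
    }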
533 | |||
534 | /* Delete a transport address from an association. */ | ||
535 | void sctp_assoc_del_peer(struct sctp_association *asoc, | ||
536 | const union sctp_addr *addr) | ||
537 | { | ||
538 | struct list_head *pos; | ||
539 | struct list_head *temp; | ||
540 | struct sctp_transport *peer = NULL; | ||
541 | struct sctp_transport *transport; | ||
542 | |||
543 | list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { | ||
544 | transport = list_entry(pos, struct sctp_transport, transports); | ||
545 | if (sctp_cmp_addr_exact(addr, &transport->ipaddr)) { | ||
546 | peer = transport; | ||
547 | list_del(pos); | ||
548 | break; | ||
549 | } | ||
550 | } | ||
551 | |||
552 | /* The address we want to delete is not in the association. */ | ||
553 | if (!peer) | ||
554 | return; | ||
555 | |||
556 | /* Get the first transport of asoc. */ | ||
557 | pos = asoc->peer.transport_addr_list.next; | ||
558 | transport = list_entry(pos, struct sctp_transport, transports); | ||
559 | |||
560 | /* Update any entries that match the peer to be deleted. */ | ||
561 | if (asoc->peer.primary_path == peer) | ||
562 | sctp_assoc_set_primary(asoc, transport); | ||
563 | if (asoc->peer.active_path == peer) | ||
564 | asoc->peer.active_path = transport; | ||
565 | if (asoc->peer.retran_path == peer) | ||
566 | asoc->peer.retran_path = transport; | ||
567 | if (asoc->peer.last_data_from == peer) | ||
568 | asoc->peer.last_data_from = transport; | ||
569 | |||
570 | sctp_transport_free(peer); | ||
571 | } | ||
572 | |||
573 | /* Lookup a transport by address. */ | ||
574 | struct sctp_transport *sctp_assoc_lookup_paddr( | ||
575 | const struct sctp_association *asoc, | ||
576 | const union sctp_addr *address) | ||
577 | { | ||
578 | struct sctp_transport *t; | ||
579 | struct list_head *pos; | ||
580 | |||
581 | /* Cycle through all transports searching for a peer address. */ | ||
582 | |||
583 | list_for_each(pos, &asoc->peer.transport_addr_list) { | ||
584 | t = list_entry(pos, struct sctp_transport, transports); | ||
585 | if (sctp_cmp_addr_exact(address, &t->ipaddr)) | ||
586 | return t; | ||
587 | } | ||
588 | |||
589 | return NULL; | ||
590 | } | ||
591 | |||
592 | /* Engage in transport control operations. | ||
593 | * Mark the transport up or down and send a notification to the user. | ||
594 | * Select and update the new active and retran paths. | ||
595 | */ | ||
596 | void sctp_assoc_control_transport(struct sctp_association *asoc, | ||
597 | struct sctp_transport *transport, | ||
598 | sctp_transport_cmd_t command, | ||
599 | sctp_sn_error_t error) | ||
600 | { | ||
601 | struct sctp_transport *t = NULL; | ||
602 | struct sctp_transport *first; | ||
603 | struct sctp_transport *second; | ||
604 | struct sctp_ulpevent *event; | ||
605 | struct list_head *pos; | ||
606 | int spc_state = 0; | ||
607 | |||
608 | /* Record the transition on the transport. */ | ||
609 | switch (command) { | ||
610 | case SCTP_TRANSPORT_UP: | ||
611 | transport->active = SCTP_ACTIVE; | ||
612 | spc_state = SCTP_ADDR_AVAILABLE; | ||
613 | break; | ||
614 | |||
615 | case SCTP_TRANSPORT_DOWN: | ||
616 | transport->active = SCTP_INACTIVE; | ||
617 | spc_state = SCTP_ADDR_UNREACHABLE; | ||
618 | break; | ||
619 | |||
620 | default: | ||
621 | return; | ||
622 | }; | ||
623 | |||
624 | /* Generate and send a SCTP_PEER_ADDR_CHANGE notification to the | ||
625 | * user. | ||
626 | */ | ||
627 | event = sctp_ulpevent_make_peer_addr_change(asoc, | ||
628 | (struct sockaddr_storage *) &transport->ipaddr, | ||
629 | 0, spc_state, error, GFP_ATOMIC); | ||
630 | if (event) | ||
631 | sctp_ulpq_tail_event(&asoc->ulpq, event); | ||
632 | |||
633 | /* Select new active and retran paths. */ | ||
634 | |||
635 | /* Look for the two most recently used active transports. | ||
636 | * | ||
637 | * This code produces the wrong ordering whenever jiffies | ||
638 | * rolls over, but we still get usable transports, so we don't | ||
639 | * worry about it. | ||
640 | */ | ||
641 | first = NULL; second = NULL; | ||
642 | |||
643 | list_for_each(pos, &asoc->peer.transport_addr_list) { | ||
644 | t = list_entry(pos, struct sctp_transport, transports); | ||
645 | |||
646 | if (!t->active) | ||
647 | continue; | ||
648 | if (!first || t->last_time_heard > first->last_time_heard) { | ||
649 | second = first; | ||
650 | first = t; | ||
651 | } | ||
652 | if (!second || t->last_time_heard > second->last_time_heard) | ||
653 | second = t; | ||
654 | } | ||
655 | |||
656 | /* RFC 2960 6.4 Multi-Homed SCTP Endpoints | ||
657 | * | ||
658 | * By default, an endpoint should always transmit to the | ||
659 | * primary path, unless the SCTP user explicitly specifies the | ||
660 | * destination transport address (and possibly source | ||
661 | * transport address) to use. | ||
662 | * | ||
663 | * [If the primary is active but not most recent, bump the most | ||
664 | * recently used transport.] | ||
665 | */ | ||
666 | if (asoc->peer.primary_path->active && | ||
667 | first != asoc->peer.primary_path) { | ||
668 | second = first; | ||
669 | first = asoc->peer.primary_path; | ||
670 | } | ||
671 | |||
672 | /* If we failed to find a usable transport, just camp on the | ||
673 | * primary, even if it is inactive. | ||
674 | */ | ||
675 | if (!first) { | ||
676 | first = asoc->peer.primary_path; | ||
677 | second = asoc->peer.primary_path; | ||
678 | } | ||
679 | |||
680 | /* Set the active and retran transports. */ | ||
681 | asoc->peer.active_path = first; | ||
682 | asoc->peer.retran_path = second; | ||
683 | } | ||
684 | |||
685 | /* Hold a reference to an association. */ | ||
686 | void sctp_association_hold(struct sctp_association *asoc) | ||
687 | { | ||
688 | atomic_inc(&asoc->base.refcnt); | ||
689 | } | ||
690 | |||
691 | /* Release a reference to an association and cleanup | ||
692 | * if there are no more references. | ||
693 | */ | ||
694 | void sctp_association_put(struct sctp_association *asoc) | ||
695 | { | ||
696 | if (atomic_dec_and_test(&asoc->base.refcnt)) | ||
697 | sctp_association_destroy(asoc); | ||
698 | } | ||
699 | |||
700 | /* Allocate the next TSN, Transmission Sequence Number, for the given | ||
701 | * association. | ||
702 | */ | ||
703 | __u32 sctp_association_get_next_tsn(struct sctp_association *asoc) | ||
704 | { | ||
705 | /* From Section 1.6 Serial Number Arithmetic: | ||
706 | * Transmission Sequence Numbers wrap around when they reach | ||
707 | * 2**32 - 1. That is, the next TSN a DATA chunk MUST use | ||
708 | * after transmitting TSN = 2**32 - 1 is TSN = 0. | ||
709 | */ | ||
710 | __u32 retval = asoc->next_tsn; | ||
711 | asoc->next_tsn++; | ||
712 | asoc->unack_data++; | ||
713 | |||
714 | return retval; | ||
715 | } | ||
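sctp_association_get_next_tsn() relies on the serial-number arithmetic cited in its comment (RFC 2960 Section 1.6): TSNs live in an unsigned 32-bit space, so the value after 2**32 - 1 is 0, which the post-increment gives for free. A minimal demonstration:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t next_tsn  = 0xFFFFFFFFu;   /* 2**32 - 1 */
            uint32_t allocated = next_tsn++;    /* hand out the last TSN */

            /* The counter wraps, so the following DATA chunk gets TSN 0. */
            printf("allocated 0x%08x, next 0x%08x\n", allocated, next_tsn);
            return 0;
    }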
716 | |||
717 | /* Compare two addresses to see if they match. Wildcard addresses | ||
718 | * only match themselves. | ||
719 | */ | ||
720 | int sctp_cmp_addr_exact(const union sctp_addr *ss1, | ||
721 | const union sctp_addr *ss2) | ||
722 | { | ||
723 | struct sctp_af *af; | ||
724 | |||
725 | af = sctp_get_af_specific(ss1->sa.sa_family); | ||
726 | if (unlikely(!af)) | ||
727 | return 0; | ||
728 | |||
729 | return af->cmp_addr(ss1, ss2); | ||
730 | } | ||
731 | |||
732 | /* Return an ecne chunk to get prepended to a packet. | ||
733 | * Note: We are sly and return a shared, prealloced chunk. FIXME: | ||
734 | * No we don't, but we could/should. | ||
735 | */ | ||
736 | struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc) | ||
737 | { | ||
738 | struct sctp_chunk *chunk; | ||
739 | |||
740 | /* Send ECNE if needed. | ||
741 | * Not being able to allocate a chunk here is not deadly. | ||
742 | */ | ||
743 | if (asoc->need_ecne) | ||
744 | chunk = sctp_make_ecne(asoc, asoc->last_ecne_tsn); | ||
745 | else | ||
746 | chunk = NULL; | ||
747 | |||
748 | return chunk; | ||
749 | } | ||
750 | |||
751 | /* | ||
752 | * Find which transport this TSN was sent on. | ||
753 | */ | ||
754 | struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc, | ||
755 | __u32 tsn) | ||
756 | { | ||
757 | struct sctp_transport *active; | ||
758 | struct sctp_transport *match; | ||
759 | struct list_head *entry, *pos; | ||
760 | struct sctp_transport *transport; | ||
761 | struct sctp_chunk *chunk; | ||
762 | __u32 key = htonl(tsn); | ||
763 | |||
764 | match = NULL; | ||
765 | |||
766 | /* | ||
767 | * FIXME: In general, find a more efficient data structure for | ||
768 | * searching. | ||
769 | */ | ||
770 | |||
771 | /* | ||
772 | * The general strategy is to search each transport's transmitted | ||
773 | * list. Return which transport this TSN lives on. | ||
774 | * | ||
775 | * Let's be hopeful and check the active_path first. | ||
776 | * Another optimization would be to know if there is only one | ||
777 | * outbound path and not have to look for the TSN at all. | ||
778 | * | ||
779 | */ | ||
780 | |||
781 | active = asoc->peer.active_path; | ||
782 | |||
783 | list_for_each(entry, &active->transmitted) { | ||
784 | chunk = list_entry(entry, struct sctp_chunk, transmitted_list); | ||
785 | |||
786 | if (key == chunk->subh.data_hdr->tsn) { | ||
787 | match = active; | ||
788 | goto out; | ||
789 | } | ||
790 | } | ||
791 | |||
792 | /* If not found, go search all the other transports. */ | ||
793 | list_for_each(pos, &asoc->peer.transport_addr_list) { | ||
794 | transport = list_entry(pos, struct sctp_transport, transports); | ||
795 | |||
796 | if (transport == active) | ||
797 | break; | ||
798 | list_for_each(entry, &transport->transmitted) { | ||
799 | chunk = list_entry(entry, struct sctp_chunk, | ||
800 | transmitted_list); | ||
801 | if (key == chunk->subh.data_hdr->tsn) { | ||
802 | match = transport; | ||
803 | goto out; | ||
804 | } | ||
805 | } | ||
806 | } | ||
807 | out: | ||
808 | return match; | ||
809 | } | ||
810 | |||
811 | /* Is this the association we are looking for? */ | ||
812 | struct sctp_transport *sctp_assoc_is_match(struct sctp_association *asoc, | ||
813 | const union sctp_addr *laddr, | ||
814 | const union sctp_addr *paddr) | ||
815 | { | ||
816 | struct sctp_transport *transport; | ||
817 | |||
818 | sctp_read_lock(&asoc->base.addr_lock); | ||
819 | |||
820 | if ((asoc->base.bind_addr.port == laddr->v4.sin_port) && | ||
821 | (asoc->peer.port == paddr->v4.sin_port)) { | ||
822 | transport = sctp_assoc_lookup_paddr(asoc, paddr); | ||
823 | if (!transport) | ||
824 | goto out; | ||
825 | |||
826 | if (sctp_bind_addr_match(&asoc->base.bind_addr, laddr, | ||
827 | sctp_sk(asoc->base.sk))) | ||
828 | goto out; | ||
829 | } | ||
830 | transport = NULL; | ||
831 | |||
832 | out: | ||
833 | sctp_read_unlock(&asoc->base.addr_lock); | ||
834 | return transport; | ||
835 | } | ||
836 | |||
837 | /* Do delayed input processing. This is scheduled by sctp_rcv(). */ | ||
838 | static void sctp_assoc_bh_rcv(struct sctp_association *asoc) | ||
839 | { | ||
840 | struct sctp_endpoint *ep; | ||
841 | struct sctp_chunk *chunk; | ||
842 | struct sock *sk; | ||
843 | struct sctp_inq *inqueue; | ||
844 | int state; | ||
845 | sctp_subtype_t subtype; | ||
846 | int error = 0; | ||
847 | |||
848 | /* The association should be held so we should be safe. */ | ||
849 | ep = asoc->ep; | ||
850 | sk = asoc->base.sk; | ||
851 | |||
852 | inqueue = &asoc->base.inqueue; | ||
853 | sctp_association_hold(asoc); | ||
854 | while (NULL != (chunk = sctp_inq_pop(inqueue))) { | ||
855 | state = asoc->state; | ||
856 | subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type); | ||
857 | |||
858 | /* Remember where the last DATA chunk came from so we | ||
859 | * know where to send the SACK. | ||
860 | */ | ||
861 | if (sctp_chunk_is_data(chunk)) | ||
862 | asoc->peer.last_data_from = chunk->transport; | ||
863 | else | ||
864 | SCTP_INC_STATS(SCTP_MIB_INCTRLCHUNKS); | ||
865 | |||
866 | if (chunk->transport) | ||
867 | chunk->transport->last_time_heard = jiffies; | ||
868 | |||
869 | /* Run through the state machine. */ | ||
870 | error = sctp_do_sm(SCTP_EVENT_T_CHUNK, subtype, | ||
871 | state, ep, asoc, chunk, GFP_ATOMIC); | ||
872 | |||
873 | /* Check to see if the association is freed in response to | ||
874 | * the incoming chunk. If so, get out of the while loop. | ||
875 | */ | ||
876 | if (asoc->base.dead) | ||
877 | break; | ||
878 | |||
879 | /* If there is an error on chunk, discard this packet. */ | ||
880 | if (error && chunk) | ||
881 | chunk->pdiscard = 1; | ||
882 | } | ||
883 | sctp_association_put(asoc); | ||
884 | } | ||
885 | |||
886 | /* This routine moves an association from its old sk to a new sk. */ | ||
887 | void sctp_assoc_migrate(struct sctp_association *assoc, struct sock *newsk) | ||
888 | { | ||
889 | struct sctp_sock *newsp = sctp_sk(newsk); | ||
890 | struct sock *oldsk = assoc->base.sk; | ||
891 | |||
892 | /* Delete the association from the old endpoint's list of | ||
893 | * associations. | ||
894 | */ | ||
895 | list_del_init(&assoc->asocs); | ||
896 | |||
897 | /* Decrement the backlog value for a TCP-style socket. */ | ||
898 | if (sctp_style(oldsk, TCP)) | ||
899 | oldsk->sk_ack_backlog--; | ||
900 | |||
901 | /* Release references to the old endpoint and the sock. */ | ||
902 | sctp_endpoint_put(assoc->ep); | ||
903 | sock_put(assoc->base.sk); | ||
904 | |||
905 | /* Get a reference to the new endpoint. */ | ||
906 | assoc->ep = newsp->ep; | ||
907 | sctp_endpoint_hold(assoc->ep); | ||
908 | |||
909 | /* Get a reference to the new sock. */ | ||
910 | assoc->base.sk = newsk; | ||
911 | sock_hold(assoc->base.sk); | ||
912 | |||
913 | /* Add the association to the new endpoint's list of associations. */ | ||
914 | sctp_endpoint_add_asoc(newsp->ep, assoc); | ||
915 | } | ||
916 | |||
917 | /* Update an association (possibly from unexpected COOKIE-ECHO processing). */ | ||
918 | void sctp_assoc_update(struct sctp_association *asoc, | ||
919 | struct sctp_association *new) | ||
920 | { | ||
921 | struct sctp_transport *trans; | ||
922 | struct list_head *pos, *temp; | ||
923 | |||
924 | /* Copy in new parameters of peer. */ | ||
925 | asoc->c = new->c; | ||
926 | asoc->peer.rwnd = new->peer.rwnd; | ||
927 | asoc->peer.sack_needed = new->peer.sack_needed; | ||
928 | asoc->peer.i = new->peer.i; | ||
929 | sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_SIZE, | ||
930 | asoc->peer.i.initial_tsn); | ||
931 | |||
932 | /* Remove any peer addresses not present in the new association. */ | ||
933 | list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { | ||
934 | trans = list_entry(pos, struct sctp_transport, transports); | ||
935 | if (!sctp_assoc_lookup_paddr(new, &trans->ipaddr)) | ||
936 | sctp_assoc_del_peer(asoc, &trans->ipaddr); | ||
937 | } | ||
938 | |||
939 | /* If the case is A (association restart), use | ||
940 | * initial_tsn as next_tsn. If the case is B, use | ||
941 | * current next_tsn in case data sent to peer | ||
942 | * has been discarded and needs retransmission. | ||
943 | */ | ||
944 | if (asoc->state >= SCTP_STATE_ESTABLISHED) { | ||
945 | asoc->next_tsn = new->next_tsn; | ||
946 | asoc->ctsn_ack_point = new->ctsn_ack_point; | ||
947 | asoc->adv_peer_ack_point = new->adv_peer_ack_point; | ||
948 | |||
949 | /* Reinitialize SSN for both local streams | ||
950 | * and peer's streams. | ||
951 | */ | ||
952 | sctp_ssnmap_clear(asoc->ssnmap); | ||
953 | |||
954 | } else { | ||
955 | /* Add any peer addresses from the new association. */ | ||
956 | list_for_each(pos, &new->peer.transport_addr_list) { | ||
957 | trans = list_entry(pos, struct sctp_transport, | ||
958 | transports); | ||
959 | if (!sctp_assoc_lookup_paddr(asoc, &trans->ipaddr)) | ||
960 | sctp_assoc_add_peer(asoc, &trans->ipaddr, | ||
961 | GFP_ATOMIC); | ||
962 | } | ||
963 | |||
964 | asoc->ctsn_ack_point = asoc->next_tsn - 1; | ||
965 | asoc->adv_peer_ack_point = asoc->ctsn_ack_point; | ||
966 | if (!asoc->ssnmap) { | ||
967 | /* Move the ssnmap. */ | ||
968 | asoc->ssnmap = new->ssnmap; | ||
969 | new->ssnmap = NULL; | ||
970 | } | ||
971 | } | ||
972 | } | ||
973 | |||
974 | /* Update the retran path for sending a retransmitted packet. | ||
975 | * Round-robin through the active transports, else round-robin | ||
976 | * through the inactive transports as this is the next best thing | ||
977 | * we can try. | ||
978 | */ | ||
979 | void sctp_assoc_update_retran_path(struct sctp_association *asoc) | ||
980 | { | ||
981 | struct sctp_transport *t, *next; | ||
982 | struct list_head *head = &asoc->peer.transport_addr_list; | ||
983 | struct list_head *pos; | ||
984 | |||
985 | /* Find the next transport in a round-robin fashion. */ | ||
986 | t = asoc->peer.retran_path; | ||
987 | pos = &t->transports; | ||
988 | next = NULL; | ||
989 | |||
990 | while (1) { | ||
991 | /* Skip the head. */ | ||
992 | if (pos->next == head) | ||
993 | pos = head->next; | ||
994 | else | ||
995 | pos = pos->next; | ||
996 | |||
997 | t = list_entry(pos, struct sctp_transport, transports); | ||
998 | |||
999 | /* Try to find an active transport. */ | ||
1000 | |||
1001 | if (t->active) { | ||
1002 | break; | ||
1003 | } else { | ||
1004 | /* Keep track of the next transport in case | ||
1005 | * we don't find any active transport. | ||
1006 | */ | ||
1007 | if (!next) | ||
1008 | next = t; | ||
1009 | } | ||
1010 | |||
1011 | /* We have exhausted the list, but didn't find any | ||
1012 | * other active transports. If so, use the next | ||
1013 | * transport. | ||
1014 | */ | ||
1015 | if (t == asoc->peer.retran_path) { | ||
1016 | t = next; | ||
1017 | break; | ||
1018 | } | ||
1019 | } | ||
1020 | |||
1021 | asoc->peer.retran_path = t; | ||
1022 | } | ||
1023 | |||
1024 | /* Choose the transport for sending a SHUTDOWN packet. */ | ||
1025 | struct sctp_transport *sctp_assoc_choose_shutdown_transport( | ||
1026 | struct sctp_association *asoc) | ||
1027 | { | ||
1028 | /* If this is the first time SHUTDOWN is sent, use the active path, | ||
1029 | * else use the retran path. If the last SHUTDOWN was sent over the | ||
1030 | * retran path, update the retran path and use it. | ||
1031 | */ | ||
1032 | if (!asoc->shutdown_last_sent_to) | ||
1033 | return asoc->peer.active_path; | ||
1034 | else { | ||
1035 | if (asoc->shutdown_last_sent_to == asoc->peer.retran_path) | ||
1036 | sctp_assoc_update_retran_path(asoc); | ||
1037 | return asoc->peer.retran_path; | ||
1038 | } | ||
1039 | |||
1040 | } | ||
1041 | |||
1042 | /* Update the association's pmtu and frag_point by going through all the | ||
1043 | * transports. This routine is called when a transport's PMTU has changed. | ||
1044 | */ | ||
1045 | void sctp_assoc_sync_pmtu(struct sctp_association *asoc) | ||
1046 | { | ||
1047 | struct sctp_transport *t; | ||
1048 | struct list_head *pos; | ||
1049 | __u32 pmtu = 0; | ||
1050 | |||
1051 | if (!asoc) | ||
1052 | return; | ||
1053 | |||
1054 | /* Get the lowest pmtu of all the transports. */ | ||
1055 | list_for_each(pos, &asoc->peer.transport_addr_list) { | ||
1056 | t = list_entry(pos, struct sctp_transport, transports); | ||
1057 | if (!pmtu || (t->pmtu < pmtu)) | ||
1058 | pmtu = t->pmtu; | ||
1059 | } | ||
1060 | |||
1061 | if (pmtu) { | ||
1062 | struct sctp_sock *sp = sctp_sk(asoc->base.sk); | ||
1063 | asoc->pmtu = pmtu; | ||
1064 | asoc->frag_point = sctp_frag_point(sp, pmtu); | ||
1065 | } | ||
1066 | |||
1067 | SCTP_DEBUG_PRINTK("%s: asoc:%p, pmtu:%d, frag_point:%d\n", | ||
1068 | __FUNCTION__, asoc, asoc->pmtu, asoc->frag_point); | ||
1069 | } | ||
1070 | |||
1071 | /* Should we send a SACK to update our peer? */ | ||
1072 | static inline int sctp_peer_needs_update(struct sctp_association *asoc) | ||
1073 | { | ||
1074 | switch (asoc->state) { | ||
1075 | case SCTP_STATE_ESTABLISHED: | ||
1076 | case SCTP_STATE_SHUTDOWN_PENDING: | ||
1077 | case SCTP_STATE_SHUTDOWN_RECEIVED: | ||
1078 | case SCTP_STATE_SHUTDOWN_SENT: | ||
1079 | if ((asoc->rwnd > asoc->a_rwnd) && | ||
1080 | ((asoc->rwnd - asoc->a_rwnd) >= | ||
1081 | min_t(__u32, (asoc->base.sk->sk_rcvbuf >> 1), asoc->pmtu))) | ||
1082 | return 1; | ||
1083 | break; | ||
1084 | default: | ||
1085 | break; | ||
1086 | } | ||
1087 | return 0; | ||
1088 | } | ||
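sctp_peer_needs_update() implements the trigger used below in sctp_assoc_rwnd_increase(): a window-update SACK is requested only once rwnd has grown past the last advertised a_rwnd by at least min(rcvbuf/2, PMTU), the silly-window-avoidance idea of RFC 1122 Section 4.2.3.3. The same predicate in isolation, with one qualifying and one non-qualifying growth (names are illustrative):

    #include <stdio.h>

    /* Report a window update only when rwnd has outgrown the last advertised
     * value (a_rwnd) by at least min(rcvbuf / 2, pmtu).
     */
    static int window_update_due(unsigned int rwnd, unsigned int a_rwnd,
                                 unsigned int rcvbuf, unsigned int pmtu)
    {
            unsigned int threshold = (rcvbuf >> 1) < pmtu ? (rcvbuf >> 1) : pmtu;

            return rwnd > a_rwnd && (rwnd - a_rwnd) >= threshold;
    }

    int main(void)
    {
            /* rcvbuf 65536 and pmtu 1500 give a threshold of 1500 bytes. */
            printf("%d\n", window_update_due(34000, 32000, 65536, 1500));  /* 1 */
            printf("%d\n", window_update_due(32500, 32000, 65536, 1500));  /* 0 */
            return 0;
    }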
1089 | |||
1090 | /* Increase asoc's rwnd by len and send any window update SACK if needed. */ | ||
1091 | void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned len) | ||
1092 | { | ||
1093 | struct sctp_chunk *sack; | ||
1094 | struct timer_list *timer; | ||
1095 | |||
1096 | if (asoc->rwnd_over) { | ||
1097 | if (asoc->rwnd_over >= len) { | ||
1098 | asoc->rwnd_over -= len; | ||
1099 | } else { | ||
1100 | asoc->rwnd += (len - asoc->rwnd_over); | ||
1101 | asoc->rwnd_over = 0; | ||
1102 | } | ||
1103 | } else { | ||
1104 | asoc->rwnd += len; | ||
1105 | } | ||
1106 | |||
1107 | SCTP_DEBUG_PRINTK("%s: asoc %p rwnd increased by %d to (%u, %u) " | ||
1108 | "- %u\n", __FUNCTION__, asoc, len, asoc->rwnd, | ||
1109 | asoc->rwnd_over, asoc->a_rwnd); | ||
1110 | |||
1111 | /* Send a window update SACK if the rwnd has increased by at least the | ||
1112 | * minimum of the association's PMTU and half of the receive buffer. | ||
1113 | * The algorithm used is similar to the one described in | ||
1114 | * Section 4.2.3.3 of RFC 1122. | ||
1115 | */ | ||
1116 | if (sctp_peer_needs_update(asoc)) { | ||
1117 | asoc->a_rwnd = asoc->rwnd; | ||
1118 | SCTP_DEBUG_PRINTK("%s: Sending window update SACK- asoc: %p " | ||
1119 | "rwnd: %u a_rwnd: %u\n", __FUNCTION__, | ||
1120 | asoc, asoc->rwnd, asoc->a_rwnd); | ||
1121 | sack = sctp_make_sack(asoc); | ||
1122 | if (!sack) | ||
1123 | return; | ||
1124 | |||
1125 | asoc->peer.sack_needed = 0; | ||
1126 | |||
1127 | sctp_outq_tail(&asoc->outqueue, sack); | ||
1128 | |||
1129 | /* Stop the SACK timer. */ | ||
1130 | timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK]; | ||
1131 | if (timer_pending(timer) && del_timer(timer)) | ||
1132 | sctp_association_put(asoc); | ||
1133 | } | ||
1134 | } | ||
1135 | |||
1136 | /* Decrease asoc's rwnd by len. */ | ||
1137 | void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len) | ||
1138 | { | ||
1139 | SCTP_ASSERT(asoc->rwnd, "rwnd zero", return); | ||
1140 | SCTP_ASSERT(!asoc->rwnd_over, "rwnd_over not zero", return); | ||
1141 | if (asoc->rwnd >= len) { | ||
1142 | asoc->rwnd -= len; | ||
1143 | } else { | ||
1144 | asoc->rwnd_over = len - asoc->rwnd; | ||
1145 | asoc->rwnd = 0; | ||
1146 | } | ||
1147 | SCTP_DEBUG_PRINTK("%s: asoc %p rwnd decreased by %d to (%u, %u)\n", | ||
1148 | __FUNCTION__, asoc, len, asoc->rwnd, | ||
1149 | asoc->rwnd_over); | ||
1150 | } | ||
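Read together, sctp_assoc_rwnd_increase() and sctp_assoc_rwnd_decrease() keep one invariant: when data overruns the window, the excess is parked in rwnd_over while rwnd sits at 0, and later increases repay rwnd_over before rwnd grows again. A short worked trace of that bookkeeping on plain variables (not the kernel structures):

    #include <stdio.h>

    int main(void)
    {
            unsigned int rwnd = 1000, rwnd_over = 0, len;

            /* Decrease by 1500: only 1000 bytes of window exist, 500 overflow. */
            len = 1500;
            if (rwnd >= len) {
                    rwnd -= len;
            } else {
                    rwnd_over = len - rwnd;     /* rwnd_over = 500 */
                    rwnd = 0;
            }

            /* Increase by 800: repay the 500 of overflow, then grow rwnd by 300. */
            len = 800;
            if (rwnd_over >= len) {
                    rwnd_over -= len;
            } else {
                    rwnd += len - rwnd_over;    /* rwnd = 300 */
                    rwnd_over = 0;
            }

            printf("rwnd=%u rwnd_over=%u\n", rwnd, rwnd_over);  /* rwnd=300 rwnd_over=0 */
            return 0;
    }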
1151 | |||
1152 | /* Build the bind address list for the association based on info from the | ||
1153 | * local endpoint and the remote peer. | ||
1154 | */ | ||
1155 | int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc, int gfp) | ||
1156 | { | ||
1157 | sctp_scope_t scope; | ||
1158 | int flags; | ||
1159 | |||
1160 | /* Use scoping rules to determine the subset of addresses from | ||
1161 | * the endpoint. | ||
1162 | */ | ||
1163 | scope = sctp_scope(&asoc->peer.active_path->ipaddr); | ||
1164 | flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0; | ||
1165 | if (asoc->peer.ipv4_address) | ||
1166 | flags |= SCTP_ADDR4_PEERSUPP; | ||
1167 | if (asoc->peer.ipv6_address) | ||
1168 | flags |= SCTP_ADDR6_PEERSUPP; | ||
1169 | |||
1170 | return sctp_bind_addr_copy(&asoc->base.bind_addr, | ||
1171 | &asoc->ep->base.bind_addr, | ||
1172 | scope, gfp, flags); | ||
1173 | } | ||
1174 | |||
1175 | /* Build the association's bind address list from the cookie. */ | ||
1176 | int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *asoc, | ||
1177 | struct sctp_cookie *cookie, int gfp) | ||
1178 | { | ||
1179 | int var_size2 = ntohs(cookie->peer_init->chunk_hdr.length); | ||
1180 | int var_size3 = cookie->raw_addr_list_len; | ||
1181 | __u8 *raw = (__u8 *)cookie->peer_init + var_size2; | ||
1182 | |||
1183 | return sctp_raw_to_bind_addrs(&asoc->base.bind_addr, raw, var_size3, | ||
1184 | asoc->ep->base.bind_addr.port, gfp); | ||
1185 | } | ||
1186 | |||
1187 | /* Lookup laddr in the bind address list of an association. */ | ||
1188 | int sctp_assoc_lookup_laddr(struct sctp_association *asoc, | ||
1189 | const union sctp_addr *laddr) | ||
1190 | { | ||
1191 | int found; | ||
1192 | |||
1193 | sctp_read_lock(&asoc->base.addr_lock); | ||
1194 | if ((asoc->base.bind_addr.port == ntohs(laddr->v4.sin_port)) && | ||
1195 | sctp_bind_addr_match(&asoc->base.bind_addr, laddr, | ||
1196 | sctp_sk(asoc->base.sk))) { | ||
1197 | found = 1; | ||
1198 | goto out; | ||
1199 | } | ||
1200 | |||
1201 | found = 0; | ||
1202 | out: | ||
1203 | sctp_read_unlock(&asoc->base.addr_lock); | ||
1204 | return found; | ||
1205 | } | ||
diff --git a/net/sctp/bind_addr.c b/net/sctp/bind_addr.c
new file mode 100644
index 000000000000..f90eadfb60a2
--- /dev/null
+++ b/net/sctp/bind_addr.c
@@ -0,0 +1,417 @@
1 | /* SCTP kernel reference Implementation | ||
2 | * (C) Copyright IBM Corp. 2001, 2003 | ||
3 | * Copyright (c) Cisco 1999,2000 | ||
4 | * Copyright (c) Motorola 1999,2000,2001 | ||
5 | * Copyright (c) La Monte H.P. Yarroll 2001 | ||
6 | * | ||
7 | * This file is part of the SCTP kernel reference implementation. | ||
8 | * | ||
9 | * A collection class to handle the storage of transport addresses. | ||
10 | * | ||
11 | * The SCTP reference implementation is free software; | ||
12 | * you can redistribute it and/or modify it under the terms of | ||
13 | * the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2, or (at your option) | ||
15 | * any later version. | ||
16 | * | ||
17 | * The SCTP reference implementation is distributed in the hope that it | ||
18 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
19 | * ************************ | ||
20 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
21 | * See the GNU General Public License for more details. | ||
22 | * | ||
23 | * You should have received a copy of the GNU General Public License | ||
24 | * along with GNU CC; see the file COPYING. If not, write to | ||
25 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
26 | * Boston, MA 02111-1307, USA. | ||
27 | * | ||
28 | * Please send any bug reports or fixes you make to the | ||
29 | * email address(es): | ||
30 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
31 | * | ||
32 | * Or submit a bug report through the following website: | ||
33 | * http://www.sf.net/projects/lksctp | ||
34 | * | ||
35 | * Written or modified by: | ||
36 | * La Monte H.P. Yarroll <piggy@acm.org> | ||
37 | * Karl Knutson <karl@athena.chicago.il.us> | ||
38 | * Jon Grimm <jgrimm@us.ibm.com> | ||
39 | * Daisy Chang <daisyc@us.ibm.com> | ||
40 | * | ||
41 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
42 | * be incorporated into the next SCTP release. | ||
43 | */ | ||
44 | |||
45 | #include <linux/types.h> | ||
46 | #include <linux/sched.h> | ||
47 | #include <linux/in.h> | ||
48 | #include <net/sock.h> | ||
49 | #include <net/ipv6.h> | ||
50 | #include <net/if_inet6.h> | ||
51 | #include <net/sctp/sctp.h> | ||
52 | #include <net/sctp/sm.h> | ||
53 | |||
54 | /* Forward declarations for internal helpers. */ | ||
55 | static int sctp_copy_one_addr(struct sctp_bind_addr *, union sctp_addr *, | ||
56 | sctp_scope_t scope, int gfp, int flags); | ||
57 | static void sctp_bind_addr_clean(struct sctp_bind_addr *); | ||
58 | |||
59 | /* First Level Abstractions. */ | ||
60 | |||
61 | /* Copy 'src' to 'dest' taking 'scope' into account. Omit addresses | ||
62 | * in 'src' which have a broader scope than 'scope'. | ||
63 | */ | ||
64 | int sctp_bind_addr_copy(struct sctp_bind_addr *dest, | ||
65 | const struct sctp_bind_addr *src, | ||
66 | sctp_scope_t scope, int gfp, int flags) | ||
67 | { | ||
68 | struct sctp_sockaddr_entry *addr; | ||
69 | struct list_head *pos; | ||
70 | int error = 0; | ||
71 | |||
72 | /* All addresses share the same port. */ | ||
73 | dest->port = src->port; | ||
74 | |||
75 | /* Extract the addresses which are relevant for this scope. */ | ||
76 | list_for_each(pos, &src->address_list) { | ||
77 | addr = list_entry(pos, struct sctp_sockaddr_entry, list); | ||
78 | error = sctp_copy_one_addr(dest, &addr->a, scope, | ||
79 | gfp, flags); | ||
80 | if (error < 0) | ||
81 | goto out; | ||
82 | } | ||
83 | |||
84 | /* If there are no addresses matching the scope and | ||
85 | * this is global scope, try to get a link scope address, with | ||
86 | * the assumption that we must be sitting behind a NAT. | ||
87 | */ | ||
88 | if (list_empty(&dest->address_list) && (SCTP_SCOPE_GLOBAL == scope)) { | ||
89 | list_for_each(pos, &src->address_list) { | ||
90 | addr = list_entry(pos, struct sctp_sockaddr_entry, | ||
91 | list); | ||
92 | error = sctp_copy_one_addr(dest, &addr->a, | ||
93 | SCTP_SCOPE_LINK, gfp, | ||
94 | flags); | ||
95 | if (error < 0) | ||
96 | goto out; | ||
97 | } | ||
98 | } | ||
99 | |||
100 | out: | ||
101 | if (error) | ||
102 | sctp_bind_addr_clean(dest); | ||
103 | |||
104 | return error; | ||
105 | } | ||
106 | |||
107 | /* Initialize the SCTP_bind_addr structure for either an endpoint or | ||
108 | * an association. | ||
109 | */ | ||
110 | void sctp_bind_addr_init(struct sctp_bind_addr *bp, __u16 port) | ||
111 | { | ||
112 | bp->malloced = 0; | ||
113 | |||
114 | INIT_LIST_HEAD(&bp->address_list); | ||
115 | bp->port = port; | ||
116 | } | ||
117 | |||
118 | /* Dispose of the address list. */ | ||
119 | static void sctp_bind_addr_clean(struct sctp_bind_addr *bp) | ||
120 | { | ||
121 | struct sctp_sockaddr_entry *addr; | ||
122 | struct list_head *pos, *temp; | ||
123 | |||
124 | /* Empty the bind address list. */ | ||
125 | list_for_each_safe(pos, temp, &bp->address_list) { | ||
126 | addr = list_entry(pos, struct sctp_sockaddr_entry, list); | ||
127 | list_del(pos); | ||
128 | kfree(addr); | ||
129 | SCTP_DBG_OBJCNT_DEC(addr); | ||
130 | } | ||
131 | } | ||
132 | |||
133 | /* Dispose of an SCTP_bind_addr structure */ | ||
134 | void sctp_bind_addr_free(struct sctp_bind_addr *bp) | ||
135 | { | ||
136 | /* Empty the bind address list. */ | ||
137 | sctp_bind_addr_clean(bp); | ||
138 | |||
139 | if (bp->malloced) { | ||
140 | kfree(bp); | ||
141 | SCTP_DBG_OBJCNT_DEC(bind_addr); | ||
142 | } | ||
143 | } | ||
144 | |||
145 | /* Add an address to the bind address list in the SCTP_bind_addr structure. */ | ||
146 | int sctp_add_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *new, | ||
147 | int gfp) | ||
148 | { | ||
149 | struct sctp_sockaddr_entry *addr; | ||
150 | |||
151 | /* Add the address to the bind address list. */ | ||
152 | addr = t_new(struct sctp_sockaddr_entry, gfp); | ||
153 | if (!addr) | ||
154 | return -ENOMEM; | ||
155 | |||
156 | memcpy(&addr->a, new, sizeof(*new)); | ||
157 | |||
158 | /* Fix up the port if it has not yet been set. | ||
159 | * Both v4 and v6 have the port at the same offset. | ||
160 | */ | ||
161 | if (!addr->a.v4.sin_port) | ||
162 | addr->a.v4.sin_port = bp->port; | ||
163 | |||
164 | INIT_LIST_HEAD(&addr->list); | ||
165 | list_add_tail(&addr->list, &bp->address_list); | ||
166 | SCTP_DBG_OBJCNT_INC(addr); | ||
167 | |||
168 | return 0; | ||
169 | } | ||
170 | |||
171 | /* Delete an address from the bind address list in the SCTP_bind_addr | ||
172 | * structure. | ||
173 | */ | ||
174 | int sctp_del_bind_addr(struct sctp_bind_addr *bp, union sctp_addr *del_addr) | ||
175 | { | ||
176 | struct list_head *pos, *temp; | ||
177 | struct sctp_sockaddr_entry *addr; | ||
178 | |||
179 | list_for_each_safe(pos, temp, &bp->address_list) { | ||
180 | addr = list_entry(pos, struct sctp_sockaddr_entry, list); | ||
181 | if (sctp_cmp_addr_exact(&addr->a, del_addr)) { | ||
182 | /* Found the exact match. */ | ||
183 | list_del(pos); | ||
184 | kfree(addr); | ||
185 | SCTP_DBG_OBJCNT_DEC(addr); | ||
186 | |||
187 | return 0; | ||
188 | } | ||
189 | } | ||
190 | |||
191 | return -EINVAL; | ||
192 | } | ||
193 | |||
194 | /* Create a network byte-order representation of all the addresses | ||
195 | * formatted as SCTP parameters. | ||
196 | * | ||
197 | * The second argument is the return value for the length. | ||
198 | */ | ||
199 | union sctp_params sctp_bind_addrs_to_raw(const struct sctp_bind_addr *bp, | ||
200 | int *addrs_len, int gfp) | ||
201 | { | ||
202 | union sctp_params addrparms; | ||
203 | union sctp_params retval; | ||
204 | int addrparms_len; | ||
205 | union sctp_addr_param rawaddr; | ||
206 | int len; | ||
207 | struct sctp_sockaddr_entry *addr; | ||
208 | struct list_head *pos; | ||
209 | struct sctp_af *af; | ||
210 | |||
211 | addrparms_len = 0; | ||
212 | len = 0; | ||
213 | |||
214 | /* Allocate enough memory at once. */ | ||
215 | list_for_each(pos, &bp->address_list) { | ||
216 | len += sizeof(union sctp_addr_param); | ||
217 | } | ||
218 | |||
219 | /* Don't even bother embedding an address if there | ||
220 | * is only one. | ||
221 | */ | ||
222 | if (len == sizeof(union sctp_addr_param)) { | ||
223 | retval.v = NULL; | ||
224 | goto end_raw; | ||
225 | } | ||
226 | |||
227 | retval.v = kmalloc(len, gfp); | ||
228 | if (!retval.v) | ||
229 | goto end_raw; | ||
230 | |||
231 | addrparms = retval; | ||
232 | |||
233 | list_for_each(pos, &bp->address_list) { | ||
234 | addr = list_entry(pos, struct sctp_sockaddr_entry, list); | ||
235 | af = sctp_get_af_specific(addr->a.v4.sin_family); | ||
236 | len = af->to_addr_param(&addr->a, &rawaddr); | ||
237 | memcpy(addrparms.v, &rawaddr, len); | ||
238 | addrparms.v += len; | ||
239 | addrparms_len += len; | ||
240 | } | ||
241 | |||
242 | end_raw: | ||
243 | *addrs_len = addrparms_len; | ||
244 | return retval; | ||
245 | } | ||
246 | |||
247 | /* | ||
248 | * Create an address list out of the raw address list format (IPv4 and IPv6 | ||
249 | * address parameters). | ||
250 | */ | ||
251 | int sctp_raw_to_bind_addrs(struct sctp_bind_addr *bp, __u8 *raw_addr_list, | ||
252 | int addrs_len, __u16 port, int gfp) | ||
253 | { | ||
254 | union sctp_addr_param *rawaddr; | ||
255 | struct sctp_paramhdr *param; | ||
256 | union sctp_addr addr; | ||
257 | int retval = 0; | ||
258 | int len; | ||
259 | struct sctp_af *af; | ||
260 | |||
261 | /* Convert the raw address to standard address format */ | ||
262 | while (addrs_len) { | ||
263 | param = (struct sctp_paramhdr *)raw_addr_list; | ||
264 | rawaddr = (union sctp_addr_param *)raw_addr_list; | ||
265 | |||
266 | af = sctp_get_af_specific(param_type2af(param->type)); | ||
267 | if (unlikely(!af)) { | ||
268 | retval = -EINVAL; | ||
269 | sctp_bind_addr_clean(bp); | ||
270 | break; | ||
271 | } | ||
272 | |||
273 | af->from_addr_param(&addr, rawaddr, port, 0); | ||
274 | retval = sctp_add_bind_addr(bp, &addr, gfp); | ||
275 | if (retval) { | ||
276 | /* Can't finish building the list, clean up. */ | ||
277 | sctp_bind_addr_clean(bp); | ||
278 | break; | ||
279 | } | ||
280 | |||
281 | len = ntohs(param->length); | ||
282 | addrs_len -= len; | ||
283 | raw_addr_list += len; | ||
284 | } | ||
285 | |||
286 | return retval; | ||
287 | } | ||
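The loop above treats the raw address list as a sequence of type-length-value (TLV) parameters and advances by each parameter's length field, which is carried in network byte order. A minimal user-space sketch of the same walking pattern, assuming a stand-in header layout rather than the kernel's sctp_paramhdr:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>	/* ntohs(), htons() */

/* Stand-in for the on-the-wire parameter header: 16-bit type and 16-bit
 * length, both in network byte order; the length includes the header itself.
 */
struct param_hdr {
	uint16_t type;
	uint16_t length;
};

/* Walk a packed parameter list, printing each type/length pair. */
static void walk_params(const uint8_t *buf, int buf_len)
{
	while (buf_len > 0) {
		const struct param_hdr *p = (const struct param_hdr *)buf;
		int len = ntohs(p->length);

		if (len < (int)sizeof(*p) || len > buf_len)
			break;	/* malformed parameter, stop walking */

		printf("param type %d, length %d\n", (int)ntohs(p->type), len);

		buf += len;
		buf_len -= len;
	}
}

int main(void)
{
	/* Two parameters: (type 5, length 8, 4 value bytes) then (type 6, length 4). */
	uint16_t words[6] = { htons(5), htons(8), 0, 0, htons(6), htons(4) };

	walk_params((const uint8_t *)words, (int)sizeof(words));
	return 0;
}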
288 | |||
289 | /******************************************************************** | ||
290 | * 2nd Level Abstractions | ||
291 | ********************************************************************/ | ||
292 | |||
293 | /* Does this contain a specified address? Allow wildcarding. */ | ||
294 | int sctp_bind_addr_match(struct sctp_bind_addr *bp, | ||
295 | const union sctp_addr *addr, | ||
296 | struct sctp_sock *opt) | ||
297 | { | ||
298 | struct sctp_sockaddr_entry *laddr; | ||
299 | struct list_head *pos; | ||
300 | |||
301 | list_for_each(pos, &bp->address_list) { | ||
302 | laddr = list_entry(pos, struct sctp_sockaddr_entry, list); | ||
303 | if (opt->pf->cmp_addr(&laddr->a, addr, opt)) | ||
304 | return 1; | ||
305 | } | ||
306 | |||
307 | return 0; | ||
308 | } | ||
309 | |||
310 | /* Find the first address in the bind address list that is not present in | ||
311 | * the addrs packed array. | ||
312 | */ | ||
313 | union sctp_addr *sctp_find_unmatch_addr(struct sctp_bind_addr *bp, | ||
314 | const union sctp_addr *addrs, | ||
315 | int addrcnt, | ||
316 | struct sctp_sock *opt) | ||
317 | { | ||
318 | struct sctp_sockaddr_entry *laddr; | ||
319 | union sctp_addr *addr; | ||
320 | void *addr_buf; | ||
321 | struct sctp_af *af; | ||
322 | struct list_head *pos; | ||
323 | int i; | ||
324 | |||
325 | list_for_each(pos, &bp->address_list) { | ||
326 | laddr = list_entry(pos, struct sctp_sockaddr_entry, list); | ||
327 | |||
328 | addr_buf = (union sctp_addr *)addrs; | ||
329 | for (i = 0; i < addrcnt; i++) { | ||
330 | addr = (union sctp_addr *)addr_buf; | ||
331 | af = sctp_get_af_specific(addr->v4.sin_family); | ||
332 | if (!af) | ||
333 | return NULL; | ||
334 | |||
335 | if (opt->pf->cmp_addr(&laddr->a, addr, opt)) | ||
336 | break; | ||
337 | |||
338 | addr_buf += af->sockaddr_len; | ||
339 | } | ||
340 | if (i == addrcnt) | ||
341 | return &laddr->a; | ||
342 | } | ||
343 | |||
344 | return NULL; | ||
345 | } | ||
346 | |||
347 | /* Copy one address; a wildcard expands to the global local address list. */ | ||
348 | static int sctp_copy_one_addr(struct sctp_bind_addr *dest, | ||
349 | union sctp_addr *addr, | ||
350 | sctp_scope_t scope, int gfp, int flags) | ||
351 | { | ||
352 | int error = 0; | ||
353 | |||
354 | if (sctp_is_any(addr)) { | ||
355 | error = sctp_copy_local_addr_list(dest, scope, gfp, flags); | ||
356 | } else if (sctp_in_scope(addr, scope)) { | ||
357 | /* Now that the address is in scope, check to see if | ||
358 | * the address type is supported by local sock as | ||
359 | * well as the remote peer. | ||
360 | */ | ||
361 | if ((((AF_INET == addr->sa.sa_family) && | ||
362 | (flags & SCTP_ADDR4_PEERSUPP))) || | ||
363 | (((AF_INET6 == addr->sa.sa_family) && | ||
364 | (flags & SCTP_ADDR6_ALLOWED) && | ||
365 | (flags & SCTP_ADDR6_PEERSUPP)))) | ||
366 | error = sctp_add_bind_addr(dest, addr, gfp); | ||
367 | } | ||
368 | |||
369 | return error; | ||
370 | } | ||
371 | |||
372 | /* Is this a wildcard address? */ | ||
373 | int sctp_is_any(const union sctp_addr *addr) | ||
374 | { | ||
375 | struct sctp_af *af = sctp_get_af_specific(addr->sa.sa_family); | ||
376 | if (!af) | ||
377 | return 0; | ||
378 | return af->is_any(addr); | ||
379 | } | ||
380 | |||
381 | /* Is 'addr' valid for 'scope'? */ | ||
382 | int sctp_in_scope(const union sctp_addr *addr, sctp_scope_t scope) | ||
383 | { | ||
384 | sctp_scope_t addr_scope = sctp_scope(addr); | ||
385 | |||
386 | /* The unusable SCTP addresses will not be considered with | ||
387 | * any defined scopes. | ||
388 | */ | ||
389 | if (SCTP_SCOPE_UNUSABLE == addr_scope) | ||
390 | return 0; | ||
391 | /* | ||
392 | * For the INIT and INIT-ACK address lists, let L be the level | ||
393 | * of the requested destination address; the sender and receiver | ||
394 | * SHOULD include all of their addresses with level greater | ||
395 | * than or equal to L. | ||
396 | */ | ||
397 | if (addr_scope <= scope) | ||
398 | return 1; | ||
399 | |||
400 | return 0; | ||
401 | } | ||
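The comparison above relies on the sctp_scope_t values being ordered from the widest scope to the narrowest, so "level greater than or equal to L" reduces to a simple <= on the enum. A tiny illustration of that ordering trick; the enum values below are made up for the sketch, not the kernel's definitions:

#include <stdio.h>

/* Illustrative ordering only: a smaller value means a broader scope. */
enum scope {
	SCOPE_GLOBAL = 0,
	SCOPE_PRIVATE,
	SCOPE_LINK,
	SCOPE_LOOPBACK,
	SCOPE_UNUSABLE
};

/* Mirror of the check above: unusable never matches; otherwise an address
 * is acceptable when its scope is at least as broad as the requested one.
 */
static int in_scope(enum scope addr_scope, enum scope wanted)
{
	if (addr_scope == SCOPE_UNUSABLE)
		return 0;
	return addr_scope <= wanted;
}

int main(void)
{
	printf("%d\n", in_scope(SCOPE_LINK, SCOPE_GLOBAL));	/* 0: too narrow   */
	printf("%d\n", in_scope(SCOPE_GLOBAL, SCOPE_LINK));	/* 1: broad enough */
	return 0;
}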
402 | |||
403 | /******************************************************************** | ||
404 | * 3rd Level Abstractions | ||
405 | ********************************************************************/ | ||
406 | |||
407 | /* What is the scope of 'addr'? */ | ||
408 | sctp_scope_t sctp_scope(const union sctp_addr *addr) | ||
409 | { | ||
410 | struct sctp_af *af; | ||
411 | |||
412 | af = sctp_get_af_specific(addr->sa.sa_family); | ||
413 | if (!af) | ||
414 | return SCTP_SCOPE_UNUSABLE; | ||
415 | |||
416 | return af->scope((union sctp_addr *)addr); | ||
417 | } | ||
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c new file mode 100644 index 000000000000..0c2ab7885058 --- /dev/null +++ b/net/sctp/chunk.c | |||
@@ -0,0 +1,309 @@ | |||
1 | /* SCTP kernel reference Implementation | ||
2 | * (C) Copyright IBM Corp. 2003, 2004 | ||
3 | * | ||
4 | * This file is part of the SCTP kernel reference Implementation | ||
5 | * | ||
6 | * This file contains the code relating to the chunk abstraction. | ||
7 | * | ||
8 | * The SCTP reference implementation is free software; | ||
9 | * you can redistribute it and/or modify it under the terms of | ||
10 | * the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2, or (at your option) | ||
12 | * any later version. | ||
13 | * | ||
14 | * The SCTP reference implementation is distributed in the hope that it | ||
15 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
16 | * ************************ | ||
17 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
18 | * See the GNU General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with GNU CC; see the file COPYING. If not, write to | ||
22 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 02111-1307, USA. | ||
24 | * | ||
25 | * Please send any bug reports or fixes you make to the | ||
26 | * email address(es): | ||
27 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
28 | * | ||
29 | * Or submit a bug report through the following website: | ||
30 | * http://www.sf.net/projects/lksctp | ||
31 | * | ||
32 | * Written or modified by: | ||
33 | * Jon Grimm <jgrimm@us.ibm.com> | ||
34 | * Sridhar Samudrala <sri@us.ibm.com> | ||
35 | * | ||
36 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
37 | * be incorporated into the next SCTP release. | ||
38 | */ | ||
39 | |||
40 | #include <linux/types.h> | ||
41 | #include <linux/kernel.h> | ||
42 | #include <linux/net.h> | ||
43 | #include <linux/inet.h> | ||
44 | #include <linux/skbuff.h> | ||
45 | #include <net/sock.h> | ||
46 | #include <net/sctp/sctp.h> | ||
47 | #include <net/sctp/sm.h> | ||
48 | |||
49 | /* This file is mostly in anticipation of future work, but initially | ||
50 | * it is populated with fragment tracking for an outbound message. | ||
51 | */ | ||
52 | |||
53 | /* Initialize datamsg from memory. */ | ||
54 | static void sctp_datamsg_init(struct sctp_datamsg *msg) | ||
55 | { | ||
56 | atomic_set(&msg->refcnt, 1); | ||
57 | msg->send_failed = 0; | ||
58 | msg->send_error = 0; | ||
59 | msg->can_abandon = 0; | ||
60 | msg->expires_at = 0; | ||
61 | INIT_LIST_HEAD(&msg->chunks); | ||
62 | } | ||
63 | |||
64 | /* Allocate and initialize datamsg. */ | ||
65 | SCTP_STATIC struct sctp_datamsg *sctp_datamsg_new(int gfp) | ||
66 | { | ||
67 | struct sctp_datamsg *msg; | ||
68 | msg = kmalloc(sizeof(struct sctp_datamsg), gfp); | ||
69 | if (msg) | ||
70 | sctp_datamsg_init(msg); | ||
71 | SCTP_DBG_OBJCNT_INC(datamsg); | ||
72 | return msg; | ||
73 | } | ||
74 | |||
75 | /* Final destruction of datamsg memory. */ | ||
76 | static void sctp_datamsg_destroy(struct sctp_datamsg *msg) | ||
77 | { | ||
78 | struct list_head *pos, *temp; | ||
79 | struct sctp_chunk *chunk; | ||
80 | struct sctp_sock *sp; | ||
81 | struct sctp_ulpevent *ev; | ||
82 | struct sctp_association *asoc = NULL; | ||
83 | int error = 0, notify; | ||
84 | |||
85 | /* If we failed, we may need to notify. */ | ||
86 | notify = msg->send_failed ? -1 : 0; | ||
87 | |||
88 | /* Release all references. */ | ||
89 | list_for_each_safe(pos, temp, &msg->chunks) { | ||
90 | list_del_init(pos); | ||
91 | chunk = list_entry(pos, struct sctp_chunk, frag_list); | ||
92 | /* Check whether we _really_ need to notify. */ | ||
93 | if (notify < 0) { | ||
94 | asoc = chunk->asoc; | ||
95 | if (msg->send_error) | ||
96 | error = msg->send_error; | ||
97 | else | ||
98 | error = asoc->outqueue.error; | ||
99 | |||
100 | sp = sctp_sk(asoc->base.sk); | ||
101 | notify = sctp_ulpevent_type_enabled(SCTP_SEND_FAILED, | ||
102 | &sp->subscribe); | ||
103 | } | ||
104 | |||
105 | /* Generate a SEND FAILED event only if enabled. */ | ||
106 | if (notify > 0) { | ||
107 | int sent; | ||
108 | if (chunk->has_tsn) | ||
109 | sent = SCTP_DATA_SENT; | ||
110 | else | ||
111 | sent = SCTP_DATA_UNSENT; | ||
112 | |||
113 | ev = sctp_ulpevent_make_send_failed(asoc, chunk, sent, | ||
114 | error, GFP_ATOMIC); | ||
115 | if (ev) | ||
116 | sctp_ulpq_tail_event(&asoc->ulpq, ev); | ||
117 | } | ||
118 | |||
119 | sctp_chunk_put(chunk); | ||
120 | } | ||
121 | |||
122 | SCTP_DBG_OBJCNT_DEC(datamsg); | ||
123 | kfree(msg); | ||
124 | } | ||
125 | |||
126 | /* Hold a reference. */ | ||
127 | static void sctp_datamsg_hold(struct sctp_datamsg *msg) | ||
128 | { | ||
129 | atomic_inc(&msg->refcnt); | ||
130 | } | ||
131 | |||
132 | /* Release a reference. */ | ||
133 | void sctp_datamsg_put(struct sctp_datamsg *msg) | ||
134 | { | ||
135 | if (atomic_dec_and_test(&msg->refcnt)) | ||
136 | sctp_datamsg_destroy(msg); | ||
137 | } | ||
138 | |||
139 | /* Free a message. Really just give up a reference; the | ||
140 | * actual freeing happens in sctp_datamsg_destroy(). | ||
141 | */ | ||
142 | void sctp_datamsg_free(struct sctp_datamsg *msg) | ||
143 | { | ||
144 | sctp_datamsg_put(msg); | ||
145 | } | ||
146 | |||
147 | /* Hold on to all the fragments until all chunks have been sent. */ | ||
148 | void sctp_datamsg_track(struct sctp_chunk *chunk) | ||
149 | { | ||
150 | sctp_chunk_hold(chunk); | ||
151 | } | ||
152 | |||
153 | /* Assign a chunk to this datamsg. */ | ||
154 | static void sctp_datamsg_assign(struct sctp_datamsg *msg, struct sctp_chunk *chunk) | ||
155 | { | ||
156 | sctp_datamsg_hold(msg); | ||
157 | chunk->msg = msg; | ||
158 | } | ||
159 | |||
160 | |||
161 | /* A data chunk can have a maximum payload of (2^16 - 20). Break | ||
162 | * down any such message into smaller chunks. Opportunistically, fragment | ||
163 | * the chunks down to the current MTU constraints. We may get refragmented | ||
164 | * later if the PMTU changes, but it is _much better_ to fragment immediately | ||
165 | * with a reasonable guess than always doing our fragmentation on the | ||
166 | * soft-interrupt. | ||
167 | */ | ||
168 | struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc, | ||
169 | struct sctp_sndrcvinfo *sinfo, | ||
170 | struct msghdr *msgh, int msg_len) | ||
171 | { | ||
172 | int max, whole, i, offset, over, err; | ||
173 | int len, first_len; | ||
174 | struct sctp_chunk *chunk; | ||
175 | struct sctp_datamsg *msg; | ||
176 | struct list_head *pos, *temp; | ||
177 | __u8 frag; | ||
178 | |||
179 | msg = sctp_datamsg_new(GFP_KERNEL); | ||
180 | if (!msg) | ||
181 | return NULL; | ||
182 | |||
183 | /* Note: Calculate this outside of the loop, so that all fragments | ||
184 | * have the same expiration. | ||
185 | */ | ||
186 | if (sinfo->sinfo_timetolive) { | ||
187 | /* sinfo_timetolive is in milliseconds */ | ||
188 | msg->expires_at = jiffies + | ||
189 | msecs_to_jiffies(sinfo->sinfo_timetolive); | ||
190 | msg->can_abandon = 1; | ||
191 | SCTP_DEBUG_PRINTK("%s: msg:%p expires_at: %ld jiffies:%ld\n", | ||
192 | __FUNCTION__, msg, msg->expires_at, jiffies); | ||
193 | } | ||
194 | |||
195 | max = asoc->frag_point; | ||
196 | |||
197 | whole = 0; | ||
198 | first_len = max; | ||
199 | |||
200 | /* Encourage Cookie-ECHO bundling. */ | ||
201 | if (asoc->state < SCTP_STATE_COOKIE_ECHOED) { | ||
202 | whole = msg_len / (max - SCTP_ARBITRARY_COOKIE_ECHO_LEN); | ||
203 | |||
204 | /* Account for the DATA to be bundled with the COOKIE-ECHO. */ | ||
205 | if (whole) { | ||
206 | first_len = max - SCTP_ARBITRARY_COOKIE_ECHO_LEN; | ||
207 | msg_len -= first_len; | ||
208 | whole = 1; | ||
209 | } | ||
210 | } | ||
211 | |||
212 | /* How many full sized? How many bytes leftover? */ | ||
213 | whole += msg_len / max; | ||
214 | over = msg_len % max; | ||
215 | offset = 0; | ||
216 | |||
217 | if ((whole > 1) || (whole && over)) | ||
218 | SCTP_INC_STATS_USER(SCTP_MIB_FRAGUSRMSGS); | ||
219 | |||
220 | /* Create chunks for all the full sized DATA chunks. */ | ||
221 | for (i = 0, len = first_len; i < whole; i++) { | ||
222 | frag = SCTP_DATA_MIDDLE_FRAG; | ||
223 | |||
224 | if (0 == i) | ||
225 | frag |= SCTP_DATA_FIRST_FRAG; | ||
226 | |||
227 | if ((i == (whole - 1)) && !over) | ||
228 | frag |= SCTP_DATA_LAST_FRAG; | ||
229 | |||
230 | chunk = sctp_make_datafrag_empty(asoc, sinfo, len, frag, 0); | ||
231 | |||
232 | if (!chunk) | ||
233 | goto errout; | ||
234 | err = sctp_user_addto_chunk(chunk, offset, len, msgh->msg_iov); | ||
235 | if (err < 0) | ||
236 | goto errout; | ||
237 | |||
238 | offset += len; | ||
239 | |||
240 | /* Put the chunk->skb back into the form expected by send. */ | ||
241 | __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr | ||
242 | - (__u8 *)chunk->skb->data); | ||
243 | |||
244 | sctp_datamsg_assign(msg, chunk); | ||
245 | list_add_tail(&chunk->frag_list, &msg->chunks); | ||
246 | |||
247 | /* The first chunk was likely short to allow bundling, | ||
248 | * so reset to full size for the remaining chunks. | ||
249 | */ | ||
250 | if (0 == i) | ||
251 | len = max; | ||
252 | } | ||
253 | |||
254 | /* .. now the leftover bytes. */ | ||
255 | if (over) { | ||
256 | if (!whole) | ||
257 | frag = SCTP_DATA_NOT_FRAG; | ||
258 | else | ||
259 | frag = SCTP_DATA_LAST_FRAG; | ||
260 | |||
261 | chunk = sctp_make_datafrag_empty(asoc, sinfo, over, frag, 0); | ||
262 | |||
263 | if (!chunk) | ||
264 | goto errout; | ||
265 | |||
266 | err = sctp_user_addto_chunk(chunk, offset, over, msgh->msg_iov); | ||
267 | |||
268 | /* Put the chunk->skb back into the form expected by send. */ | ||
269 | __skb_pull(chunk->skb, (__u8 *)chunk->chunk_hdr | ||
270 | - (__u8 *)chunk->skb->data); | ||
271 | if (err < 0) | ||
272 | goto errout; | ||
273 | |||
274 | sctp_datamsg_assign(msg, chunk); | ||
275 | list_add_tail(&chunk->frag_list, &msg->chunks); | ||
276 | } | ||
277 | |||
278 | return msg; | ||
279 | |||
280 | errout: | ||
281 | list_for_each_safe(pos, temp, &msg->chunks) { | ||
282 | list_del_init(pos); | ||
283 | chunk = list_entry(pos, struct sctp_chunk, frag_list); | ||
284 | sctp_chunk_free(chunk); | ||
285 | } | ||
286 | sctp_datamsg_free(msg); | ||
287 | return NULL; | ||
288 | } | ||
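In words, the sizing pass above first carves off a shorter leading fragment when a COOKIE-ECHO is expected to be bundled with it, then splits the remainder into full-size fragments plus an optional tail for the leftover bytes. A stand-alone sketch of just that arithmetic; FRAG_POINT and COOKIE_RESV below are illustrative stand-ins for asoc->frag_point and SCTP_ARBITRARY_COOKIE_ECHO_LEN:

#include <stdio.h>

#define FRAG_POINT	1452	/* illustrative fragmentation point	*/
#define COOKIE_RESV	 200	/* illustrative COOKIE-ECHO reserve	*/

/* Print the fragment plan for a message of msg_len bytes. */
static void plan_fragments(int msg_len, int bundle_cookie)
{
	int first_len = FRAG_POINT;
	int whole = 0, over, i;

	/* Shorten the first fragment so a COOKIE-ECHO can ride along. */
	if (bundle_cookie && msg_len / (FRAG_POINT - COOKIE_RESV)) {
		first_len = FRAG_POINT - COOKIE_RESV;
		msg_len -= first_len;
		whole = 1;
	}

	/* How many full sized?  How many bytes leftover? */
	whole += msg_len / FRAG_POINT;
	over = msg_len % FRAG_POINT;

	for (i = 0; i < whole; i++)
		printf("fragment %d: %d bytes%s%s\n", i,
		       i ? FRAG_POINT : first_len,
		       i == 0 ? " [FIRST]" : "",
		       (i == whole - 1 && !over) ? " [LAST]" : "");
	if (over)
		printf("fragment %d: %d bytes%s [LAST]\n", whole, over,
		       whole ? "" : " [FIRST]");
}

int main(void)
{
	plan_fragments(4000, 1);	/* pre-ESTABLISHED send   */
	plan_fragments(500, 0);		/* small established send */
	return 0;
}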
289 | |||
290 | /* Check whether this message has expired. */ | ||
291 | int sctp_chunk_abandoned(struct sctp_chunk *chunk) | ||
292 | { | ||
293 | struct sctp_datamsg *msg = chunk->msg; | ||
294 | |||
295 | if (!msg->can_abandon) | ||
296 | return 0; | ||
297 | |||
298 | if (time_after(jiffies, msg->expires_at)) | ||
299 | return 1; | ||
300 | |||
301 | return 0; | ||
302 | } | ||
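The expiry test above uses time_after(), which compares free-running jiffies values in a wraparound-safe way by checking the sign of their difference. A small user-space illustration of the same idiom; ticks_after() is a hypothetical helper and a 32-bit tick counter is assumed:

#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "a is after b" for free-running 32-bit tick counters,
 * in the spirit of the kernel's time_after() macro.
 */
static int ticks_after(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

int main(void)
{
	uint32_t expires = 0xFFFFFFF0u;	/* deadline just before wraparound */
	uint32_t now     = 0x00000010u;	/* counter has already wrapped     */

	printf("expired: %d\n", ticks_after(now, expires));	/* prints 1 */
	return 0;
}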
303 | |||
304 | /* This chunk (and consequently the entire message) has failed to send. */ | ||
305 | void sctp_chunk_fail(struct sctp_chunk *chunk, int error) | ||
306 | { | ||
307 | chunk->msg->send_failed = 1; | ||
308 | chunk->msg->send_error = error; | ||
309 | } | ||
diff --git a/net/sctp/command.c b/net/sctp/command.c new file mode 100644 index 000000000000..3ff804757f4a --- /dev/null +++ b/net/sctp/command.c | |||
@@ -0,0 +1,81 @@ | |||
1 | /* SCTP kernel reference Implementation Copyright (C) 1999-2001 | ||
2 | * Cisco, Motorola, and IBM | ||
3 | * Copyright 2001 La Monte H.P. Yarroll | ||
4 | * | ||
5 | * This file is part of the SCTP kernel reference Implementation | ||
6 | * | ||
7 | * These functions manipulate sctp command sequences. | ||
8 | * | ||
9 | * The SCTP reference implementation is free software; | ||
10 | * you can redistribute it and/or modify it under the terms of | ||
11 | * the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2, or (at your option) | ||
13 | * any later version. | ||
14 | * | ||
15 | * The SCTP reference implementation is distributed in the hope that it | ||
16 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
17 | * ************************ | ||
18 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
19 | * See the GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with GNU CC; see the file COPYING. If not, write to | ||
23 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
24 | * Boston, MA 02111-1307, USA. | ||
25 | * | ||
26 | * Please send any bug reports or fixes you make to the | ||
27 | * email address(es): | ||
28 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
29 | * | ||
30 | * Or submit a bug report through the following website: | ||
31 | * http://www.sf.net/projects/lksctp | ||
32 | * | ||
33 | * Written or modified by: | ||
34 | * La Monte H.P. Yarroll <piggy@acm.org> | ||
35 | * Karl Knutson <karl@athena.chicago.il.us> | ||
36 | * | ||
37 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
38 | * be incorporated into the next SCTP release. | ||
39 | */ | ||
40 | |||
41 | #include <linux/types.h> | ||
42 | #include <net/sctp/sctp.h> | ||
43 | #include <net/sctp/sm.h> | ||
44 | |||
45 | /* Initialize a block of memory as a command sequence. */ | ||
46 | int sctp_init_cmd_seq(sctp_cmd_seq_t *seq) | ||
47 | { | ||
48 | memset(seq, 0, sizeof(sctp_cmd_seq_t)); | ||
49 | return 1; /* We always succeed. */ | ||
50 | } | ||
51 | |||
52 | /* Add a command to a sctp_cmd_seq_t. | ||
53 | * Return 0 if the command sequence is full. | ||
54 | */ | ||
55 | int sctp_add_cmd(sctp_cmd_seq_t *seq, sctp_verb_t verb, sctp_arg_t obj) | ||
56 | { | ||
57 | if (seq->next_free_slot >= SCTP_MAX_NUM_COMMANDS) | ||
58 | goto fail; | ||
59 | |||
60 | seq->cmds[seq->next_free_slot].verb = verb; | ||
61 | seq->cmds[seq->next_free_slot++].obj = obj; | ||
62 | |||
63 | return 1; | ||
64 | |||
65 | fail: | ||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | /* Return the next command structure in a sctp_cmd_seq. | ||
70 | * Returns NULL at the end of the sequence. | ||
71 | */ | ||
72 | sctp_cmd_t *sctp_next_cmd(sctp_cmd_seq_t *seq) | ||
73 | { | ||
74 | sctp_cmd_t *retval = NULL; | ||
75 | |||
76 | if (seq->next_cmd < seq->next_free_slot) | ||
77 | retval = &seq->cmds[seq->next_cmd++]; | ||
78 | |||
79 | return retval; | ||
80 | } | ||
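Together, sctp_init_cmd_seq(), sctp_add_cmd() and sctp_next_cmd() behave like a small fixed-capacity append-only queue with a separate read cursor. A hedged user-space analogue; the field names, verb/arg types and MAX_CMDS capacity are placeholders rather than the kernel's sctp_cmd_seq_t layout:

#include <string.h>
#include <stdio.h>

#define MAX_CMDS 8	/* illustrative capacity */

struct cmd	{ int verb; long arg; };
struct cmd_seq	{ struct cmd cmds[MAX_CMDS]; int next_free_slot; int next_cmd; };

/* Initialize a block of memory as a command sequence. */
static void cmd_seq_init(struct cmd_seq *seq)
{
	memset(seq, 0, sizeof(*seq));
}

/* Append a command; return 0 when the sequence is full, 1 on success. */
static int cmd_seq_add(struct cmd_seq *seq, int verb, long arg)
{
	if (seq->next_free_slot >= MAX_CMDS)
		return 0;
	seq->cmds[seq->next_free_slot].verb = verb;
	seq->cmds[seq->next_free_slot++].arg = arg;
	return 1;
}

/* Return the next unread command, or NULL at the end of the sequence. */
static struct cmd *cmd_seq_next(struct cmd_seq *seq)
{
	return (seq->next_cmd < seq->next_free_slot) ?
		&seq->cmds[seq->next_cmd++] : NULL;
}

int main(void)
{
	struct cmd_seq seq;
	struct cmd *c;

	cmd_seq_init(&seq);
	cmd_seq_add(&seq, 1, 42);
	cmd_seq_add(&seq, 2, 7);
	while ((c = cmd_seq_next(&seq)))
		printf("verb %d, arg %ld\n", c->verb, c->arg);
	return 0;
}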
81 | |||
diff --git a/net/sctp/crc32c.c b/net/sctp/crc32c.c new file mode 100644 index 000000000000..31f05ec8e1d3 --- /dev/null +++ b/net/sctp/crc32c.c | |||
@@ -0,0 +1,220 @@ | |||
1 | /* SCTP kernel reference Implementation | ||
2 | * Copyright (c) 1999-2001 Motorola, Inc. | ||
3 | * Copyright (c) 2001-2003 International Business Machines, Corp. | ||
4 | * | ||
5 | * This file is part of the SCTP kernel reference Implementation | ||
6 | * | ||
7 | * SCTP Checksum functions | ||
8 | * | ||
9 | * The SCTP reference implementation is free software; | ||
10 | * you can redistribute it and/or modify it under the terms of | ||
11 | * the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2, or (at your option) | ||
13 | * any later version. | ||
14 | * | ||
15 | * The SCTP reference implementation is distributed in the hope that it | ||
16 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
17 | * ************************ | ||
18 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
19 | * See the GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with GNU CC; see the file COPYING. If not, write to | ||
23 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
24 | * Boston, MA 02111-1307, USA. | ||
25 | * | ||
26 | * Please send any bug reports or fixes you make to the | ||
27 | * email address(es): | ||
28 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
29 | * | ||
30 | * Or submit a bug report through the following website: | ||
31 | * http://www.sf.net/projects/lksctp | ||
32 | * | ||
33 | * Written or modified by: | ||
34 | * Dinakaran Joseph | ||
35 | * Jon Grimm <jgrimm@us.ibm.com> | ||
36 | * Sridhar Samudrala <sri@us.ibm.com> | ||
37 | * | ||
38 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
39 | * be incorporated into the next SCTP release. | ||
40 | */ | ||
41 | |||
42 | /* The following code has been taken directly from | ||
43 | * draft-ietf-tsvwg-sctpcsum-03.txt | ||
44 | * | ||
45 | * The code has since been modified with SCTP-specific knowledge. | ||
46 | */ | ||
47 | |||
48 | #include <linux/types.h> | ||
49 | #include <net/sctp/sctp.h> | ||
50 | |||
51 | #define CRC32C_POLY 0x1EDC6F41 | ||
52 | #define CRC32C(c,d) (c=(c>>8)^crc_c[(c^(d))&0xFF]) | ||
53 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | ||
54 | /* Copyright 2001, D. Otis. Use this program, code or tables */ | ||
55 | /* extracted from it, as desired without restriction. */ | ||
56 | /* */ | ||
57 | /* 32 Bit Reflected CRC table generation for SCTP. */ | ||
58 | /* To accommodate serial byte data being shifted out least */ | ||
59 | /* significant bit first, the table's 32 bit words are reflected */ | ||
60 | /* which flips both byte and bit MS and LS positions. The CRC */ | ||
61 | /* is calculated MS bits first from the perspective of the serial*/ | ||
62 | /* stream. The x^32 term is implied and the x^0 term may also */ | ||
63 | /* be shown as +1. The polynomial code used is 0x1EDC6F41. */ | ||
64 | /* Castagnoli93 */ | ||
65 | /* x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+ */ | ||
66 | /* x^11+x^10+x^9+x^8+x^6+x^0 */ | ||
67 | /* Guy Castagnoli Stefan Braeuer and Martin Herrman */ | ||
68 | /* "Optimization of Cyclic Redundancy-Check Codes */ | ||
69 | /* with 24 and 32 Parity Bits", */ | ||
70 | /* IEEE Transactions on Communications, Vol.41, No.6, June 1993 */ | ||
71 | /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ | ||
72 | static const __u32 crc_c[256] = { | ||
73 | 0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4, | ||
74 | 0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB, | ||
75 | 0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B, | ||
76 | 0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24, | ||
77 | 0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B, | ||
78 | 0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384, | ||
79 | 0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54, | ||
80 | 0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B, | ||
81 | 0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A, | ||
82 | 0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35, | ||
83 | 0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5, | ||
84 | 0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA, | ||
85 | 0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45, | ||
86 | 0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A, | ||
87 | 0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A, | ||
88 | 0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595, | ||
89 | 0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48, | ||
90 | 0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957, | ||
91 | 0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687, | ||
92 | 0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198, | ||
93 | 0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927, | ||
94 | 0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38, | ||
95 | 0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8, | ||
96 | 0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7, | ||
97 | 0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096, | ||
98 | 0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789, | ||
99 | 0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859, | ||
100 | 0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46, | ||
101 | 0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9, | ||
102 | 0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6, | ||
103 | 0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36, | ||
104 | 0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829, | ||
105 | 0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C, | ||
106 | 0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93, | ||
107 | 0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043, | ||
108 | 0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C, | ||
109 | 0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3, | ||
110 | 0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC, | ||
111 | 0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C, | ||
112 | 0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033, | ||
113 | 0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652, | ||
114 | 0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D, | ||
115 | 0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D, | ||
116 | 0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982, | ||
117 | 0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D, | ||
118 | 0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622, | ||
119 | 0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2, | ||
120 | 0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED, | ||
121 | 0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530, | ||
122 | 0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F, | ||
123 | 0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF, | ||
124 | 0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0, | ||
125 | 0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F, | ||
126 | 0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540, | ||
127 | 0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90, | ||
128 | 0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F, | ||
129 | 0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE, | ||
130 | 0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1, | ||
131 | 0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321, | ||
132 | 0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E, | ||
133 | 0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81, | ||
134 | 0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E, | ||
135 | 0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E, | ||
136 | 0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351, | ||
137 | }; | ||
138 | |||
139 | __u32 sctp_start_cksum(__u8 *buffer, __u16 length) | ||
140 | { | ||
141 | __u32 crc32 = ~(__u32) 0; | ||
142 | __u32 i; | ||
143 | |||
144 | /* Optimize this routine to be SCTP specific, knowing how | ||
145 | * to skip the checksum field of the SCTP header. | ||
146 | */ | ||
147 | |||
148 | /* Calculate CRC up to the checksum. */ | ||
149 | for (i = 0; i < (sizeof(struct sctphdr) - sizeof(__u32)); i++) | ||
150 | CRC32C(crc32, buffer[i]); | ||
151 | |||
152 | /* Skip checksum field of the header. */ | ||
153 | for (i = 0; i < sizeof(__u32); i++) | ||
154 | CRC32C(crc32, 0); | ||
155 | |||
156 | /* Calculate the rest of the CRC. */ | ||
157 | for (i = sizeof(struct sctphdr); i < length ; i++) | ||
158 | CRC32C(crc32, buffer[i]); | ||
159 | |||
160 | return crc32; | ||
161 | } | ||
162 | |||
163 | __u32 sctp_update_cksum(__u8 *buffer, __u16 length, __u32 crc32) | ||
164 | { | ||
165 | __u32 i; | ||
166 | |||
167 | for (i = 0; i < length ; i++) | ||
168 | CRC32C(crc32, buffer[i]); | ||
169 | |||
170 | return crc32; | ||
171 | } | ||
172 | |||
173 | __u32 sctp_update_copy_cksum(__u8 *to, __u8 *from, __u16 length, __u32 crc32) | ||
174 | { | ||
175 | __u32 i; | ||
176 | __u32 *_to = (__u32 *)to; | ||
177 | __u32 *_from = (__u32 *)from; | ||
178 | |||
179 | for (i = 0; i < (length/4); i++) { | ||
180 | _to[i] = _from[i]; | ||
181 | CRC32C(crc32, from[i*4]); | ||
182 | CRC32C(crc32, from[i*4+1]); | ||
183 | CRC32C(crc32, from[i*4+2]); | ||
184 | CRC32C(crc32, from[i*4+3]); | ||
185 | } | ||
186 | |||
187 | return crc32; | ||
188 | } | ||
189 | |||
190 | __u32 sctp_end_cksum(__u32 crc32) | ||
191 | { | ||
192 | __u32 result; | ||
193 | __u8 byte0, byte1, byte2, byte3; | ||
194 | |||
195 | result = ~crc32; | ||
196 | |||
197 | /* result now holds the negated polynomial remainder; | ||
198 | * since the table and algorithm are "reflected" [williams95]. | ||
199 | * That is, result has the same value as if we mapped the message | ||
200 | * to a polynomial, computed the host-bit-order polynomial | ||
201 | * remainder, performed final negation, then did an end-for-end | ||
202 | * bit-reversal. | ||
203 | * Note that a 32-bit bit-reversal is identical to four inplace | ||
204 | * 8-bit reversals followed by an end-for-end byteswap. | ||
205 | * In other words, the bytes of each bit are in the right order, | ||
206 | * but the bytes have been byteswapped. So we now do an explicit | ||
207 | * byteswap. On a little-endian machine, this byteswap and | ||
208 | * the final ntohl cancel out and could be elided. | ||
209 | */ | ||
210 | byte0 = result & 0xff; | ||
211 | byte1 = (result>>8) & 0xff; | ||
212 | byte2 = (result>>16) & 0xff; | ||
213 | byte3 = (result>>24) & 0xff; | ||
214 | |||
215 | crc32 = ((byte0 << 24) | | ||
216 | (byte1 << 16) | | ||
217 | (byte2 << 8) | | ||
218 | byte3); | ||
219 | return crc32; | ||
220 | } | ||
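If you want to sanity-check the table-driven code above, a bit-at-a-time CRC32C using 0x82F63B78 (the reflected form of the 0x1EDC6F41 polynomial) should agree with it on plain buffers. The sketch below is a generic cross-check only; it deliberately skips the SCTP-specific handling of the checksum field and the final byte reordering done by sctp_end_cksum():

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Bitwise, reflected CRC32C (Castagnoli). */
static uint32_t crc32c_bitwise(const uint8_t *buf, size_t len)
{
	uint32_t crc = ~0u;
	size_t i;
	int k;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82F63B78u & -(crc & 1));
	}
	return ~crc;
}

int main(void)
{
	/* Standard CRC-32C test vector: "123456789" -> 0xE3069283. */
	const uint8_t msg[] = "123456789";

	printf("crc32c = 0x%08X\n", (unsigned int)crc32c_bitwise(msg, sizeof(msg) - 1));
	return 0;
}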
diff --git a/net/sctp/debug.c b/net/sctp/debug.c new file mode 100644 index 000000000000..aa8340373af7 --- /dev/null +++ b/net/sctp/debug.c | |||
@@ -0,0 +1,191 @@ | |||
1 | /* SCTP kernel reference Implementation | ||
2 | * (C) Copyright IBM Corp. 2001, 2004 | ||
3 | * Copyright (c) 1999-2000 Cisco, Inc. | ||
4 | * Copyright (c) 1999-2001 Motorola, Inc. | ||
5 | * Copyright (c) 2001 Intel Corp. | ||
6 | * | ||
7 | * This file is part of the SCTP kernel reference Implementation | ||
8 | * | ||
9 | * This file is part of the implementation of the add-IP extension, | ||
10 | * based on <draft-ietf-tsvwg-addip-sctp-02.txt> June 29, 2001, | ||
11 | * for the SCTP kernel reference Implementation. | ||
12 | * | ||
13 | * This file converts numerical ID value to alphabetical names for SCTP | ||
14 | * terms such as chunk type, parameter type, event type, etc. | ||
15 | * | ||
16 | * The SCTP reference implementation is free software; | ||
17 | * you can redistribute it and/or modify it under the terms of | ||
18 | * the GNU General Public License as published by | ||
19 | * the Free Software Foundation; either version 2, or (at your option) | ||
20 | * any later version. | ||
21 | * | ||
22 | * The SCTP reference implementation is distributed in the hope that it | ||
23 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
24 | * ************************ | ||
25 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
26 | * See the GNU General Public License for more details. | ||
27 | * | ||
28 | * You should have received a copy of the GNU General Public License | ||
29 | * along with GNU CC; see the file COPYING. If not, write to | ||
30 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
31 | * Boston, MA 02111-1307, USA. | ||
32 | * | ||
33 | * Please send any bug reports or fixes you make to the | ||
34 | * email address(es): | ||
35 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
36 | * | ||
37 | * Or submit a bug report through the following website: | ||
38 | * http://www.sf.net/projects/lksctp | ||
39 | * | ||
40 | * Written or modified by: | ||
41 | * La Monte H.P. Yarroll <piggy@acm.org> | ||
42 | * Karl Knutson <karl@athena.chicago.il.us> | ||
43 | * Xingang Guo <xingang.guo@intel.com> | ||
44 | * Jon Grimm <jgrimm@us.ibm.com> | ||
45 | * Daisy Chang <daisyc@us.ibm.com> | ||
46 | * Sridhar Samudrala <sri@us.ibm.com> | ||
47 | * | ||
48 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
49 | * be incorporated into the next SCTP release. | ||
50 | */ | ||
51 | |||
52 | #include <net/sctp/sctp.h> | ||
53 | |||
54 | #if SCTP_DEBUG | ||
55 | int sctp_debug_flag = 1; /* Initially enable DEBUG */ | ||
56 | #endif /* SCTP_DEBUG */ | ||
57 | |||
58 | /* These are printable forms of Chunk IDs from section 3.1. */ | ||
59 | static const char *sctp_cid_tbl[SCTP_NUM_BASE_CHUNK_TYPES] = { | ||
60 | "DATA", | ||
61 | "INIT", | ||
62 | "INIT_ACK", | ||
63 | "SACK", | ||
64 | "HEARTBEAT", | ||
65 | "HEARTBEAT_ACK", | ||
66 | "ABORT", | ||
67 | "SHUTDOWN", | ||
68 | "SHUTDOWN_ACK", | ||
69 | "ERROR", | ||
70 | "COOKIE_ECHO", | ||
71 | "COOKIE_ACK", | ||
72 | "ECN_ECNE", | ||
73 | "ECN_CWR", | ||
74 | "SHUTDOWN_COMPLETE", | ||
75 | }; | ||
76 | |||
77 | /* Lookup "chunk type" debug name. */ | ||
78 | const char *sctp_cname(const sctp_subtype_t cid) | ||
79 | { | ||
80 | if (cid.chunk < 0) | ||
81 | return "illegal chunk id"; | ||
82 | if (cid.chunk <= SCTP_CID_BASE_MAX) | ||
83 | return sctp_cid_tbl[cid.chunk]; | ||
84 | |||
85 | switch (cid.chunk) { | ||
86 | case SCTP_CID_ASCONF: | ||
87 | return "ASCONF"; | ||
88 | |||
89 | case SCTP_CID_ASCONF_ACK: | ||
90 | return "ASCONF_ACK"; | ||
91 | |||
92 | case SCTP_CID_FWD_TSN: | ||
93 | return "FWD_TSN"; | ||
94 | |||
95 | default: | ||
96 | return "unknown chunk"; | ||
97 | } | ||
98 | return "unknown chunk"; | ||
99 | } | ||
100 | |||
101 | /* These are printable forms of the states. */ | ||
102 | const char *sctp_state_tbl[SCTP_STATE_NUM_STATES] = { | ||
103 | "STATE_EMPTY", | ||
104 | "STATE_CLOSED", | ||
105 | "STATE_COOKIE_WAIT", | ||
106 | "STATE_COOKIE_ECHOED", | ||
107 | "STATE_ESTABLISHED", | ||
108 | "STATE_SHUTDOWN_PENDING", | ||
109 | "STATE_SHUTDOWN_SENT", | ||
110 | "STATE_SHUTDOWN_RECEIVED", | ||
111 | "STATE_SHUTDOWN_ACK_SENT", | ||
112 | }; | ||
113 | |||
114 | /* Events that could change the state of an association. */ | ||
115 | const char *sctp_evttype_tbl[] = { | ||
116 | "EVENT_T_unknown", | ||
117 | "EVENT_T_CHUNK", | ||
118 | "EVENT_T_TIMEOUT", | ||
119 | "EVENT_T_OTHER", | ||
120 | "EVENT_T_PRIMITIVE" | ||
121 | }; | ||
122 | |||
123 | /* Return value of a state function */ | ||
124 | const char *sctp_status_tbl[] = { | ||
125 | "DISPOSITION_DISCARD", | ||
126 | "DISPOSITION_CONSUME", | ||
127 | "DISPOSITION_NOMEM", | ||
128 | "DISPOSITION_DELETE_TCB", | ||
129 | "DISPOSITION_ABORT", | ||
130 | "DISPOSITION_VIOLATION", | ||
131 | "DISPOSITION_NOT_IMPL", | ||
132 | "DISPOSITION_ERROR", | ||
133 | "DISPOSITION_BUG" | ||
134 | }; | ||
135 | |||
136 | /* Printable forms of primitives */ | ||
137 | static const char *sctp_primitive_tbl[SCTP_NUM_PRIMITIVE_TYPES] = { | ||
138 | "PRIMITIVE_ASSOCIATE", | ||
139 | "PRIMITIVE_SHUTDOWN", | ||
140 | "PRIMITIVE_ABORT", | ||
141 | "PRIMITIVE_SEND", | ||
142 | "PRIMITIVE_REQUESTHEARTBEAT", | ||
143 | }; | ||
144 | |||
145 | /* Lookup primitive debug name. */ | ||
146 | const char *sctp_pname(const sctp_subtype_t id) | ||
147 | { | ||
148 | if (id.primitive < 0) | ||
149 | return "illegal primitive"; | ||
150 | if (id.primitive <= SCTP_EVENT_PRIMITIVE_MAX) | ||
151 | return sctp_primitive_tbl[id.primitive]; | ||
152 | return "unknown_primitive"; | ||
153 | } | ||
154 | |||
155 | static const char *sctp_other_tbl[] = { | ||
156 | "NO_PENDING_TSN", | ||
157 | "ICMP_PROTO_UNREACH", | ||
158 | }; | ||
159 | |||
160 | /* Lookup "other" debug name. */ | ||
161 | const char *sctp_oname(const sctp_subtype_t id) | ||
162 | { | ||
163 | if (id.other < 0) | ||
164 | return "illegal 'other' event"; | ||
165 | if (id.other <= SCTP_EVENT_OTHER_MAX) | ||
166 | return sctp_other_tbl[id.other]; | ||
167 | return "unknown 'other' event"; | ||
168 | } | ||
169 | |||
170 | static const char *sctp_timer_tbl[] = { | ||
171 | "TIMEOUT_NONE", | ||
172 | "TIMEOUT_T1_COOKIE", | ||
173 | "TIMEOUT_T1_INIT", | ||
174 | "TIMEOUT_T2_SHUTDOWN", | ||
175 | "TIMEOUT_T3_RTX", | ||
176 | "TIMEOUT_T4_RTO", | ||
177 | "TIMEOUT_T5_SHUTDOWN_GUARD", | ||
178 | "TIMEOUT_HEARTBEAT", | ||
179 | "TIMEOUT_SACK", | ||
180 | "TIMEOUT_AUTOCLOSE", | ||
181 | }; | ||
182 | |||
183 | /* Lookup timer debug name. */ | ||
184 | const char *sctp_tname(const sctp_subtype_t id) | ||
185 | { | ||
186 | if (id.timeout < 0) | ||
187 | return "illegal 'timer' event"; | ||
188 | if (id.timeout <= SCTP_EVENT_TIMEOUT_MAX) | ||
189 | return sctp_timer_tbl[id.timeout]; | ||
190 | return "unknown_timer"; | ||
191 | } | ||
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c new file mode 100644 index 000000000000..544b75077dbd --- /dev/null +++ b/net/sctp/endpointola.c | |||
@@ -0,0 +1,389 @@ | |||
1 | /* SCTP kernel reference Implementation | ||
2 | * Copyright (c) 1999-2000 Cisco, Inc. | ||
3 | * Copyright (c) 1999-2001 Motorola, Inc. | ||
4 | * Copyright (c) 2001-2002 International Business Machines, Corp. | ||
5 | * Copyright (c) 2001 Intel Corp. | ||
6 | * Copyright (c) 2001 Nokia, Inc. | ||
7 | * Copyright (c) 2001 La Monte H.P. Yarroll | ||
8 | * | ||
9 | * This file is part of the SCTP kernel reference Implementation | ||
10 | * | ||
11 | * This abstraction represents an SCTP endpoint. | ||
12 | * | ||
13 | * This file is part of the implementation of the add-IP extension, | ||
14 | * based on <draft-ietf-tsvwg-addip-sctp-02.txt> June 29, 2001, | ||
15 | * for the SCTP kernel reference Implementation. | ||
16 | * | ||
17 | * The SCTP reference implementation is free software; | ||
18 | * you can redistribute it and/or modify it under the terms of | ||
19 | * the GNU General Public License as published by | ||
20 | * the Free Software Foundation; either version 2, or (at your option) | ||
21 | * any later version. | ||
22 | * | ||
23 | * The SCTP reference implementation is distributed in the hope that it | ||
24 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
25 | * ************************ | ||
26 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
27 | * See the GNU General Public License for more details. | ||
28 | * | ||
29 | * You should have received a copy of the GNU General Public License | ||
30 | * along with GNU CC; see the file COPYING. If not, write to | ||
31 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
32 | * Boston, MA 02111-1307, USA. | ||
33 | * | ||
34 | * Please send any bug reports or fixes you make to the | ||
35 | * email address(es): | ||
36 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
37 | * | ||
38 | * Or submit a bug report through the following website: | ||
39 | * http://www.sf.net/projects/lksctp | ||
40 | * | ||
41 | * Written or modified by: | ||
42 | * La Monte H.P. Yarroll <piggy@acm.org> | ||
43 | * Karl Knutson <karl@athena.chicago.il.us> | ||
44 | * Jon Grimm <jgrimm@austin.ibm.com> | ||
45 | * Daisy Chang <daisyc@us.ibm.com> | ||
46 | * Dajiang Zhang <dajiang.zhang@nokia.com> | ||
47 | * | ||
48 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
49 | * be incorporated into the next SCTP release. | ||
50 | */ | ||
51 | |||
52 | #include <linux/types.h> | ||
53 | #include <linux/sched.h> | ||
54 | #include <linux/slab.h> | ||
55 | #include <linux/in.h> | ||
56 | #include <linux/random.h> /* get_random_bytes() */ | ||
57 | #include <linux/crypto.h> | ||
58 | #include <net/sock.h> | ||
59 | #include <net/ipv6.h> | ||
60 | #include <net/sctp/sctp.h> | ||
61 | #include <net/sctp/sm.h> | ||
62 | |||
63 | /* Forward declarations for internal helpers. */ | ||
64 | static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep); | ||
65 | |||
66 | /* | ||
67 | * Initialize the base fields of the endpoint structure. | ||
68 | */ | ||
69 | static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep, | ||
70 | struct sock *sk, int gfp) | ||
71 | { | ||
72 | struct sctp_sock *sp = sctp_sk(sk); | ||
73 | memset(ep, 0, sizeof(struct sctp_endpoint)); | ||
74 | |||
75 | /* Initialize the base structure. */ | ||
76 | /* What type of endpoint are we? */ | ||
77 | ep->base.type = SCTP_EP_TYPE_SOCKET; | ||
78 | |||
79 | /* Initialize the basic object fields. */ | ||
80 | atomic_set(&ep->base.refcnt, 1); | ||
81 | ep->base.dead = 0; | ||
82 | ep->base.malloced = 1; | ||
83 | |||
84 | /* Create an input queue. */ | ||
85 | sctp_inq_init(&ep->base.inqueue); | ||
86 | |||
87 | /* Set its top-half handler */ | ||
88 | sctp_inq_set_th_handler(&ep->base.inqueue, | ||
89 | (void (*)(void *))sctp_endpoint_bh_rcv, ep); | ||
90 | |||
91 | /* Initialize the bind addr area */ | ||
92 | sctp_bind_addr_init(&ep->base.bind_addr, 0); | ||
93 | rwlock_init(&ep->base.addr_lock); | ||
94 | |||
95 | /* Remember who we are attached to. */ | ||
96 | ep->base.sk = sk; | ||
97 | sock_hold(ep->base.sk); | ||
98 | |||
99 | /* Create the lists of associations. */ | ||
100 | INIT_LIST_HEAD(&ep->asocs); | ||
101 | |||
102 | /* Set up the base timeout information. */ | ||
103 | ep->timeouts[SCTP_EVENT_TIMEOUT_NONE] = 0; | ||
104 | ep->timeouts[SCTP_EVENT_TIMEOUT_T1_COOKIE] = | ||
105 | SCTP_DEFAULT_TIMEOUT_T1_COOKIE; | ||
106 | ep->timeouts[SCTP_EVENT_TIMEOUT_T1_INIT] = | ||
107 | SCTP_DEFAULT_TIMEOUT_T1_INIT; | ||
108 | ep->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = | ||
109 | msecs_to_jiffies(sp->rtoinfo.srto_initial); | ||
110 | ep->timeouts[SCTP_EVENT_TIMEOUT_T3_RTX] = 0; | ||
111 | ep->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = 0; | ||
112 | |||
113 | /* sctpimpguide-05 Section 2.12.2 | ||
114 | * If the 'T5-shutdown-guard' timer is used, it SHOULD be set to the | ||
115 | * recommended value of 5 times 'RTO.Max'. | ||
116 | */ | ||
117 | ep->timeouts[SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD] | ||
118 | = 5 * msecs_to_jiffies(sp->rtoinfo.srto_max); | ||
119 | |||
120 | ep->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = | ||
121 | SCTP_DEFAULT_TIMEOUT_HEARTBEAT; | ||
122 | ep->timeouts[SCTP_EVENT_TIMEOUT_SACK] = | ||
123 | SCTP_DEFAULT_TIMEOUT_SACK; | ||
124 | ep->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = | ||
125 | sp->autoclose * HZ; | ||
126 | |||
127 | /* Use SCTP specific send buffer space queues. */ | ||
128 | sk->sk_write_space = sctp_write_space; | ||
129 | sock_set_flag(sk, SOCK_USE_WRITE_QUEUE); | ||
130 | |||
131 | /* Initialize the secret key used with cookie. */ | ||
132 | get_random_bytes(&ep->secret_key[0], SCTP_SECRET_SIZE); | ||
133 | ep->last_key = ep->current_key = 0; | ||
134 | ep->key_changed_at = jiffies; | ||
135 | |||
136 | ep->debug_name = "unnamedEndpoint"; | ||
137 | return ep; | ||
138 | } | ||
139 | |||
140 | /* Create a sctp_endpoint with all that boring stuff initialized. | ||
141 | * Returns NULL if there isn't enough memory. | ||
142 | */ | ||
143 | struct sctp_endpoint *sctp_endpoint_new(struct sock *sk, int gfp) | ||
144 | { | ||
145 | struct sctp_endpoint *ep; | ||
146 | |||
147 | /* Build a local endpoint. */ | ||
148 | ep = t_new(struct sctp_endpoint, gfp); | ||
149 | if (!ep) | ||
150 | goto fail; | ||
151 | if (!sctp_endpoint_init(ep, sk, gfp)) | ||
152 | goto fail_init; | ||
153 | ep->base.malloced = 1; | ||
154 | SCTP_DBG_OBJCNT_INC(ep); | ||
155 | return ep; | ||
156 | |||
157 | fail_init: | ||
158 | kfree(ep); | ||
159 | fail: | ||
160 | return NULL; | ||
161 | } | ||
162 | |||
163 | /* Add an association to an endpoint. */ | ||
164 | void sctp_endpoint_add_asoc(struct sctp_endpoint *ep, | ||
165 | struct sctp_association *asoc) | ||
166 | { | ||
167 | struct sock *sk = ep->base.sk; | ||
168 | |||
169 | /* Now just add it to our list of asocs */ | ||
170 | list_add_tail(&asoc->asocs, &ep->asocs); | ||
171 | |||
172 | /* Increment the backlog value for a TCP-style listening socket. */ | ||
173 | if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) | ||
174 | sk->sk_ack_backlog++; | ||
175 | } | ||
176 | |||
177 | /* Free the endpoint structure. Delay cleanup until | ||
178 | * all users have dropped their references to this structure. | ||
179 | */ | ||
180 | void sctp_endpoint_free(struct sctp_endpoint *ep) | ||
181 | { | ||
182 | ep->base.dead = 1; | ||
183 | sctp_endpoint_put(ep); | ||
184 | } | ||
185 | |||
186 | /* Final destructor for endpoint. */ | ||
187 | static void sctp_endpoint_destroy(struct sctp_endpoint *ep) | ||
188 | { | ||
189 | SCTP_ASSERT(ep->base.dead, "Endpoint is not dead", return); | ||
190 | |||
191 | ep->base.sk->sk_state = SCTP_SS_CLOSED; | ||
192 | |||
193 | /* Unlink this endpoint, so we can't find it again! */ | ||
194 | sctp_unhash_endpoint(ep); | ||
195 | |||
196 | /* Free up the HMAC transform. */ | ||
197 | if (sctp_sk(ep->base.sk)->hmac) | ||
198 | sctp_crypto_free_tfm(sctp_sk(ep->base.sk)->hmac); | ||
199 | |||
200 | /* Cleanup. */ | ||
201 | sctp_inq_free(&ep->base.inqueue); | ||
202 | sctp_bind_addr_free(&ep->base.bind_addr); | ||
203 | |||
204 | /* Remove and free the port */ | ||
205 | if (sctp_sk(ep->base.sk)->bind_hash) | ||
206 | sctp_put_port(ep->base.sk); | ||
207 | |||
208 | /* Give up our hold on the sock. */ | ||
209 | if (ep->base.sk) | ||
210 | sock_put(ep->base.sk); | ||
211 | |||
212 | /* Finally, free up our memory. */ | ||
213 | if (ep->base.malloced) { | ||
214 | kfree(ep); | ||
215 | SCTP_DBG_OBJCNT_DEC(ep); | ||
216 | } | ||
217 | } | ||
218 | |||
219 | /* Hold a reference to an endpoint. */ | ||
220 | void sctp_endpoint_hold(struct sctp_endpoint *ep) | ||
221 | { | ||
222 | atomic_inc(&ep->base.refcnt); | ||
223 | } | ||
224 | |||
225 | /* Release a reference to an endpoint and clean up if there are | ||
226 | * no more references. | ||
227 | */ | ||
228 | void sctp_endpoint_put(struct sctp_endpoint *ep) | ||
229 | { | ||
230 | if (atomic_dec_and_test(&ep->base.refcnt)) | ||
231 | sctp_endpoint_destroy(ep); | ||
232 | } | ||
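Note the split above: sctp_endpoint_free() only marks the endpoint dead and drops the caller's reference, while the real teardown runs from sctp_endpoint_put() once the last reference is gone. A minimal sketch of that hold/put pattern using C11 atomics; the object and function names are placeholders, not the kernel's refcounting primitives:

#include <stdatomic.h>
#include <stdlib.h>
#include <stdio.h>

struct obj {
	atomic_int refcnt;
	int dead;
};

static struct obj *obj_new(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	if (o)
		atomic_init(&o->refcnt, 1);
	return o;
}

/* Take an additional reference. */
static void obj_hold(struct obj *o)
{
	atomic_fetch_add(&o->refcnt, 1);
}

/* Final destructor: runs only after the last reference is dropped. */
static void obj_destroy(struct obj *o)
{
	printf("destroying %p\n", (void *)o);
	free(o);
}

/* Drop a reference; destroy when the count hits zero. */
static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcnt, 1) == 1)
		obj_destroy(o);
}

/* "free" just marks the object dead and drops the creator's reference. */
static void obj_free(struct obj *o)
{
	o->dead = 1;
	obj_put(o);
}

int main(void)
{
	struct obj *o = obj_new();

	if (!o)
		return 1;
	obj_hold(o);	/* another user still holds a reference	  */
	obj_free(o);	/* creator is done; object stays alive	  */
	obj_put(o);	/* last user lets go; object is destroyed */
	return 0;
}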
233 | |||
234 | /* Is this the endpoint we are looking for? */ | ||
235 | struct sctp_endpoint *sctp_endpoint_is_match(struct sctp_endpoint *ep, | ||
236 | const union sctp_addr *laddr) | ||
237 | { | ||
238 | struct sctp_endpoint *retval; | ||
239 | |||
240 | sctp_read_lock(&ep->base.addr_lock); | ||
241 | if (ep->base.bind_addr.port == laddr->v4.sin_port) { | ||
242 | if (sctp_bind_addr_match(&ep->base.bind_addr, laddr, | ||
243 | sctp_sk(ep->base.sk))) { | ||
244 | retval = ep; | ||
245 | goto out; | ||
246 | } | ||
247 | } | ||
248 | |||
249 | retval = NULL; | ||
250 | |||
251 | out: | ||
252 | sctp_read_unlock(&ep->base.addr_lock); | ||
253 | return retval; | ||
254 | } | ||
255 | |||
256 | /* Find the association that goes with this chunk. | ||
257 | * We do a linear search of the associations for this endpoint. | ||
258 | * We return the matching transport address too. | ||
259 | */ | ||
260 | static struct sctp_association *__sctp_endpoint_lookup_assoc( | ||
261 | const struct sctp_endpoint *ep, | ||
262 | const union sctp_addr *paddr, | ||
263 | struct sctp_transport **transport) | ||
264 | { | ||
265 | int rport; | ||
266 | struct sctp_association *asoc; | ||
267 | struct list_head *pos; | ||
268 | |||
269 | rport = paddr->v4.sin_port; | ||
270 | |||
271 | list_for_each(pos, &ep->asocs) { | ||
272 | asoc = list_entry(pos, struct sctp_association, asocs); | ||
273 | if (rport == asoc->peer.port) { | ||
274 | sctp_read_lock(&asoc->base.addr_lock); | ||
275 | *transport = sctp_assoc_lookup_paddr(asoc, paddr); | ||
276 | sctp_read_unlock(&asoc->base.addr_lock); | ||
277 | |||
278 | if (*transport) | ||
279 | return asoc; | ||
280 | } | ||
281 | } | ||
282 | |||
283 | *transport = NULL; | ||
284 | return NULL; | ||
285 | } | ||
286 | |||
287 | /* Lookup association on an endpoint based on a peer address. BH-safe. */ | ||
288 | struct sctp_association *sctp_endpoint_lookup_assoc( | ||
289 | const struct sctp_endpoint *ep, | ||
290 | const union sctp_addr *paddr, | ||
291 | struct sctp_transport **transport) | ||
292 | { | ||
293 | struct sctp_association *asoc; | ||
294 | |||
295 | sctp_local_bh_disable(); | ||
296 | asoc = __sctp_endpoint_lookup_assoc(ep, paddr, transport); | ||
297 | sctp_local_bh_enable(); | ||
298 | |||
299 | return asoc; | ||
300 | } | ||
301 | |||
302 | /* Look for any peeled off association from the endpoint that matches the | ||
303 | * given peer address. | ||
304 | */ | ||
305 | int sctp_endpoint_is_peeled_off(struct sctp_endpoint *ep, | ||
306 | const union sctp_addr *paddr) | ||
307 | { | ||
308 | struct list_head *pos; | ||
309 | struct sctp_sockaddr_entry *addr; | ||
310 | struct sctp_bind_addr *bp; | ||
311 | |||
312 | sctp_read_lock(&ep->base.addr_lock); | ||
313 | bp = &ep->base.bind_addr; | ||
314 | list_for_each(pos, &bp->address_list) { | ||
315 | addr = list_entry(pos, struct sctp_sockaddr_entry, list); | ||
316 | if (sctp_has_association(&addr->a, paddr)) { | ||
317 | sctp_read_unlock(&ep->base.addr_lock); | ||
318 | return 1; | ||
319 | } | ||
320 | } | ||
321 | sctp_read_unlock(&ep->base.addr_lock); | ||
322 | |||
323 | return 0; | ||
324 | } | ||
325 | |||
326 | /* Do delayed input processing. This is scheduled by sctp_rcv(). | ||
327 | * This may be called in BH or process context. | ||
328 | */ | ||
329 | static void sctp_endpoint_bh_rcv(struct sctp_endpoint *ep) | ||
330 | { | ||
331 | struct sctp_association *asoc; | ||
332 | struct sock *sk; | ||
333 | struct sctp_transport *transport; | ||
334 | struct sctp_chunk *chunk; | ||
335 | struct sctp_inq *inqueue; | ||
336 | sctp_subtype_t subtype; | ||
337 | sctp_state_t state; | ||
338 | int error = 0; | ||
339 | |||
340 | if (ep->base.dead) | ||
341 | return; | ||
342 | |||
343 | asoc = NULL; | ||
344 | inqueue = &ep->base.inqueue; | ||
345 | sk = ep->base.sk; | ||
346 | |||
347 | while (NULL != (chunk = sctp_inq_pop(inqueue))) { | ||
348 | subtype = SCTP_ST_CHUNK(chunk->chunk_hdr->type); | ||
349 | |||
350 | /* We might have grown an association since last we | ||
351 | * looked, so try again. | ||
352 | * | ||
353 | * This happens when we've just processed our | ||
354 | * COOKIE-ECHO chunk. | ||
355 | */ | ||
356 | if (NULL == chunk->asoc) { | ||
357 | asoc = sctp_endpoint_lookup_assoc(ep, | ||
358 | sctp_source(chunk), | ||
359 | &transport); | ||
360 | chunk->asoc = asoc; | ||
361 | chunk->transport = transport; | ||
362 | } | ||
363 | |||
364 | state = asoc ? asoc->state : SCTP_STATE_CLOSED; | ||
365 | |||
366 | /* Remember where the last DATA chunk came from so we | ||
367 | * know where to send the SACK. | ||
368 | */ | ||
369 | if (asoc && sctp_chunk_is_data(chunk)) | ||
370 | asoc->peer.last_data_from = chunk->transport; | ||
371 | else | ||
372 | SCTP_INC_STATS(SCTP_MIB_INCTRLCHUNKS); | ||
373 | |||
374 | if (chunk->transport) | ||
375 | chunk->transport->last_time_heard = jiffies; | ||
376 | |||
377 | error = sctp_do_sm(SCTP_EVENT_T_CHUNK, subtype, state, | ||
378 | ep, asoc, chunk, GFP_ATOMIC); | ||
379 | |||
380 | if (error && chunk) | ||
381 | chunk->pdiscard = 1; | ||
382 | |||
383 | /* Check to see if the endpoint is freed in response to | ||
384 | * the incoming chunk. If so, get out of the while loop. | ||
385 | */ | ||
386 | if (!sctp_sk(sk)->ep) | ||
387 | break; | ||
388 | } | ||
389 | } | ||
diff --git a/net/sctp/input.c b/net/sctp/input.c new file mode 100644 index 000000000000..b719a77d66b4 --- /dev/null +++ b/net/sctp/input.c | |||
@@ -0,0 +1,913 @@ | |||
1 | /* SCTP kernel reference Implementation | ||
2 | * Copyright (c) 1999-2000 Cisco, Inc. | ||
3 | * Copyright (c) 1999-2001 Motorola, Inc. | ||
4 | * Copyright (c) 2001-2003 International Business Machines, Corp. | ||
5 | * Copyright (c) 2001 Intel Corp. | ||
6 | * Copyright (c) 2001 Nokia, Inc. | ||
7 | * Copyright (c) 2001 La Monte H.P. Yarroll | ||
8 | * | ||
9 | * This file is part of the SCTP kernel reference Implementation | ||
10 | * | ||
11 | * These functions handle all input from the IP layer into SCTP. | ||
12 | * | ||
13 | * The SCTP reference implementation is free software; | ||
14 | * you can redistribute it and/or modify it under the terms of | ||
15 | * the GNU General Public License as published by | ||
16 | * the Free Software Foundation; either version 2, or (at your option) | ||
17 | * any later version. | ||
18 | * | ||
19 | * The SCTP reference implementation is distributed in the hope that it | ||
20 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
21 | * ************************ | ||
22 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
23 | * See the GNU General Public License for more details. | ||
24 | * | ||
25 | * You should have received a copy of the GNU General Public License | ||
26 | * along with GNU CC; see the file COPYING. If not, write to | ||
27 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
28 | * Boston, MA 02111-1307, USA. | ||
29 | * | ||
30 | * Please send any bug reports or fixes you make to the | ||
31 | * email address(es): | ||
32 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
33 | * | ||
34 | * Or submit a bug report through the following website: | ||
35 | * http://www.sf.net/projects/lksctp | ||
36 | * | ||
37 | * Written or modified by: | ||
38 | * La Monte H.P. Yarroll <piggy@acm.org> | ||
39 | * Karl Knutson <karl@athena.chicago.il.us> | ||
40 | * Xingang Guo <xingang.guo@intel.com> | ||
41 | * Jon Grimm <jgrimm@us.ibm.com> | ||
42 | * Hui Huang <hui.huang@nokia.com> | ||
43 | * Daisy Chang <daisyc@us.ibm.com> | ||
44 | * Sridhar Samudrala <sri@us.ibm.com> | ||
45 | * Ardelle Fan <ardelle.fan@intel.com> | ||
46 | * | ||
47 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
48 | * be incorporated into the next SCTP release. | ||
49 | */ | ||
50 | |||
51 | #include <linux/types.h> | ||
52 | #include <linux/list.h> /* For struct list_head */ | ||
53 | #include <linux/socket.h> | ||
54 | #include <linux/ip.h> | ||
55 | #include <linux/time.h> /* For struct timeval */ | ||
56 | #include <net/ip.h> | ||
57 | #include <net/icmp.h> | ||
58 | #include <net/snmp.h> | ||
59 | #include <net/sock.h> | ||
60 | #include <net/xfrm.h> | ||
61 | #include <net/sctp/sctp.h> | ||
62 | #include <net/sctp/sm.h> | ||
63 | |||
64 | /* Forward declarations for internal helpers. */ | ||
65 | static int sctp_rcv_ootb(struct sk_buff *); | ||
66 | static struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb, | ||
67 | const union sctp_addr *laddr, | ||
68 | const union sctp_addr *paddr, | ||
69 | struct sctp_transport **transportp); | ||
70 | static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr); | ||
71 | static struct sctp_association *__sctp_lookup_association( | ||
72 | const union sctp_addr *local, | ||
73 | const union sctp_addr *peer, | ||
74 | struct sctp_transport **pt); | ||
75 | |||
76 | |||
77 | /* Calculate the SCTP checksum of an SCTP packet. */ | ||
78 | static inline int sctp_rcv_checksum(struct sk_buff *skb) | ||
79 | { | ||
80 | struct sctphdr *sh; | ||
81 | __u32 cmp, val; | ||
82 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | ||
83 | |||
84 | sh = (struct sctphdr *) skb->h.raw; | ||
85 | cmp = ntohl(sh->checksum); | ||
86 | |||
87 | val = sctp_start_cksum((__u8 *)sh, skb_headlen(skb)); | ||
88 | |||
89 | for (; list; list = list->next) | ||
90 | val = sctp_update_cksum((__u8 *)list->data, skb_headlen(list), | ||
91 | val); | ||
92 | |||
93 | val = sctp_end_cksum(val); | ||
94 | |||
95 | if (val != cmp) { | ||
96 | /* CRC failure, dump it. */ | ||
97 | SCTP_INC_STATS_BH(SCTP_MIB_CHECKSUMERRORS); | ||
98 | return -1; | ||
99 | } | ||
100 | return 0; | ||
101 | } | ||
102 | |||
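The sctp_start_cksum()/sctp_update_cksum()/sctp_end_cksum() helpers used above come from net/sctp/crc32c.c (see the diffstat) and accumulate a CRC-32c over the SCTP common header plus every fragment on the skb's frag_list. As a hedged illustration of the kind of checksum being computed, not the kernel's table-driven implementation, a bitwise CRC-32c over a flat buffer looks roughly like this:

/* Illustrative sketch only, not part of this file: bitwise CRC-32c
 * (Castagnoli polynomial, reflected form 0x82F63B78).  The in-tree
 * crc32c.c computes the same checksum with lookup tables.
 */
static __u32 crc32c_bitwise_example(const __u8 *buf, int len)
{
	__u32 crc = ~(__u32)0;
	int i, bit;

	for (i = 0; i < len; i++) {
		crc ^= buf[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0x82F63B78U : 0);
	}
	return ~crc;
}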
103 | /* The free routine for skbuffs that sctp receives */ | ||
104 | static void sctp_rfree(struct sk_buff *skb) | ||
105 | { | ||
106 | atomic_sub(sizeof(struct sctp_chunk),&skb->sk->sk_rmem_alloc); | ||
107 | sock_rfree(skb); | ||
108 | } | ||
109 | |||
110 | /* The ownership wrapper routine to do receive buffer accounting */ | ||
111 | static void sctp_rcv_set_owner_r(struct sk_buff *skb, struct sock *sk) | ||
112 | { | ||
113 | skb_set_owner_r(skb,sk); | ||
114 | skb->destructor = sctp_rfree; | ||
115 | atomic_add(sizeof(struct sctp_chunk),&sk->sk_rmem_alloc); | ||
116 | } | ||
117 | |||
118 | /* | ||
119 | * This is the routine which IP calls when receiving an SCTP packet. | ||
120 | */ | ||
121 | int sctp_rcv(struct sk_buff *skb) | ||
122 | { | ||
123 | struct sock *sk; | ||
124 | struct sctp_association *asoc; | ||
125 | struct sctp_endpoint *ep = NULL; | ||
126 | struct sctp_ep_common *rcvr; | ||
127 | struct sctp_transport *transport = NULL; | ||
128 | struct sctp_chunk *chunk; | ||
129 | struct sctphdr *sh; | ||
130 | union sctp_addr src; | ||
131 | union sctp_addr dest; | ||
132 | int family; | ||
133 | struct sctp_af *af; | ||
134 | int ret = 0; | ||
135 | |||
136 | if (skb->pkt_type!=PACKET_HOST) | ||
137 | goto discard_it; | ||
138 | |||
139 | SCTP_INC_STATS_BH(SCTP_MIB_INSCTPPACKS); | ||
140 | |||
141 | sh = (struct sctphdr *) skb->h.raw; | ||
142 | |||
143 | /* Pull up the IP and SCTP headers. */ | ||
144 | __skb_pull(skb, skb->h.raw - skb->data); | ||
145 | if (skb->len < sizeof(struct sctphdr)) | ||
146 | goto discard_it; | ||
147 | if (sctp_rcv_checksum(skb) < 0) | ||
148 | goto discard_it; | ||
149 | |||
150 | skb_pull(skb, sizeof(struct sctphdr)); | ||
151 | |||
152 | /* Make sure we at least have chunk headers worth of data left. */ | ||
153 | if (skb->len < sizeof(struct sctp_chunkhdr)) | ||
154 | goto discard_it; | ||
155 | |||
156 | family = ipver2af(skb->nh.iph->version); | ||
157 | af = sctp_get_af_specific(family); | ||
158 | if (unlikely(!af)) | ||
159 | goto discard_it; | ||
160 | |||
161 | /* Initialize local addresses for lookups. */ | ||
162 | af->from_skb(&src, skb, 1); | ||
163 | af->from_skb(&dest, skb, 0); | ||
164 | |||
165 | /* If the packet is to or from a non-unicast address, | ||
166 | * silently discard the packet. | ||
167 | * | ||
168 | * This is not clearly defined in the RFC except in section | ||
169 | * 8.4 - OOTB handling. However, based on the book "Stream Control | ||
170 | * Transmission Protocol" 2.1, "It is important to note that the | ||
171 | * IP address of an SCTP transport address must be a routable | ||
172 | * unicast address. In other words, IP multicast addresses and | ||
173 | * IP broadcast addresses cannot be used in an SCTP transport | ||
174 | * address." | ||
175 | */ | ||
176 | if (!af->addr_valid(&src, NULL) || !af->addr_valid(&dest, NULL)) | ||
177 | goto discard_it; | ||
178 | |||
179 | asoc = __sctp_rcv_lookup(skb, &src, &dest, &transport); | ||
180 | |||
181 | /* | ||
182 | * RFC 2960, 8.4 - Handle "Out of the blue" Packets. | ||
183 | * An SCTP packet is called an "out of the blue" (OOTB) | ||
184 | * packet if it is correctly formed, i.e., passed the | ||
185 | * receiver's checksum check, but the receiver is not | ||
186 | * able to identify the association to which this | ||
187 | * packet belongs. | ||
188 | */ | ||
189 | if (!asoc) { | ||
190 | ep = __sctp_rcv_lookup_endpoint(&dest); | ||
191 | if (sctp_rcv_ootb(skb)) { | ||
192 | SCTP_INC_STATS_BH(SCTP_MIB_OUTOFBLUES); | ||
193 | goto discard_release; | ||
194 | } | ||
195 | } | ||
196 | |||
197 | /* Retrieve the common input handling substructure. */ | ||
198 | rcvr = asoc ? &asoc->base : &ep->base; | ||
199 | sk = rcvr->sk; | ||
200 | |||
201 | if ((sk) && (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)) { | ||
202 | goto discard_release; | ||
203 | } | ||
204 | |||
205 | |||
206 | /* SCTP seems to always need a timestamp right now (FIXME) */ | ||
207 | if (skb->stamp.tv_sec == 0) { | ||
208 | do_gettimeofday(&skb->stamp); | ||
209 | sock_enable_timestamp(sk); | ||
210 | } | ||
211 | |||
212 | if (!xfrm_policy_check(sk, XFRM_POLICY_IN, skb, family)) | ||
213 | goto discard_release; | ||
214 | |||
215 | ret = sk_filter(sk, skb, 1); | ||
216 | if (ret) | ||
217 | goto discard_release; | ||
218 | |||
219 | /* Create an SCTP packet structure. */ | ||
220 | chunk = sctp_chunkify(skb, asoc, sk); | ||
221 | if (!chunk) { | ||
222 | ret = -ENOMEM; | ||
223 | goto discard_release; | ||
224 | } | ||
225 | |||
226 | sctp_rcv_set_owner_r(skb,sk); | ||
227 | |||
228 | /* Remember what endpoint is to handle this packet. */ | ||
229 | chunk->rcvr = rcvr; | ||
230 | |||
231 | /* Remember the SCTP header. */ | ||
232 | chunk->sctp_hdr = sh; | ||
233 | |||
234 | /* Set the source and destination addresses of the incoming chunk. */ | ||
235 | sctp_init_addrs(chunk, &src, &dest); | ||
236 | |||
237 | /* Remember where we came from. */ | ||
238 | chunk->transport = transport; | ||
239 | |||
240 | /* Acquire access to the sock lock. Note: We are safe from other | ||
241 | * bottom halves on this lock, but a user may be in the lock too, | ||
242 | * so check if it is busy. | ||
243 | */ | ||
244 | sctp_bh_lock_sock(sk); | ||
245 | |||
246 | if (sock_owned_by_user(sk)) | ||
247 | sk_add_backlog(sk, (struct sk_buff *) chunk); | ||
248 | else | ||
249 | sctp_backlog_rcv(sk, (struct sk_buff *) chunk); | ||
250 | |||
251 | /* Release the sock and any reference counts we took in the | ||
252 | * lookup calls. | ||
253 | */ | ||
254 | sctp_bh_unlock_sock(sk); | ||
255 | if (asoc) | ||
256 | sctp_association_put(asoc); | ||
257 | else | ||
258 | sctp_endpoint_put(ep); | ||
259 | sock_put(sk); | ||
260 | return ret; | ||
261 | |||
262 | discard_it: | ||
263 | kfree_skb(skb); | ||
264 | return ret; | ||
265 | |||
266 | discard_release: | ||
267 | /* Release any structures we may be holding. */ | ||
268 | if (asoc) { | ||
269 | sock_put(asoc->base.sk); | ||
270 | sctp_association_put(asoc); | ||
271 | } else { | ||
272 | sock_put(ep->base.sk); | ||
273 | sctp_endpoint_put(ep); | ||
274 | } | ||
275 | |||
276 | goto discard_it; | ||
277 | } | ||
278 | |||
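The tail of sctp_rcv() above follows the standard socket "process now or defer" idiom: if a user context currently owns the sock lock, the chunk is parked on the backlog and replayed later from release_sock(); otherwise it is handed to sctp_backlog_rcv() straight away. A stripped-down sketch of that idiom (illustrative only; sctp_bh_lock_sock()/sctp_bh_unlock_sock() are wrappers around the bh_lock_sock() primitives used here):

/* Illustrative sketch of the lock/backlog idiom in sctp_rcv(); not
 * part of this file.
 */
static void deliver_or_backlog_example(struct sock *sk, struct sk_buff *skb)
{
	bh_lock_sock(sk);
	if (sock_owned_by_user(sk))
		sk_add_backlog(sk, skb);	/* replayed from release_sock() */
	else
		sctp_backlog_rcv(sk, skb);	/* handled in BH context now */
	bh_unlock_sock(sk);
}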
279 | /* Handle second half of inbound skb processing. If the sock was busy, | ||
280 | * we may need to delay processing until later when the sock is | ||
281 | * released (on the backlog). If not busy, we call this routine | ||
282 | * directly from the bottom half. | ||
283 | */ | ||
284 | int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb) | ||
285 | { | ||
286 | struct sctp_chunk *chunk; | ||
287 | struct sctp_inq *inqueue; | ||
288 | |||
289 | /* One day chunk will live inside the skb, but for | ||
290 | * now this works. | ||
291 | */ | ||
292 | chunk = (struct sctp_chunk *) skb; | ||
293 | inqueue = &chunk->rcvr->inqueue; | ||
294 | |||
295 | sctp_inq_push(inqueue, chunk); | ||
296 | return 0; | ||
297 | } | ||
298 | |||
299 | /* Handle icmp frag needed error. */ | ||
300 | void sctp_icmp_frag_needed(struct sock *sk, struct sctp_association *asoc, | ||
301 | struct sctp_transport *t, __u32 pmtu) | ||
302 | { | ||
303 | if (unlikely(pmtu < SCTP_DEFAULT_MINSEGMENT)) { | ||
304 | printk(KERN_WARNING "%s: Reported pmtu %d too low, " | ||
305 | "using default minimum of %d\n", __FUNCTION__, pmtu, | ||
306 | SCTP_DEFAULT_MINSEGMENT); | ||
307 | pmtu = SCTP_DEFAULT_MINSEGMENT; | ||
308 | } | ||
309 | |||
310 | if (!sock_owned_by_user(sk) && t && (t->pmtu != pmtu)) { | ||
311 | t->pmtu = pmtu; | ||
312 | sctp_assoc_sync_pmtu(asoc); | ||
313 | sctp_retransmit(&asoc->outqueue, t, SCTP_RTXR_PMTUD); | ||
314 | } | ||
315 | } | ||
316 | |||
317 | /* | ||
318 | * SCTP Implementer's Guide, 2.37 ICMP handling procedures | ||
319 | * | ||
320 | * ICMP8) If the ICMP code is an "Unrecognized next header type encountered" | ||
321 | * or a "Protocol Unreachable" treat this message as an abort | ||
322 | * with the T bit set. | ||
323 | * | ||
324 | * This function sends an event to the state machine, which will abort the | ||
325 | * association. | ||
326 | * | ||
327 | */ | ||
328 | void sctp_icmp_proto_unreachable(struct sock *sk, | ||
329 | struct sctp_endpoint *ep, | ||
330 | struct sctp_association *asoc, | ||
331 | struct sctp_transport *t) | ||
332 | { | ||
333 | SCTP_DEBUG_PRINTK("%s\n", __FUNCTION__); | ||
334 | |||
335 | sctp_do_sm(SCTP_EVENT_T_OTHER, | ||
336 | SCTP_ST_OTHER(SCTP_EVENT_ICMP_PROTO_UNREACH), | ||
337 | asoc->state, asoc->ep, asoc, NULL, | ||
338 | GFP_ATOMIC); | ||
339 | |||
340 | } | ||
341 | |||
342 | /* Common lookup code for icmp/icmpv6 error handler. */ | ||
343 | struct sock *sctp_err_lookup(int family, struct sk_buff *skb, | ||
344 | struct sctphdr *sctphdr, | ||
345 | struct sctp_endpoint **epp, | ||
346 | struct sctp_association **app, | ||
347 | struct sctp_transport **tpp) | ||
348 | { | ||
349 | union sctp_addr saddr; | ||
350 | union sctp_addr daddr; | ||
351 | struct sctp_af *af; | ||
352 | struct sock *sk = NULL; | ||
353 | struct sctp_endpoint *ep = NULL; | ||
354 | struct sctp_association *asoc = NULL; | ||
355 | struct sctp_transport *transport = NULL; | ||
356 | |||
357 | *app = NULL; *epp = NULL; *tpp = NULL; | ||
358 | |||
359 | af = sctp_get_af_specific(family); | ||
360 | if (unlikely(!af)) { | ||
361 | return NULL; | ||
362 | } | ||
363 | |||
364 | /* Initialize local addresses for lookups. */ | ||
365 | af->from_skb(&saddr, skb, 1); | ||
366 | af->from_skb(&daddr, skb, 0); | ||
367 | |||
368 | /* Look for an association that matches the incoming ICMP error | ||
369 | * packet. | ||
370 | */ | ||
371 | asoc = __sctp_lookup_association(&saddr, &daddr, &transport); | ||
372 | if (!asoc) { | ||
373 | /* If there is no matching association, see if it matches any | ||
374 | * endpoint. This may happen for an ICMP error generated in | ||
375 | * response to an INIT_ACK. | ||
376 | */ | ||
377 | ep = __sctp_rcv_lookup_endpoint(&daddr); | ||
378 | if (!ep) { | ||
379 | return NULL; | ||
380 | } | ||
381 | } | ||
382 | |||
383 | if (asoc) { | ||
384 | sk = asoc->base.sk; | ||
385 | |||
386 | if (ntohl(sctphdr->vtag) != asoc->c.peer_vtag) { | ||
387 | ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); | ||
388 | goto out; | ||
389 | } | ||
390 | } else | ||
391 | sk = ep->base.sk; | ||
392 | |||
393 | sctp_bh_lock_sock(sk); | ||
394 | |||
395 | /* If too many ICMPs get dropped on busy | ||
396 | * servers this needs to be solved differently. | ||
397 | */ | ||
398 | if (sock_owned_by_user(sk)) | ||
399 | NET_INC_STATS_BH(LINUX_MIB_LOCKDROPPEDICMPS); | ||
400 | |||
401 | *epp = ep; | ||
402 | *app = asoc; | ||
403 | *tpp = transport; | ||
404 | return sk; | ||
405 | |||
406 | out: | ||
407 | sock_put(sk); | ||
408 | if (asoc) | ||
409 | sctp_association_put(asoc); | ||
410 | if (ep) | ||
411 | sctp_endpoint_put(ep); | ||
412 | return NULL; | ||
413 | } | ||
414 | |||
415 | /* Common cleanup code for icmp/icmpv6 error handler. */ | ||
416 | void sctp_err_finish(struct sock *sk, struct sctp_endpoint *ep, | ||
417 | struct sctp_association *asoc) | ||
418 | { | ||
419 | sctp_bh_unlock_sock(sk); | ||
420 | sock_put(sk); | ||
421 | if (asoc) | ||
422 | sctp_association_put(asoc); | ||
423 | if (ep) | ||
424 | sctp_endpoint_put(ep); | ||
425 | } | ||
426 | |||
427 | /* | ||
428 | * This routine is called by the ICMP module when it gets some | ||
429 | * sort of error condition. If err < 0 then the socket should | ||
430 | * be closed and the error returned to the user. If err > 0 | ||
431 | * it's just the icmp type << 8 | icmp code. After adjustment | ||
432 | * header points to the first 8 bytes of the sctp header. We need | ||
433 | * to find the appropriate port. | ||
434 | * | ||
435 | * The locking strategy used here is very "optimistic". When | ||
436 | * someone else accesses the socket the ICMP is just dropped | ||
437 | * and for some paths there is no check at all. | ||
438 | * A more general error queue to queue errors for later handling | ||
439 | * is probably better. | ||
440 | * | ||
441 | */ | ||
442 | void sctp_v4_err(struct sk_buff *skb, __u32 info) | ||
443 | { | ||
444 | struct iphdr *iph = (struct iphdr *)skb->data; | ||
445 | struct sctphdr *sh = (struct sctphdr *)(skb->data + (iph->ihl <<2)); | ||
446 | int type = skb->h.icmph->type; | ||
447 | int code = skb->h.icmph->code; | ||
448 | struct sock *sk; | ||
449 | struct sctp_endpoint *ep; | ||
450 | struct sctp_association *asoc; | ||
451 | struct sctp_transport *transport; | ||
452 | struct inet_sock *inet; | ||
453 | char *saveip, *savesctp; | ||
454 | int err; | ||
455 | |||
456 | if (skb->len < ((iph->ihl << 2) + 8)) { | ||
457 | ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); | ||
458 | return; | ||
459 | } | ||
460 | |||
461 | /* Fix up skb to look at the embedded net header. */ | ||
462 | saveip = skb->nh.raw; | ||
463 | savesctp = skb->h.raw; | ||
464 | skb->nh.iph = iph; | ||
465 | skb->h.raw = (char *)sh; | ||
466 | sk = sctp_err_lookup(AF_INET, skb, sh, &ep, &asoc, &transport); | ||
467 | /* Put back the original pointers. */ | ||
468 | skb->nh.raw = saveip; | ||
469 | skb->h.raw = savesctp; | ||
470 | if (!sk) { | ||
471 | ICMP_INC_STATS_BH(ICMP_MIB_INERRORS); | ||
472 | return; | ||
473 | } | ||
474 | /* Warning: The sock lock is held. Remember to call | ||
475 | * sctp_err_finish! | ||
476 | */ | ||
477 | |||
478 | switch (type) { | ||
479 | case ICMP_PARAMETERPROB: | ||
480 | err = EPROTO; | ||
481 | break; | ||
482 | case ICMP_DEST_UNREACH: | ||
483 | if (code > NR_ICMP_UNREACH) | ||
484 | goto out_unlock; | ||
485 | |||
486 | /* PMTU discovery (RFC1191) */ | ||
487 | if (ICMP_FRAG_NEEDED == code) { | ||
488 | sctp_icmp_frag_needed(sk, asoc, transport, info); | ||
489 | goto out_unlock; | ||
490 | } | ||
491 | else { | ||
492 | if (ICMP_PROT_UNREACH == code) { | ||
493 | sctp_icmp_proto_unreachable(sk, ep, asoc, | ||
494 | transport); | ||
495 | goto out_unlock; | ||
496 | } | ||
497 | } | ||
498 | err = icmp_err_convert[code].errno; | ||
499 | break; | ||
500 | case ICMP_TIME_EXCEEDED: | ||
501 | /* Ignore any time exceeded errors due to fragment reassembly | ||
502 | * timeouts. | ||
503 | */ | ||
504 | if (ICMP_EXC_FRAGTIME == code) | ||
505 | goto out_unlock; | ||
506 | |||
507 | err = EHOSTUNREACH; | ||
508 | break; | ||
509 | default: | ||
510 | goto out_unlock; | ||
511 | } | ||
512 | |||
513 | inet = inet_sk(sk); | ||
514 | if (!sock_owned_by_user(sk) && inet->recverr) { | ||
515 | sk->sk_err = err; | ||
516 | sk->sk_error_report(sk); | ||
517 | } else { /* Only an error on timeout */ | ||
518 | sk->sk_err_soft = err; | ||
519 | } | ||
520 | |||
521 | out_unlock: | ||
522 | sctp_err_finish(sk, ep, asoc); | ||
523 | } | ||
524 | |||
525 | /* | ||
526 | * RFC 2960, 8.4 - Handle "Out of the blue" Packets. | ||
527 | * | ||
528 | * This function scans all the chunks in the OOTB packet to determine if | ||
529 | * the packet should be discarded right away. If a response might be needed | ||
530 | * for this packet, or, if further processing is possible, the packet will | ||
531 | * be queued to a proper inqueue for the next phase of handling. | ||
532 | * | ||
533 | * Output: | ||
534 | * Return 0 - If further processing is needed. | ||
535 | * Return 1 - If the packet can be discarded right away. | ||
536 | */ | ||
537 | int sctp_rcv_ootb(struct sk_buff *skb) | ||
538 | { | ||
539 | sctp_chunkhdr_t *ch; | ||
540 | __u8 *ch_end; | ||
541 | sctp_errhdr_t *err; | ||
542 | |||
543 | ch = (sctp_chunkhdr_t *) skb->data; | ||
544 | ch_end = ((__u8 *) ch) + WORD_ROUND(ntohs(ch->length)); | ||
545 | |||
546 | /* Scan through all the chunks in the packet. */ | ||
547 | while (ch_end > (__u8 *)ch && ch_end < skb->tail) { | ||
548 | |||
549 | /* RFC 8.4, 2) If the OOTB packet contains an ABORT chunk, the | ||
550 | * receiver MUST silently discard the OOTB packet and take no | ||
551 | * further action. | ||
552 | */ | ||
553 | if (SCTP_CID_ABORT == ch->type) | ||
554 | goto discard; | ||
555 | |||
556 | /* RFC 8.4, 6) If the packet contains a SHUTDOWN COMPLETE | ||
557 | * chunk, the receiver should silently discard the packet | ||
558 | * and take no further action. | ||
559 | */ | ||
560 | if (SCTP_CID_SHUTDOWN_COMPLETE == ch->type) | ||
561 | goto discard; | ||
562 | |||
563 | /* RFC 8.4, 7) If the packet contains a "Stale cookie" ERROR | ||
564 | * or a COOKIE ACK, the SCTP packet should be silently | ||
565 | * discarded. | ||
566 | */ | ||
567 | if (SCTP_CID_COOKIE_ACK == ch->type) | ||
568 | goto discard; | ||
569 | |||
570 | if (SCTP_CID_ERROR == ch->type) { | ||
571 | sctp_walk_errors(err, ch) { | ||
572 | if (SCTP_ERROR_STALE_COOKIE == err->cause) | ||
573 | goto discard; | ||
574 | } | ||
575 | } | ||
576 | |||
577 | ch = (sctp_chunkhdr_t *) ch_end; | ||
578 | ch_end = ((__u8 *) ch) + WORD_ROUND(ntohs(ch->length)); | ||
579 | } | ||
580 | |||
581 | return 0; | ||
582 | |||
583 | discard: | ||
584 | return 1; | ||
585 | } | ||
586 | |||
587 | /* Insert endpoint into the hash table. */ | ||
588 | static void __sctp_hash_endpoint(struct sctp_endpoint *ep) | ||
589 | { | ||
590 | struct sctp_ep_common **epp; | ||
591 | struct sctp_ep_common *epb; | ||
592 | struct sctp_hashbucket *head; | ||
593 | |||
594 | epb = &ep->base; | ||
595 | |||
596 | epb->hashent = sctp_ep_hashfn(epb->bind_addr.port); | ||
597 | head = &sctp_ep_hashtable[epb->hashent]; | ||
598 | |||
599 | sctp_write_lock(&head->lock); | ||
600 | epp = &head->chain; | ||
601 | epb->next = *epp; | ||
602 | if (epb->next) | ||
603 | (*epp)->pprev = &epb->next; | ||
604 | *epp = epb; | ||
605 | epb->pprev = epp; | ||
606 | sctp_write_unlock(&head->lock); | ||
607 | } | ||
608 | |||
609 | /* Add an endpoint to the hash. Local BH-safe. */ | ||
610 | void sctp_hash_endpoint(struct sctp_endpoint *ep) | ||
611 | { | ||
612 | sctp_local_bh_disable(); | ||
613 | __sctp_hash_endpoint(ep); | ||
614 | sctp_local_bh_enable(); | ||
615 | } | ||
616 | |||
617 | /* Remove endpoint from the hash table. */ | ||
618 | static void __sctp_unhash_endpoint(struct sctp_endpoint *ep) | ||
619 | { | ||
620 | struct sctp_hashbucket *head; | ||
621 | struct sctp_ep_common *epb; | ||
622 | |||
623 | epb = &ep->base; | ||
624 | |||
625 | epb->hashent = sctp_ep_hashfn(epb->bind_addr.port); | ||
626 | |||
627 | head = &sctp_ep_hashtable[epb->hashent]; | ||
628 | |||
629 | sctp_write_lock(&head->lock); | ||
630 | |||
631 | if (epb->pprev) { | ||
632 | if (epb->next) | ||
633 | epb->next->pprev = epb->pprev; | ||
634 | *epb->pprev = epb->next; | ||
635 | epb->pprev = NULL; | ||
636 | } | ||
637 | |||
638 | sctp_write_unlock(&head->lock); | ||
639 | } | ||
640 | |||
641 | /* Remove endpoint from the hash. Local BH-safe. */ | ||
642 | void sctp_unhash_endpoint(struct sctp_endpoint *ep) | ||
643 | { | ||
644 | sctp_local_bh_disable(); | ||
645 | __sctp_unhash_endpoint(ep); | ||
646 | sctp_local_bh_enable(); | ||
647 | } | ||
648 | |||
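Both the endpoint hash above and the association hash below chain entries through struct sctp_ep_common using a next pointer plus a pprev back-pointer (the address of whatever pointer currently points at the node), so removal never has to walk the chain. The same idiom on a hypothetical stand-alone node type (illustrative only):

/* Illustrative only: the next/pprev chaining used by
 * __sctp_hash_endpoint()/__sctp_unhash_endpoint() and the
 * corresponding association helpers.  'struct example_node' is
 * hypothetical.
 */
struct example_node {
	struct example_node *next;
	struct example_node **pprev;
};

static void example_node_link(struct example_node *n, struct example_node **head)
{
	n->next = *head;
	if (n->next)
		n->next->pprev = &n->next;
	*head = n;
	n->pprev = head;
}

static void example_node_unlink(struct example_node *n)
{
	if (n->pprev) {
		if (n->next)
			n->next->pprev = n->pprev;
		*n->pprev = n->next;
		n->pprev = NULL;
	}
}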
649 | /* Look up an endpoint. */ | ||
650 | static struct sctp_endpoint *__sctp_rcv_lookup_endpoint(const union sctp_addr *laddr) | ||
651 | { | ||
652 | struct sctp_hashbucket *head; | ||
653 | struct sctp_ep_common *epb; | ||
654 | struct sctp_endpoint *ep; | ||
655 | int hash; | ||
656 | |||
657 | hash = sctp_ep_hashfn(laddr->v4.sin_port); | ||
658 | head = &sctp_ep_hashtable[hash]; | ||
659 | read_lock(&head->lock); | ||
660 | for (epb = head->chain; epb; epb = epb->next) { | ||
661 | ep = sctp_ep(epb); | ||
662 | if (sctp_endpoint_is_match(ep, laddr)) | ||
663 | goto hit; | ||
664 | } | ||
665 | |||
666 | ep = sctp_sk((sctp_get_ctl_sock()))->ep; | ||
667 | epb = &ep->base; | ||
668 | |||
669 | hit: | ||
670 | sctp_endpoint_hold(ep); | ||
671 | sock_hold(epb->sk); | ||
672 | read_unlock(&head->lock); | ||
673 | return ep; | ||
674 | } | ||
675 | |||
676 | /* Insert association into the hash table. */ | ||
677 | static void __sctp_hash_established(struct sctp_association *asoc) | ||
678 | { | ||
679 | struct sctp_ep_common **epp; | ||
680 | struct sctp_ep_common *epb; | ||
681 | struct sctp_hashbucket *head; | ||
682 | |||
683 | epb = &asoc->base; | ||
684 | |||
685 | /* Calculate which chain this entry will belong to. */ | ||
686 | epb->hashent = sctp_assoc_hashfn(epb->bind_addr.port, asoc->peer.port); | ||
687 | |||
688 | head = &sctp_assoc_hashtable[epb->hashent]; | ||
689 | |||
690 | sctp_write_lock(&head->lock); | ||
691 | epp = &head->chain; | ||
692 | epb->next = *epp; | ||
693 | if (epb->next) | ||
694 | (*epp)->pprev = &epb->next; | ||
695 | *epp = epb; | ||
696 | epb->pprev = epp; | ||
697 | sctp_write_unlock(&head->lock); | ||
698 | } | ||
699 | |||
700 | /* Add an association to the hash. Local BH-safe. */ | ||
701 | void sctp_hash_established(struct sctp_association *asoc) | ||
702 | { | ||
703 | sctp_local_bh_disable(); | ||
704 | __sctp_hash_established(asoc); | ||
705 | sctp_local_bh_enable(); | ||
706 | } | ||
707 | |||
708 | /* Remove association from the hash table. */ | ||
709 | static void __sctp_unhash_established(struct sctp_association *asoc) | ||
710 | { | ||
711 | struct sctp_hashbucket *head; | ||
712 | struct sctp_ep_common *epb; | ||
713 | |||
714 | epb = &asoc->base; | ||
715 | |||
716 | epb->hashent = sctp_assoc_hashfn(epb->bind_addr.port, | ||
717 | asoc->peer.port); | ||
718 | |||
719 | head = &sctp_assoc_hashtable[epb->hashent]; | ||
720 | |||
721 | sctp_write_lock(&head->lock); | ||
722 | |||
723 | if (epb->pprev) { | ||
724 | if (epb->next) | ||
725 | epb->next->pprev = epb->pprev; | ||
726 | *epb->pprev = epb->next; | ||
727 | epb->pprev = NULL; | ||
728 | } | ||
729 | |||
730 | sctp_write_unlock(&head->lock); | ||
731 | } | ||
732 | |||
733 | /* Remove association from the hash table. Local BH-safe. */ | ||
734 | void sctp_unhash_established(struct sctp_association *asoc) | ||
735 | { | ||
736 | sctp_local_bh_disable(); | ||
737 | __sctp_unhash_established(asoc); | ||
738 | sctp_local_bh_enable(); | ||
739 | } | ||
740 | |||
741 | /* Look up an association. */ | ||
742 | static struct sctp_association *__sctp_lookup_association( | ||
743 | const union sctp_addr *local, | ||
744 | const union sctp_addr *peer, | ||
745 | struct sctp_transport **pt) | ||
746 | { | ||
747 | struct sctp_hashbucket *head; | ||
748 | struct sctp_ep_common *epb; | ||
749 | struct sctp_association *asoc; | ||
750 | struct sctp_transport *transport; | ||
751 | int hash; | ||
752 | |||
753 | /* Optimize here for direct hit; only listening connections can | ||
754 | * have wildcards anyway. | ||
755 | */ | ||
756 | hash = sctp_assoc_hashfn(local->v4.sin_port, peer->v4.sin_port); | ||
757 | head = &sctp_assoc_hashtable[hash]; | ||
758 | read_lock(&head->lock); | ||
759 | for (epb = head->chain; epb; epb = epb->next) { | ||
760 | asoc = sctp_assoc(epb); | ||
761 | transport = sctp_assoc_is_match(asoc, local, peer); | ||
762 | if (transport) | ||
763 | goto hit; | ||
764 | } | ||
765 | |||
766 | read_unlock(&head->lock); | ||
767 | |||
768 | return NULL; | ||
769 | |||
770 | hit: | ||
771 | *pt = transport; | ||
772 | sctp_association_hold(asoc); | ||
773 | sock_hold(epb->sk); | ||
774 | read_unlock(&head->lock); | ||
775 | return asoc; | ||
776 | } | ||
777 | |||
778 | /* Look up an association. BH-safe. */ | ||
779 | SCTP_STATIC | ||
780 | struct sctp_association *sctp_lookup_association(const union sctp_addr *laddr, | ||
781 | const union sctp_addr *paddr, | ||
782 | struct sctp_transport **transportp) | ||
783 | { | ||
784 | struct sctp_association *asoc; | ||
785 | |||
786 | sctp_local_bh_disable(); | ||
787 | asoc = __sctp_lookup_association(laddr, paddr, transportp); | ||
788 | sctp_local_bh_enable(); | ||
789 | |||
790 | return asoc; | ||
791 | } | ||
792 | |||
793 | /* Is there an association matching the given local and peer addresses? */ | ||
794 | int sctp_has_association(const union sctp_addr *laddr, | ||
795 | const union sctp_addr *paddr) | ||
796 | { | ||
797 | struct sctp_association *asoc; | ||
798 | struct sctp_transport *transport; | ||
799 | |||
800 | if ((asoc = sctp_lookup_association(laddr, paddr, &transport))) { | ||
801 | sock_put(asoc->base.sk); | ||
802 | sctp_association_put(asoc); | ||
803 | return 1; | ||
804 | } | ||
805 | |||
806 | return 0; | ||
807 | } | ||
808 | |||
809 | /* | ||
810 | * SCTP Implementors Guide, 2.18 Handling of address | ||
811 | * parameters within the INIT or INIT-ACK. | ||
812 | * | ||
813 | * D) When searching for a matching TCB upon reception of an INIT | ||
814 | * or INIT-ACK chunk the receiver SHOULD use not only the | ||
815 | * source address of the packet (containing the INIT or | ||
816 | * INIT-ACK) but the receiver SHOULD also use all valid | ||
817 | * address parameters contained within the chunk. | ||
818 | * | ||
819 | * 2.18.3 Solution description | ||
820 | * | ||
821 | * This new text clearly specifies to an implementor the need | ||
822 | * to look within the INIT or INIT-ACK. Any implementation that | ||
823 | * does not do this, may not be able to establish associations | ||
824 | * in certain circumstances. | ||
825 | * | ||
826 | */ | ||
827 | static struct sctp_association *__sctp_rcv_init_lookup(struct sk_buff *skb, | ||
828 | const union sctp_addr *laddr, struct sctp_transport **transportp) | ||
829 | { | ||
830 | struct sctp_association *asoc; | ||
831 | union sctp_addr addr; | ||
832 | union sctp_addr *paddr = &addr; | ||
833 | struct sctphdr *sh = (struct sctphdr *) skb->h.raw; | ||
834 | sctp_chunkhdr_t *ch; | ||
835 | union sctp_params params; | ||
836 | sctp_init_chunk_t *init; | ||
837 | struct sctp_transport *transport; | ||
838 | struct sctp_af *af; | ||
839 | |||
840 | ch = (sctp_chunkhdr_t *) skb->data; | ||
841 | |||
842 | /* If this is INIT/INIT-ACK look inside the chunk too. */ | ||
843 | switch (ch->type) { | ||
844 | case SCTP_CID_INIT: | ||
845 | case SCTP_CID_INIT_ACK: | ||
846 | break; | ||
847 | default: | ||
848 | return NULL; | ||
849 | } | ||
850 | |||
851 | /* The code below will attempt to walk the chunk and extract | ||
852 | * parameter information. Before we do that, we need to verify | ||
853 | * that the chunk length doesn't cause overflow. Otherwise, we'll | ||
854 | * walk off the end. | ||
855 | */ | ||
856 | if (WORD_ROUND(ntohs(ch->length)) > skb->len) | ||
857 | return NULL; | ||
858 | |||
859 | /* | ||
860 | * This code will NOT touch anything inside the chunk--it is | ||
861 | * strictly READ-ONLY. | ||
862 | * | ||
863 | * RFC 2960 3 SCTP packet Format | ||
864 | * | ||
865 | * Multiple chunks can be bundled into one SCTP packet up to | ||
866 | * the MTU size, except for the INIT, INIT ACK, and SHUTDOWN | ||
867 | * COMPLETE chunks. These chunks MUST NOT be bundled with any | ||
868 | * other chunk in a packet. See Section 6.10 for more details | ||
869 | * on chunk bundling. | ||
870 | */ | ||
871 | |||
872 | /* Find the start of the TLVs and the end of the chunk. This is | ||
873 | * the region we search for address parameters. | ||
874 | */ | ||
875 | init = (sctp_init_chunk_t *)skb->data; | ||
876 | |||
877 | /* Walk the parameters looking for embedded addresses. */ | ||
878 | sctp_walk_params(params, init, init_hdr.params) { | ||
879 | |||
880 | /* Note: Ignoring hostname addresses. */ | ||
881 | af = sctp_get_af_specific(param_type2af(params.p->type)); | ||
882 | if (!af) | ||
883 | continue; | ||
884 | |||
885 | af->from_addr_param(paddr, params.addr, ntohs(sh->source), 0); | ||
886 | |||
887 | asoc = __sctp_lookup_association(laddr, paddr, &transport); | ||
888 | if (asoc) | ||
889 | return asoc; | ||
890 | } | ||
891 | |||
892 | return NULL; | ||
893 | } | ||
894 | |||
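sctp_walk_params() above iterates over the TLV-encoded parameters that follow the fixed INIT/INIT-ACK header, stepping by each parameter's length rounded up to a 4-byte boundary. A hedged sketch of the underlying walk, with a hypothetical callback in place of the real macro:

/* Illustrative only: the TLV walk that sctp_walk_params() performs.
 * 'handle_param' is a hypothetical callback, not a kernel symbol.
 */
static void walk_init_params_example(sctp_init_chunk_t *init, int chunk_len,
				     void (*handle_param)(sctp_paramhdr_t *))
{
	__u8 *p = (__u8 *)init->init_hdr.params;
	__u8 *end = (__u8 *)init + chunk_len;

	while (p + sizeof(sctp_paramhdr_t) <= end) {
		sctp_paramhdr_t *ph = (sctp_paramhdr_t *)p;
		int len = WORD_ROUND(ntohs(ph->length));

		if (len < (int)sizeof(sctp_paramhdr_t) || p + len > end)
			break;		/* malformed parameter */
		handle_param(ph);
		p += len;
	}
}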
895 | /* Lookup an association for an inbound skb. */ | ||
896 | static struct sctp_association *__sctp_rcv_lookup(struct sk_buff *skb, | ||
897 | const union sctp_addr *paddr, | ||
898 | const union sctp_addr *laddr, | ||
899 | struct sctp_transport **transportp) | ||
900 | { | ||
901 | struct sctp_association *asoc; | ||
902 | |||
903 | asoc = __sctp_lookup_association(laddr, paddr, transportp); | ||
904 | |||
905 | /* Further lookup for INIT/INIT-ACK packets. | ||
906 | * SCTP Implementors Guide, 2.18 Handling of address | ||
907 | * parameters within the INIT or INIT-ACK. | ||
908 | */ | ||
909 | if (!asoc) | ||
910 | asoc = __sctp_rcv_init_lookup(skb, laddr, transportp); | ||
911 | |||
912 | return asoc; | ||
913 | } | ||
diff --git a/net/sctp/inqueue.c b/net/sctp/inqueue.c new file mode 100644 index 000000000000..cedf4351556c --- /dev/null +++ b/net/sctp/inqueue.c | |||
@@ -0,0 +1,204 @@ | |||
1 | /* SCTP kernel reference Implementation | ||
2 | * Copyright (c) 1999-2000 Cisco, Inc. | ||
3 | * Copyright (c) 1999-2001 Motorola, Inc. | ||
4 | * Copyright (c) 2002 International Business Machines, Corp. | ||
5 | * | ||
6 | * This file is part of the SCTP kernel reference Implementation | ||
7 | * | ||
8 | * These functions are the methods for accessing the SCTP inqueue. | ||
9 | * | ||
10 | * An SCTP inqueue is a queue into which you push SCTP packets | ||
11 | * (which might be bundles or fragments of chunks) and out of which you | ||
12 | * pop SCTP whole chunks. | ||
13 | * | ||
14 | * The SCTP reference implementation is free software; | ||
15 | * you can redistribute it and/or modify it under the terms of | ||
16 | * the GNU General Public License as published by | ||
17 | * the Free Software Foundation; either version 2, or (at your option) | ||
18 | * any later version. | ||
19 | * | ||
20 | * The SCTP reference implementation is distributed in the hope that it | ||
21 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
22 | * ************************ | ||
23 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
24 | * See the GNU General Public License for more details. | ||
25 | * | ||
26 | * You should have received a copy of the GNU General Public License | ||
27 | * along with GNU CC; see the file COPYING. If not, write to | ||
28 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
29 | * Boston, MA 02111-1307, USA. | ||
30 | * | ||
31 | * Please send any bug reports or fixes you make to the | ||
32 | * email address(es): | ||
33 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
34 | * | ||
35 | * Or submit a bug report through the following website: | ||
36 | * http://www.sf.net/projects/lksctp | ||
37 | * | ||
38 | * Written or modified by: | ||
39 | * La Monte H.P. Yarroll <piggy@acm.org> | ||
40 | * Karl Knutson <karl@athena.chicago.il.us> | ||
41 | * | ||
42 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
43 | * be incorporated into the next SCTP release. | ||
44 | */ | ||
45 | |||
46 | #include <net/sctp/sctp.h> | ||
47 | #include <net/sctp/sm.h> | ||
48 | #include <linux/interrupt.h> | ||
49 | |||
50 | /* Initialize an SCTP inqueue. */ | ||
51 | void sctp_inq_init(struct sctp_inq *queue) | ||
52 | { | ||
53 | skb_queue_head_init(&queue->in); | ||
54 | queue->in_progress = NULL; | ||
55 | |||
56 | /* Create a task for delivering data. */ | ||
57 | INIT_WORK(&queue->immediate, NULL, NULL); | ||
58 | |||
59 | queue->malloced = 0; | ||
60 | } | ||
61 | |||
62 | /* Release the memory associated with an SCTP inqueue. */ | ||
63 | void sctp_inq_free(struct sctp_inq *queue) | ||
64 | { | ||
65 | struct sctp_chunk *chunk; | ||
66 | |||
67 | /* Empty the queue. */ | ||
68 | while ((chunk = (struct sctp_chunk *) skb_dequeue(&queue->in)) != NULL) | ||
69 | sctp_chunk_free(chunk); | ||
70 | |||
71 | /* If there is a packet which is currently being worked on, | ||
72 | * free it as well. | ||
73 | */ | ||
74 | if (queue->in_progress) | ||
75 | sctp_chunk_free(queue->in_progress); | ||
76 | |||
77 | if (queue->malloced) { | ||
78 | /* Dump the master memory segment. */ | ||
79 | kfree(queue); | ||
80 | } | ||
81 | } | ||
82 | |||
83 | /* Put a new packet in an SCTP inqueue. | ||
84 | * We assume that packet->sctp_hdr is set and in host byte order. | ||
85 | */ | ||
86 | void sctp_inq_push(struct sctp_inq *q, struct sctp_chunk *packet) | ||
87 | { | ||
88 | /* Directly call the packet handling routine. */ | ||
89 | |||
90 | /* We are now calling this either from the soft interrupt | ||
91 | * or from the backlog processing. | ||
92 | * Eventually, we should clean up inqueue to not rely | ||
93 | * on the BH related data structures. | ||
94 | */ | ||
95 | skb_queue_tail(&(q->in), (struct sk_buff *) packet); | ||
96 | q->immediate.func(q->immediate.data); | ||
97 | } | ||
98 | |||
99 | /* Extract a chunk from an SCTP inqueue. | ||
100 | * | ||
101 | * WARNING: If you need to put the chunk on another queue, you need to | ||
102 | * make a shallow copy (clone) of it. | ||
103 | */ | ||
104 | struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue) | ||
105 | { | ||
106 | struct sctp_chunk *chunk; | ||
107 | sctp_chunkhdr_t *ch = NULL; | ||
108 | |||
109 | /* The assumption is that we are safe to process the chunks | ||
110 | * at this time. | ||
111 | */ | ||
112 | |||
113 | if ((chunk = queue->in_progress)) { | ||
114 | /* There is a packet that we have been working on. | ||
115 | * Any post processing work to do before we move on? | ||
116 | */ | ||
117 | if (chunk->singleton || | ||
118 | chunk->end_of_packet || | ||
119 | chunk->pdiscard) { | ||
120 | sctp_chunk_free(chunk); | ||
121 | chunk = queue->in_progress = NULL; | ||
122 | } else { | ||
123 | /* Nothing to do. Next chunk in the packet, please. */ | ||
124 | ch = (sctp_chunkhdr_t *) chunk->chunk_end; | ||
125 | |||
126 | /* Force chunk->skb->data to chunk->chunk_end. */ | ||
127 | skb_pull(chunk->skb, | ||
128 | chunk->chunk_end - chunk->skb->data); | ||
129 | } | ||
130 | } | ||
131 | |||
132 | /* Do we need to take the next packet out of the queue to process? */ | ||
133 | if (!chunk) { | ||
134 | /* Is the queue empty? */ | ||
135 | if (skb_queue_empty(&queue->in)) | ||
136 | return NULL; | ||
137 | |||
138 | chunk = queue->in_progress = | ||
139 | (struct sctp_chunk *) skb_dequeue(&queue->in); | ||
140 | |||
141 | /* This is the first chunk in the packet. */ | ||
142 | chunk->singleton = 1; | ||
143 | ch = (sctp_chunkhdr_t *) chunk->skb->data; | ||
144 | } | ||
145 | |||
146 | chunk->chunk_hdr = ch; | ||
147 | chunk->chunk_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); | ||
148 | /* In the unlikely case of an IP reassembly, the skb could be | ||
149 | * non-linear. If so, update chunk_end so that it doesn't go past | ||
150 | * the skb->tail. | ||
151 | */ | ||
152 | if (unlikely(skb_is_nonlinear(chunk->skb))) { | ||
153 | if (chunk->chunk_end > chunk->skb->tail) | ||
154 | chunk->chunk_end = chunk->skb->tail; | ||
155 | } | ||
156 | skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t)); | ||
157 | chunk->subh.v = NULL; /* Subheader is no longer valid. */ | ||
158 | |||
159 | if (chunk->chunk_end < chunk->skb->tail) { | ||
160 | /* This is not a singleton */ | ||
161 | chunk->singleton = 0; | ||
162 | } else if (chunk->chunk_end > chunk->skb->tail) { | ||
163 | /* RFC 2960, Section 6.10 Bundling | ||
164 | * | ||
165 | * Partial chunks MUST NOT be placed in an SCTP packet. | ||
166 | * If the receiver detects a partial chunk, it MUST drop | ||
167 | * the chunk. | ||
168 | * | ||
169 | * Since the end of the chunk is past the end of our buffer | ||
170 | * (which contains the whole packet), we can freely discard | ||
171 | * the whole packet. | ||
172 | */ | ||
173 | sctp_chunk_free(chunk); | ||
174 | chunk = queue->in_progress = NULL; | ||
175 | |||
176 | return NULL; | ||
177 | } else { | ||
178 | /* We are at the end of the packet, so mark the chunk | ||
179 | * in case we need to send a SACK. | ||
180 | */ | ||
181 | chunk->end_of_packet = 1; | ||
182 | } | ||
183 | |||
184 | SCTP_DEBUG_PRINTK("+++sctp_inq_pop+++ chunk %p[%s]," | ||
185 | " length %d, skb->len %d\n",chunk, | ||
186 | sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)), | ||
187 | ntohs(chunk->chunk_hdr->length), chunk->skb->len); | ||
188 | return chunk; | ||
189 | } | ||
190 | |||
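Taken together, sctp_inq_push() and sctp_inq_pop() give the packets-in, whole-chunks-out behaviour described in the header comment, and callers drain the queue with exactly the loop used by sctp_endpoint_bh_rcv() earlier in this series. A minimal consumer sketch (illustrative only; 'process_chunk' is a hypothetical helper):

/* Illustrative consumer of an SCTP inqueue, mirroring the loop in
 * sctp_endpoint_bh_rcv().  Not part of this file.
 */
static void drain_inqueue_example(struct sctp_inq *q,
				  void (*process_chunk)(struct sctp_chunk *))
{
	struct sctp_chunk *chunk;

	while ((chunk = sctp_inq_pop(q)) != NULL)
		process_chunk(chunk);	/* e.g. feed it to sctp_do_sm() */
}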
191 | /* Set a top-half handler. | ||
192 | * | ||
193 | * Originally, the top-half handler was scheduled as a BH. We now | ||
194 | * call the handler directly in sctp_inq_push() at a time that | ||
195 | * we know we are lock safe. | ||
196 | * The intent is that this routine will pull stuff out of the | ||
197 | * inqueue and process it. | ||
198 | */ | ||
199 | void sctp_inq_set_th_handler(struct sctp_inq *q, | ||
200 | void (*callback)(void *), void *arg) | ||
201 | { | ||
202 | INIT_WORK(&q->immediate, callback, arg); | ||
203 | } | ||
204 | |||
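The delivery callback installed here is what sctp_inq_push() invokes through q->immediate.func(). A hedged sketch of how an owner such as the endpoint code would wire its receive routine in (the real call site lives in endpointola.c, which is not part of this hunk):

/* Illustrative only: attaching a delivery routine like
 * sctp_endpoint_bh_rcv() to an inqueue.  The cast is needed because
 * the handler takes its owning object rather than a bare void *.
 */
static void wire_up_inqueue_example(struct sctp_endpoint *ep)
{
	sctp_inq_set_th_handler(&ep->base.inqueue,
				(void (*)(void *))sctp_endpoint_bh_rcv,
				ep);
}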
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c new file mode 100644 index 000000000000..e42c74e3ec1e --- /dev/null +++ b/net/sctp/ipv6.c | |||
@@ -0,0 +1,1013 @@ | |||
1 | /* SCTP kernel reference Implementation | ||
2 | * (C) Copyright IBM Corp. 2002, 2004 | ||
3 | * Copyright (c) 2001 Nokia, Inc. | ||
4 | * Copyright (c) 2001 La Monte H.P. Yarroll | ||
5 | * Copyright (c) 2002-2003 Intel Corp. | ||
6 | * | ||
7 | * This file is part of the SCTP kernel reference Implementation | ||
8 | * | ||
9 | * SCTP over IPv6. | ||
10 | * | ||
11 | * The SCTP reference implementation is free software; | ||
12 | * you can redistribute it and/or modify it under the terms of | ||
13 | * the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2, or (at your option) | ||
15 | * any later version. | ||
16 | * | ||
17 | * The SCTP reference implementation is distributed in the hope that it | ||
18 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
19 | * ************************ | ||
20 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
21 | * See the GNU General Public License for more details. | ||
22 | * | ||
23 | * You should have received a copy of the GNU General Public License | ||
24 | * along with GNU CC; see the file COPYING. If not, write to | ||
25 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
26 | * Boston, MA 02111-1307, USA. | ||
27 | * | ||
28 | * Please send any bug reports or fixes you make to the | ||
29 | * email address(es): | ||
30 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
31 | * | ||
32 | * Or submit a bug report through the following website: | ||
33 | * http://www.sf.net/projects/lksctp | ||
34 | * | ||
35 | * Written or modified by: | ||
36 | * Le Yanqun <yanqun.le@nokia.com> | ||
37 | * Hui Huang <hui.huang@nokia.com> | ||
38 | * La Monte H.P. Yarroll <piggy@acm.org> | ||
39 | * Sridhar Samudrala <sri@us.ibm.com> | ||
40 | * Jon Grimm <jgrimm@us.ibm.com> | ||
41 | * Ardelle Fan <ardelle.fan@intel.com> | ||
42 | * | ||
43 | * Based on: | ||
44 | * linux/net/ipv6/tcp_ipv6.c | ||
45 | * | ||
46 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
47 | * be incorporated into the next SCTP release. | ||
48 | */ | ||
49 | |||
50 | #include <linux/module.h> | ||
51 | #include <linux/errno.h> | ||
52 | #include <linux/types.h> | ||
53 | #include <linux/socket.h> | ||
54 | #include <linux/sockios.h> | ||
55 | #include <linux/net.h> | ||
56 | #include <linux/sched.h> | ||
57 | #include <linux/in.h> | ||
58 | #include <linux/in6.h> | ||
59 | #include <linux/netdevice.h> | ||
60 | #include <linux/init.h> | ||
61 | #include <linux/ipsec.h> | ||
62 | |||
63 | #include <linux/ipv6.h> | ||
64 | #include <linux/icmpv6.h> | ||
65 | #include <linux/random.h> | ||
66 | #include <linux/seq_file.h> | ||
67 | |||
68 | #include <net/protocol.h> | ||
69 | #include <net/tcp.h> | ||
70 | #include <net/ndisc.h> | ||
71 | #include <net/ipv6.h> | ||
72 | #include <net/transp_v6.h> | ||
73 | #include <net/addrconf.h> | ||
74 | #include <net/ip6_route.h> | ||
75 | #include <net/inet_common.h> | ||
76 | #include <net/inet_ecn.h> | ||
77 | #include <net/sctp/sctp.h> | ||
78 | |||
79 | #include <asm/uaccess.h> | ||
80 | |||
81 | extern int sctp_inetaddr_event(struct notifier_block *, unsigned long, void *); | ||
82 | static struct notifier_block sctp_inet6addr_notifier = { | ||
83 | .notifier_call = sctp_inetaddr_event, | ||
84 | }; | ||
85 | |||
86 | /* ICMP error handler. */ | ||
87 | SCTP_STATIC void sctp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, | ||
88 | int type, int code, int offset, __u32 info) | ||
89 | { | ||
90 | struct inet6_dev *idev; | ||
91 | struct ipv6hdr *iph = (struct ipv6hdr *)skb->data; | ||
92 | struct sctphdr *sh = (struct sctphdr *)(skb->data + offset); | ||
93 | struct sock *sk; | ||
94 | struct sctp_endpoint *ep; | ||
95 | struct sctp_association *asoc; | ||
96 | struct sctp_transport *transport; | ||
97 | struct ipv6_pinfo *np; | ||
98 | char *saveip, *savesctp; | ||
99 | int err; | ||
100 | |||
101 | idev = in6_dev_get(skb->dev); | ||
102 | |||
103 | /* Fix up skb to look at the embedded net header. */ | ||
104 | saveip = skb->nh.raw; | ||
105 | savesctp = skb->h.raw; | ||
106 | skb->nh.ipv6h = iph; | ||
107 | skb->h.raw = (char *)sh; | ||
108 | sk = sctp_err_lookup(AF_INET6, skb, sh, &ep, &asoc, &transport); | ||
109 | /* Put back the original pointers. */ | ||
110 | skb->nh.raw = saveip; | ||
111 | skb->h.raw = savesctp; | ||
112 | if (!sk) { | ||
113 | ICMP6_INC_STATS_BH(idev, ICMP6_MIB_INERRORS); | ||
114 | goto out; | ||
115 | } | ||
116 | |||
117 | /* Warning: The sock lock is held. Remember to call | ||
118 | * sctp_err_finish! | ||
119 | */ | ||
120 | |||
121 | switch (type) { | ||
122 | case ICMPV6_PKT_TOOBIG: | ||
123 | sctp_icmp_frag_needed(sk, asoc, transport, ntohl(info)); | ||
124 | goto out_unlock; | ||
125 | case ICMPV6_PARAMPROB: | ||
126 | if (ICMPV6_UNK_NEXTHDR == code) { | ||
127 | sctp_icmp_proto_unreachable(sk, ep, asoc, transport); | ||
128 | goto out_unlock; | ||
129 | } | ||
130 | break; | ||
131 | default: | ||
132 | break; | ||
133 | } | ||
134 | |||
135 | np = inet6_sk(sk); | ||
136 | icmpv6_err_convert(type, code, &err); | ||
137 | if (!sock_owned_by_user(sk) && np->recverr) { | ||
138 | sk->sk_err = err; | ||
139 | sk->sk_error_report(sk); | ||
140 | } else { /* Only an error on timeout */ | ||
141 | sk->sk_err_soft = err; | ||
142 | } | ||
143 | |||
144 | out_unlock: | ||
145 | sctp_err_finish(sk, ep, asoc); | ||
146 | out: | ||
147 | if (likely(idev != NULL)) | ||
148 | in6_dev_put(idev); | ||
149 | } | ||
150 | |||
151 | /* Based on tcp_v6_xmit() in tcp_ipv6.c. */ | ||
152 | static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport, | ||
153 | int ipfragok) | ||
154 | { | ||
155 | struct sock *sk = skb->sk; | ||
156 | struct ipv6_pinfo *np = inet6_sk(sk); | ||
157 | struct flowi fl; | ||
158 | |||
159 | memset(&fl, 0, sizeof(fl)); | ||
160 | |||
161 | fl.proto = sk->sk_protocol; | ||
162 | |||
163 | /* Fill in the dest address from the route entry passed with the skb | ||
164 | * and the source address from the transport. | ||
165 | */ | ||
166 | ipv6_addr_copy(&fl.fl6_dst, &transport->ipaddr.v6.sin6_addr); | ||
167 | ipv6_addr_copy(&fl.fl6_src, &transport->saddr.v6.sin6_addr); | ||
168 | |||
169 | fl.fl6_flowlabel = np->flow_label; | ||
170 | IP6_ECN_flow_xmit(sk, fl.fl6_flowlabel); | ||
171 | if (ipv6_addr_type(&fl.fl6_src) & IPV6_ADDR_LINKLOCAL) | ||
172 | fl.oif = transport->saddr.v6.sin6_scope_id; | ||
173 | else | ||
174 | fl.oif = sk->sk_bound_dev_if; | ||
175 | fl.fl_ip_sport = inet_sk(sk)->sport; | ||
176 | fl.fl_ip_dport = transport->ipaddr.v6.sin6_port; | ||
177 | |||
178 | if (np->opt && np->opt->srcrt) { | ||
179 | struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt; | ||
180 | ipv6_addr_copy(&fl.fl6_dst, rt0->addr); | ||
181 | } | ||
182 | |||
183 | SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, " | ||
184 | "src:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x " | ||
185 | "dst:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n", | ||
186 | __FUNCTION__, skb, skb->len, | ||
187 | NIP6(fl.fl6_src), NIP6(fl.fl6_dst)); | ||
188 | |||
189 | SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS); | ||
190 | |||
191 | return ip6_xmit(sk, skb, &fl, np->opt, ipfragok); | ||
192 | } | ||
193 | |||
194 | /* Returns the dst cache entry for the given source and destination ip | ||
195 | * addresses. | ||
196 | */ | ||
197 | static struct dst_entry *sctp_v6_get_dst(struct sctp_association *asoc, | ||
198 | union sctp_addr *daddr, | ||
199 | union sctp_addr *saddr) | ||
200 | { | ||
201 | struct dst_entry *dst; | ||
202 | struct flowi fl; | ||
203 | |||
204 | memset(&fl, 0, sizeof(fl)); | ||
205 | ipv6_addr_copy(&fl.fl6_dst, &daddr->v6.sin6_addr); | ||
206 | if (ipv6_addr_type(&daddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) | ||
207 | fl.oif = daddr->v6.sin6_scope_id; | ||
208 | |||
209 | |||
210 | SCTP_DEBUG_PRINTK("%s: DST=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ", | ||
211 | __FUNCTION__, NIP6(fl.fl6_dst)); | ||
212 | |||
213 | if (saddr) { | ||
214 | ipv6_addr_copy(&fl.fl6_src, &saddr->v6.sin6_addr); | ||
215 | SCTP_DEBUG_PRINTK( | ||
216 | "SRC=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x - ", | ||
217 | NIP6(fl.fl6_src)); | ||
218 | } | ||
219 | |||
220 | dst = ip6_route_output(NULL, &fl); | ||
221 | if (dst) { | ||
222 | struct rt6_info *rt; | ||
223 | rt = (struct rt6_info *)dst; | ||
224 | SCTP_DEBUG_PRINTK( | ||
225 | "rt6_dst:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x " | ||
226 | "rt6_src:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n", | ||
227 | NIP6(rt->rt6i_dst.addr), NIP6(rt->rt6i_src.addr)); | ||
228 | } else { | ||
229 | SCTP_DEBUG_PRINTK("NO ROUTE\n"); | ||
230 | } | ||
231 | |||
232 | return dst; | ||
233 | } | ||
234 | |||
235 | /* Returns the number of consecutive initial bits that match in the two IPv6 | ||
236 | * addresses. | ||
237 | */ | ||
238 | static inline int sctp_v6_addr_match_len(union sctp_addr *s1, | ||
239 | union sctp_addr *s2) | ||
240 | { | ||
241 | struct in6_addr *a1 = &s1->v6.sin6_addr; | ||
242 | struct in6_addr *a2 = &s2->v6.sin6_addr; | ||
243 | int i, j; | ||
244 | |||
245 | for (i = 0; i < 4 ; i++) { | ||
246 | __u32 a1xora2; | ||
247 | |||
248 | a1xora2 = a1->s6_addr32[i] ^ a2->s6_addr32[i]; | ||
249 | |||
250 | if ((j = fls(ntohl(a1xora2)))) | ||
251 | return (i * 32 + 32 - j); | ||
252 | } | ||
253 | |||
254 | return (i*32); | ||
255 | } | ||
256 | |||
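A quick worked example of the arithmetic above: for 2001:db8::1 versus 2001:db8:1::1 the first 32-bit words are equal, the second words differ as 0x00000000 ^ 0x00010000 = 0x00010000, fls() of that value is 17, so the shared prefix is 1*32 + 32 - 17 = 47 bits. The same computation on plain word arrays (illustrative only):

/* Illustrative only: common-prefix length in bits of two arrays of
 * big-endian 32-bit words, as sctp_v6_addr_match_len() computes on
 * in6_addr.s6_addr32[].
 */
static int prefix_match_len_example(const __u32 *a, const __u32 *b, int nwords)
{
	int i, j;

	for (i = 0; i < nwords; i++) {
		__u32 diff = a[i] ^ b[i];

		if (diff) {
			j = fls(ntohl(diff));	/* highest differing bit, 1-based */
			return i * 32 + 32 - j;
		}
	}
	return nwords * 32;
}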
257 | /* Fills in the source address (saddr) based on the destination address (daddr) | ||
258 | * and asoc's bind address list. | ||
259 | */ | ||
260 | static void sctp_v6_get_saddr(struct sctp_association *asoc, | ||
261 | struct dst_entry *dst, | ||
262 | union sctp_addr *daddr, | ||
263 | union sctp_addr *saddr) | ||
264 | { | ||
265 | struct sctp_bind_addr *bp; | ||
266 | rwlock_t *addr_lock; | ||
267 | struct sctp_sockaddr_entry *laddr; | ||
268 | struct list_head *pos; | ||
269 | sctp_scope_t scope; | ||
270 | union sctp_addr *baddr = NULL; | ||
271 | __u8 matchlen = 0; | ||
272 | __u8 bmatchlen; | ||
273 | |||
274 | SCTP_DEBUG_PRINTK("%s: asoc:%p dst:%p " | ||
275 | "daddr:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ", | ||
276 | __FUNCTION__, asoc, dst, NIP6(daddr->v6.sin6_addr)); | ||
277 | |||
278 | if (!asoc) { | ||
279 | ipv6_get_saddr(dst, &daddr->v6.sin6_addr,&saddr->v6.sin6_addr); | ||
280 | SCTP_DEBUG_PRINTK("saddr from ipv6_get_saddr: " | ||
281 | "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n", | ||
282 | NIP6(saddr->v6.sin6_addr)); | ||
283 | return; | ||
284 | } | ||
285 | |||
286 | scope = sctp_scope(daddr); | ||
287 | |||
288 | bp = &asoc->base.bind_addr; | ||
289 | addr_lock = &asoc->base.addr_lock; | ||
290 | |||
291 | /* Go through the bind address list and find the best source address | ||
292 | * that matches the scope of the destination address. | ||
293 | */ | ||
294 | sctp_read_lock(addr_lock); | ||
295 | list_for_each(pos, &bp->address_list) { | ||
296 | laddr = list_entry(pos, struct sctp_sockaddr_entry, list); | ||
297 | if ((laddr->a.sa.sa_family == AF_INET6) && | ||
298 | (scope <= sctp_scope(&laddr->a))) { | ||
299 | bmatchlen = sctp_v6_addr_match_len(daddr, &laddr->a); | ||
300 | if (!baddr || (matchlen < bmatchlen)) { | ||
301 | baddr = &laddr->a; | ||
302 | matchlen = bmatchlen; | ||
303 | } | ||
304 | } | ||
305 | } | ||
306 | |||
307 | if (baddr) { | ||
308 | memcpy(saddr, baddr, sizeof(union sctp_addr)); | ||
309 | SCTP_DEBUG_PRINTK("saddr: " | ||
310 | "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n", | ||
311 | NIP6(saddr->v6.sin6_addr)); | ||
312 | } else { | ||
313 | printk(KERN_ERR "%s: asoc:%p Could not find a valid source " | ||
314 | "address for the " | ||
315 | "dest:%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x\n", | ||
316 | __FUNCTION__, asoc, NIP6(daddr->v6.sin6_addr)); | ||
317 | } | ||
318 | |||
319 | sctp_read_unlock(addr_lock); | ||
320 | } | ||
321 | |||
322 | /* Make a copy of all potential local addresses. */ | ||
323 | static void sctp_v6_copy_addrlist(struct list_head *addrlist, | ||
324 | struct net_device *dev) | ||
325 | { | ||
326 | struct inet6_dev *in6_dev; | ||
327 | struct inet6_ifaddr *ifp; | ||
328 | struct sctp_sockaddr_entry *addr; | ||
329 | |||
330 | read_lock(&addrconf_lock); | ||
331 | if ((in6_dev = __in6_dev_get(dev)) == NULL) { | ||
332 | read_unlock(&addrconf_lock); | ||
333 | return; | ||
334 | } | ||
335 | |||
336 | read_lock(&in6_dev->lock); | ||
337 | for (ifp = in6_dev->addr_list; ifp; ifp = ifp->if_next) { | ||
338 | /* Add the address to the local list. */ | ||
339 | addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC); | ||
340 | if (addr) { | ||
341 | addr->a.v6.sin6_family = AF_INET6; | ||
342 | addr->a.v6.sin6_port = 0; | ||
343 | addr->a.v6.sin6_addr = ifp->addr; | ||
344 | addr->a.v6.sin6_scope_id = dev->ifindex; | ||
345 | INIT_LIST_HEAD(&addr->list); | ||
346 | list_add_tail(&addr->list, addrlist); | ||
347 | } | ||
348 | } | ||
349 | |||
350 | read_unlock(&in6_dev->lock); | ||
351 | read_unlock(&addrconf_lock); | ||
352 | } | ||
353 | |||
354 | /* Initialize a sockaddr_storage from an incoming skb. */ | ||
355 | static void sctp_v6_from_skb(union sctp_addr *addr,struct sk_buff *skb, | ||
356 | int is_saddr) | ||
357 | { | ||
358 | void *from; | ||
359 | __u16 *port; | ||
360 | struct sctphdr *sh; | ||
361 | |||
362 | port = &addr->v6.sin6_port; | ||
363 | addr->v6.sin6_family = AF_INET6; | ||
364 | addr->v6.sin6_flowinfo = 0; /* FIXME */ | ||
365 | addr->v6.sin6_scope_id = ((struct inet6_skb_parm *)skb->cb)->iif; | ||
366 | |||
367 | sh = (struct sctphdr *) skb->h.raw; | ||
368 | if (is_saddr) { | ||
369 | *port = ntohs(sh->source); | ||
370 | from = &skb->nh.ipv6h->saddr; | ||
371 | } else { | ||
372 | *port = ntohs(sh->dest); | ||
373 | from = &skb->nh.ipv6h->daddr; | ||
374 | } | ||
375 | ipv6_addr_copy(&addr->v6.sin6_addr, from); | ||
376 | } | ||
377 | |||
378 | /* Initialize an sctp_addr from a socket. */ | ||
379 | static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk) | ||
380 | { | ||
381 | addr->v6.sin6_family = AF_INET6; | ||
382 | addr->v6.sin6_port = inet_sk(sk)->num; | ||
383 | addr->v6.sin6_addr = inet6_sk(sk)->rcv_saddr; | ||
384 | } | ||
385 | |||
386 | /* Initialize sk->sk_rcv_saddr from sctp_addr. */ | ||
387 | static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk) | ||
388 | { | ||
389 | if (addr->sa.sa_family == AF_INET && sctp_sk(sk)->v4mapped) { | ||
390 | inet6_sk(sk)->rcv_saddr.s6_addr32[0] = 0; | ||
391 | inet6_sk(sk)->rcv_saddr.s6_addr32[1] = 0; | ||
392 | inet6_sk(sk)->rcv_saddr.s6_addr32[2] = htonl(0x0000ffff); | ||
393 | inet6_sk(sk)->rcv_saddr.s6_addr32[3] = | ||
394 | addr->v4.sin_addr.s_addr; | ||
395 | } else { | ||
396 | inet6_sk(sk)->rcv_saddr = addr->v6.sin6_addr; | ||
397 | } | ||
398 | } | ||
399 | |||
400 | /* Initialize sk->sk_daddr from sctp_addr. */ | ||
401 | static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk) | ||
402 | { | ||
403 | if (addr->sa.sa_family == AF_INET && sctp_sk(sk)->v4mapped) { | ||
404 | inet6_sk(sk)->daddr.s6_addr32[0] = 0; | ||
405 | inet6_sk(sk)->daddr.s6_addr32[1] = 0; | ||
406 | inet6_sk(sk)->daddr.s6_addr32[2] = htonl(0x0000ffff); | ||
407 | inet6_sk(sk)->daddr.s6_addr32[3] = addr->v4.sin_addr.s_addr; | ||
408 | } else { | ||
409 | inet6_sk(sk)->daddr = addr->v6.sin6_addr; | ||
410 | } | ||
411 | } | ||
412 | |||
413 | /* Initialize a sctp_addr from an address parameter. */ | ||
414 | static void sctp_v6_from_addr_param(union sctp_addr *addr, | ||
415 | union sctp_addr_param *param, | ||
416 | __u16 port, int iif) | ||
417 | { | ||
418 | addr->v6.sin6_family = AF_INET6; | ||
419 | addr->v6.sin6_port = port; | ||
420 | addr->v6.sin6_flowinfo = 0; /* BUG */ | ||
421 | ipv6_addr_copy(&addr->v6.sin6_addr, ¶m->v6.addr); | ||
422 | addr->v6.sin6_scope_id = iif; | ||
423 | } | ||
424 | |||
425 | /* Initialize an address parameter from a sctp_addr and return the length | ||
426 | * of the address parameter. | ||
427 | */ | ||
428 | static int sctp_v6_to_addr_param(const union sctp_addr *addr, | ||
429 | union sctp_addr_param *param) | ||
430 | { | ||
431 | int length = sizeof(sctp_ipv6addr_param_t); | ||
432 | |||
433 | param->v6.param_hdr.type = SCTP_PARAM_IPV6_ADDRESS; | ||
434 | param->v6.param_hdr.length = ntohs(length); | ||
435 | ipv6_addr_copy(¶m->v6.addr, &addr->v6.sin6_addr); | ||
436 | |||
437 | return length; | ||
438 | } | ||
439 | |||
440 | /* Initialize a sctp_addr from a dst_entry. */ | ||
441 | static void sctp_v6_dst_saddr(union sctp_addr *addr, struct dst_entry *dst, | ||
442 | unsigned short port) | ||
443 | { | ||
444 | struct rt6_info *rt = (struct rt6_info *)dst; | ||
445 | addr->sa.sa_family = AF_INET6; | ||
446 | addr->v6.sin6_port = port; | ||
447 | ipv6_addr_copy(&addr->v6.sin6_addr, &rt->rt6i_src.addr); | ||
448 | } | ||
449 | |||
450 | /* Compare addresses exactly. | ||
451 | * v4-mapped-v6 addresses are also taken into consideration. | ||
452 | */ | ||
453 | static int sctp_v6_cmp_addr(const union sctp_addr *addr1, | ||
454 | const union sctp_addr *addr2) | ||
455 | { | ||
456 | if (addr1->sa.sa_family != addr2->sa.sa_family) { | ||
457 | if (addr1->sa.sa_family == AF_INET && | ||
458 | addr2->sa.sa_family == AF_INET6 && | ||
459 | IPV6_ADDR_MAPPED == ipv6_addr_type(&addr2->v6.sin6_addr)) { | ||
460 | if (addr2->v6.sin6_port == addr1->v4.sin_port && | ||
461 | addr2->v6.sin6_addr.s6_addr32[3] == | ||
462 | addr1->v4.sin_addr.s_addr) | ||
463 | return 1; | ||
464 | } | ||
465 | if (addr2->sa.sa_family == AF_INET && | ||
466 | addr1->sa.sa_family == AF_INET6 && | ||
467 | IPV6_ADDR_MAPPED == ipv6_addr_type(&addr1->v6.sin6_addr)) { | ||
468 | if (addr1->v6.sin6_port == addr2->v4.sin_port && | ||
469 | addr1->v6.sin6_addr.s6_addr32[3] == | ||
470 | addr2->v4.sin_addr.s_addr) | ||
471 | return 1; | ||
472 | } | ||
473 | return 0; | ||
474 | } | ||
475 | if (!ipv6_addr_equal(&addr1->v6.sin6_addr, &addr2->v6.sin6_addr)) | ||
476 | return 0; | ||
477 | /* If this is a linklocal address, compare the scope_id. */ | ||
478 | if (ipv6_addr_type(&addr1->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) { | ||
479 | if (addr1->v6.sin6_scope_id && addr2->v6.sin6_scope_id && | ||
480 | (addr1->v6.sin6_scope_id != addr2->v6.sin6_scope_id)) { | ||
481 | return 0; | ||
482 | } | ||
483 | } | ||
484 | |||
485 | return 1; | ||
486 | } | ||
487 | |||
488 | /* Initialize addr struct to INADDR_ANY. */ | ||
489 | static void sctp_v6_inaddr_any(union sctp_addr *addr, unsigned short port) | ||
490 | { | ||
491 | memset(addr, 0x00, sizeof(union sctp_addr)); | ||
492 | addr->v6.sin6_family = AF_INET6; | ||
493 | addr->v6.sin6_port = port; | ||
494 | } | ||
495 | |||
496 | /* Is this a wildcard address? */ | ||
497 | static int sctp_v6_is_any(const union sctp_addr *addr) | ||
498 | { | ||
499 | int type; | ||
500 | type = ipv6_addr_type((struct in6_addr *)&addr->v6.sin6_addr); | ||
501 | return IPV6_ADDR_ANY == type; | ||
502 | } | ||
503 | |||
504 | /* Should this be available for binding? */ | ||
505 | static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp) | ||
506 | { | ||
507 | int type; | ||
508 | struct in6_addr *in6 = (struct in6_addr *)&addr->v6.sin6_addr; | ||
509 | |||
510 | type = ipv6_addr_type(in6); | ||
511 | if (IPV6_ADDR_ANY == type) | ||
512 | return 1; | ||
513 | if (type == IPV6_ADDR_MAPPED) { | ||
514 | if (sp && !sp->v4mapped) | ||
515 | return 0; | ||
516 | if (sp && ipv6_only_sock(sctp_opt2sk(sp))) | ||
517 | return 0; | ||
518 | sctp_v6_map_v4(addr); | ||
519 | return sctp_get_af_specific(AF_INET)->available(addr, sp); | ||
520 | } | ||
521 | if (!(type & IPV6_ADDR_UNICAST)) | ||
522 | return 0; | ||
523 | |||
524 | return ipv6_chk_addr(in6, NULL, 0); | ||
525 | } | ||
526 | |||
527 | /* This function checks if the address is a valid address to be used for | ||
528 | * SCTP. | ||
529 | * | ||
530 | * Output: | ||
531 | * Return 0 - If the address is a non-unicast or an illegal address. | ||
532 | * Return 1 - If the address is a unicast. | ||
533 | */ | ||
534 | static int sctp_v6_addr_valid(union sctp_addr *addr, struct sctp_sock *sp) | ||
535 | { | ||
536 | int ret = ipv6_addr_type(&addr->v6.sin6_addr); | ||
537 | |||
538 | /* Support v4-mapped-v6 address. */ | ||
539 | if (ret == IPV6_ADDR_MAPPED) { | ||
540 | /* Note: This routine is used in input, so v4-mapped-v6 | ||
541 | * addresses are disallowed here when there is no sctp_sock. | ||
542 | */ | ||
543 | if (!sp || !sp->v4mapped) | ||
544 | return 0; | ||
545 | if (sp && ipv6_only_sock(sctp_opt2sk(sp))) | ||
546 | return 0; | ||
547 | sctp_v6_map_v4(addr); | ||
548 | return sctp_get_af_specific(AF_INET)->addr_valid(addr, sp); | ||
549 | } | ||
550 | |||
551 | /* Is this a non-unicast address */ | ||
552 | if (!(ret & IPV6_ADDR_UNICAST)) | ||
553 | return 0; | ||
554 | |||
555 | return 1; | ||
556 | } | ||
557 | |||
558 | /* What is the scope of 'addr'? */ | ||
559 | static sctp_scope_t sctp_v6_scope(union sctp_addr *addr) | ||
560 | { | ||
561 | int v6scope; | ||
562 | sctp_scope_t retval; | ||
563 | |||
564 | /* The IPv6 scope is really a set of bit fields. | ||
565 | * See IFA_* in <net/if_inet6.h>. Map to a generic SCTP scope. | ||
566 | */ | ||
567 | |||
568 | v6scope = ipv6_addr_scope(&addr->v6.sin6_addr); | ||
569 | switch (v6scope) { | ||
570 | case IFA_HOST: | ||
571 | retval = SCTP_SCOPE_LOOPBACK; | ||
572 | break; | ||
573 | case IFA_LINK: | ||
574 | retval = SCTP_SCOPE_LINK; | ||
575 | break; | ||
576 | case IFA_SITE: | ||
577 | retval = SCTP_SCOPE_PRIVATE; | ||
578 | break; | ||
579 | default: | ||
580 | retval = SCTP_SCOPE_GLOBAL; | ||
581 | break; | ||
582 | }; | ||
583 | |||
584 | return retval; | ||
585 | } | ||
586 | |||
587 | /* Create and initialize a new sk for the socket to be returned by accept(). */ | ||
588 | static struct sock *sctp_v6_create_accept_sk(struct sock *sk, | ||
589 | struct sctp_association *asoc) | ||
590 | { | ||
591 | struct inet_sock *inet = inet_sk(sk); | ||
592 | struct sock *newsk; | ||
593 | struct inet_sock *newinet; | ||
594 | struct ipv6_pinfo *newnp, *np = inet6_sk(sk); | ||
595 | struct sctp6_sock *newsctp6sk; | ||
596 | |||
597 | newsk = sk_alloc(PF_INET6, GFP_KERNEL, sk->sk_prot, 1); | ||
598 | if (!newsk) | ||
599 | goto out; | ||
600 | |||
601 | sock_init_data(NULL, newsk); | ||
602 | |||
603 | newsk->sk_type = SOCK_STREAM; | ||
604 | |||
605 | newsk->sk_prot = sk->sk_prot; | ||
606 | newsk->sk_no_check = sk->sk_no_check; | ||
607 | newsk->sk_reuse = sk->sk_reuse; | ||
608 | |||
609 | newsk->sk_destruct = inet_sock_destruct; | ||
610 | newsk->sk_family = PF_INET6; | ||
611 | newsk->sk_protocol = IPPROTO_SCTP; | ||
612 | newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; | ||
613 | newsk->sk_shutdown = sk->sk_shutdown; | ||
614 | sock_reset_flag(sk, SOCK_ZAPPED); | ||
615 | |||
616 | newsctp6sk = (struct sctp6_sock *)newsk; | ||
617 | inet_sk(newsk)->pinet6 = &newsctp6sk->inet6; | ||
618 | |||
619 | newinet = inet_sk(newsk); | ||
620 | newnp = inet6_sk(newsk); | ||
621 | |||
622 | memcpy(newnp, np, sizeof(struct ipv6_pinfo)); | ||
623 | |||
624 | /* Initialize sk's sport, dport, rcv_saddr and daddr for getsockname() | ||
625 | * and getpeername(). | ||
626 | */ | ||
627 | newinet->sport = inet->sport; | ||
628 | newnp->saddr = np->saddr; | ||
629 | newnp->rcv_saddr = np->rcv_saddr; | ||
630 | newinet->dport = htons(asoc->peer.port); | ||
631 | sctp_v6_to_sk_daddr(&asoc->peer.primary_addr, newsk); | ||
632 | |||
633 | /* Init the ipv4 part of the socket since we can have sockets | ||
634 | * using v6 API for ipv4. | ||
635 | */ | ||
636 | newinet->uc_ttl = -1; | ||
637 | newinet->mc_loop = 1; | ||
638 | newinet->mc_ttl = 1; | ||
639 | newinet->mc_index = 0; | ||
640 | newinet->mc_list = NULL; | ||
641 | |||
642 | if (ipv4_config.no_pmtu_disc) | ||
643 | newinet->pmtudisc = IP_PMTUDISC_DONT; | ||
644 | else | ||
645 | newinet->pmtudisc = IP_PMTUDISC_WANT; | ||
646 | |||
647 | #ifdef INET_REFCNT_DEBUG | ||
648 | atomic_inc(&inet6_sock_nr); | ||
649 | atomic_inc(&inet_sock_nr); | ||
650 | #endif | ||
651 | |||
652 | if (newsk->sk_prot->init(newsk)) { | ||
653 | sk_common_release(newsk); | ||
654 | newsk = NULL; | ||
655 | } | ||
656 | |||
657 | out: | ||
658 | return newsk; | ||
659 | } | ||
660 | |||
661 | /* Map v4 address to mapped v6 address */ | ||
662 | static void sctp_v6_addr_v4map(struct sctp_sock *sp, union sctp_addr *addr) | ||
663 | { | ||
664 | if (sp->v4mapped && AF_INET == addr->sa.sa_family) | ||
665 | sctp_v4_map_v6(addr); | ||
666 | } | ||
667 | |||
668 | /* Where did this skb come from? */ | ||
669 | static int sctp_v6_skb_iif(const struct sk_buff *skb) | ||
670 | { | ||
671 | struct inet6_skb_parm *opt = (struct inet6_skb_parm *) skb->cb; | ||
672 | return opt->iif; | ||
673 | } | ||
674 | |||
675 | /* Was this packet marked by Explicit Congestion Notification? */ | ||
676 | static int sctp_v6_is_ce(const struct sk_buff *skb) | ||
677 | { | ||
678 | return *((__u32 *)(skb->nh.ipv6h)) & htonl(1<<20); | ||
679 | } | ||
680 | |||
681 | /* Dump the v6 addr to the seq file. */ | ||
682 | static void sctp_v6_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr) | ||
683 | { | ||
684 | seq_printf(seq, "%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ", | ||
685 | NIP6(addr->v6.sin6_addr)); | ||
686 | } | ||
687 | |||
688 | /* Initialize a PF_INET6 socket msg_name. */ | ||
689 | static void sctp_inet6_msgname(char *msgname, int *addr_len) | ||
690 | { | ||
691 | struct sockaddr_in6 *sin6; | ||
692 | |||
693 | sin6 = (struct sockaddr_in6 *)msgname; | ||
694 | sin6->sin6_family = AF_INET6; | ||
695 | sin6->sin6_flowinfo = 0; | ||
696 | sin6->sin6_scope_id = 0; /*FIXME */ | ||
697 | *addr_len = sizeof(struct sockaddr_in6); | ||
698 | } | ||
699 | |||
700 | /* Initialize a PF_INET6 msgname from a ulpevent. */ | ||
701 | static void sctp_inet6_event_msgname(struct sctp_ulpevent *event, | ||
702 | char *msgname, int *addrlen) | ||
703 | { | ||
704 | struct sockaddr_in6 *sin6, *sin6from; | ||
705 | |||
706 | if (msgname) { | ||
707 | union sctp_addr *addr; | ||
708 | struct sctp_association *asoc; | ||
709 | |||
710 | asoc = event->asoc; | ||
711 | sctp_inet6_msgname(msgname, addrlen); | ||
712 | sin6 = (struct sockaddr_in6 *)msgname; | ||
713 | sin6->sin6_port = htons(asoc->peer.port); | ||
714 | addr = &asoc->peer.primary_addr; | ||
715 | |||
716 | /* Note: If we go to a common v6 format, this code | ||
717 | * will change. | ||
718 | */ | ||
719 | |||
720 | /* Map ipv4 address into v4-mapped-on-v6 address. */ | ||
721 | if (sctp_sk(asoc->base.sk)->v4mapped && | ||
722 | AF_INET == addr->sa.sa_family) { | ||
723 | sctp_v4_map_v6((union sctp_addr *)sin6); | ||
724 | sin6->sin6_addr.s6_addr32[3] = | ||
725 | addr->v4.sin_addr.s_addr; | ||
726 | return; | ||
727 | } | ||
728 | |||
729 | sin6from = &asoc->peer.primary_addr.v6; | ||
730 | ipv6_addr_copy(&sin6->sin6_addr, &sin6from->sin6_addr); | ||
731 | if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) | ||
732 | sin6->sin6_scope_id = sin6from->sin6_scope_id; | ||
733 | } | ||
734 | } | ||
735 | |||
736 | /* Initialize a msg_name from an inbound skb. */ | ||
737 | static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname, | ||
738 | int *addr_len) | ||
739 | { | ||
740 | struct sctphdr *sh; | ||
741 | struct sockaddr_in6 *sin6; | ||
742 | |||
743 | if (msgname) { | ||
744 | sctp_inet6_msgname(msgname, addr_len); | ||
745 | sin6 = (struct sockaddr_in6 *)msgname; | ||
746 | sh = (struct sctphdr *)skb->h.raw; | ||
747 | sin6->sin6_port = sh->source; | ||
748 | |||
749 | /* Map ipv4 address into v4-mapped-on-v6 address. */ | ||
750 | if (sctp_sk(skb->sk)->v4mapped && | ||
751 | skb->nh.iph->version == 4) { | ||
752 | sctp_v4_map_v6((union sctp_addr *)sin6); | ||
753 | sin6->sin6_addr.s6_addr32[3] = skb->nh.iph->saddr; | ||
754 | return; | ||
755 | } | ||
756 | |||
757 | /* Otherwise, just copy the v6 address. */ | ||
758 | ipv6_addr_copy(&sin6->sin6_addr, &skb->nh.ipv6h->saddr); | ||
759 | if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) { | ||
760 | struct sctp_ulpevent *ev = sctp_skb2event(skb); | ||
761 | sin6->sin6_scope_id = ev->iif; | ||
762 | } | ||
763 | } | ||
764 | } | ||
765 | |||
766 | /* Do we support this AF? */ | ||
767 | static int sctp_inet6_af_supported(sa_family_t family, struct sctp_sock *sp) | ||
768 | { | ||
769 | switch (family) { | ||
770 | case AF_INET6: | ||
771 | return 1; | ||
772 | /* v4-mapped-v6 addresses */ | ||
773 | case AF_INET: | ||
774 | if (!__ipv6_only_sock(sctp_opt2sk(sp)) && sp->v4mapped) | ||
775 | return 1; | ||
776 | default: | ||
777 | return 0; | ||
778 | } | ||
779 | } | ||
780 | |||
781 | /* Address matching with wildcards allowed. This extra level | ||
782 | * of indirection lets us choose whether a PF_INET6 socket should | ||
783 | * disallow any v4 addresses. | ||
784 | */ | ||
785 | static int sctp_inet6_cmp_addr(const union sctp_addr *addr1, | ||
786 | const union sctp_addr *addr2, | ||
787 | struct sctp_sock *opt) | ||
788 | { | ||
789 | struct sctp_af *af1, *af2; | ||
790 | |||
791 | af1 = sctp_get_af_specific(addr1->sa.sa_family); | ||
792 | af2 = sctp_get_af_specific(addr2->sa.sa_family); | ||
793 | |||
794 | if (!af1 || !af2) | ||
795 | return 0; | ||
796 | /* Today, wildcard AF_INET/AF_INET6. */ | ||
797 | if (sctp_is_any(addr1) || sctp_is_any(addr2)) | ||
798 | return 1; | ||
799 | |||
800 | if (addr1->sa.sa_family != addr2->sa.sa_family) | ||
801 | return 0; | ||
802 | |||
803 | return af1->cmp_addr(addr1, addr2); | ||
804 | } | ||
805 | |||
806 | /* Verify that the provided sockaddr looks bindable. Common | ||
807 | * verification has already been taken care of. | ||
808 | */ | ||
809 | static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr) | ||
810 | { | ||
811 | struct sctp_af *af; | ||
812 | |||
813 | /* ASSERT: address family has already been verified. */ | ||
814 | if (addr->sa.sa_family != AF_INET6) | ||
815 | af = sctp_get_af_specific(addr->sa.sa_family); | ||
816 | else { | ||
817 | struct sock *sk; | ||
818 | int type = ipv6_addr_type(&addr->v6.sin6_addr); | ||
819 | sk = sctp_opt2sk(opt); | ||
820 | if (type & IPV6_ADDR_LINKLOCAL) { | ||
821 | /* Note: Behavior similar to af_inet6.c: | ||
822 | * 1) Overrides previous bound_dev_if | ||
823 | * 2) Destructive even if bind isn't successful. | ||
824 | */ | ||
825 | |||
826 | if (addr->v6.sin6_scope_id) | ||
827 | sk->sk_bound_dev_if = addr->v6.sin6_scope_id; | ||
828 | if (!sk->sk_bound_dev_if) | ||
829 | return 0; | ||
830 | } | ||
831 | af = opt->pf->af; | ||
832 | } | ||
833 | return af->available(addr, opt); | ||
834 | } | ||
835 | |||
836 | /* Verify that the provided sockaddr looks sendable. Common | ||
837 | * verification has already been taken care of. | ||
838 | */ | ||
839 | static int sctp_inet6_send_verify(struct sctp_sock *opt, union sctp_addr *addr) | ||
840 | { | ||
841 | struct sctp_af *af = NULL; | ||
842 | |||
843 | /* ASSERT: address family has already been verified. */ | ||
844 | if (addr->sa.sa_family != AF_INET6) | ||
845 | af = sctp_get_af_specific(addr->sa.sa_family); | ||
846 | else { | ||
847 | struct sock *sk; | ||
848 | int type = ipv6_addr_type(&addr->v6.sin6_addr); | ||
849 | sk = sctp_opt2sk(opt); | ||
850 | if (type & IPV6_ADDR_LINKLOCAL) { | ||
851 | /* Note: Behavior similar to af_inet6.c: | ||
852 | * 1) Overrides previous bound_dev_if | ||
853 | * 2) Destructive even if bind isn't successful. | ||
854 | */ | ||
855 | |||
856 | if (addr->v6.sin6_scope_id) | ||
857 | sk->sk_bound_dev_if = addr->v6.sin6_scope_id; | ||
858 | if (!sk->sk_bound_dev_if) | ||
859 | return 0; | ||
860 | } | ||
861 | af = opt->pf->af; | ||
862 | } | ||
863 | |||
864 | return af != NULL; | ||
865 | } | ||
866 | |||
867 | /* Fill in Supported Address Type information for INIT and INIT-ACK | ||
868 | * chunks. Note: In the future, we may want to look at sock options | ||
869 | * to determine whether a PF_INET6 socket really wants to have IPv4 | ||
870 | * addresses. | ||
871 | * Returns the number of address types supported. | ||
872 | */ | ||
873 | static int sctp_inet6_supported_addrs(const struct sctp_sock *opt, | ||
874 | __u16 *types) | ||
875 | { | ||
876 | types[0] = SCTP_PARAM_IPV4_ADDRESS; | ||
877 | types[1] = SCTP_PARAM_IPV6_ADDRESS; | ||
878 | return 2; | ||
879 | } | ||
880 | |||
881 | static struct proto_ops inet6_seqpacket_ops = { | ||
882 | .family = PF_INET6, | ||
883 | .owner = THIS_MODULE, | ||
884 | .release = inet6_release, | ||
885 | .bind = inet6_bind, | ||
886 | .connect = inet_dgram_connect, | ||
887 | .socketpair = sock_no_socketpair, | ||
888 | .accept = inet_accept, | ||
889 | .getname = inet6_getname, | ||
890 | .poll = sctp_poll, | ||
891 | .ioctl = inet6_ioctl, | ||
892 | .listen = sctp_inet_listen, | ||
893 | .shutdown = inet_shutdown, | ||
894 | .setsockopt = sock_common_setsockopt, | ||
895 | .getsockopt = sock_common_getsockopt, | ||
896 | .sendmsg = inet_sendmsg, | ||
897 | .recvmsg = sock_common_recvmsg, | ||
898 | .mmap = sock_no_mmap, | ||
899 | }; | ||
900 | |||
901 | static struct inet_protosw sctpv6_seqpacket_protosw = { | ||
902 | .type = SOCK_SEQPACKET, | ||
903 | .protocol = IPPROTO_SCTP, | ||
904 | .prot = &sctpv6_prot, | ||
905 | .ops = &inet6_seqpacket_ops, | ||
906 | .capability = -1, | ||
907 | .no_check = 0, | ||
908 | .flags = SCTP_PROTOSW_FLAG | ||
909 | }; | ||
910 | static struct inet_protosw sctpv6_stream_protosw = { | ||
911 | .type = SOCK_STREAM, | ||
912 | .protocol = IPPROTO_SCTP, | ||
913 | .prot = &sctpv6_prot, | ||
914 | .ops = &inet6_seqpacket_ops, | ||
915 | .capability = -1, | ||
916 | .no_check = 0, | ||
917 | .flags = SCTP_PROTOSW_FLAG, | ||
918 | }; | ||
919 | |||
920 | static int sctp6_rcv(struct sk_buff **pskb, unsigned int *nhoffp) | ||
921 | { | ||
922 | return sctp_rcv(*pskb) ? -1 : 0; | ||
923 | } | ||
924 | |||
925 | static struct inet6_protocol sctpv6_protocol = { | ||
926 | .handler = sctp6_rcv, | ||
927 | .err_handler = sctp_v6_err, | ||
928 | .flags = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL, | ||
929 | }; | ||
930 | |||
931 | static struct sctp_af sctp_ipv6_specific = { | ||
932 | .sctp_xmit = sctp_v6_xmit, | ||
933 | .setsockopt = ipv6_setsockopt, | ||
934 | .getsockopt = ipv6_getsockopt, | ||
935 | .get_dst = sctp_v6_get_dst, | ||
936 | .get_saddr = sctp_v6_get_saddr, | ||
937 | .copy_addrlist = sctp_v6_copy_addrlist, | ||
938 | .from_skb = sctp_v6_from_skb, | ||
939 | .from_sk = sctp_v6_from_sk, | ||
940 | .to_sk_saddr = sctp_v6_to_sk_saddr, | ||
941 | .to_sk_daddr = sctp_v6_to_sk_daddr, | ||
942 | .from_addr_param = sctp_v6_from_addr_param, | ||
943 | .to_addr_param = sctp_v6_to_addr_param, | ||
944 | .dst_saddr = sctp_v6_dst_saddr, | ||
945 | .cmp_addr = sctp_v6_cmp_addr, | ||
946 | .scope = sctp_v6_scope, | ||
947 | .addr_valid = sctp_v6_addr_valid, | ||
948 | .inaddr_any = sctp_v6_inaddr_any, | ||
949 | .is_any = sctp_v6_is_any, | ||
950 | .available = sctp_v6_available, | ||
951 | .skb_iif = sctp_v6_skb_iif, | ||
952 | .is_ce = sctp_v6_is_ce, | ||
953 | .seq_dump_addr = sctp_v6_seq_dump_addr, | ||
954 | .net_header_len = sizeof(struct ipv6hdr), | ||
955 | .sockaddr_len = sizeof(struct sockaddr_in6), | ||
956 | .sa_family = AF_INET6, | ||
957 | }; | ||
958 | |||
959 | static struct sctp_pf sctp_pf_inet6_specific = { | ||
960 | .event_msgname = sctp_inet6_event_msgname, | ||
961 | .skb_msgname = sctp_inet6_skb_msgname, | ||
962 | .af_supported = sctp_inet6_af_supported, | ||
963 | .cmp_addr = sctp_inet6_cmp_addr, | ||
964 | .bind_verify = sctp_inet6_bind_verify, | ||
965 | .send_verify = sctp_inet6_send_verify, | ||
966 | .supported_addrs = sctp_inet6_supported_addrs, | ||
967 | .create_accept_sk = sctp_v6_create_accept_sk, | ||
968 | .addr_v4map = sctp_v6_addr_v4map, | ||
969 | .af = &sctp_ipv6_specific, | ||
970 | }; | ||
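The sctp_af and sctp_pf tables above are the per-address-family dispatch points: the SCTP core looks them up at run time instead of hard-coding v4 or v6 behaviour. A minimal sketch of that pattern, using only the sctp_get_af_specific() lookup and the cmp_addr callback registered in this file (the wrapper function itself is hypothetical and not part of this patch):

/* Sketch only: illustrates the dispatch pattern; this helper does not
 * exist in the patch.
 */
static int example_cmp_addr_any_family(const union sctp_addr *a,
				       const union sctp_addr *b)
{
	struct sctp_af *af = sctp_get_af_specific(a->sa.sa_family);

	if (!af)
		return 0;	/* unknown address family */

	/* Dispatches to the registered v4 or v6 cmp_addr implementation. */
	return af->cmp_addr(a, b);
}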
971 | |||
972 | /* Initialize IPv6 support and register with inet6 stack. */ | ||
973 | int sctp_v6_init(void) | ||
974 | { | ||
975 | int rc = proto_register(&sctpv6_prot, 1); | ||
976 | |||
977 | if (rc) | ||
978 | goto out; | ||
979 | /* Register inet6 protocol. */ | ||
980 | rc = -EAGAIN; | ||
981 | if (inet6_add_protocol(&sctpv6_protocol, IPPROTO_SCTP) < 0) | ||
982 | goto out_unregister_sctp_proto; | ||
983 | |||
984 | /* Add SCTPv6(UDP and TCP style) to inetsw6 linked list. */ | ||
985 | inet6_register_protosw(&sctpv6_seqpacket_protosw); | ||
986 | inet6_register_protosw(&sctpv6_stream_protosw); | ||
987 | |||
988 | /* Register the SCTP specific PF_INET6 functions. */ | ||
989 | sctp_register_pf(&sctp_pf_inet6_specific, PF_INET6); | ||
990 | |||
991 | /* Register the SCTP specific AF_INET6 functions. */ | ||
992 | sctp_register_af(&sctp_ipv6_specific); | ||
993 | |||
994 | /* Register notifier for inet6 address additions/deletions. */ | ||
995 | register_inet6addr_notifier(&sctp_inet6addr_notifier); | ||
996 | rc = 0; | ||
997 | out: | ||
998 | return rc; | ||
999 | out_unregister_sctp_proto: | ||
1000 | proto_unregister(&sctpv6_prot); | ||
1001 | goto out; | ||
1002 | } | ||
1003 | |||
1004 | /* IPv6 specific exit support. */ | ||
1005 | void sctp_v6_exit(void) | ||
1006 | { | ||
1007 | list_del(&sctp_ipv6_specific.list); | ||
1008 | inet6_del_protocol(&sctpv6_protocol, IPPROTO_SCTP); | ||
1009 | inet6_unregister_protosw(&sctpv6_seqpacket_protosw); | ||
1010 | inet6_unregister_protosw(&sctpv6_stream_protosw); | ||
1011 | unregister_inet6addr_notifier(&sctp_inet6addr_notifier); | ||
1012 | proto_unregister(&sctpv6_prot); | ||
1013 | } | ||
diff --git a/net/sctp/objcnt.c b/net/sctp/objcnt.c new file mode 100644 index 000000000000..0781e5d509fd --- /dev/null +++ b/net/sctp/objcnt.c | |||
@@ -0,0 +1,140 @@ | |||
1 | /* SCTP kernel reference Implementation | ||
2 | * (C) Copyright IBM Corp. 2001, 2004 | ||
3 | * | ||
4 | * This file is part of the SCTP kernel reference Implementation | ||
5 | * | ||
6 | * Support for memory object debugging. This allows one to monitor the | ||
7 | * object allocations/deallocations for types instrumented for this | ||
8 | * via the proc fs. | ||
9 | * | ||
10 | * The SCTP reference implementation is free software; | ||
11 | * you can redistribute it and/or modify it under the terms of | ||
12 | * the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2, or (at your option) | ||
14 | * any later version. | ||
15 | * | ||
16 | * The SCTP reference implementation is distributed in the hope that it | ||
17 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
18 | * ************************ | ||
19 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
20 | * See the GNU General Public License for more details. | ||
21 | * | ||
22 | * You should have received a copy of the GNU General Public License | ||
23 | * along with GNU CC; see the file COPYING. If not, write to | ||
24 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
25 | * Boston, MA 02111-1307, USA. | ||
26 | * | ||
27 | * Please send any bug reports or fixes you make to the | ||
28 | * email address(es): | ||
29 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
30 | * | ||
31 | * Or submit a bug report through the following website: | ||
32 | * http://www.sf.net/projects/lksctp | ||
33 | * | ||
34 | * Written or modified by: | ||
35 | * Jon Grimm <jgrimm@us.ibm.com> | ||
36 | * | ||
37 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
38 | * be incorporated into the next SCTP release. | ||
39 | */ | ||
40 | |||
41 | #include <linux/kernel.h> | ||
42 | #include <net/sctp/sctp.h> | ||
43 | |||
44 | /* | ||
45 | * Global counters to count raw object allocation counts. | ||
46 | * To add new counters, choose a unique suffix for the variable | ||
47 | * name as the helper macros key off this suffix to make | ||
48 | * life easier for the programmer. | ||
49 | */ | ||
50 | |||
51 | SCTP_DBG_OBJCNT(sock); | ||
52 | SCTP_DBG_OBJCNT(ep); | ||
53 | SCTP_DBG_OBJCNT(transport); | ||
54 | SCTP_DBG_OBJCNT(assoc); | ||
55 | SCTP_DBG_OBJCNT(bind_addr); | ||
56 | SCTP_DBG_OBJCNT(bind_bucket); | ||
57 | SCTP_DBG_OBJCNT(chunk); | ||
58 | SCTP_DBG_OBJCNT(addr); | ||
59 | SCTP_DBG_OBJCNT(ssnmap); | ||
60 | SCTP_DBG_OBJCNT(datamsg); | ||
61 | |||
62 | /* An array to make it easy to pretty print the debug information | ||
63 | * to the proc fs. | ||
64 | */ | ||
65 | static sctp_dbg_objcnt_entry_t sctp_dbg_objcnt[] = { | ||
66 | SCTP_DBG_OBJCNT_ENTRY(sock), | ||
67 | SCTP_DBG_OBJCNT_ENTRY(ep), | ||
68 | SCTP_DBG_OBJCNT_ENTRY(assoc), | ||
69 | SCTP_DBG_OBJCNT_ENTRY(transport), | ||
70 | SCTP_DBG_OBJCNT_ENTRY(chunk), | ||
71 | SCTP_DBG_OBJCNT_ENTRY(bind_addr), | ||
72 | SCTP_DBG_OBJCNT_ENTRY(bind_bucket), | ||
73 | SCTP_DBG_OBJCNT_ENTRY(addr), | ||
74 | SCTP_DBG_OBJCNT_ENTRY(ssnmap), | ||
75 | SCTP_DBG_OBJCNT_ENTRY(datamsg), | ||
76 | }; | ||
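The SCTP_DBG_OBJCNT() and SCTP_DBG_OBJCNT_ENTRY() macros used above come from the SCTP headers; roughly (an approximation for illustration, not copied from this patch) they expand to an atomic counter plus a label/pointer pair, and instrumented allocation sites bump the counter through companion INC/DEC macros:

/* Approximate expansions, for illustration only; the real macros live
 * under include/net/sctp/ and may differ in detail.
 */
#define SCTP_DBG_OBJCNT(name) \
	atomic_t sctp_dbg_objcnt_##name = ATOMIC_INIT(0)

#define SCTP_DBG_OBJCNT_ENTRY(name) \
	{ .label = #name, .counter = &sctp_dbg_objcnt_##name }

/* An instrumented allocation site would then do something like:
 *	SCTP_DBG_OBJCNT_INC(sock);
 */

With this debug option enabled, reading the proc entry created below (presumably /proc/net/sctp/sctp_dbg_objcnt) prints one "label: count" line per entry, each padded to 128 bytes by sctp_dbg_objcnt_read().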
77 | |||
78 | /* Callback from procfs to read out objcount information. | ||
79 | * Walk through the entries in the sctp_dbg_objcnt array, dumping | ||
80 | * the raw object counts for each monitored type. | ||
81 | * | ||
82 | * This code was modified from similar code in route.c | ||
83 | */ | ||
84 | static int sctp_dbg_objcnt_read(char *buffer, char **start, off_t offset, | ||
85 | int length, int *eof, void *data) | ||
86 | { | ||
87 | int len = 0; | ||
88 | off_t pos = 0; | ||
89 | int entries; | ||
90 | int i; | ||
91 | char temp[128]; | ||
92 | |||
93 | /* How many entries? */ | ||
94 | entries = ARRAY_SIZE(sctp_dbg_objcnt); | ||
95 | |||
96 | /* Walk the entries and print out the debug information | ||
97 | * for proc fs. | ||
98 | */ | ||
99 | for (i = 0; i < entries; i++) { | ||
100 | pos += 128; | ||
101 | |||
102 | /* Skip ahead. */ | ||
103 | if (pos <= offset) { | ||
104 | len = 0; | ||
105 | continue; | ||
106 | } | ||
107 | /* Print out each entry. */ | ||
108 | sprintf(temp, "%s: %d", | ||
109 | sctp_dbg_objcnt[i].label, | ||
110 | atomic_read(sctp_dbg_objcnt[i].counter)); | ||
111 | |||
112 | sprintf(buffer + len, "%-127s\n", temp); | ||
113 | len += 128; | ||
114 | if (pos >= offset+length) | ||
115 | goto done; | ||
116 | } | ||
117 | |||
118 | done: | ||
119 | *start = buffer + len - (pos - offset); | ||
120 | len = pos - offset; | ||
121 | if (len > length) | ||
122 | len = length; | ||
123 | |||
124 | return len; | ||
125 | } | ||
126 | |||
127 | /* Initialize the objcount in the proc filesystem. */ | ||
128 | void sctp_dbg_objcnt_init(void) | ||
129 | { | ||
130 | create_proc_read_entry("sctp_dbg_objcnt", 0, proc_net_sctp, | ||
131 | sctp_dbg_objcnt_read, NULL); | ||
132 | } | ||
133 | |||
134 | /* Cleanup the objcount entry in the proc filesystem. */ | ||
135 | void sctp_dbg_objcnt_exit(void) | ||
136 | { | ||
137 | remove_proc_entry("sctp_dbg_objcnt", proc_net_sctp); | ||
138 | } | ||
139 | |||
140 | |||
diff --git a/net/sctp/output.c b/net/sctp/output.c new file mode 100644 index 000000000000..9013f64f5219 --- /dev/null +++ b/net/sctp/output.c | |||
@@ -0,0 +1,646 @@ | |||
1 | /* SCTP kernel reference Implementation | ||
2 | * (C) Copyright IBM Corp. 2001, 2004 | ||
3 | * Copyright (c) 1999-2000 Cisco, Inc. | ||
4 | * Copyright (c) 1999-2001 Motorola, Inc. | ||
5 | * | ||
6 | * This file is part of the SCTP kernel reference Implementation | ||
7 | * | ||
8 | * These functions handle output processing. | ||
9 | * | ||
10 | * The SCTP reference implementation is free software; | ||
11 | * you can redistribute it and/or modify it under the terms of | ||
12 | * the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2, or (at your option) | ||
14 | * any later version. | ||
15 | * | ||
16 | * The SCTP reference implementation is distributed in the hope that it | ||
17 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
18 | * ************************ | ||
19 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
20 | * See the GNU General Public License for more details. | ||
21 | * | ||
22 | * You should have received a copy of the GNU General Public License | ||
23 | * along with GNU CC; see the file COPYING. If not, write to | ||
24 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
25 | * Boston, MA 02111-1307, USA. | ||
26 | * | ||
27 | * Please send any bug reports or fixes you make to the | ||
28 | * email address(es): | ||
29 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
30 | * | ||
31 | * Or submit a bug report through the following website: | ||
32 | * http://www.sf.net/projects/lksctp | ||
33 | * | ||
34 | * Written or modified by: | ||
35 | * La Monte H.P. Yarroll <piggy@acm.org> | ||
36 | * Karl Knutson <karl@athena.chicago.il.us> | ||
37 | * Jon Grimm <jgrimm@austin.ibm.com> | ||
38 | * Sridhar Samudrala <sri@us.ibm.com> | ||
39 | * | ||
40 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
41 | * be incorporated into the next SCTP release. | ||
42 | */ | ||
43 | |||
44 | #include <linux/types.h> | ||
45 | #include <linux/kernel.h> | ||
46 | #include <linux/wait.h> | ||
47 | #include <linux/time.h> | ||
48 | #include <linux/ip.h> | ||
49 | #include <linux/ipv6.h> | ||
50 | #include <linux/init.h> | ||
51 | #include <net/inet_ecn.h> | ||
52 | #include <net/icmp.h> | ||
53 | |||
54 | #ifndef TEST_FRAME | ||
55 | #include <net/tcp.h> | ||
56 | #endif /* TEST_FRAME (not defined) */ | ||
57 | |||
58 | #include <linux/socket.h> /* for sa_family_t */ | ||
59 | #include <net/sock.h> | ||
60 | |||
61 | #include <net/sctp/sctp.h> | ||
62 | #include <net/sctp/sm.h> | ||
63 | |||
64 | /* Forward declarations for private helpers. */ | ||
65 | static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet, | ||
66 | struct sctp_chunk *chunk); | ||
67 | |||
68 | /* Config a packet. | ||
69 | * This appears to be a follow-up set of initializations. | ||
70 | */ | ||
71 | struct sctp_packet *sctp_packet_config(struct sctp_packet *packet, | ||
72 | __u32 vtag, int ecn_capable) | ||
73 | { | ||
74 | struct sctp_chunk *chunk = NULL; | ||
75 | |||
76 | SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __FUNCTION__, | ||
77 | packet, vtag); | ||
78 | |||
79 | packet->vtag = vtag; | ||
80 | packet->has_cookie_echo = 0; | ||
81 | packet->has_sack = 0; | ||
82 | packet->ipfragok = 0; | ||
83 | |||
84 | if (ecn_capable && sctp_packet_empty(packet)) { | ||
85 | chunk = sctp_get_ecne_prepend(packet->transport->asoc); | ||
86 | |||
87 | /* If there is a prepend chunk, stick it on the list before | ||
88 | * any other chunks get appended. | ||
89 | */ | ||
90 | if (chunk) | ||
91 | sctp_packet_append_chunk(packet, chunk); | ||
92 | } | ||
93 | |||
94 | return packet; | ||
95 | } | ||
96 | |||
97 | /* Initialize the packet structure. */ | ||
98 | struct sctp_packet *sctp_packet_init(struct sctp_packet *packet, | ||
99 | struct sctp_transport *transport, | ||
100 | __u16 sport, __u16 dport) | ||
101 | { | ||
102 | struct sctp_association *asoc = transport->asoc; | ||
103 | size_t overhead; | ||
104 | |||
105 | SCTP_DEBUG_PRINTK("%s: packet:%p transport:%p\n", __FUNCTION__, | ||
106 | packet, transport); | ||
107 | |||
108 | packet->transport = transport; | ||
109 | packet->source_port = sport; | ||
110 | packet->destination_port = dport; | ||
111 | skb_queue_head_init(&packet->chunks); | ||
112 | if (asoc) { | ||
113 | struct sctp_sock *sp = sctp_sk(asoc->base.sk); | ||
114 | overhead = sp->pf->af->net_header_len; | ||
115 | } else { | ||
116 | overhead = sizeof(struct ipv6hdr); | ||
117 | } | ||
118 | overhead += sizeof(struct sctphdr); | ||
119 | packet->overhead = overhead; | ||
120 | packet->size = overhead; | ||
121 | packet->vtag = 0; | ||
122 | packet->has_cookie_echo = 0; | ||
123 | packet->has_sack = 0; | ||
124 | packet->ipfragok = 0; | ||
125 | packet->malloced = 0; | ||
126 | return packet; | ||
127 | } | ||
128 | |||
129 | /* Free a packet. */ | ||
130 | void sctp_packet_free(struct sctp_packet *packet) | ||
131 | { | ||
132 | struct sctp_chunk *chunk; | ||
133 | |||
134 | SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet); | ||
135 | |||
136 | while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL) | ||
137 | sctp_chunk_free(chunk); | ||
138 | |||
139 | if (packet->malloced) | ||
140 | kfree(packet); | ||
141 | } | ||
142 | |||
143 | /* This routine tries to append the chunk to the offered packet. If adding | ||
144 | * the chunk causes the packet to exceed the path MTU and COOKIE_ECHO chunk | ||
145 | * is not present in the packet, it transmits the input packet. | ||
146 | * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long | ||
147 | * as it can fit in the packet, but any more data that does not fit in this | ||
148 | * packet can be sent only after receiving the COOKIE_ACK. | ||
149 | */ | ||
150 | sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet, | ||
151 | struct sctp_chunk *chunk) | ||
152 | { | ||
153 | sctp_xmit_t retval; | ||
154 | int error = 0; | ||
155 | |||
156 | SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __FUNCTION__, | ||
157 | packet, chunk); | ||
158 | |||
159 | switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) { | ||
160 | case SCTP_XMIT_PMTU_FULL: | ||
161 | if (!packet->has_cookie_echo) { | ||
162 | error = sctp_packet_transmit(packet); | ||
163 | if (error < 0) | ||
164 | chunk->skb->sk->sk_err = -error; | ||
165 | |||
166 | /* If we have an empty packet, then we can NOT ever | ||
167 | * return PMTU_FULL. | ||
168 | */ | ||
169 | retval = sctp_packet_append_chunk(packet, chunk); | ||
170 | } | ||
171 | break; | ||
172 | |||
173 | case SCTP_XMIT_RWND_FULL: | ||
174 | case SCTP_XMIT_OK: | ||
175 | case SCTP_XMIT_NAGLE_DELAY: | ||
176 | break; | ||
177 | }; | ||
178 | |||
179 | return retval; | ||
180 | } | ||
181 | |||
182 | /* Try to bundle a SACK with the packet. */ | ||
183 | static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt, | ||
184 | struct sctp_chunk *chunk) | ||
185 | { | ||
186 | sctp_xmit_t retval = SCTP_XMIT_OK; | ||
187 | |||
188 | /* If sending DATA and haven't already bundled a SACK, try to | ||
189 | * bundle one in to the packet. | ||
190 | */ | ||
191 | if (sctp_chunk_is_data(chunk) && !pkt->has_sack && | ||
192 | !pkt->has_cookie_echo) { | ||
193 | struct sctp_association *asoc; | ||
194 | asoc = pkt->transport->asoc; | ||
195 | |||
196 | if (asoc->a_rwnd > asoc->rwnd) { | ||
197 | struct sctp_chunk *sack; | ||
198 | asoc->a_rwnd = asoc->rwnd; | ||
199 | sack = sctp_make_sack(asoc); | ||
200 | if (sack) { | ||
201 | struct timer_list *timer; | ||
202 | retval = sctp_packet_append_chunk(pkt, sack); | ||
203 | asoc->peer.sack_needed = 0; | ||
204 | timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK]; | ||
205 | if (timer_pending(timer) && del_timer(timer)) | ||
206 | sctp_association_put(asoc); | ||
207 | } | ||
208 | } | ||
209 | } | ||
210 | return retval; | ||
211 | } | ||
212 | |||
213 | /* Append a chunk to the offered packet, reporting back any inability to do | ||
214 | * so. | ||
215 | */ | ||
216 | sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet, | ||
217 | struct sctp_chunk *chunk) | ||
218 | { | ||
219 | sctp_xmit_t retval = SCTP_XMIT_OK; | ||
220 | __u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length)); | ||
221 | size_t psize; | ||
222 | size_t pmtu; | ||
223 | int too_big; | ||
224 | |||
225 | SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __FUNCTION__, packet, | ||
226 | chunk); | ||
227 | |||
228 | retval = sctp_packet_bundle_sack(packet, chunk); | ||
229 | psize = packet->size; | ||
230 | |||
231 | if (retval != SCTP_XMIT_OK) | ||
232 | goto finish; | ||
233 | |||
234 | pmtu = ((packet->transport->asoc) ? | ||
235 | (packet->transport->asoc->pmtu) : | ||
236 | (packet->transport->pmtu)); | ||
237 | |||
238 | too_big = (psize + chunk_len > pmtu); | ||
239 | |||
240 | /* Decide if we need to fragment or resubmit later. */ | ||
241 | if (too_big) { | ||
242 | /* Both control chunks and data chunks with TSNs are | ||
243 | * non-fragmentable. | ||
244 | */ | ||
245 | if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk)) { | ||
246 | /* We no longer do re-fragmentation. | ||
247 | * Just fragment at the IP layer, if we | ||
248 | * actually hit this condition | ||
249 | */ | ||
250 | packet->ipfragok = 1; | ||
251 | goto append; | ||
252 | |||
253 | } else { | ||
254 | retval = SCTP_XMIT_PMTU_FULL; | ||
255 | goto finish; | ||
256 | } | ||
257 | } | ||
258 | |||
259 | append: | ||
260 | /* We believe that this chunk is OK to add to the packet (as | ||
261 | * long as we have the cwnd for it). | ||
262 | */ | ||
263 | |||
264 | /* DATA is a special case since we must examine both rwnd and cwnd | ||
265 | * before we send DATA. | ||
266 | */ | ||
267 | if (sctp_chunk_is_data(chunk)) { | ||
268 | retval = sctp_packet_append_data(packet, chunk); | ||
269 | /* Disallow SACK bundling after DATA. */ | ||
270 | packet->has_sack = 1; | ||
271 | if (SCTP_XMIT_OK != retval) | ||
272 | goto finish; | ||
273 | } else if (SCTP_CID_COOKIE_ECHO == chunk->chunk_hdr->type) | ||
274 | packet->has_cookie_echo = 1; | ||
275 | else if (SCTP_CID_SACK == chunk->chunk_hdr->type) | ||
276 | packet->has_sack = 1; | ||
277 | |||
278 | /* It is OK to send this chunk. */ | ||
279 | __skb_queue_tail(&packet->chunks, (struct sk_buff *)chunk); | ||
280 | packet->size += chunk_len; | ||
281 | chunk->transport = packet->transport; | ||
282 | finish: | ||
283 | return retval; | ||
284 | } | ||
285 | |||
286 | /* All packets are sent to the network through this function from | ||
287 | * sctp_outq_tail(). | ||
288 | * | ||
289 | * The return value is a normal kernel error return value. | ||
290 | */ | ||
291 | int sctp_packet_transmit(struct sctp_packet *packet) | ||
292 | { | ||
293 | struct sctp_transport *tp = packet->transport; | ||
294 | struct sctp_association *asoc = tp->asoc; | ||
295 | struct sctphdr *sh; | ||
296 | __u32 crc32; | ||
297 | struct sk_buff *nskb; | ||
298 | struct sctp_chunk *chunk; | ||
299 | struct sock *sk; | ||
300 | int err = 0; | ||
301 | int padding; /* How much padding do we need? */ | ||
302 | __u8 has_data = 0; | ||
303 | struct dst_entry *dst; | ||
304 | |||
305 | SCTP_DEBUG_PRINTK("%s: packet:%p\n", __FUNCTION__, packet); | ||
306 | |||
307 | /* Do NOT generate a chunkless packet. */ | ||
308 | chunk = (struct sctp_chunk *)skb_peek(&packet->chunks); | ||
309 | if (unlikely(!chunk)) | ||
310 | return err; | ||
311 | |||
312 | /* Set up convenience variables... */ | ||
313 | sk = chunk->skb->sk; | ||
314 | |||
315 | /* Allocate the new skb. */ | ||
316 | nskb = dev_alloc_skb(packet->size); | ||
317 | if (!nskb) | ||
318 | goto nomem; | ||
319 | |||
320 | /* Make sure the outbound skb has enough header room reserved. */ | ||
321 | skb_reserve(nskb, packet->overhead); | ||
322 | |||
323 | /* Set the owning socket so that we know where to get the | ||
324 | * destination IP address. | ||
325 | */ | ||
326 | skb_set_owner_w(nskb, sk); | ||
327 | |||
328 | /* Build the SCTP header. */ | ||
329 | sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr)); | ||
330 | sh->source = htons(packet->source_port); | ||
331 | sh->dest = htons(packet->destination_port); | ||
332 | |||
333 | /* From 6.8 Adler-32 Checksum Calculation: | ||
334 | * After the packet is constructed (containing the SCTP common | ||
335 | * header and one or more control or DATA chunks), the | ||
336 | * transmitter shall: | ||
337 | * | ||
338 | * 1) Fill in the proper Verification Tag in the SCTP common | ||
339 | * header and initialize the checksum field to 0's. | ||
340 | */ | ||
341 | sh->vtag = htonl(packet->vtag); | ||
342 | sh->checksum = 0; | ||
343 | |||
344 | /* 2) Calculate the Adler-32 checksum of the whole packet, | ||
345 | * including the SCTP common header and all the | ||
346 | * chunks. | ||
347 | * | ||
348 | * Note: Adler-32 is no longer applicable, as it has been replaced | ||
349 | * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>. | ||
350 | */ | ||
351 | crc32 = sctp_start_cksum((__u8 *)sh, sizeof(struct sctphdr)); | ||
352 | |||
353 | /** | ||
354 | * 6.10 Bundling | ||
355 | * | ||
356 | * An endpoint bundles chunks by simply including multiple | ||
357 | * chunks in one outbound SCTP packet. ... | ||
358 | */ | ||
359 | |||
360 | /** | ||
361 | * 3.2 Chunk Field Descriptions | ||
362 | * | ||
363 | * The total length of a chunk (including Type, Length and | ||
364 | * Value fields) MUST be a multiple of 4 bytes. If the length | ||
365 | * of the chunk is not a multiple of 4 bytes, the sender MUST | ||
366 | * pad the chunk with all zero bytes and this padding is not | ||
367 | * included in the chunk length field. The sender should | ||
368 | * never pad with more than 3 bytes. | ||
369 | * | ||
370 | * [This whole comment explains WORD_ROUND() below.] | ||
371 | */ | ||
372 | SCTP_DEBUG_PRINTK("***sctp_transmit_packet***\n"); | ||
373 | while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL) { | ||
374 | if (sctp_chunk_is_data(chunk)) { | ||
375 | |||
376 | if (!chunk->has_tsn) { | ||
377 | sctp_chunk_assign_ssn(chunk); | ||
378 | sctp_chunk_assign_tsn(chunk); | ||
379 | |||
380 | /* 6.3.1 C4) When data is in flight and when allowed | ||
381 | * by rule C5, a new RTT measurement MUST be made each | ||
382 | * round trip. Furthermore, new RTT measurements | ||
383 | * SHOULD be made no more than once per round-trip | ||
384 | * for a given destination transport address. | ||
385 | */ | ||
386 | |||
387 | if (!tp->rto_pending) { | ||
388 | chunk->rtt_in_progress = 1; | ||
389 | tp->rto_pending = 1; | ||
390 | } | ||
391 | } else | ||
392 | chunk->resent = 1; | ||
393 | |||
394 | chunk->sent_at = jiffies; | ||
395 | has_data = 1; | ||
396 | } | ||
397 | |||
398 | padding = WORD_ROUND(chunk->skb->len) - chunk->skb->len; | ||
399 | if (padding) | ||
400 | memset(skb_put(chunk->skb, padding), 0, padding); | ||
401 | |||
402 | crc32 = sctp_update_copy_cksum(skb_put(nskb, chunk->skb->len), | ||
403 | chunk->skb->data, | ||
404 | chunk->skb->len, crc32); | ||
405 | |||
406 | SCTP_DEBUG_PRINTK("%s %p[%s] %s 0x%x, %s %d, %s %d, %s %d\n", | ||
407 | "*** Chunk", chunk, | ||
408 | sctp_cname(SCTP_ST_CHUNK( | ||
409 | chunk->chunk_hdr->type)), | ||
410 | chunk->has_tsn ? "TSN" : "No TSN", | ||
411 | chunk->has_tsn ? | ||
412 | ntohl(chunk->subh.data_hdr->tsn) : 0, | ||
413 | "length", ntohs(chunk->chunk_hdr->length), | ||
414 | "chunk->skb->len", chunk->skb->len, | ||
415 | "rtt_in_progress", chunk->rtt_in_progress); | ||
416 | |||
417 | /* | ||
418 | * If this is a control chunk, this is our last | ||
419 | * reference. Free data chunks after they've been | ||
420 | * acknowledged or have failed. | ||
421 | */ | ||
422 | if (!sctp_chunk_is_data(chunk)) | ||
423 | sctp_chunk_free(chunk); | ||
424 | } | ||
425 | |||
426 | /* Perform final transformation on checksum. */ | ||
427 | crc32 = sctp_end_cksum(crc32); | ||
428 | |||
429 | /* 3) Put the resultant value into the checksum field in the | ||
430 | * common header, and leave the rest of the bits unchanged. | ||
431 | */ | ||
432 | sh->checksum = htonl(crc32); | ||
433 | |||
434 | /* IP layer ECN support | ||
435 | * From RFC 2481 | ||
436 | * "The ECN-Capable Transport (ECT) bit would be set by the | ||
437 | * data sender to indicate that the end-points of the | ||
438 | * transport protocol are ECN-capable." | ||
439 | * | ||
440 | * Now setting the ECT bit all the time, as it should not cause | ||
441 | * any problems protocol-wise even if our peer ignores it. | ||
442 | * | ||
443 | * Note: The IPv6 layer also checks this bit later in | ||
444 | * transmission. See IP6_ECN_flow_xmit(). | ||
445 | */ | ||
446 | INET_ECN_xmit(nskb->sk); | ||
447 | |||
448 | /* Set up the IP options. */ | ||
449 | /* BUG: not implemented | ||
450 | * For v4 this all lives somewhere in sk->sk_opt... | ||
451 | */ | ||
452 | |||
453 | /* Dump that on IP! */ | ||
454 | if (asoc && asoc->peer.last_sent_to != tp) { | ||
455 | /* Considering the multiple CPU scenario, this is a | ||
456 | * "correcter" place for last_sent_to. --xguo | ||
457 | */ | ||
458 | asoc->peer.last_sent_to = tp; | ||
459 | } | ||
460 | |||
461 | if (has_data) { | ||
462 | struct timer_list *timer; | ||
463 | unsigned long timeout; | ||
464 | |||
465 | tp->last_time_used = jiffies; | ||
466 | |||
467 | /* Restart the AUTOCLOSE timer when sending data. */ | ||
468 | if (sctp_state(asoc, ESTABLISHED) && asoc->autoclose) { | ||
469 | timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE]; | ||
470 | timeout = asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]; | ||
471 | |||
472 | if (!mod_timer(timer, jiffies + timeout)) | ||
473 | sctp_association_hold(asoc); | ||
474 | } | ||
475 | } | ||
476 | |||
477 | dst = tp->dst; | ||
478 | /* The 'obsolete' field of dst is set to 2 when a dst is freed. */ | ||
479 | if (!dst || (dst->obsolete > 1)) { | ||
480 | dst_release(dst); | ||
481 | sctp_transport_route(tp, NULL, sctp_sk(sk)); | ||
482 | sctp_assoc_sync_pmtu(asoc); | ||
483 | } | ||
484 | |||
485 | nskb->dst = dst_clone(tp->dst); | ||
486 | if (!nskb->dst) | ||
487 | goto no_route; | ||
488 | |||
489 | SCTP_DEBUG_PRINTK("***sctp_transmit_packet*** skb len %d\n", | ||
490 | nskb->len); | ||
491 | |||
492 | (*tp->af_specific->sctp_xmit)(nskb, tp, packet->ipfragok); | ||
493 | |||
494 | out: | ||
495 | packet->size = packet->overhead; | ||
496 | return err; | ||
497 | no_route: | ||
498 | kfree_skb(nskb); | ||
499 | IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES); | ||
500 | |||
501 | /* FIXME: Returning the 'err' will affect all the associations | ||
502 | * associated with a socket, although only one of the paths of the | ||
503 | * association is unreachable. | ||
504 | * The real failure of a transport or association can be passed on | ||
505 | * to the user via notifications. So setting this error may not be | ||
506 | * required. | ||
507 | */ | ||
508 | /* err = -EHOSTUNREACH; */ | ||
509 | err: | ||
510 | /* Control chunks are unreliable so just drop them. DATA chunks | ||
511 | * will get resent or dropped later. | ||
512 | */ | ||
513 | |||
514 | while ((chunk = (struct sctp_chunk *)__skb_dequeue(&packet->chunks)) != NULL) { | ||
515 | if (!sctp_chunk_is_data(chunk)) | ||
516 | sctp_chunk_free(chunk); | ||
517 | } | ||
518 | goto out; | ||
519 | nomem: | ||
520 | err = -ENOMEM; | ||
521 | goto err; | ||
522 | } | ||
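The padding step above (see the "3.2 Chunk Field Descriptions" comment) rounds every chunk up to a 4-byte boundary before it is copied into the outbound skb, and the same WORD_ROUND() macro sizes the chunk in sctp_packet_append_chunk(). Assuming the usual ((len + 3) & ~3) definition from the SCTP headers, the arithmetic works out as in this stand-alone sketch:

#include <assert.h>

/* Assumed definition; the real macro comes from the SCTP headers. */
#define WORD_ROUND(s) (((s) + 3) & ~3)

int main(void)
{
	assert(WORD_ROUND(17) == 20);	/* a 17-byte chunk gets 3 zero pad bytes */
	assert(WORD_ROUND(20) == 20);	/* already aligned, no padding needed */
	return 0;
}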
523 | |||
524 | /******************************************************************** | ||
525 | * 2nd Level Abstractions | ||
526 | ********************************************************************/ | ||
527 | |||
528 | /* This private function handles the specifics of appending DATA chunks. */ | ||
529 | static sctp_xmit_t sctp_packet_append_data(struct sctp_packet *packet, | ||
530 | struct sctp_chunk *chunk) | ||
531 | { | ||
532 | sctp_xmit_t retval = SCTP_XMIT_OK; | ||
533 | size_t datasize, rwnd, inflight; | ||
534 | struct sctp_transport *transport = packet->transport; | ||
535 | __u32 max_burst_bytes; | ||
536 | struct sctp_association *asoc = transport->asoc; | ||
537 | struct sctp_sock *sp = sctp_sk(asoc->base.sk); | ||
538 | struct sctp_outq *q = &asoc->outqueue; | ||
539 | |||
540 | /* RFC 2960 6.1 Transmission of DATA Chunks | ||
541 | * | ||
542 | * A) At any given time, the data sender MUST NOT transmit new data to | ||
543 | * any destination transport address if its peer's rwnd indicates | ||
544 | * that the peer has no buffer space (i.e. rwnd is 0, see Section | ||
545 | * 6.2.1). However, regardless of the value of rwnd (including if it | ||
546 | * is 0), the data sender can always have one DATA chunk in flight to | ||
547 | * the receiver if allowed by cwnd (see rule B below). This rule | ||
548 | * allows the sender to probe for a change in rwnd that the sender | ||
549 | * missed due to the SACK having been lost in transit from the data | ||
550 | * receiver to the data sender. | ||
551 | */ | ||
552 | |||
553 | rwnd = asoc->peer.rwnd; | ||
554 | inflight = asoc->outqueue.outstanding_bytes; | ||
555 | |||
556 | datasize = sctp_data_size(chunk); | ||
557 | |||
558 | if (datasize > rwnd) { | ||
559 | if (inflight > 0) { | ||
560 | /* We have (at least) one data chunk in flight, | ||
561 | * so we can't fall back to rule 6.1 B). | ||
562 | */ | ||
563 | retval = SCTP_XMIT_RWND_FULL; | ||
564 | goto finish; | ||
565 | } | ||
566 | } | ||
567 | |||
568 | /* sctpimpguide-05 2.14.2 | ||
569 | * D) When the time comes for the sender to | ||
570 | * transmit new DATA chunks, the protocol parameter Max.Burst MUST | ||
571 | * first be applied to limit how many new DATA chunks may be sent. | ||
572 | * The limit is applied by adjusting cwnd as follows: | ||
573 | * if ((flightsize + Max.Burst * MTU) < cwnd) | ||
574 | * cwnd = flightsize + Max.Burst * MTU | ||
575 | */ | ||
576 | max_burst_bytes = asoc->max_burst * asoc->pmtu; | ||
577 | if ((transport->flight_size + max_burst_bytes) < transport->cwnd) { | ||
578 | transport->cwnd = transport->flight_size + max_burst_bytes; | ||
579 | SCTP_DEBUG_PRINTK("%s: cwnd limited by max_burst: " | ||
580 | "transport: %p, cwnd: %d, " | ||
581 | "ssthresh: %d, flight_size: %d, " | ||
582 | "pba: %d\n", | ||
583 | __FUNCTION__, transport, | ||
584 | transport->cwnd, | ||
585 | transport->ssthresh, | ||
586 | transport->flight_size, | ||
587 | transport->partial_bytes_acked); | ||
588 | } | ||
589 | |||
590 | /* RFC 2960 6.1 Transmission of DATA Chunks | ||
591 | * | ||
592 | * B) At any given time, the sender MUST NOT transmit new data | ||
593 | * to a given transport address if it has cwnd or more bytes | ||
594 | * of data outstanding to that transport address. | ||
595 | */ | ||
596 | /* RFC 2960 7.2.4 & the Implementers Guide 2.8. | ||
597 | * | ||
598 | * 3) ... | ||
599 | * When a Fast Retransmit is being performed the sender SHOULD | ||
600 | * ignore the value of cwnd and SHOULD NOT delay retransmission. | ||
601 | */ | ||
602 | if (!chunk->fast_retransmit) | ||
603 | if (transport->flight_size >= transport->cwnd) { | ||
604 | retval = SCTP_XMIT_RWND_FULL; | ||
605 | goto finish; | ||
606 | } | ||
607 | |||
608 | /* Nagle's algorithm to solve small-packet problem: | ||
609 | * Inhibit the sending of new chunks when new outgoing data arrives | ||
610 | * if any previously transmitted data on the connection remains | ||
611 | * unacknowledged. | ||
612 | */ | ||
613 | if (!sp->nodelay && sctp_packet_empty(packet) && | ||
614 | q->outstanding_bytes && sctp_state(asoc, ESTABLISHED)) { | ||
615 | unsigned len = datasize + q->out_qlen; | ||
616 | |||
617 | /* Check whether this chunk and all the rest of the pending | ||
618 | * data will fit; otherwise, delay in hopes of bundling a | ||
619 | * full-sized packet. | ||
620 | */ | ||
621 | if (len < asoc->pmtu - packet->overhead) { | ||
622 | retval = SCTP_XMIT_NAGLE_DELAY; | ||
623 | goto finish; | ||
624 | } | ||
625 | } | ||
626 | |||
627 | /* Keep track of how many bytes are in flight over this transport. */ | ||
628 | transport->flight_size += datasize; | ||
629 | |||
630 | /* Keep track of how many bytes are in flight to the receiver. */ | ||
631 | asoc->outqueue.outstanding_bytes += datasize; | ||
632 | |||
633 | /* Update our view of the receiver's rwnd. */ | ||
634 | if (datasize < rwnd) | ||
635 | rwnd -= datasize; | ||
636 | else | ||
637 | rwnd = 0; | ||
638 | |||
639 | asoc->peer.rwnd = rwnd; | ||
640 | /* Has been accepted for transmission. */ | ||
641 | if (!asoc->peer.prsctp_capable) | ||
642 | chunk->msg->can_abandon = 0; | ||
643 | |||
644 | finish: | ||
645 | return retval; | ||
646 | } | ||
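The Max.Burst clamp in sctp_packet_append_data() above keeps cwnd from running more than Max.Burst MTUs ahead of the data actually in flight. A small numeric illustration of that adjustment (every value below is invented for the example):

/* Illustration only; none of these numbers come from the patch. */
unsigned int pmtu = 1500, max_burst = 4;		/* association parameters */
unsigned int flight_size = 2000, cwnd = 20000;		/* transport state */
unsigned int max_burst_bytes = max_burst * pmtu;	/* 6000 */

if (flight_size + max_burst_bytes < cwnd)
	cwnd = flight_size + max_burst_bytes;		/* clamped to 8000 */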
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c new file mode 100644 index 000000000000..1b2d4adc4ddb --- /dev/null +++ b/net/sctp/outqueue.c | |||
@@ -0,0 +1,1734 @@ | |||
1 | /* SCTP kernel reference Implementation | ||
2 | * (C) Copyright IBM Corp. 2001, 2004 | ||
3 | * Copyright (c) 1999-2000 Cisco, Inc. | ||
4 | * Copyright (c) 1999-2001 Motorola, Inc. | ||
5 | * Copyright (c) 2001-2003 Intel Corp. | ||
6 | * | ||
7 | * This file is part of the SCTP kernel reference Implementation | ||
8 | * | ||
9 | * These functions implement the sctp_outq class. The outqueue handles | ||
10 | * bundling and queueing of outgoing SCTP chunks. | ||
11 | * | ||
12 | * The SCTP reference implementation is free software; | ||
13 | * you can redistribute it and/or modify it under the terms of | ||
14 | * the GNU General Public License as published by | ||
15 | * the Free Software Foundation; either version 2, or (at your option) | ||
16 | * any later version. | ||
17 | * | ||
18 | * The SCTP reference implementation is distributed in the hope that it | ||
19 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
20 | * ************************ | ||
21 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
22 | * See the GNU General Public License for more details. | ||
23 | * | ||
24 | * You should have received a copy of the GNU General Public License | ||
25 | * along with GNU CC; see the file COPYING. If not, write to | ||
26 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
27 | * Boston, MA 02111-1307, USA. | ||
28 | * | ||
29 | * Please send any bug reports or fixes you make to the | ||
30 | * email address(es): | ||
31 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
32 | * | ||
33 | * Or submit a bug report through the following website: | ||
34 | * http://www.sf.net/projects/lksctp | ||
35 | * | ||
36 | * Written or modified by: | ||
37 | * La Monte H.P. Yarroll <piggy@acm.org> | ||
38 | * Karl Knutson <karl@athena.chicago.il.us> | ||
39 | * Perry Melange <pmelange@null.cc.uic.edu> | ||
40 | * Xingang Guo <xingang.guo@intel.com> | ||
41 | * Hui Huang <hui.huang@nokia.com> | ||
42 | * Sridhar Samudrala <sri@us.ibm.com> | ||
43 | * Jon Grimm <jgrimm@us.ibm.com> | ||
44 | * | ||
45 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
46 | * be incorporated into the next SCTP release. | ||
47 | */ | ||
48 | |||
49 | #include <linux/types.h> | ||
50 | #include <linux/list.h> /* For struct list_head */ | ||
51 | #include <linux/socket.h> | ||
52 | #include <linux/ip.h> | ||
53 | #include <net/sock.h> /* For skb_set_owner_w */ | ||
54 | |||
55 | #include <net/sctp/sctp.h> | ||
56 | #include <net/sctp/sm.h> | ||
57 | |||
58 | /* Declare internal functions here. */ | ||
59 | static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn); | ||
60 | static void sctp_check_transmitted(struct sctp_outq *q, | ||
61 | struct list_head *transmitted_queue, | ||
62 | struct sctp_transport *transport, | ||
63 | struct sctp_sackhdr *sack, | ||
64 | __u32 highest_new_tsn); | ||
65 | |||
66 | static void sctp_mark_missing(struct sctp_outq *q, | ||
67 | struct list_head *transmitted_queue, | ||
68 | struct sctp_transport *transport, | ||
69 | __u32 highest_new_tsn, | ||
70 | int count_of_newacks); | ||
71 | |||
72 | static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn); | ||
73 | |||
74 | /* Add data to the front of the queue. */ | ||
75 | static inline void sctp_outq_head_data(struct sctp_outq *q, | ||
76 | struct sctp_chunk *ch) | ||
77 | { | ||
78 | __skb_queue_head(&q->out, (struct sk_buff *)ch); | ||
79 | q->out_qlen += ch->skb->len; | ||
80 | return; | ||
81 | } | ||
82 | |||
83 | /* Take data from the front of the queue. */ | ||
84 | static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q) | ||
85 | { | ||
86 | struct sctp_chunk *ch; | ||
87 | ch = (struct sctp_chunk *)__skb_dequeue(&q->out); | ||
88 | if (ch) | ||
89 | q->out_qlen -= ch->skb->len; | ||
90 | return ch; | ||
91 | } | ||
92 | /* Add data chunk to the end of the queue. */ | ||
93 | static inline void sctp_outq_tail_data(struct sctp_outq *q, | ||
94 | struct sctp_chunk *ch) | ||
95 | { | ||
96 | __skb_queue_tail(&q->out, (struct sk_buff *)ch); | ||
97 | q->out_qlen += ch->skb->len; | ||
98 | return; | ||
99 | } | ||
100 | |||
101 | /* | ||
102 | * SFR-CACC algorithm: | ||
103 | * D) If count_of_newacks is greater than or equal to 2 | ||
104 | * and t was not sent to the current primary then the | ||
105 | * sender MUST NOT increment missing report count for t. | ||
106 | */ | ||
107 | static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary, | ||
108 | struct sctp_transport *transport, | ||
109 | int count_of_newacks) | ||
110 | { | ||
111 | if (count_of_newacks >= 2 && transport != primary) | ||
112 | return 1; | ||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | /* | ||
117 | * SFR-CACC algorithm: | ||
118 | * F) If count_of_newacks is less than 2, let d be the | ||
119 | * destination to which t was sent. If cacc_saw_newack | ||
120 | * is 0 for destination d, then the sender MUST NOT | ||
121 | * increment missing report count for t. | ||
122 | */ | ||
123 | static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport, | ||
124 | int count_of_newacks) | ||
125 | { | ||
126 | if (count_of_newacks < 2 && !transport->cacc.cacc_saw_newack) | ||
127 | return 1; | ||
128 | return 0; | ||
129 | } | ||
130 | |||
131 | /* | ||
132 | * SFR-CACC algorithm: | ||
133 | * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD | ||
134 | * execute steps C, D, F. | ||
135 | * | ||
136 | * C has been implemented in sctp_outq_sack | ||
137 | */ | ||
138 | static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary, | ||
139 | struct sctp_transport *transport, | ||
140 | int count_of_newacks) | ||
141 | { | ||
142 | if (!primary->cacc.cycling_changeover) { | ||
143 | if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks)) | ||
144 | return 1; | ||
145 | if (sctp_cacc_skip_3_1_f(transport, count_of_newacks)) | ||
146 | return 1; | ||
147 | return 0; | ||
148 | } | ||
149 | return 0; | ||
150 | } | ||
151 | |||
152 | /* | ||
153 | * SFR-CACC algorithm: | ||
154 | * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less | ||
155 | * than next_tsn_at_change of the current primary, then | ||
156 | * the sender MUST NOT increment missing report count | ||
157 | * for t. | ||
158 | */ | ||
159 | static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn) | ||
160 | { | ||
161 | if (primary->cacc.cycling_changeover && | ||
162 | TSN_lt(tsn, primary->cacc.next_tsn_at_change)) | ||
163 | return 1; | ||
164 | return 0; | ||
165 | } | ||
166 | |||
167 | /* | ||
168 | * SFR-CACC algorithm: | ||
169 | * 3) If the missing report count for TSN t is to be | ||
170 | * incremented according to [RFC2960] and | ||
171 | * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set, | ||
172 | * then the sender MUST further execute steps 3.1 and | ||
173 | * 3.2 to determine if the missing report count for | ||
174 | * TSN t SHOULD NOT be incremented. | ||
175 | * | ||
176 | * 3.3) If 3.1 and 3.2 do not dictate that the missing | ||
177 | * report count for t should not be incremented, then | ||
178 | * the sender SHOULD increment missing report count for | ||
179 | * t (according to [RFC2960] and [SCTP_STEWART-2002]). | ||
180 | */ | ||
181 | static inline int sctp_cacc_skip(struct sctp_transport *primary, | ||
182 | struct sctp_transport *transport, | ||
183 | int count_of_newacks, | ||
184 | __u32 tsn) | ||
185 | { | ||
186 | if (primary->cacc.changeover_active && | ||
187 | (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) | ||
188 | || sctp_cacc_skip_3_2(primary, tsn))) | ||
189 | return 1; | ||
190 | return 0; | ||
191 | } | ||
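The three helpers above combine into a single skip decision. The following is a minimal user-space sketch of that combined SFR-CACC rule; the struct and helper names (cacc_state, cacc_skip, tsn_lt) are simplified stand-ins for the kernel's sctp_transport and TSN_lt, not kernel API.

#include <stdio.h>

struct cacc_state {
	int changeover_active;
	int cycling_changeover;
	int cacc_saw_newack;
	unsigned int next_tsn_at_change;
};

/* Serial-number "less than", mirroring what TSN_lt() does in the kernel. */
static int tsn_lt(unsigned int a, unsigned int b)
{
	return (int)(a - b) < 0;
}

/* Combined decision: steps 3.1 (D, F) and 3.2 under CHANGEOVER_ACTIVE. */
static int cacc_skip(const struct cacc_state *primary,
		     const struct cacc_state *transport,
		     int count_of_newacks, unsigned int tsn)
{
	if (!primary->changeover_active)
		return 0;
	if (!primary->cycling_changeover) {
		/* Step D: enough new acks and t went to a non-primary path. */
		if (count_of_newacks >= 2 && transport != primary)
			return 1;
		/* Step F: too few new acks and destination d saw no new ack. */
		if (count_of_newacks < 2 && !transport->cacc_saw_newack)
			return 1;
		return 0;
	}
	/* Step 3.2: cycling changeover in progress. */
	return tsn_lt(tsn, primary->next_tsn_at_change);
}

int main(void)
{
	struct cacc_state primary = { 1, 0, 0, 200 };
	struct cacc_state alternate = { 0, 0, 0, 0 };

	/* Two destinations saw new acks and TSN 150 was sent to the
	 * alternate path, so its missing report must not be bumped (step D).
	 */
	printf("skip = %d\n", cacc_skip(&primary, &alternate, 2, 150));
	return 0;
}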
192 | |||
193 | /* Initialize an existing sctp_outq. This does the boring stuff. | ||
194 | * You still need to define handlers if you really want to DO | ||
195 | * something with this structure... | ||
196 | */ | ||
197 | void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q) | ||
198 | { | ||
199 | q->asoc = asoc; | ||
200 | skb_queue_head_init(&q->out); | ||
201 | skb_queue_head_init(&q->control); | ||
202 | INIT_LIST_HEAD(&q->retransmit); | ||
203 | INIT_LIST_HEAD(&q->sacked); | ||
204 | INIT_LIST_HEAD(&q->abandoned); | ||
205 | |||
206 | q->outstanding_bytes = 0; | ||
207 | q->empty = 1; | ||
208 | q->cork = 0; | ||
209 | |||
210 | q->malloced = 0; | ||
211 | q->out_qlen = 0; | ||
212 | } | ||
213 | |||
214 | /* Throw away all pending chunks in an outqueue; the structure itself is not freed. | ||
215 | */ | ||
216 | void sctp_outq_teardown(struct sctp_outq *q) | ||
217 | { | ||
218 | struct sctp_transport *transport; | ||
219 | struct list_head *lchunk, *pos, *temp; | ||
220 | struct sctp_chunk *chunk; | ||
221 | |||
222 | /* Throw away unacknowledged chunks. */ | ||
223 | list_for_each(pos, &q->asoc->peer.transport_addr_list) { | ||
224 | transport = list_entry(pos, struct sctp_transport, transports); | ||
225 | while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) { | ||
226 | chunk = list_entry(lchunk, struct sctp_chunk, | ||
227 | transmitted_list); | ||
228 | /* Mark as part of a failed message. */ | ||
229 | sctp_chunk_fail(chunk, q->error); | ||
230 | sctp_chunk_free(chunk); | ||
231 | } | ||
232 | } | ||
233 | |||
234 | /* Throw away chunks that have been gap ACKed. */ | ||
235 | list_for_each_safe(lchunk, temp, &q->sacked) { | ||
236 | list_del_init(lchunk); | ||
237 | chunk = list_entry(lchunk, struct sctp_chunk, | ||
238 | transmitted_list); | ||
239 | sctp_chunk_fail(chunk, q->error); | ||
240 | sctp_chunk_free(chunk); | ||
241 | } | ||
242 | |||
243 | /* Throw away any chunks in the retransmit queue. */ | ||
244 | list_for_each_safe(lchunk, temp, &q->retransmit) { | ||
245 | list_del_init(lchunk); | ||
246 | chunk = list_entry(lchunk, struct sctp_chunk, | ||
247 | transmitted_list); | ||
248 | sctp_chunk_fail(chunk, q->error); | ||
249 | sctp_chunk_free(chunk); | ||
250 | } | ||
251 | |||
252 | /* Throw away any chunks that are in the abandoned queue. */ | ||
253 | list_for_each_safe(lchunk, temp, &q->abandoned) { | ||
254 | list_del_init(lchunk); | ||
255 | chunk = list_entry(lchunk, struct sctp_chunk, | ||
256 | transmitted_list); | ||
257 | sctp_chunk_fail(chunk, q->error); | ||
258 | sctp_chunk_free(chunk); | ||
259 | } | ||
260 | |||
261 | /* Throw away any leftover data chunks. */ | ||
262 | while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { | ||
263 | |||
264 | /* Mark as send failure. */ | ||
265 | sctp_chunk_fail(chunk, q->error); | ||
266 | sctp_chunk_free(chunk); | ||
267 | } | ||
268 | |||
269 | q->error = 0; | ||
270 | |||
271 | /* Throw away any leftover control chunks. */ | ||
272 | while ((chunk = (struct sctp_chunk *) skb_dequeue(&q->control)) != NULL) | ||
273 | sctp_chunk_free(chunk); | ||
274 | } | ||
275 | |||
276 | /* Free the outqueue structure and any related pending chunks. */ | ||
277 | void sctp_outq_free(struct sctp_outq *q) | ||
278 | { | ||
279 | /* Throw away leftover chunks. */ | ||
280 | sctp_outq_teardown(q); | ||
281 | |||
282 | /* If we were kmalloc()'d, free the memory. */ | ||
283 | if (q->malloced) | ||
284 | kfree(q); | ||
285 | } | ||
286 | |||
287 | /* Put a new chunk in an sctp_outq. */ | ||
288 | int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk) | ||
289 | { | ||
290 | int error = 0; | ||
291 | |||
292 | SCTP_DEBUG_PRINTK("sctp_outq_tail(%p, %p[%s])\n", | ||
293 | q, chunk, chunk && chunk->chunk_hdr ? | ||
294 | sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) | ||
295 | : "Illegal Chunk"); | ||
296 | |||
297 | /* If it is data, queue it up; otherwise, send it | ||
298 | * immediately. | ||
299 | */ | ||
300 | if (SCTP_CID_DATA == chunk->chunk_hdr->type) { | ||
301 | /* Is it OK to queue data chunks? */ | ||
302 | /* From 9. Termination of Association | ||
303 | * | ||
304 | * When either endpoint performs a shutdown, the | ||
305 | * association on each peer will stop accepting new | ||
306 | * data from its user and only deliver data in queue | ||
307 | * at the time of sending or receiving the SHUTDOWN | ||
308 | * chunk. | ||
309 | */ | ||
310 | switch (q->asoc->state) { | ||
311 | case SCTP_STATE_EMPTY: | ||
312 | case SCTP_STATE_CLOSED: | ||
313 | case SCTP_STATE_SHUTDOWN_PENDING: | ||
314 | case SCTP_STATE_SHUTDOWN_SENT: | ||
315 | case SCTP_STATE_SHUTDOWN_RECEIVED: | ||
316 | case SCTP_STATE_SHUTDOWN_ACK_SENT: | ||
317 | /* Cannot send after transport endpoint shutdown */ | ||
318 | error = -ESHUTDOWN; | ||
319 | break; | ||
320 | |||
321 | default: | ||
322 | SCTP_DEBUG_PRINTK("outqueueing (%p, %p[%s])\n", | ||
323 | q, chunk, chunk && chunk->chunk_hdr ? | ||
324 | sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)) | ||
325 | : "Illegal Chunk"); | ||
326 | |||
327 | sctp_outq_tail_data(q, chunk); | ||
328 | if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) | ||
329 | SCTP_INC_STATS(SCTP_MIB_OUTUNORDERCHUNKS); | ||
330 | else | ||
331 | SCTP_INC_STATS(SCTP_MIB_OUTORDERCHUNKS); | ||
332 | q->empty = 0; | ||
333 | break; | ||
334 | } | ||
335 | } else { | ||
336 | __skb_queue_tail(&q->control, (struct sk_buff *) chunk); | ||
337 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); | ||
338 | } | ||
339 | |||
340 | if (error < 0) | ||
341 | return error; | ||
342 | |||
343 | if (!q->cork) | ||
344 | error = sctp_outq_flush(q, 0); | ||
345 | |||
346 | return error; | ||
347 | } | ||
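A small sketch of the Section 9 gate applied above, assuming a simplified state enum in place of the kernel's sctp_state_t (the TOY_* names are hypothetical, and SCTP_STATE_EMPTY is folded into the closed case):

#include <stdio.h>
#include <errno.h>

/* Simplified stand-ins for the kernel's association states. */
enum toy_state {
	TOY_CLOSED, TOY_COOKIE_WAIT, TOY_COOKIE_ECHOED, TOY_ESTABLISHED,
	TOY_SHUTDOWN_PENDING, TOY_SHUTDOWN_SENT, TOY_SHUTDOWN_RECEIVED,
	TOY_SHUTDOWN_ACK_SENT,
};

/* Returns 0 if new DATA may still be queued, -ESHUTDOWN otherwise. */
static int may_queue_data(enum toy_state state)
{
	switch (state) {
	case TOY_CLOSED:
	case TOY_SHUTDOWN_PENDING:
	case TOY_SHUTDOWN_SENT:
	case TOY_SHUTDOWN_RECEIVED:
	case TOY_SHUTDOWN_ACK_SENT:
		return -ESHUTDOWN;
	default:
		return 0;
	}
}

int main(void)
{
	printf("ESTABLISHED:   %d\n", may_queue_data(TOY_ESTABLISHED));
	printf("SHUTDOWN_SENT: %d\n", may_queue_data(TOY_SHUTDOWN_SENT));
	return 0;
}

Control chunks bypass this gate entirely, which is why only the DATA branch of sctp_outq_tail() can fail with -ESHUTDOWN.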
348 | |||
349 | /* Insert a chunk into the sorted list based on the TSNs. The retransmit list | ||
350 | * and the abandoned list are in ascending order. | ||
351 | */ | ||
352 | static void sctp_insert_list(struct list_head *head, struct list_head *new) | ||
353 | { | ||
354 | struct list_head *pos; | ||
355 | struct sctp_chunk *nchunk, *lchunk; | ||
356 | __u32 ntsn, ltsn; | ||
357 | int done = 0; | ||
358 | |||
359 | nchunk = list_entry(new, struct sctp_chunk, transmitted_list); | ||
360 | ntsn = ntohl(nchunk->subh.data_hdr->tsn); | ||
361 | |||
362 | list_for_each(pos, head) { | ||
363 | lchunk = list_entry(pos, struct sctp_chunk, transmitted_list); | ||
364 | ltsn = ntohl(lchunk->subh.data_hdr->tsn); | ||
365 | if (TSN_lt(ntsn, ltsn)) { | ||
366 | list_add(new, pos->prev); | ||
367 | done = 1; | ||
368 | break; | ||
369 | } | ||
370 | } | ||
371 | if (!done) | ||
372 | list_add_tail(new, head); | ||
373 | } | ||
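A standalone sketch of the ascending-TSN ordering used by sctp_insert_list(), with plain integers standing in for chunk list entries; tsn_lt here is a local re-implementation of the serial-number compare, so the ordering stays correct across 32-bit wraparound:

#include <stdio.h>

static int tsn_lt(unsigned int a, unsigned int b)
{
	return (int)(a - b) < 0;
}

int main(void)
{
	unsigned int list[4] = { 0xfffffffeu, 0xffffffffu, 0, 1 };
	unsigned int ntsn = 0xfffffffdu;   /* new TSN to insert */
	int i, pos = 4;                    /* default: append at the tail */

	for (i = 0; i < 4; i++) {
		if (tsn_lt(ntsn, list[i])) {
			pos = i;           /* insert before this entry */
			break;
		}
	}
	printf("insert 0x%x at index %d\n", ntsn, pos);
	return 0;
}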
374 | |||
375 | /* Mark all the eligible chunks on a transport for retransmission. */ | ||
376 | void sctp_retransmit_mark(struct sctp_outq *q, | ||
377 | struct sctp_transport *transport, | ||
378 | __u8 fast_retransmit) | ||
379 | { | ||
380 | struct list_head *lchunk, *ltemp; | ||
381 | struct sctp_chunk *chunk; | ||
382 | |||
383 | /* Walk through the specified transmitted queue. */ | ||
384 | list_for_each_safe(lchunk, ltemp, &transport->transmitted) { | ||
385 | chunk = list_entry(lchunk, struct sctp_chunk, | ||
386 | transmitted_list); | ||
387 | |||
388 | /* If the chunk is abandoned, move it to abandoned list. */ | ||
389 | if (sctp_chunk_abandoned(chunk)) { | ||
390 | list_del_init(lchunk); | ||
391 | sctp_insert_list(&q->abandoned, lchunk); | ||
392 | continue; | ||
393 | } | ||
394 | |||
395 | /* If we are doing retransmission due to a fast retransmit, | ||
396 | * only the chunks that are marked for fast retransmit | ||
397 | * should be added to the retransmit queue. If we are doing | ||
398 | * retransmission due to a timeout or pmtu discovery, only the | ||
399 | * chunks that are not yet acked should be added to the | ||
400 | * retransmit queue. | ||
401 | */ | ||
402 | if ((fast_retransmit && chunk->fast_retransmit) || | ||
403 | (!fast_retransmit && !chunk->tsn_gap_acked)) { | ||
404 | /* RFC 2960 6.2.1 Processing a Received SACK | ||
405 | * | ||
406 | * C) Any time a DATA chunk is marked for | ||
407 | * retransmission (via either T3-rtx timer expiration | ||
408 | * (Section 6.3.3) or via fast retransmit | ||
409 | * (Section 7.2.4)), add the data size of those | ||
410 | * chunks to the rwnd. | ||
411 | */ | ||
412 | q->asoc->peer.rwnd += sctp_data_size(chunk); | ||
413 | q->outstanding_bytes -= sctp_data_size(chunk); | ||
414 | transport->flight_size -= sctp_data_size(chunk); | ||
415 | |||
416 | /* sctpimpguide-05 Section 2.8.2 | ||
417 | * M5) If a T3-rtx timer expires, the | ||
418 | * 'TSN.Missing.Report' of all affected TSNs is set | ||
419 | * to 0. | ||
420 | */ | ||
421 | chunk->tsn_missing_report = 0; | ||
422 | |||
423 | /* If a chunk that is being used for RTT measurement | ||
424 | * has to be retransmitted, we cannot use this chunk | ||
425 | * anymore for RTT measurements. Reset rto_pending so | ||
426 | * that a new RTT measurement is started when a new | ||
427 | * data chunk is sent. | ||
428 | */ | ||
429 | if (chunk->rtt_in_progress) { | ||
430 | chunk->rtt_in_progress = 0; | ||
431 | transport->rto_pending = 0; | ||
432 | } | ||
433 | |||
434 | /* Move the chunk to the retransmit queue. The chunks | ||
435 | * on the retransmit queue are always kept in order. | ||
436 | */ | ||
437 | list_del_init(lchunk); | ||
438 | sctp_insert_list(&q->retransmit, lchunk); | ||
439 | } | ||
440 | } | ||
441 | |||
442 | SCTP_DEBUG_PRINTK("%s: transport: %p, fast_retransmit: %d, " | ||
443 | "cwnd: %d, ssthresh: %d, flight_size: %d, " | ||
444 | "pba: %d\n", __FUNCTION__, | ||
445 | transport, fast_retransmit, | ||
446 | transport->cwnd, transport->ssthresh, | ||
447 | transport->flight_size, | ||
448 | transport->partial_bytes_acked); | ||
449 | |||
450 | } | ||
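A worked sketch of the per-chunk accounting performed above when a chunk is moved to the retransmit queue (RFC 2960 6.2.1 C); the numbers are made up for illustration:

#include <stdio.h>

int main(void)
{
	unsigned int peer_rwnd = 4000;          /* peer's advertised window */
	unsigned int outstanding_bytes = 3000;  /* q->outstanding_bytes     */
	unsigned int flight_size = 3000;        /* transport->flight_size   */
	unsigned int chunk_data_size = 1200;    /* sctp_data_size(chunk)    */

	/* The chunk no longer counts as in flight once it sits on the
	 * retransmit queue, and the receiver window it consumed is credited
	 * back so it can be charged again when the chunk is resent.
	 */
	peer_rwnd += chunk_data_size;
	outstanding_bytes -= chunk_data_size;
	flight_size -= chunk_data_size;

	printf("rwnd=%u outstanding=%u flight=%u\n",
	       peer_rwnd, outstanding_bytes, flight_size);
	return 0;
}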
451 | |||
452 | /* Mark all the eligible chunks on a transport for retransmission and force | ||
453 | * one packet out. | ||
454 | */ | ||
455 | void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport, | ||
456 | sctp_retransmit_reason_t reason) | ||
457 | { | ||
458 | int error = 0; | ||
459 | __u8 fast_retransmit = 0; | ||
460 | |||
461 | switch(reason) { | ||
462 | case SCTP_RTXR_T3_RTX: | ||
463 | sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX); | ||
464 | /* Update the retran path if the T3-rtx timer has expired for | ||
465 | * the current retran path. | ||
466 | */ | ||
467 | if (transport == transport->asoc->peer.retran_path) | ||
468 | sctp_assoc_update_retran_path(transport->asoc); | ||
469 | break; | ||
470 | case SCTP_RTXR_FAST_RTX: | ||
471 | sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX); | ||
472 | fast_retransmit = 1; | ||
473 | break; | ||
474 | case SCTP_RTXR_PMTUD: | ||
475 | default: | ||
476 | break; | ||
477 | } | ||
478 | |||
479 | sctp_retransmit_mark(q, transport, fast_retransmit); | ||
480 | |||
481 | /* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination, | ||
482 | * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by | ||
483 | * following the procedures outlined in C1 - C5. | ||
484 | */ | ||
485 | sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point); | ||
486 | |||
487 | error = sctp_outq_flush(q, /* rtx_timeout */ 1); | ||
488 | |||
489 | if (error) | ||
490 | q->asoc->base.sk->sk_err = -error; | ||
491 | } | ||
492 | |||
493 | /* | ||
494 | * Transmit DATA chunks on the retransmit queue. Upon return from | ||
495 | * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which | ||
496 | * need to be transmitted by the caller. | ||
497 | * We assume that pkt->transport has already been set. | ||
498 | * | ||
499 | * The return value is a normal kernel error return value. | ||
500 | */ | ||
501 | static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt, | ||
502 | int rtx_timeout, int *start_timer) | ||
503 | { | ||
504 | struct list_head *lqueue; | ||
505 | struct list_head *lchunk, *lchunk1; | ||
506 | struct sctp_transport *transport = pkt->transport; | ||
507 | sctp_xmit_t status; | ||
508 | struct sctp_chunk *chunk, *chunk1; | ||
509 | struct sctp_association *asoc; | ||
510 | int error = 0; | ||
511 | |||
512 | asoc = q->asoc; | ||
513 | lqueue = &q->retransmit; | ||
514 | |||
515 | /* RFC 2960 6.3.3 Handle T3-rtx Expiration | ||
516 | * | ||
517 | * E3) Determine how many of the earliest (i.e., lowest TSN) | ||
518 | * outstanding DATA chunks for the address for which the | ||
519 | * T3-rtx has expired will fit into a single packet, subject | ||
520 | * to the MTU constraint for the path corresponding to the | ||
521 | * destination transport address to which the retransmission | ||
522 | * is being sent (this may be different from the address for | ||
523 | * which the timer expires [see Section 6.4]). Call this value | ||
524 | * K. Bundle and retransmit those K DATA chunks in a single | ||
525 | * packet to the destination endpoint. | ||
526 | * | ||
527 | * [Just to be painfully clear, if we are retransmitting | ||
528 | * because a timeout just happened, we should send only ONE | ||
529 | * packet of retransmitted data.] | ||
530 | */ | ||
531 | lchunk = sctp_list_dequeue(lqueue); | ||
532 | |||
533 | while (lchunk) { | ||
534 | chunk = list_entry(lchunk, struct sctp_chunk, | ||
535 | transmitted_list); | ||
536 | |||
537 | /* Make sure that Gap Acked TSNs are not retransmitted. A | ||
538 | * simple approach is just to move such TSNs out of the | ||
539 | * way and into a 'transmitted' queue and skip to the | ||
540 | * next chunk. | ||
541 | */ | ||
542 | if (chunk->tsn_gap_acked) { | ||
543 | list_add_tail(lchunk, &transport->transmitted); | ||
544 | lchunk = sctp_list_dequeue(lqueue); | ||
545 | continue; | ||
546 | } | ||
547 | |||
548 | /* Attempt to append this chunk to the packet. */ | ||
549 | status = sctp_packet_append_chunk(pkt, chunk); | ||
550 | |||
551 | switch (status) { | ||
552 | case SCTP_XMIT_PMTU_FULL: | ||
553 | /* Send this packet. */ | ||
554 | if ((error = sctp_packet_transmit(pkt)) == 0) | ||
555 | *start_timer = 1; | ||
556 | |||
557 | /* If we are retransmitting, we should only | ||
558 | * send a single packet. | ||
559 | */ | ||
560 | if (rtx_timeout) { | ||
561 | list_add(lchunk, lqueue); | ||
562 | lchunk = NULL; | ||
563 | } | ||
564 | |||
565 | /* Bundle lchunk in the next round. */ | ||
566 | break; | ||
567 | |||
568 | case SCTP_XMIT_RWND_FULL: | ||
569 | /* Send this packet. */ | ||
570 | if ((error = sctp_packet_transmit(pkt)) == 0) | ||
571 | *start_timer = 1; | ||
572 | |||
573 | /* Stop sending DATA as there is no more room | ||
574 | * at the receiver. | ||
575 | */ | ||
576 | list_add(lchunk, lqueue); | ||
577 | lchunk = NULL; | ||
578 | break; | ||
579 | |||
580 | case SCTP_XMIT_NAGLE_DELAY: | ||
581 | /* Send this packet. */ | ||
582 | if ((error = sctp_packet_transmit(pkt)) == 0) | ||
583 | *start_timer = 1; | ||
584 | |||
585 | /* Stop sending DATA because of nagle delay. */ | ||
586 | list_add(lchunk, lqueue); | ||
587 | lchunk = NULL; | ||
588 | break; | ||
589 | |||
590 | default: | ||
591 | /* The append was successful, so add this chunk to | ||
592 | * the transmitted list. | ||
593 | */ | ||
594 | list_add_tail(lchunk, &transport->transmitted); | ||
595 | |||
596 | /* Mark the chunk as ineligible for fast retransmit | ||
597 | * after it is retransmitted. | ||
598 | */ | ||
599 | chunk->fast_retransmit = 0; | ||
600 | |||
601 | *start_timer = 1; | ||
602 | q->empty = 0; | ||
603 | |||
604 | /* Retrieve a new chunk to bundle. */ | ||
605 | lchunk = sctp_list_dequeue(lqueue); | ||
606 | break; | ||
607 | } | ||
608 | |||
609 | /* If we are here due to a retransmit timeout or a fast | ||
610 | * retransmit and if there are any chunks left in the retransmit | ||
611 | * queue that could not fit in the PMTU-sized packet, they need to be marked as ineligible for a subsequent fast retransmit. | ||
612 | */ | ||
613 | if (rtx_timeout && !lchunk) { | ||
614 | list_for_each(lchunk1, lqueue) { | ||
615 | chunk1 = list_entry(lchunk1, struct sctp_chunk, | ||
616 | transmitted_list); | ||
617 | chunk1->fast_retransmit = 0; | ||
618 | } | ||
619 | } | ||
620 | } | ||
621 | |||
622 | return error; | ||
623 | } | ||
624 | |||
625 | /* Uncork the outqueue: clear the cork flag and flush the chunks that were queued while it was set. */ | ||
626 | int sctp_outq_uncork(struct sctp_outq *q) | ||
627 | { | ||
628 | int error = 0; | ||
629 | if (q->cork) { | ||
630 | q->cork = 0; | ||
631 | error = sctp_outq_flush(q, 0); | ||
632 | } | ||
633 | return error; | ||
634 | } | ||
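A minimal user-space model of the cork/uncork batching: while the cork flag is set, tailing a chunk only queues it, and uncorking performs one combined flush. The toy_* names below are hypothetical stand-ins, not kernel structures:

#include <stdio.h>

struct toy_outq {
	int cork;
	int queued;
	int flushes;
};

static void toy_tail(struct toy_outq *q)
{
	q->queued++;
	if (!q->cork) {
		q->flushes++;      /* sctp_outq_flush() would run here */
		q->queued = 0;
	}
}

static void toy_uncork(struct toy_outq *q)
{
	if (q->cork) {
		q->cork = 0;
		q->flushes++;      /* single flush for the whole batch */
		q->queued = 0;
	}
}

int main(void)
{
	struct toy_outq q = { 1, 0, 0 };       /* corked */

	toy_tail(&q);
	toy_tail(&q);
	toy_uncork(&q);
	printf("flushes = %d\n", q.flushes);   /* 1: one batched flush */
	return 0;
}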
635 | |||
636 | /* | ||
637 | * Try to flush an outqueue. | ||
638 | * | ||
639 | * Description: Send everything in q which we legally can, subject to | ||
640 | * congestion limitations. | ||
641 | * Note: This function can be called from multiple contexts, so appropriate | ||
642 | * locking must be used. Today we use the sock lock to protect | ||
643 | * this function. | ||
644 | */ | ||
645 | int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout) | ||
646 | { | ||
647 | struct sctp_packet *packet; | ||
648 | struct sctp_packet singleton; | ||
649 | struct sctp_association *asoc = q->asoc; | ||
650 | __u16 sport = asoc->base.bind_addr.port; | ||
651 | __u16 dport = asoc->peer.port; | ||
652 | __u32 vtag = asoc->peer.i.init_tag; | ||
653 | struct sk_buff_head *queue; | ||
654 | struct sctp_transport *transport = NULL; | ||
655 | struct sctp_transport *new_transport; | ||
656 | struct sctp_chunk *chunk; | ||
657 | sctp_xmit_t status; | ||
658 | int error = 0; | ||
659 | int start_timer = 0; | ||
660 | |||
661 | /* These transports have chunks to send. */ | ||
662 | struct list_head transport_list; | ||
663 | struct list_head *ltransport; | ||
664 | |||
665 | INIT_LIST_HEAD(&transport_list); | ||
666 | packet = NULL; | ||
667 | |||
668 | /* | ||
669 | * 6.10 Bundling | ||
670 | * ... | ||
671 | * When bundling control chunks with DATA chunks, an | ||
672 | * endpoint MUST place control chunks first in the outbound | ||
673 | * SCTP packet. The transmitter MUST transmit DATA chunks | ||
674 | * within a SCTP packet in increasing order of TSN. | ||
675 | * ... | ||
676 | */ | ||
677 | |||
678 | queue = &q->control; | ||
679 | while ((chunk = (struct sctp_chunk *)skb_dequeue(queue)) != NULL) { | ||
680 | /* Pick the right transport to use. */ | ||
681 | new_transport = chunk->transport; | ||
682 | |||
683 | if (!new_transport) { | ||
684 | new_transport = asoc->peer.active_path; | ||
685 | } else if (!new_transport->active) { | ||
686 | /* If the chunk is Heartbeat or Heartbeat Ack, | ||
687 | * send it to chunk->transport, even if it's | ||
688 | * inactive. | ||
689 | * | ||
690 | * 3.3.6 Heartbeat Acknowledgement: | ||
691 | * ... | ||
692 | * A HEARTBEAT ACK is always sent to the source IP | ||
693 | * address of the IP datagram containing the | ||
694 | * HEARTBEAT chunk to which this ack is responding. | ||
695 | * ... | ||
696 | */ | ||
697 | if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT && | ||
698 | chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK) | ||
699 | new_transport = asoc->peer.active_path; | ||
700 | } | ||
701 | |||
702 | /* Are we switching transports? | ||
703 | * Take care of transport locks. | ||
704 | */ | ||
705 | if (new_transport != transport) { | ||
706 | transport = new_transport; | ||
707 | if (list_empty(&transport->send_ready)) { | ||
708 | list_add_tail(&transport->send_ready, | ||
709 | &transport_list); | ||
710 | } | ||
711 | packet = &transport->packet; | ||
712 | sctp_packet_config(packet, vtag, | ||
713 | asoc->peer.ecn_capable); | ||
714 | } | ||
715 | |||
716 | switch (chunk->chunk_hdr->type) { | ||
717 | /* | ||
718 | * 6.10 Bundling | ||
719 | * ... | ||
720 | * An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN | ||
721 | * COMPLETE with any other chunks. [Send them immediately.] | ||
722 | */ | ||
723 | case SCTP_CID_INIT: | ||
724 | case SCTP_CID_INIT_ACK: | ||
725 | case SCTP_CID_SHUTDOWN_COMPLETE: | ||
726 | sctp_packet_init(&singleton, transport, sport, dport); | ||
727 | sctp_packet_config(&singleton, vtag, 0); | ||
728 | sctp_packet_append_chunk(&singleton, chunk); | ||
729 | error = sctp_packet_transmit(&singleton); | ||
730 | if (error < 0) | ||
731 | return error; | ||
732 | break; | ||
733 | |||
734 | case SCTP_CID_ABORT: | ||
735 | case SCTP_CID_SACK: | ||
736 | case SCTP_CID_HEARTBEAT: | ||
737 | case SCTP_CID_HEARTBEAT_ACK: | ||
738 | case SCTP_CID_SHUTDOWN: | ||
739 | case SCTP_CID_SHUTDOWN_ACK: | ||
740 | case SCTP_CID_ERROR: | ||
741 | case SCTP_CID_COOKIE_ECHO: | ||
742 | case SCTP_CID_COOKIE_ACK: | ||
743 | case SCTP_CID_ECN_ECNE: | ||
744 | case SCTP_CID_ECN_CWR: | ||
745 | case SCTP_CID_ASCONF: | ||
746 | case SCTP_CID_ASCONF_ACK: | ||
747 | case SCTP_CID_FWD_TSN: | ||
748 | sctp_packet_transmit_chunk(packet, chunk); | ||
749 | break; | ||
750 | |||
751 | default: | ||
752 | /* We built a chunk with an illegal type! */ | ||
753 | BUG(); | ||
754 | } | ||
755 | } | ||
756 | |||
757 | /* Is it OK to send data chunks? */ | ||
758 | switch (asoc->state) { | ||
759 | case SCTP_STATE_COOKIE_ECHOED: | ||
760 | /* Only allow bundling when this packet has a COOKIE-ECHO | ||
761 | * chunk. | ||
762 | */ | ||
763 | if (!packet || !packet->has_cookie_echo) | ||
764 | break; | ||
765 | |||
766 | /* fallthru */ | ||
767 | case SCTP_STATE_ESTABLISHED: | ||
768 | case SCTP_STATE_SHUTDOWN_PENDING: | ||
769 | case SCTP_STATE_SHUTDOWN_RECEIVED: | ||
770 | /* | ||
771 | * RFC 2960 6.1 Transmission of DATA Chunks | ||
772 | * | ||
773 | * C) When the time comes for the sender to transmit, | ||
774 | * before sending new DATA chunks, the sender MUST | ||
775 | * first transmit any outstanding DATA chunks which | ||
776 | * are marked for retransmission (limited by the | ||
777 | * current cwnd). | ||
778 | */ | ||
779 | if (!list_empty(&q->retransmit)) { | ||
780 | if (transport == asoc->peer.retran_path) | ||
781 | goto retran; | ||
782 | |||
783 | /* Switch transports & prepare the packet. */ | ||
784 | |||
785 | transport = asoc->peer.retran_path; | ||
786 | |||
787 | if (list_empty(&transport->send_ready)) { | ||
788 | list_add_tail(&transport->send_ready, | ||
789 | &transport_list); | ||
790 | } | ||
791 | |||
792 | packet = &transport->packet; | ||
793 | sctp_packet_config(packet, vtag, | ||
794 | asoc->peer.ecn_capable); | ||
795 | retran: | ||
796 | error = sctp_outq_flush_rtx(q, packet, | ||
797 | rtx_timeout, &start_timer); | ||
798 | |||
799 | if (start_timer) | ||
800 | sctp_transport_reset_timers(transport); | ||
801 | |||
802 | /* This can happen on COOKIE-ECHO resend. Only | ||
803 | * one chunk can get bundled with a COOKIE-ECHO. | ||
804 | */ | ||
805 | if (packet->has_cookie_echo) | ||
806 | goto sctp_flush_out; | ||
807 | |||
808 | /* Don't send new data if there is still data | ||
809 | * waiting to retransmit. | ||
810 | */ | ||
811 | if (!list_empty(&q->retransmit)) | ||
812 | goto sctp_flush_out; | ||
813 | } | ||
814 | |||
815 | /* Finally, transmit new packets. */ | ||
816 | start_timer = 0; | ||
817 | queue = &q->out; | ||
818 | |||
819 | while ((chunk = sctp_outq_dequeue_data(q)) != NULL) { | ||
820 | /* RFC 2960 6.5 Every DATA chunk MUST carry a valid | ||
821 | * stream identifier. | ||
822 | */ | ||
823 | if (chunk->sinfo.sinfo_stream >= | ||
824 | asoc->c.sinit_num_ostreams) { | ||
825 | |||
826 | /* Mark as failed send. */ | ||
827 | sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM); | ||
828 | sctp_chunk_free(chunk); | ||
829 | continue; | ||
830 | } | ||
831 | |||
832 | /* Has this chunk expired? */ | ||
833 | if (sctp_chunk_abandoned(chunk)) { | ||
834 | sctp_chunk_fail(chunk, 0); | ||
835 | sctp_chunk_free(chunk); | ||
836 | continue; | ||
837 | } | ||
838 | |||
839 | /* If there is a specified transport, use it. | ||
840 | * Otherwise, we want to use the active path. | ||
841 | */ | ||
842 | new_transport = chunk->transport; | ||
843 | if (!new_transport || !new_transport->active) | ||
844 | new_transport = asoc->peer.active_path; | ||
845 | |||
846 | /* Change packets if necessary. */ | ||
847 | if (new_transport != transport) { | ||
848 | transport = new_transport; | ||
849 | |||
850 | /* Schedule to have this transport's | ||
851 | * packet flushed. | ||
852 | */ | ||
853 | if (list_empty(&transport->send_ready)) { | ||
854 | list_add_tail(&transport->send_ready, | ||
855 | &transport_list); | ||
856 | } | ||
857 | |||
858 | packet = &transport->packet; | ||
859 | sctp_packet_config(packet, vtag, | ||
860 | asoc->peer.ecn_capable); | ||
861 | } | ||
862 | |||
863 | SCTP_DEBUG_PRINTK("sctp_outq_flush(%p, %p[%s]), ", | ||
864 | q, chunk, | ||
865 | chunk && chunk->chunk_hdr ? | ||
866 | sctp_cname(SCTP_ST_CHUNK( | ||
867 | chunk->chunk_hdr->type)) | ||
868 | : "Illegal Chunk"); | ||
869 | |||
870 | SCTP_DEBUG_PRINTK("TX TSN 0x%x skb->head " | ||
871 | "%p skb->users %d.\n", | ||
872 | ntohl(chunk->subh.data_hdr->tsn), | ||
873 | chunk->skb ? chunk->skb->head : NULL, | ||
874 | chunk->skb ? | ||
875 | atomic_read(&chunk->skb->users) : -1); | ||
876 | |||
877 | /* Add the chunk to the packet. */ | ||
878 | status = sctp_packet_transmit_chunk(packet, chunk); | ||
879 | |||
880 | switch (status) { | ||
881 | case SCTP_XMIT_PMTU_FULL: | ||
882 | case SCTP_XMIT_RWND_FULL: | ||
883 | case SCTP_XMIT_NAGLE_DELAY: | ||
884 | /* We could not append this chunk, so put | ||
885 | * the chunk back on the output queue. | ||
886 | */ | ||
887 | SCTP_DEBUG_PRINTK("sctp_outq_flush: could " | ||
888 | "not transmit TSN: 0x%x, status: %d\n", | ||
889 | ntohl(chunk->subh.data_hdr->tsn), | ||
890 | status); | ||
891 | sctp_outq_head_data(q, chunk); | ||
892 | goto sctp_flush_out; | ||
893 | break; | ||
894 | |||
895 | case SCTP_XMIT_OK: | ||
896 | break; | ||
897 | |||
898 | default: | ||
899 | BUG(); | ||
900 | } | ||
901 | |||
902 | /* BUG: We assume that the sctp_packet_transmit() | ||
903 | * call below will succeed all the time and add the | ||
904 | * chunk to the transmitted list and restart the | ||
905 | * timers. | ||
906 | * It is possible that the call can fail under OOM | ||
907 | * conditions. | ||
908 | * | ||
909 | * Is this really a problem? Won't this behave | ||
910 | * like a lost TSN? | ||
911 | */ | ||
912 | list_add_tail(&chunk->transmitted_list, | ||
913 | &transport->transmitted); | ||
914 | |||
915 | sctp_transport_reset_timers(transport); | ||
916 | |||
917 | q->empty = 0; | ||
918 | |||
919 | /* Only let one DATA chunk get bundled with a | ||
920 | * COOKIE-ECHO chunk. | ||
921 | */ | ||
922 | if (packet->has_cookie_echo) | ||
923 | goto sctp_flush_out; | ||
924 | } | ||
925 | break; | ||
926 | |||
927 | default: | ||
928 | /* Do nothing. */ | ||
929 | break; | ||
930 | } | ||
931 | |||
932 | sctp_flush_out: | ||
933 | |||
934 | /* Before returning, examine all the transports touched in | ||
935 | * this call. Right now, we bluntly force clear all the | ||
936 | * transports. Things might change after we implement Nagle. | ||
937 | * But such an examination is still required. | ||
938 | * | ||
939 | * --xguo | ||
940 | */ | ||
941 | while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL ) { | ||
942 | struct sctp_transport *t = list_entry(ltransport, | ||
943 | struct sctp_transport, | ||
944 | send_ready); | ||
945 | packet = &t->packet; | ||
946 | if (!sctp_packet_empty(packet)) | ||
947 | error = sctp_packet_transmit(packet); | ||
948 | } | ||
949 | |||
950 | return error; | ||
951 | } | ||
952 | |||
953 | /* Update unack_data based on the incoming SACK chunk */ | ||
954 | static void sctp_sack_update_unack_data(struct sctp_association *assoc, | ||
955 | struct sctp_sackhdr *sack) | ||
956 | { | ||
957 | sctp_sack_variable_t *frags; | ||
958 | __u16 unack_data; | ||
959 | int i; | ||
960 | |||
961 | unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1; | ||
962 | |||
963 | frags = sack->variable; | ||
964 | for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) { | ||
965 | unack_data -= ((ntohs(frags[i].gab.end) - | ||
966 | ntohs(frags[i].gab.start) + 1)); | ||
967 | } | ||
968 | |||
969 | assoc->unack_data = unack_data; | ||
970 | } | ||
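A worked example of the unack_data arithmetic above, using made-up values for next_tsn, the cumulative ack point, and a single gap ack block:

#include <stdio.h>

int main(void)
{
	unsigned int next_tsn = 111;        /* asoc->next_tsn           */
	unsigned int ctsn_ack_point = 100;  /* asoc->ctsn_ack_point     */
	unsigned int gab_start = 3, gab_end = 4;   /* one gap ack block */

	/* TSNs 101..110 are outstanding: 111 - 100 - 1 = 10 of them. */
	unsigned int unack = next_tsn - ctsn_ack_point - 1;

	/* The gap block acks TSNs 103 and 104, so two fewer are unacked. */
	unack -= gab_end - gab_start + 1;

	printf("unack_data = %u\n", unack);   /* prints 8 */
	return 0;
}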
971 | |||
972 | /* Return the highest new tsn that is acknowledged by the given SACK chunk. */ | ||
973 | static __u32 sctp_highest_new_tsn(struct sctp_sackhdr *sack, | ||
974 | struct sctp_association *asoc) | ||
975 | { | ||
976 | struct list_head *ltransport, *lchunk; | ||
977 | struct sctp_transport *transport; | ||
978 | struct sctp_chunk *chunk; | ||
979 | __u32 highest_new_tsn, tsn; | ||
980 | struct list_head *transport_list = &asoc->peer.transport_addr_list; | ||
981 | |||
982 | highest_new_tsn = ntohl(sack->cum_tsn_ack); | ||
983 | |||
984 | list_for_each(ltransport, transport_list) { | ||
985 | transport = list_entry(ltransport, struct sctp_transport, | ||
986 | transports); | ||
987 | list_for_each(lchunk, &transport->transmitted) { | ||
988 | chunk = list_entry(lchunk, struct sctp_chunk, | ||
989 | transmitted_list); | ||
990 | tsn = ntohl(chunk->subh.data_hdr->tsn); | ||
991 | |||
992 | if (!chunk->tsn_gap_acked && | ||
993 | TSN_lt(highest_new_tsn, tsn) && | ||
994 | sctp_acked(sack, tsn)) | ||
995 | highest_new_tsn = tsn; | ||
996 | } | ||
997 | } | ||
998 | |||
999 | return highest_new_tsn; | ||
1000 | } | ||
1001 | |||
1002 | /* This is where we REALLY process a SACK. | ||
1003 | * | ||
1004 | * Process the SACK against the outqueue. Mostly, this just frees | ||
1005 | * things off the transmitted queue. | ||
1006 | */ | ||
1007 | int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack) | ||
1008 | { | ||
1009 | struct sctp_association *asoc = q->asoc; | ||
1010 | struct sctp_transport *transport; | ||
1011 | struct sctp_chunk *tchunk = NULL; | ||
1012 | struct list_head *lchunk, *transport_list, *pos, *temp; | ||
1013 | sctp_sack_variable_t *frags = sack->variable; | ||
1014 | __u32 sack_ctsn, ctsn, tsn; | ||
1015 | __u32 highest_tsn, highest_new_tsn; | ||
1016 | __u32 sack_a_rwnd; | ||
1017 | unsigned outstanding; | ||
1018 | struct sctp_transport *primary = asoc->peer.primary_path; | ||
1019 | int count_of_newacks = 0; | ||
1020 | |||
1021 | /* Grab the association's destination address list. */ | ||
1022 | transport_list = &asoc->peer.transport_addr_list; | ||
1023 | |||
1024 | sack_ctsn = ntohl(sack->cum_tsn_ack); | ||
1025 | |||
1026 | /* | ||
1027 | * SFR-CACC algorithm: | ||
1028 | * On receipt of a SACK the sender SHOULD execute the | ||
1029 | * following statements. | ||
1030 | * | ||
1031 | * 1) If the cumulative ack in the SACK passes next tsn_at_change | ||
1032 | * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be | ||
1033 | * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for | ||
1034 | * all destinations. | ||
1035 | */ | ||
1036 | if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) { | ||
1037 | primary->cacc.changeover_active = 0; | ||
1038 | list_for_each(pos, transport_list) { | ||
1039 | transport = list_entry(pos, struct sctp_transport, | ||
1040 | transports); | ||
1041 | transport->cacc.cycling_changeover = 0; | ||
1042 | } | ||
1043 | } | ||
1044 | |||
1045 | /* | ||
1046 | * SFR-CACC algorithm: | ||
1047 | * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE | ||
1048 | * is set the receiver of the SACK MUST take the following actions: | ||
1049 | * | ||
1050 | * A) Initialize the cacc_saw_newack to 0 for all destination | ||
1051 | * addresses. | ||
1052 | */ | ||
1053 | if (sack->num_gap_ack_blocks > 0 && | ||
1054 | primary->cacc.changeover_active) { | ||
1055 | list_for_each(pos, transport_list) { | ||
1056 | transport = list_entry(pos, struct sctp_transport, | ||
1057 | transports); | ||
1058 | transport->cacc.cacc_saw_newack = 0; | ||
1059 | } | ||
1060 | } | ||
1061 | |||
1062 | /* Get the highest TSN in the sack. */ | ||
1063 | highest_tsn = sack_ctsn; | ||
1064 | if (sack->num_gap_ack_blocks) | ||
1065 | highest_tsn += | ||
1066 | ntohs(frags[ntohs(sack->num_gap_ack_blocks) - 1].gab.end); | ||
1067 | |||
1068 | if (TSN_lt(asoc->highest_sacked, highest_tsn)) { | ||
1069 | highest_new_tsn = highest_tsn; | ||
1070 | asoc->highest_sacked = highest_tsn; | ||
1071 | } else { | ||
1072 | highest_new_tsn = sctp_highest_new_tsn(sack, asoc); | ||
1073 | } | ||
1074 | |||
1075 | /* Run through the retransmit queue. Credit bytes received | ||
1076 | * and free those chunks that we can. | ||
1077 | */ | ||
1078 | sctp_check_transmitted(q, &q->retransmit, NULL, sack, highest_new_tsn); | ||
1079 | sctp_mark_missing(q, &q->retransmit, NULL, highest_new_tsn, 0); | ||
1080 | |||
1081 | /* Run through the transmitted queue. | ||
1082 | * Credit bytes received and free those chunks which we can. | ||
1083 | * | ||
1084 | * This is a MASSIVE candidate for optimization. | ||
1085 | */ | ||
1086 | list_for_each(pos, transport_list) { | ||
1087 | transport = list_entry(pos, struct sctp_transport, | ||
1088 | transports); | ||
1089 | sctp_check_transmitted(q, &transport->transmitted, | ||
1090 | transport, sack, highest_new_tsn); | ||
1091 | /* | ||
1092 | * SFR-CACC algorithm: | ||
1093 | * C) Let count_of_newacks be the number of | ||
1094 | * destinations for which cacc_saw_newack is set. | ||
1095 | */ | ||
1096 | if (transport->cacc.cacc_saw_newack) | ||
1097 | count_of_newacks++; | ||
1098 | } | ||
1099 | |||
1100 | list_for_each(pos, transport_list) { | ||
1101 | transport = list_entry(pos, struct sctp_transport, | ||
1102 | transports); | ||
1103 | sctp_mark_missing(q, &transport->transmitted, transport, | ||
1104 | highest_new_tsn, count_of_newacks); | ||
1105 | } | ||
1106 | |||
1107 | /* Move the Cumulative TSN Ack Point if appropriate. */ | ||
1108 | if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) | ||
1109 | asoc->ctsn_ack_point = sack_ctsn; | ||
1110 | |||
1111 | /* Update unack_data field in the assoc. */ | ||
1112 | sctp_sack_update_unack_data(asoc, sack); | ||
1113 | |||
1114 | ctsn = asoc->ctsn_ack_point; | ||
1115 | |||
1116 | /* Throw away stuff rotting on the sack queue. */ | ||
1117 | list_for_each_safe(lchunk, temp, &q->sacked) { | ||
1118 | tchunk = list_entry(lchunk, struct sctp_chunk, | ||
1119 | transmitted_list); | ||
1120 | tsn = ntohl(tchunk->subh.data_hdr->tsn); | ||
1121 | if (TSN_lte(tsn, ctsn)) | ||
1122 | sctp_chunk_free(tchunk); | ||
1123 | } | ||
1124 | |||
1125 | /* ii) Set rwnd equal to the newly received a_rwnd minus the | ||
1126 | * number of bytes still outstanding after processing the | ||
1127 | * Cumulative TSN Ack and the Gap Ack Blocks. | ||
1128 | */ | ||
1129 | |||
1130 | sack_a_rwnd = ntohl(sack->a_rwnd); | ||
1131 | outstanding = q->outstanding_bytes; | ||
1132 | |||
1133 | if (outstanding < sack_a_rwnd) | ||
1134 | sack_a_rwnd -= outstanding; | ||
1135 | else | ||
1136 | sack_a_rwnd = 0; | ||
1137 | |||
1138 | asoc->peer.rwnd = sack_a_rwnd; | ||
1139 | |||
1140 | sctp_generate_fwdtsn(q, sack_ctsn); | ||
1141 | |||
1142 | SCTP_DEBUG_PRINTK("%s: sack Cumulative TSN Ack is 0x%x.\n", | ||
1143 | __FUNCTION__, sack_ctsn); | ||
1144 | SCTP_DEBUG_PRINTK("%s: Cumulative TSN Ack of association, " | ||
1145 | "%p is 0x%x. Adv peer ack point: 0x%x\n", | ||
1146 | __FUNCTION__, asoc, ctsn, asoc->adv_peer_ack_point); | ||
1147 | |||
1148 | /* See if all chunks are acked. | ||
1149 | * Make sure the empty queue handler will get run later. | ||
1150 | */ | ||
1151 | q->empty = skb_queue_empty(&q->out) && skb_queue_empty(&q->control) && | ||
1152 | list_empty(&q->retransmit); | ||
1153 | if (!q->empty) | ||
1154 | goto finish; | ||
1155 | |||
1156 | list_for_each(pos, transport_list) { | ||
1157 | transport = list_entry(pos, struct sctp_transport, | ||
1158 | transports); | ||
1159 | q->empty = q->empty && list_empty(&transport->transmitted); | ||
1160 | if (!q->empty) | ||
1161 | goto finish; | ||
1162 | } | ||
1163 | |||
1164 | SCTP_DEBUG_PRINTK("sack queue is empty.\n"); | ||
1165 | finish: | ||
1166 | return q->empty; | ||
1167 | } | ||
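A worked example of the a_rwnd adjustment near the end of sctp_outq_sack(): the advertised window from the SACK is reduced by the bytes still outstanding and clamped at zero. The values are illustrative:

#include <stdio.h>

int main(void)
{
	unsigned int sack_a_rwnd = 6000;   /* a_rwnd carried in the SACK */
	unsigned int outstanding = 4500;   /* q->outstanding_bytes       */

	if (outstanding < sack_a_rwnd)
		sack_a_rwnd -= outstanding;
	else
		sack_a_rwnd = 0;

	printf("peer.rwnd = %u\n", sack_a_rwnd);   /* prints 1500 */
	return 0;
}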
1168 | |||
1169 | /* Is the outqueue empty? */ | ||
1170 | int sctp_outq_is_empty(const struct sctp_outq *q) | ||
1171 | { | ||
1172 | return q->empty; | ||
1173 | } | ||
1174 | |||
1175 | /******************************************************************** | ||
1176 | * 2nd Level Abstractions | ||
1177 | ********************************************************************/ | ||
1178 | |||
1179 | /* Go through a transport's transmitted list or the association's retransmit | ||
1180 | * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked. | ||
1181 | * The retransmit list will not have an associated transport. | ||
1182 | * | ||
1183 | * I added coherent debug information output. --xguo | ||
1184 | * | ||
1185 | * Instead of printing 'sacked' or 'kept' for each TSN on the | ||
1186 | * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5. | ||
1187 | * KEPT TSN6-TSN7, etc. | ||
1188 | */ | ||
1189 | static void sctp_check_transmitted(struct sctp_outq *q, | ||
1190 | struct list_head *transmitted_queue, | ||
1191 | struct sctp_transport *transport, | ||
1192 | struct sctp_sackhdr *sack, | ||
1193 | __u32 highest_new_tsn_in_sack) | ||
1194 | { | ||
1195 | struct list_head *lchunk; | ||
1196 | struct sctp_chunk *tchunk; | ||
1197 | struct list_head tlist; | ||
1198 | __u32 tsn; | ||
1199 | __u32 sack_ctsn; | ||
1200 | __u32 rtt; | ||
1201 | __u8 restart_timer = 0; | ||
1202 | int bytes_acked = 0; | ||
1203 | |||
1204 | /* These state variables are for coherent debug output. --xguo */ | ||
1205 | |||
1206 | #if SCTP_DEBUG | ||
1207 | __u32 dbg_ack_tsn = 0; /* An ACKed TSN range starts here... */ | ||
1208 | __u32 dbg_last_ack_tsn = 0; /* ...and finishes here. */ | ||
1209 | __u32 dbg_kept_tsn = 0; /* An un-ACKed range starts here... */ | ||
1210 | __u32 dbg_last_kept_tsn = 0; /* ...and finishes here. */ | ||
1211 | |||
1212 | /* 0 : The last TSN was ACKed. | ||
1213 | * 1 : The last TSN was NOT ACKed (i.e. KEPT). | ||
1214 | * -1: We need to initialize. | ||
1215 | */ | ||
1216 | int dbg_prt_state = -1; | ||
1217 | #endif /* SCTP_DEBUG */ | ||
1218 | |||
1219 | sack_ctsn = ntohl(sack->cum_tsn_ack); | ||
1220 | |||
1221 | INIT_LIST_HEAD(&tlist); | ||
1222 | |||
1223 | /* The while loop will skip empty transmitted queues. */ | ||
1224 | while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) { | ||
1225 | tchunk = list_entry(lchunk, struct sctp_chunk, | ||
1226 | transmitted_list); | ||
1227 | |||
1228 | if (sctp_chunk_abandoned(tchunk)) { | ||
1229 | /* Move the chunk to abandoned list. */ | ||
1230 | sctp_insert_list(&q->abandoned, lchunk); | ||
1231 | continue; | ||
1232 | } | ||
1233 | |||
1234 | tsn = ntohl(tchunk->subh.data_hdr->tsn); | ||
1235 | if (sctp_acked(sack, tsn)) { | ||
1236 | /* If this queue is the retransmit queue, the | ||
1237 | * retransmit timer has already reclaimed | ||
1238 | * the outstanding bytes for this chunk, so only | ||
1239 | * count bytes associated with a transport. | ||
1240 | */ | ||
1241 | if (transport) { | ||
1242 | /* If this chunk is being used for RTT | ||
1243 | * measurement, calculate the RTT and update | ||
1244 | * the RTO using this value. | ||
1245 | * | ||
1246 | * 6.3.1 C5) Karn's algorithm: RTT measurements | ||
1247 | * MUST NOT be made using packets that were | ||
1248 | * retransmitted (and thus for which it is | ||
1249 | * ambiguous whether the reply was for the | ||
1250 | * first instance of the packet or a later | ||
1251 | * instance). | ||
1252 | */ | ||
1253 | if (!tchunk->tsn_gap_acked && | ||
1254 | !tchunk->resent && | ||
1255 | tchunk->rtt_in_progress) { | ||
1256 | rtt = jiffies - tchunk->sent_at; | ||
1257 | sctp_transport_update_rto(transport, | ||
1258 | rtt); | ||
1259 | } | ||
1260 | } | ||
1261 | if (TSN_lte(tsn, sack_ctsn)) { | ||
1262 | /* RFC 2960 6.3.2 Retransmission Timer Rules | ||
1263 | * | ||
1264 | * R3) Whenever a SACK is received | ||
1265 | * that acknowledges the DATA chunk | ||
1266 | * with the earliest outstanding TSN | ||
1267 | * for that address, restart T3-rtx | ||
1268 | * timer for that address with its | ||
1269 | * current RTO. | ||
1270 | */ | ||
1271 | restart_timer = 1; | ||
1272 | |||
1273 | if (!tchunk->tsn_gap_acked) { | ||
1274 | tchunk->tsn_gap_acked = 1; | ||
1275 | bytes_acked += sctp_data_size(tchunk); | ||
1276 | /* | ||
1277 | * SFR-CACC algorithm: | ||
1278 | * 2) If the SACK contains gap acks | ||
1279 | * and the flag CHANGEOVER_ACTIVE is | ||
1280 | * set the receiver of the SACK MUST | ||
1281 | * take the following action: | ||
1282 | * | ||
1283 | * B) For each TSN t being acked that | ||
1284 | * has not been acked in any SACK so | ||
1285 | * far, set cacc_saw_newack to 1 for | ||
1286 | * the destination that the TSN was | ||
1287 | * sent to. | ||
1288 | */ | ||
1289 | if (transport && | ||
1290 | sack->num_gap_ack_blocks && | ||
1291 | q->asoc->peer.primary_path->cacc. | ||
1292 | changeover_active) | ||
1293 | transport->cacc.cacc_saw_newack | ||
1294 | = 1; | ||
1295 | } | ||
1296 | |||
1297 | list_add_tail(&tchunk->transmitted_list, | ||
1298 | &q->sacked); | ||
1299 | } else { | ||
1300 | /* RFC2960 7.2.4, sctpimpguide-05 2.8.2 | ||
1301 | * M2) Each time a SACK arrives reporting | ||
1302 | * 'Stray DATA chunk(s)' record the highest TSN | ||
1303 | * reported as newly acknowledged, call this | ||
1304 | * value 'HighestTSNinSack'. A newly | ||
1305 | * acknowledged DATA chunk is one not | ||
1306 | * previously acknowledged in a SACK. | ||
1307 | * | ||
1308 | * When the SCTP sender of data receives a SACK | ||
1309 | * chunk that acknowledges, for the first time, | ||
1310 | * the receipt of a DATA chunk, all the still | ||
1311 | * unacknowledged DATA chunks whose TSN is | ||
1312 | * older than that newly acknowledged DATA | ||
1313 | * chunk, are qualified as 'Stray DATA chunks'. | ||
1314 | */ | ||
1315 | if (!tchunk->tsn_gap_acked) { | ||
1316 | tchunk->tsn_gap_acked = 1; | ||
1317 | bytes_acked += sctp_data_size(tchunk); | ||
1318 | } | ||
1319 | list_add_tail(lchunk, &tlist); | ||
1320 | } | ||
1321 | |||
1322 | #if SCTP_DEBUG | ||
1323 | switch (dbg_prt_state) { | ||
1324 | case 0: /* last TSN was ACKed */ | ||
1325 | if (dbg_last_ack_tsn + 1 == tsn) { | ||
1326 | /* This TSN belongs to the | ||
1327 | * current ACK range. | ||
1328 | */ | ||
1329 | break; | ||
1330 | } | ||
1331 | |||
1332 | if (dbg_last_ack_tsn != dbg_ack_tsn) { | ||
1333 | /* Display the end of the | ||
1334 | * current range. | ||
1335 | */ | ||
1336 | SCTP_DEBUG_PRINTK("-%08x", | ||
1337 | dbg_last_ack_tsn); | ||
1338 | } | ||
1339 | |||
1340 | /* Start a new range. */ | ||
1341 | SCTP_DEBUG_PRINTK(",%08x", tsn); | ||
1342 | dbg_ack_tsn = tsn; | ||
1343 | break; | ||
1344 | |||
1345 | case 1: /* The last TSN was NOT ACKed. */ | ||
1346 | if (dbg_last_kept_tsn != dbg_kept_tsn) { | ||
1347 | /* Display the end of current range. */ | ||
1348 | SCTP_DEBUG_PRINTK("-%08x", | ||
1349 | dbg_last_kept_tsn); | ||
1350 | } | ||
1351 | |||
1352 | SCTP_DEBUG_PRINTK("\n"); | ||
1353 | |||
1354 | /* FALL THROUGH... */ | ||
1355 | default: | ||
1356 | /* This is the first-ever TSN we examined. */ | ||
1357 | /* Start a new range of ACK-ed TSNs. */ | ||
1358 | SCTP_DEBUG_PRINTK("ACKed: %08x", tsn); | ||
1359 | dbg_prt_state = 0; | ||
1360 | dbg_ack_tsn = tsn; | ||
1361 | } | ||
1362 | |||
1363 | dbg_last_ack_tsn = tsn; | ||
1364 | #endif /* SCTP_DEBUG */ | ||
1365 | |||
1366 | } else { | ||
1367 | if (tchunk->tsn_gap_acked) { | ||
1368 | SCTP_DEBUG_PRINTK("%s: Receiver reneged on " | ||
1369 | "data TSN: 0x%x\n", | ||
1370 | __FUNCTION__, | ||
1371 | tsn); | ||
1372 | tchunk->tsn_gap_acked = 0; | ||
1373 | |||
1374 | bytes_acked -= sctp_data_size(tchunk); | ||
1375 | |||
1376 | /* RFC 2960 6.3.2 Retransmission Timer Rules | ||
1377 | * | ||
1378 | * R4) Whenever a SACK is received missing a | ||
1379 | * TSN that was previously acknowledged via a | ||
1380 | * Gap Ack Block, start T3-rtx for the | ||
1381 | * destination address to which the DATA | ||
1382 | * chunk was originally | ||
1383 | * transmitted if it is not already running. | ||
1384 | */ | ||
1385 | restart_timer = 1; | ||
1386 | } | ||
1387 | |||
1388 | list_add_tail(lchunk, &tlist); | ||
1389 | |||
1390 | #if SCTP_DEBUG | ||
1391 | /* See the above comments on ACK-ed TSNs. */ | ||
1392 | switch (dbg_prt_state) { | ||
1393 | case 1: | ||
1394 | if (dbg_last_kept_tsn + 1 == tsn) | ||
1395 | break; | ||
1396 | |||
1397 | if (dbg_last_kept_tsn != dbg_kept_tsn) | ||
1398 | SCTP_DEBUG_PRINTK("-%08x", | ||
1399 | dbg_last_kept_tsn); | ||
1400 | |||
1401 | SCTP_DEBUG_PRINTK(",%08x", tsn); | ||
1402 | dbg_kept_tsn = tsn; | ||
1403 | break; | ||
1404 | |||
1405 | case 0: | ||
1406 | if (dbg_last_ack_tsn != dbg_ack_tsn) | ||
1407 | SCTP_DEBUG_PRINTK("-%08x", | ||
1408 | dbg_last_ack_tsn); | ||
1409 | SCTP_DEBUG_PRINTK("\n"); | ||
1410 | |||
1411 | /* FALL THROUGH... */ | ||
1412 | default: | ||
1413 | SCTP_DEBUG_PRINTK("KEPT: %08x",tsn); | ||
1414 | dbg_prt_state = 1; | ||
1415 | dbg_kept_tsn = tsn; | ||
1416 | } | ||
1417 | |||
1418 | dbg_last_kept_tsn = tsn; | ||
1419 | #endif /* SCTP_DEBUG */ | ||
1420 | } | ||
1421 | } | ||
1422 | |||
1423 | #if SCTP_DEBUG | ||
1424 | /* Finish off the last range, displaying its ending TSN. */ | ||
1425 | switch (dbg_prt_state) { | ||
1426 | case 0: | ||
1427 | if (dbg_last_ack_tsn != dbg_ack_tsn) { | ||
1428 | SCTP_DEBUG_PRINTK("-%08x\n", dbg_last_ack_tsn); | ||
1429 | } else { | ||
1430 | SCTP_DEBUG_PRINTK("\n"); | ||
1431 | } | ||
1432 | break; | ||
1433 | |||
1434 | case 1: | ||
1435 | if (dbg_last_kept_tsn != dbg_kept_tsn) { | ||
1436 | SCTP_DEBUG_PRINTK("-%08x\n", dbg_last_kept_tsn); | ||
1437 | } else { | ||
1438 | SCTP_DEBUG_PRINTK("\n"); | ||
1439 | } | ||
1440 | } | ||
1441 | #endif /* SCTP_DEBUG */ | ||
1442 | if (transport) { | ||
1443 | if (bytes_acked) { | ||
1444 | /* 8.2. When an outstanding TSN is acknowledged, | ||
1445 | * the endpoint shall clear the error counter of | ||
1446 | * the destination transport address to which the | ||
1447 | * DATA chunk was last sent. | ||
1448 | * The association's overall error counter is | ||
1449 | * also cleared. | ||
1450 | */ | ||
1451 | transport->error_count = 0; | ||
1452 | transport->asoc->overall_error_count = 0; | ||
1453 | |||
1454 | /* Mark the destination transport address as | ||
1455 | * active if it is not so marked. | ||
1456 | */ | ||
1457 | if (!transport->active) { | ||
1458 | sctp_assoc_control_transport( | ||
1459 | transport->asoc, | ||
1460 | transport, | ||
1461 | SCTP_TRANSPORT_UP, | ||
1462 | SCTP_RECEIVED_SACK); | ||
1463 | } | ||
1464 | |||
1465 | sctp_transport_raise_cwnd(transport, sack_ctsn, | ||
1466 | bytes_acked); | ||
1467 | |||
1468 | transport->flight_size -= bytes_acked; | ||
1469 | q->outstanding_bytes -= bytes_acked; | ||
1470 | } else { | ||
1471 | /* RFC 2960 6.1, sctpimpguide-06 2.15.2 | ||
1472 | * When a sender is doing zero window probing, it | ||
1473 | * should not timeout the association if it continues | ||
1474 | * to receive new packets from the receiver. The | ||
1475 | * reason is that the receiver MAY keep its window | ||
1476 | * closed for an indefinite time. | ||
1477 | * A sender is doing zero window probing when the | ||
1478 | * receiver's advertised window is zero, and there is | ||
1479 | * only one data chunk in flight to the receiver. | ||
1480 | */ | ||
1481 | if (!q->asoc->peer.rwnd && | ||
1482 | !list_empty(&tlist) && | ||
1483 | (sack_ctsn+2 == q->asoc->next_tsn)) { | ||
1484 | SCTP_DEBUG_PRINTK("%s: SACK received for zero " | ||
1485 | "window probe: %u\n", | ||
1486 | __FUNCTION__, sack_ctsn); | ||
1487 | q->asoc->overall_error_count = 0; | ||
1488 | transport->error_count = 0; | ||
1489 | } | ||
1490 | } | ||
1491 | |||
1492 | /* RFC 2960 6.3.2 Retransmission Timer Rules | ||
1493 | * | ||
1494 | * R2) Whenever all outstanding data sent to an address have | ||
1495 | * been acknowledged, turn off the T3-rtx timer of that | ||
1496 | * address. | ||
1497 | */ | ||
1498 | if (!transport->flight_size) { | ||
1499 | if (timer_pending(&transport->T3_rtx_timer) && | ||
1500 | del_timer(&transport->T3_rtx_timer)) { | ||
1501 | sctp_transport_put(transport); | ||
1502 | } | ||
1503 | } else if (restart_timer) { | ||
1504 | if (!mod_timer(&transport->T3_rtx_timer, | ||
1505 | jiffies + transport->rto)) | ||
1506 | sctp_transport_hold(transport); | ||
1507 | } | ||
1508 | } | ||
1509 | |||
1510 | list_splice(&tlist, transmitted_queue); | ||
1511 | } | ||
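A sketch of the Karn's-algorithm guard used above: an RTT sample is taken only for a chunk that was never retransmitted and is the one currently being timed. The toy_chunk structure is a simplified stand-in, and the RTO update (sctp_transport_update_rto) is replaced by a print:

#include <stdio.h>

struct toy_chunk {
	int tsn_gap_acked;
	int resent;
	int rtt_in_progress;
	unsigned long sent_at;     /* in jiffies */
};

static void maybe_sample_rtt(const struct toy_chunk *c, unsigned long now)
{
	if (!c->tsn_gap_acked && !c->resent && c->rtt_in_progress)
		printf("RTT sample: %lu jiffies\n", now - c->sent_at);
	else
		printf("no RTT sample (Karn's algorithm)\n");
}

int main(void)
{
	struct toy_chunk fresh = { 0, 0, 1, 1000 };
	struct toy_chunk rexmit = { 0, 1, 1, 1000 };

	maybe_sample_rtt(&fresh, 1025);    /* sampled: 25 jiffies        */
	maybe_sample_rtt(&rexmit, 1025);   /* skipped: was retransmitted */
	return 0;
}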
1512 | |||
1513 | /* Mark chunks as missing; as a consequence they may get retransmitted. */ | ||
1514 | static void sctp_mark_missing(struct sctp_outq *q, | ||
1515 | struct list_head *transmitted_queue, | ||
1516 | struct sctp_transport *transport, | ||
1517 | __u32 highest_new_tsn_in_sack, | ||
1518 | int count_of_newacks) | ||
1519 | { | ||
1520 | struct sctp_chunk *chunk; | ||
1521 | struct list_head *pos; | ||
1522 | __u32 tsn; | ||
1523 | char do_fast_retransmit = 0; | ||
1524 | struct sctp_transport *primary = q->asoc->peer.primary_path; | ||
1525 | |||
1526 | list_for_each(pos, transmitted_queue) { | ||
1527 | |||
1528 | chunk = list_entry(pos, struct sctp_chunk, transmitted_list); | ||
1529 | tsn = ntohl(chunk->subh.data_hdr->tsn); | ||
1530 | |||
1531 | /* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all | ||
1532 | * 'Unacknowledged TSN's', if the TSN number of an | ||
1533 | * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack' | ||
1534 | * value, increment the 'TSN.Missing.Report' count on that | ||
1535 | * chunk if it has NOT been fast retransmitted or marked for | ||
1536 | * fast retransmit already. | ||
1537 | */ | ||
1538 | if (!chunk->fast_retransmit && | ||
1539 | !chunk->tsn_gap_acked && | ||
1540 | TSN_lt(tsn, highest_new_tsn_in_sack)) { | ||
1541 | |||
1542 | /* SFR-CACC may require us to skip marking | ||
1543 | * this chunk as missing. | ||
1544 | */ | ||
1545 | if (!transport || !sctp_cacc_skip(primary, transport, | ||
1546 | count_of_newacks, tsn)) { | ||
1547 | chunk->tsn_missing_report++; | ||
1548 | |||
1549 | SCTP_DEBUG_PRINTK( | ||
1550 | "%s: TSN 0x%x missing counter: %d\n", | ||
1551 | __FUNCTION__, tsn, | ||
1552 | chunk->tsn_missing_report); | ||
1553 | } | ||
1554 | } | ||
1555 | /* | ||
1556 | * M4) If any DATA chunk is found to have a | ||
1557 | * 'TSN.Missing.Report' | ||
1558 | * value larger than or equal to 4, mark that chunk for | ||
1559 | * retransmission and start the fast retransmit procedure. | ||
1560 | */ | ||
1561 | |||
1562 | if (chunk->tsn_missing_report >= 4) { | ||
1563 | chunk->fast_retransmit = 1; | ||
1564 | do_fast_retransmit = 1; | ||
1565 | } | ||
1566 | } | ||
1567 | |||
1568 | if (transport) { | ||
1569 | if (do_fast_retransmit) | ||
1570 | sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX); | ||
1571 | |||
1572 | SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, " | ||
1573 | "ssthresh: %d, flight_size: %d, pba: %d\n", | ||
1574 | __FUNCTION__, transport, transport->cwnd, | ||
1575 | transport->ssthresh, transport->flight_size, | ||
1576 | transport->partial_bytes_acked); | ||
1577 | } | ||
1578 | } | ||
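A worked sketch of the M3/M4 rules above: each SACK that reports a higher newly acked TSN while this TSN is still missing bumps TSN.Missing.Report, and the chunk is marked for fast retransmit once the count reaches 4:

#include <stdio.h>

int main(void)
{
	int tsn_missing_report = 0;
	int fast_retransmit = 0;
	int sack;

	for (sack = 1; sack <= 4; sack++) {
		tsn_missing_report++;               /* rule M3 */
		if (tsn_missing_report >= 4)        /* rule M4 */
			fast_retransmit = 1;
		printf("after SACK %d: missing=%d fast_rtx=%d\n",
		       sack, tsn_missing_report, fast_retransmit);
	}
	return 0;
}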
1579 | |||
1580 | /* Is the given TSN acked by this packet? */ | ||
1581 | static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn) | ||
1582 | { | ||
1583 | int i; | ||
1584 | sctp_sack_variable_t *frags; | ||
1585 | __u16 gap; | ||
1586 | __u32 ctsn = ntohl(sack->cum_tsn_ack); | ||
1587 | |||
1588 | if (TSN_lte(tsn, ctsn)) | ||
1589 | goto pass; | ||
1590 | |||
1591 | /* 3.3.4 Selective Acknowledgement (SACK) (3): | ||
1592 | * | ||
1593 | * Gap Ack Blocks: | ||
1594 | * These fields contain the Gap Ack Blocks. They are repeated | ||
1595 | * for each Gap Ack Block up to the number of Gap Ack Blocks | ||
1596 | * defined in the Number of Gap Ack Blocks field. All DATA | ||
1597 | * chunks with TSNs greater than or equal to (Cumulative TSN | ||
1598 | * Ack + Gap Ack Block Start) and less than or equal to | ||
1599 | * (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack | ||
1600 | * Block are assumed to have been received correctly. | ||
1601 | */ | ||
1602 | |||
1603 | frags = sack->variable; | ||
1604 | gap = tsn - ctsn; | ||
1605 | for (i = 0; i < ntohs(sack->num_gap_ack_blocks); ++i) { | ||
1606 | if (TSN_lte(ntohs(frags[i].gab.start), gap) && | ||
1607 | TSN_lte(gap, ntohs(frags[i].gab.end))) | ||
1608 | goto pass; | ||
1609 | } | ||
1610 | |||
1611 | return 0; | ||
1612 | pass: | ||
1613 | return 1; | ||
1614 | } | ||
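A worked example of the gap-ack membership test in sctp_acked(): gap offsets are relative to the cumulative TSN ack, so with a cumulative ack of 100 a block covering offsets 3..5 acks TSNs 103..105. tsn_lte below is a local stand-in for the kernel's TSN_lte:

#include <stdio.h>

static int tsn_lte(unsigned int a, unsigned int b)
{
	return (int)(a - b) <= 0;
}

static int acked(unsigned int tsn, unsigned int ctsn,
		 unsigned int start, unsigned int end)
{
	unsigned int gap;

	if (tsn_lte(tsn, ctsn))
		return 1;                  /* covered by the cumulative ack */
	gap = tsn - ctsn;                  /* offset into the gap region    */
	return tsn_lte(start, gap) && tsn_lte(gap, end);
}

int main(void)
{
	printf("%d %d %d\n",
	       acked(100, 100, 3, 5),      /* 1: at or below the ctsn */
	       acked(104, 100, 3, 5),      /* 1: inside block 3..5    */
	       acked(106, 100, 3, 5));     /* 0: past the block       */
	return 0;
}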
1615 | |||
1616 | static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist, | ||
1617 | int nskips, __u16 stream) | ||
1618 | { | ||
1619 | int i; | ||
1620 | |||
1621 | for (i = 0; i < nskips; i++) { | ||
1622 | if (skiplist[i].stream == stream) | ||
1623 | return i; | ||
1624 | } | ||
1625 | return i; | ||
1626 | } | ||
1627 | |||
1628 | /* Create and add a fwdtsn chunk to the outq's control queue if needed. */ | ||
1629 | static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn) | ||
1630 | { | ||
1631 | struct sctp_association *asoc = q->asoc; | ||
1632 | struct sctp_chunk *ftsn_chunk = NULL; | ||
1633 | struct sctp_fwdtsn_skip ftsn_skip_arr[10]; | ||
1634 | int nskips = 0; | ||
1635 | int skip_pos = 0; | ||
1636 | __u32 tsn; | ||
1637 | struct sctp_chunk *chunk; | ||
1638 | struct list_head *lchunk, *temp; | ||
1639 | |||
1640 | /* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the | ||
1641 | * received SACK. | ||
1642 | * | ||
1643 | * If (Advanced.Peer.Ack.Point < SackCumAck), then update | ||
1644 | * Advanced.Peer.Ack.Point to be equal to SackCumAck. | ||
1645 | */ | ||
1646 | if (TSN_lt(asoc->adv_peer_ack_point, ctsn)) | ||
1647 | asoc->adv_peer_ack_point = ctsn; | ||
1648 | |||
1649 | /* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point" | ||
1650 | * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as | ||
1651 | * the chunk next in the out-queue space is marked as "abandoned" as | ||
1652 | * shown in the following example: | ||
1653 | * | ||
1654 | * Assuming that a SACK arrived with the Cumulative TSN ACK 102 | ||
1655 | * and the Advanced.Peer.Ack.Point is updated to this value: | ||
1656 | * | ||
1657 | * out-queue at the end of ==> out-queue after Adv.Ack.Point | ||
1658 | * normal SACK processing local advancement | ||
1659 | * ... ... | ||
1660 | * Adv.Ack.Pt-> 102 acked 102 acked | ||
1661 | * 103 abandoned 103 abandoned | ||
1662 | * 104 abandoned Adv.Ack.P-> 104 abandoned | ||
1663 | * 105 105 | ||
1664 | * 106 acked 106 acked | ||
1665 | * ... ... | ||
1666 | * | ||
1667 | * In this example, the data sender successfully advanced the | ||
1668 | * "Advanced.Peer.Ack.Point" from 102 to 104 locally. | ||
1669 | */ | ||
1670 | list_for_each_safe(lchunk, temp, &q->abandoned) { | ||
1671 | chunk = list_entry(lchunk, struct sctp_chunk, | ||
1672 | transmitted_list); | ||
1673 | tsn = ntohl(chunk->subh.data_hdr->tsn); | ||
1674 | |||
1675 | /* Remove any chunks in the abandoned queue that are acked by | ||
1676 | * the ctsn. | ||
1677 | */ | ||
1678 | if (TSN_lte(tsn, ctsn)) { | ||
1679 | list_del_init(lchunk); | ||
1680 | if (!chunk->tsn_gap_acked) { | ||
1681 | chunk->transport->flight_size -= | ||
1682 | sctp_data_size(chunk); | ||
1683 | q->outstanding_bytes -= sctp_data_size(chunk); | ||
1684 | } | ||
1685 | sctp_chunk_free(chunk); | ||
1686 | } else { | ||
1687 | if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) { | ||
1688 | asoc->adv_peer_ack_point = tsn; | ||
1689 | if (chunk->chunk_hdr->flags & | ||
1690 | SCTP_DATA_UNORDERED) | ||
1691 | continue; | ||
1692 | skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], | ||
1693 | nskips, | ||
1694 | chunk->subh.data_hdr->stream); | ||
1695 | ftsn_skip_arr[skip_pos].stream = | ||
1696 | chunk->subh.data_hdr->stream; | ||
1697 | ftsn_skip_arr[skip_pos].ssn = | ||
1698 | chunk->subh.data_hdr->ssn; | ||
1699 | if (skip_pos == nskips) | ||
1700 | nskips++; | ||
1701 | if (nskips == 10) | ||
1702 | break; | ||
1703 | } else | ||
1704 | break; | ||
1705 | } | ||
1706 | } | ||
1707 | |||
1708 | /* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point" | ||
1709 | * is greater than the Cumulative TSN ACK carried in the received | ||
1710 | * SACK, the data sender MUST send the data receiver a FORWARD TSN | ||
1711 | * chunk containing the latest value of the | ||
1712 | * "Advanced.Peer.Ack.Point". | ||
1713 | * | ||
1714 | * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD | ||
1715 | * list each stream and sequence number in the forwarded TSN. This | ||
1716 | * information will enable the receiver to easily find any | ||
1717 | * stranded TSN's waiting on stream reorder queues. Each stream | ||
1718 | * SHOULD only be reported once; this means that if multiple | ||
1719 | * abandoned messages occur in the same stream then only the | ||
1720 | * highest abandoned stream sequence number is reported. If the | ||
1721 | * total size of the FORWARD TSN does NOT fit in a single MTU then | ||
1722 | * the sender of the FORWARD TSN SHOULD lower the | ||
1723 | * Advanced.Peer.Ack.Point to the last TSN that will fit in a | ||
1724 | * single MTU. | ||
1725 | */ | ||
1726 | if (asoc->adv_peer_ack_point > ctsn) | ||
1727 | ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point, | ||
1728 | nskips, &ftsn_skip_arr[0]); | ||
1729 | |||
1730 | if (ftsn_chunk) { | ||
1731 | __skb_queue_tail(&q->control, (struct sk_buff *)ftsn_chunk); | ||
1732 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); | ||
1733 | } | ||
1734 | } | ||
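The C4 behaviour implemented above (at most one skip entry per stream, keeping only the highest abandoned SSN, capped at ten entries) can be exercised outside the kernel. The following is a minimal userspace sketch, not kernel code: it assumes a small hard-coded array of abandoned (stream, ssn) pairs already in TSN order and builds the skip list the same way the loop above does with sctp_get_skip_pos().

#include <stdio.h>

#define MAX_SKIPS 10

struct skip { unsigned stream, ssn; };

/* Mirror of sctp_get_skip_pos(): index of 'stream' in the list, or
 * nskips if the stream has not been seen yet.
 */
static int skip_pos(const struct skip *list, int nskips, unsigned stream)
{
	int i;

	for (i = 0; i < nskips; i++)
		if (list[i].stream == stream)
			return i;
	return i;
}

int main(void)
{
	/* Abandoned chunks in TSN order: (stream, ssn) pairs. */
	const struct skip abandoned[] = { {0, 7}, {1, 3}, {0, 8}, {2, 1} };
	struct skip skips[MAX_SKIPS];
	int nskips = 0, i, pos;

	for (i = 0; i < (int)(sizeof(abandoned) / sizeof(abandoned[0])); i++) {
		pos = skip_pos(skips, nskips, abandoned[i].stream);
		skips[pos] = abandoned[i];	/* a later (higher) SSN overwrites */
		if (pos == nskips)
			nskips++;
		if (nskips == MAX_SKIPS)
			break;
	}

	for (i = 0; i < nskips; i++)	/* prints 0/8, 1/3, 2/1 */
		printf("stream %u: skip up to ssn %u\n",
		       skips[i].stream, skips[i].ssn);
	return 0;
}

Stream 0 appears twice in the input but only its highest SSN (8) survives, which is exactly the per-stream property rule C4 asks for.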
diff --git a/net/sctp/primitive.c b/net/sctp/primitive.c new file mode 100644 index 000000000000..3a7ebfcc1fdb --- /dev/null +++ b/net/sctp/primitive.c | |||
@@ -0,0 +1,219 @@ | |||
1 | /* SCTP kernel reference Implementation | ||
2 | * Copyright (c) 1999-2000 Cisco, Inc. | ||
3 | * Copyright (c) 1999-2001 Motorola, Inc. | ||
4 | * | ||
5 | * This file is part of the SCTP kernel reference Implementation | ||
6 | * | ||
7 | * These functions implement the SCTP primitive functions from Section 10. | ||
8 | * | ||
9 | * Note that the descriptions from the specification are USER level | ||
10 | * functions--this file implements the functions which populate the struct | ||
11 | * proto for SCTP, which is the BOTTOM of the sockets interface. | ||
12 | * | ||
13 | * The SCTP reference implementation is free software; | ||
14 | * you can redistribute it and/or modify it under the terms of | ||
15 | * the GNU General Public License as published by | ||
16 | * the Free Software Foundation; either version 2, or (at your option) | ||
17 | * any later version. | ||
18 | * | ||
19 | * The SCTP reference implementation is distributed in the hope that it | ||
20 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
21 | * ************************ | ||
22 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
23 | * See the GNU General Public License for more details. | ||
24 | * | ||
25 | * You should have received a copy of the GNU General Public License | ||
26 | * along with GNU CC; see the file COPYING. If not, write to | ||
27 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
28 | * Boston, MA 02111-1307, USA. | ||
29 | * | ||
30 | * Please send any bug reports or fixes you make to the | ||
31 | * email address(es): | ||
32 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
33 | * | ||
34 | * Or submit a bug report through the following website: | ||
35 | * http://www.sf.net/projects/lksctp | ||
36 | * | ||
37 | * Written or modified by: | ||
38 | * La Monte H.P. Yarroll <piggy@acm.org> | ||
39 | * Narasimha Budihal <narasimha@refcode.org> | ||
40 | * Karl Knutson <karl@athena.chicago.il.us> | ||
41 | * Ardelle Fan <ardelle.fan@intel.com> | ||
42 | * Kevin Gao <kevin.gao@intel.com> | ||
43 | * | ||
44 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
45 | * be incorporated into the next SCTP release. | ||
46 | */ | ||
47 | |||
48 | #include <linux/types.h> | ||
49 | #include <linux/list.h> /* For struct list_head */ | ||
50 | #include <linux/socket.h> | ||
51 | #include <linux/ip.h> | ||
52 | #include <linux/time.h> /* For struct timeval */ | ||
53 | #include <net/sock.h> | ||
54 | #include <net/sctp/sctp.h> | ||
55 | #include <net/sctp/sm.h> | ||
56 | |||
57 | #define DECLARE_PRIMITIVE(name) \ | ||
58 | /* This is called in the code as sctp_primitive_ ## name. */ \ | ||
59 | int sctp_primitive_ ## name(struct sctp_association *asoc, \ | ||
60 | void *arg) { \ | ||
61 | int error = 0; \ | ||
62 | sctp_event_t event_type; sctp_subtype_t subtype; \ | ||
63 | sctp_state_t state; \ | ||
64 | struct sctp_endpoint *ep; \ | ||
65 | \ | ||
66 | event_type = SCTP_EVENT_T_PRIMITIVE; \ | ||
67 | subtype = SCTP_ST_PRIMITIVE(SCTP_PRIMITIVE_ ## name); \ | ||
68 | state = asoc ? asoc->state : SCTP_STATE_CLOSED; \ | ||
69 | ep = asoc ? asoc->ep : NULL; \ | ||
70 | \ | ||
71 | error = sctp_do_sm(event_type, subtype, state, ep, asoc, \ | ||
72 | arg, GFP_KERNEL); \ | ||
73 | return error; \ | ||
74 | } | ||
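The expansion of this macro is mechanical; for instance DECLARE_PRIMITIVE(SHUTDOWN), used further down, produces (modulo whitespace) the function below. This is shown purely as an illustration of the macro, not as additional code.

int sctp_primitive_SHUTDOWN(struct sctp_association *asoc, void *arg)
{
	int error = 0;
	sctp_event_t event_type; sctp_subtype_t subtype;
	sctp_state_t state;
	struct sctp_endpoint *ep;

	/* Package the primitive as a single state machine event. */
	event_type = SCTP_EVENT_T_PRIMITIVE;
	subtype = SCTP_ST_PRIMITIVE(SCTP_PRIMITIVE_SHUTDOWN);
	state = asoc ? asoc->state : SCTP_STATE_CLOSED;
	ep = asoc ? asoc->ep : NULL;

	error = sctp_do_sm(event_type, subtype, state, ep, asoc,
			   arg, GFP_KERNEL);
	return error;
}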
75 | |||
76 | /* 10.1 ULP-to-SCTP | ||
77 | * B) Associate | ||
78 | * | ||
79 | * Format: ASSOCIATE(local SCTP instance name, destination transport addr, | ||
80 | * outbound stream count) | ||
81 | * -> association id [,destination transport addr list] [,outbound stream | ||
82 | * count] | ||
83 | * | ||
84 | * This primitive allows the upper layer to initiate an association to a | ||
85 | * specific peer endpoint. | ||
86 | * | ||
87 | * This version assumes that asoc is fully populated with the initial | ||
88 | * parameters. We then return a traditional kernel indicator of | ||
89 | * success or failure. | ||
90 | */ | ||
91 | |||
92 | /* This is called in the code as sctp_primitive_ASSOCIATE. */ | ||
93 | |||
94 | DECLARE_PRIMITIVE(ASSOCIATE) | ||
95 | |||
96 | /* 10.1 ULP-to-SCTP | ||
97 | * C) Shutdown | ||
98 | * | ||
99 | * Format: SHUTDOWN(association id) | ||
100 | * -> result | ||
101 | * | ||
102 | * Gracefully closes an association. Any locally queued user data | ||
103 | * will be delivered to the peer. The association will be terminated only | ||
104 | * after the peer acknowledges all the SCTP packets sent. A success code | ||
105 | * will be returned on successful termination of the association. If | ||
106 | * attempting to terminate the association results in a failure, an error | ||
107 | * code shall be returned. | ||
108 | */ | ||
109 | |||
110 | DECLARE_PRIMITIVE(SHUTDOWN); | ||
111 | |||
112 | /* 10.1 ULP-to-SCTP | ||
113 | * D) Abort | ||
114 | * | ||
115 | * Format: Abort(association id [, cause code]) | ||
116 | * -> result | ||
117 | * | ||
118 | * Ungracefully closes an association. Any locally queued user data | ||
119 | * will be discarded and an ABORT chunk is sent to the peer. A success | ||
120 | * code will be returned on successful abortion of the association. If | ||
121 | * attempting to abort the association results in a failure, an error | ||
122 | * code shall be returned. | ||
123 | */ | ||
124 | |||
125 | DECLARE_PRIMITIVE(ABORT); | ||
126 | |||
127 | /* 10.1 ULP-to-SCTP | ||
128 | * E) Send | ||
129 | * | ||
130 | * Format: SEND(association id, buffer address, byte count [,context] | ||
131 | * [,stream id] [,life time] [,destination transport address] | ||
132 | * [,unorder flag] [,no-bundle flag] [,payload protocol-id] ) | ||
133 | * -> result | ||
134 | * | ||
135 | * This is the main method to send user data via SCTP. | ||
136 | * | ||
137 | * Mandatory attributes: | ||
138 | * | ||
139 | * o association id - local handle to the SCTP association | ||
140 | * | ||
141 | * o buffer address - the location where the user message to be | ||
142 | * transmitted is stored; | ||
143 | * | ||
144 | * o byte count - The size of the user data in number of bytes; | ||
145 | * | ||
146 | * Optional attributes: | ||
147 | * | ||
148 | * o context - an optional 32 bit integer that will be carried in the | ||
149 | * sending failure notification to the ULP if the transportation of | ||
150 | * this User Message fails. | ||
151 | * | ||
152 | * o stream id - to indicate which stream to send the data on. If not | ||
153 | * specified, stream 0 will be used. | ||
154 | * | ||
155 | * o life time - specifies the life time of the user data. The user data | ||
156 | * will not be sent by SCTP after the life time expires. This | ||
157 | * parameter can be used to avoid efforts to transmit stale | ||
158 | * user messages. SCTP notifies the ULP if the data cannot be | ||
159 | * initiated to transport (i.e. sent to the destination via SCTP's | ||
160 | * send primitive) within the life time variable. However, the | ||
161 | * user data will be transmitted if SCTP has attempted to transmit a | ||
162 | * chunk before the life time expired. | ||
163 | * | ||
164 | * o destination transport address - specified as one of the destination | ||
165 | * transport addresses of the peer endpoint to which this packet | ||
166 | * should be sent. Whenever possible, SCTP should use this destination | ||
167 | * transport address for sending the packets, instead of the current | ||
168 | * primary path. | ||
169 | * | ||
170 | * o unorder flag - this flag, if present, indicates that the user | ||
171 | * would like the data delivered in an unordered fashion to the peer | ||
172 | * (i.e., the U flag is set to 1 on all DATA chunks carrying this | ||
173 | * message). | ||
174 | * | ||
175 | * o no-bundle flag - instructs SCTP not to bundle this user data with | ||
176 | * other outbound DATA chunks. SCTP MAY still bundle even when | ||
177 | * this flag is present, when faced with network congestion. | ||
178 | * | ||
179 | * o payload protocol-id - A 32 bit unsigned integer that is to be | ||
180 | * passed to the peer indicating the type of payload protocol data | ||
181 | * being transmitted. This value is passed as opaque data by SCTP. | ||
182 | */ | ||
183 | |||
184 | DECLARE_PRIMITIVE(SEND); | ||
185 | |||
186 | /* 10.1 ULP-to-SCTP | ||
187 | * J) Request Heartbeat | ||
188 | * | ||
189 | * Format: REQUESTHEARTBEAT(association id, destination transport address) | ||
190 | * | ||
191 | * -> result | ||
192 | * | ||
193 | * Instructs the local endpoint to perform a HeartBeat on the specified | ||
194 | * destination transport address of the given association. The returned | ||
195 | * result should indicate whether the transmission of the HEARTBEAT | ||
196 | * chunk to the destination address is successful. | ||
197 | * | ||
198 | * Mandatory attributes: | ||
199 | * | ||
200 | * o association id - local handle to the SCTP association | ||
201 | * | ||
202 | * o destination transport address - the transport address of the | ||
203 | * association on which a heartbeat should be issued. | ||
204 | */ | ||
205 | |||
206 | DECLARE_PRIMITIVE(REQUESTHEARTBEAT); | ||
207 | |||
208 | /* ADDIP | ||
209 | * 3.1.1 Address Configuration Change Chunk (ASCONF) | ||
210 | * | ||
211 | * This chunk is used to communicate to the remote endpoint one of the | ||
212 | * configuration change requests that MUST be acknowledged. The | ||
213 | * information carried in the ASCONF Chunk uses the form of a | ||
214 | * Type-Length-Value (TLV), as described in "3.2.1 Optional/ | ||
215 | * Variable-length Parameter Format" in RFC2960 [5], for all variable | ||
216 | * parameters. | ||
217 | */ | ||
218 | |||
219 | DECLARE_PRIMITIVE(ASCONF); | ||
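From user space these primitives are reached through the ordinary sockets API rather than called directly. A minimal sketch of the path that ends up in sctp_primitive_ASSOCIATE and sctp_primitive_SEND is given below; it assumes an SCTP-capable kernel, C library headers that define IPPROTO_SCTP, and a peer already listening on 127.0.0.1 port 5000 (the address and port are made up for the example).

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
	struct sockaddr_in peer;
	const char msg[] = "hello";
	int fd;

	/* One-to-one (TCP-style) SCTP socket; connect() maps to the
	 * ASSOCIATE primitive and send() to the SEND primitive.
	 */
	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&peer, 0, sizeof(peer));
	peer.sin_family = AF_INET;
	peer.sin_port = htons(5000);		/* example port */
	inet_pton(AF_INET, "127.0.0.1", &peer.sin_addr);

	if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0) {
		perror("connect");
		close(fd);
		return 1;
	}
	if (send(fd, msg, sizeof(msg), 0) < 0)
		perror("send");

	close(fd);
	return 0;
}

Closing the socket at the end, roughly speaking, drives the SHUTDOWN primitive for the association.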
diff --git a/net/sctp/proc.c b/net/sctp/proc.c new file mode 100644 index 000000000000..e42fd8c2916b --- /dev/null +++ b/net/sctp/proc.c | |||
@@ -0,0 +1,288 @@ | |||
1 | /* SCTP kernel reference Implementation | ||
2 | * Copyright (c) 2003 International Business Machines, Corp. | ||
3 | * | ||
4 | * This file is part of the SCTP kernel reference Implementation | ||
5 | * | ||
6 | * The SCTP reference implementation is free software; | ||
7 | * you can redistribute it and/or modify it under the terms of | ||
8 | * the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2, or (at your option) | ||
10 | * any later version. | ||
11 | * | ||
12 | * The SCTP reference implementation is distributed in the hope that it | ||
13 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
14 | * ************************ | ||
15 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
16 | * See the GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with GNU CC; see the file COPYING. If not, write to | ||
20 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
21 | * Boston, MA 02111-1307, USA. | ||
22 | * | ||
23 | * Please send any bug reports or fixes you make to the | ||
24 | * email address(es): | ||
25 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
26 | * | ||
27 | * Or submit a bug report through the following website: | ||
28 | * http://www.sf.net/projects/lksctp | ||
29 | * | ||
30 | * Written or modified by: | ||
31 | * Sridhar Samudrala <sri@us.ibm.com> | ||
32 | * | ||
33 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
34 | * be incorporated into the next SCTP release. | ||
35 | */ | ||
36 | |||
37 | #include <linux/types.h> | ||
38 | #include <linux/seq_file.h> | ||
39 | #include <linux/init.h> | ||
40 | #include <net/sctp/sctp.h> | ||
41 | |||
42 | static struct snmp_mib sctp_snmp_list[] = { | ||
43 | SNMP_MIB_ITEM("SctpCurrEstab", SCTP_MIB_CURRESTAB), | ||
44 | SNMP_MIB_ITEM("SctpActiveEstabs", SCTP_MIB_ACTIVEESTABS), | ||
45 | SNMP_MIB_ITEM("SctpPassiveEstabs", SCTP_MIB_PASSIVEESTABS), | ||
46 | SNMP_MIB_ITEM("SctpAborteds", SCTP_MIB_ABORTEDS), | ||
47 | SNMP_MIB_ITEM("SctpShutdowns", SCTP_MIB_SHUTDOWNS), | ||
48 | SNMP_MIB_ITEM("SctpOutOfBlues", SCTP_MIB_OUTOFBLUES), | ||
49 | SNMP_MIB_ITEM("SctpChecksumErrors", SCTP_MIB_CHECKSUMERRORS), | ||
50 | SNMP_MIB_ITEM("SctpOutCtrlChunks", SCTP_MIB_OUTCTRLCHUNKS), | ||
51 | SNMP_MIB_ITEM("SctpOutOrderChunks", SCTP_MIB_OUTORDERCHUNKS), | ||
52 | SNMP_MIB_ITEM("SctpOutUnorderChunks", SCTP_MIB_OUTUNORDERCHUNKS), | ||
53 | SNMP_MIB_ITEM("SctpInCtrlChunks", SCTP_MIB_INCTRLCHUNKS), | ||
54 | SNMP_MIB_ITEM("SctpInOrderChunks", SCTP_MIB_INORDERCHUNKS), | ||
55 | SNMP_MIB_ITEM("SctpInUnorderChunks", SCTP_MIB_INUNORDERCHUNKS), | ||
56 | SNMP_MIB_ITEM("SctpFragUsrMsgs", SCTP_MIB_FRAGUSRMSGS), | ||
57 | SNMP_MIB_ITEM("SctpReasmUsrMsgs", SCTP_MIB_REASMUSRMSGS), | ||
58 | SNMP_MIB_ITEM("SctpOutSCTPPacks", SCTP_MIB_OUTSCTPPACKS), | ||
59 | SNMP_MIB_ITEM("SctpInSCTPPacks", SCTP_MIB_INSCTPPACKS), | ||
60 | }; | ||
61 | |||
62 | /* Return the current value of a particular entry in the mib by adding its | ||
63 | * per cpu counters. | ||
64 | */ | ||
65 | static unsigned long | ||
66 | fold_field(void *mib[], int nr) | ||
67 | { | ||
68 | unsigned long res = 0; | ||
69 | int i; | ||
70 | |||
71 | for (i = 0; i < NR_CPUS; i++) { | ||
72 | if (!cpu_possible(i)) | ||
73 | continue; | ||
74 | res += | ||
75 | *((unsigned long *) (((void *) per_cpu_ptr(mib[0], i)) + | ||
76 | sizeof (unsigned long) * nr)); | ||
77 | res += | ||
78 | *((unsigned long *) (((void *) per_cpu_ptr(mib[1], i)) + | ||
79 | sizeof (unsigned long) * nr)); | ||
80 | } | ||
81 | return res; | ||
82 | } | ||
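Ignoring the per-cpu plumbing, fold_field() is just a sum of one counter slot over every CPU and over both halves of the statistics (the mib[0]/mib[1] pair). The standalone sketch below shows the same arithmetic with plain arrays and made-up numbers.

#include <stdio.h>

#define NCPUS   4
#define NFIELDS 17	/* one slot per sctp_snmp_list entry */

/* Two copies per CPU, mirroring the mib[0]/mib[1] halves above. */
static unsigned long mib[2][NCPUS][NFIELDS];

static unsigned long fold_field(int nr)
{
	unsigned long res = 0;
	int cpu;

	for (cpu = 0; cpu < NCPUS; cpu++)
		res += mib[0][cpu][nr] + mib[1][cpu][nr];
	return res;
}

int main(void)
{
	/* Pretend CPU 1 and CPU 3 each counted some packets in slot 0. */
	mib[0][1][0] = 5;
	mib[1][3][0] = 7;
	printf("total for slot 0: %lu\n", fold_field(0));	/* prints 12 */
	return 0;
}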
83 | |||
84 | /* Display sctp snmp mib statistics(/proc/net/sctp/snmp). */ | ||
85 | static int sctp_snmp_seq_show(struct seq_file *seq, void *v) | ||
86 | { | ||
87 | int i; | ||
88 | |||
89 | for (i = 0; i < ARRAY_SIZE(sctp_snmp_list); i++) | ||
90 | seq_printf(seq, "%-32s\t%ld\n", sctp_snmp_list[i].name, | ||
91 | fold_field((void **)sctp_statistics, | ||
92 | sctp_snmp_list[i].entry)); | ||
93 | |||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | /* Initialize the seq file operations for 'snmp' object. */ | ||
98 | static int sctp_snmp_seq_open(struct inode *inode, struct file *file) | ||
99 | { | ||
100 | return single_open(file, sctp_snmp_seq_show, NULL); | ||
101 | } | ||
102 | |||
103 | static struct file_operations sctp_snmp_seq_fops = { | ||
104 | .owner = THIS_MODULE, | ||
105 | .open = sctp_snmp_seq_open, | ||
106 | .read = seq_read, | ||
107 | .llseek = seq_lseek, | ||
108 | .release = single_release, | ||
109 | }; | ||
110 | |||
111 | /* Set up the proc fs entry for 'snmp' object. */ | ||
112 | int __init sctp_snmp_proc_init(void) | ||
113 | { | ||
114 | struct proc_dir_entry *p; | ||
115 | |||
116 | p = create_proc_entry("snmp", S_IRUGO, proc_net_sctp); | ||
117 | if (!p) | ||
118 | return -ENOMEM; | ||
119 | |||
120 | p->proc_fops = &sctp_snmp_seq_fops; | ||
121 | |||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | /* Cleanup the proc fs entry for 'snmp' object. */ | ||
126 | void sctp_snmp_proc_exit(void) | ||
127 | { | ||
128 | remove_proc_entry("snmp", proc_net_sctp); | ||
129 | } | ||
130 | |||
131 | /* Dump local addresses of an association/endpoint. */ | ||
132 | static void sctp_seq_dump_local_addrs(struct seq_file *seq, struct sctp_ep_common *epb) | ||
133 | { | ||
134 | struct list_head *pos; | ||
135 | struct sctp_sockaddr_entry *laddr; | ||
136 | union sctp_addr *addr; | ||
137 | struct sctp_af *af; | ||
138 | |||
139 | list_for_each(pos, &epb->bind_addr.address_list) { | ||
140 | laddr = list_entry(pos, struct sctp_sockaddr_entry, list); | ||
141 | addr = (union sctp_addr *)&laddr->a; | ||
142 | af = sctp_get_af_specific(addr->sa.sa_family); | ||
143 | af->seq_dump_addr(seq, addr); | ||
144 | } | ||
145 | } | ||
146 | |||
147 | /* Dump remote addresses of an association. */ | ||
148 | static void sctp_seq_dump_remote_addrs(struct seq_file *seq, struct sctp_association *assoc) | ||
149 | { | ||
150 | struct list_head *pos; | ||
151 | struct sctp_transport *transport; | ||
152 | union sctp_addr *addr; | ||
153 | struct sctp_af *af; | ||
154 | |||
155 | list_for_each(pos, &assoc->peer.transport_addr_list) { | ||
156 | transport = list_entry(pos, struct sctp_transport, transports); | ||
157 | addr = (union sctp_addr *)&transport->ipaddr; | ||
158 | af = sctp_get_af_specific(addr->sa.sa_family); | ||
159 | af->seq_dump_addr(seq, addr); | ||
160 | } | ||
161 | } | ||
162 | |||
163 | /* Display sctp endpoints (/proc/net/sctp/eps). */ | ||
164 | static int sctp_eps_seq_show(struct seq_file *seq, void *v) | ||
165 | { | ||
166 | struct sctp_hashbucket *head; | ||
167 | struct sctp_ep_common *epb; | ||
168 | struct sctp_endpoint *ep; | ||
169 | struct sock *sk; | ||
170 | int hash; | ||
171 | |||
172 | seq_printf(seq, " ENDPT SOCK STY SST HBKT LPORT LADDRS\n"); | ||
173 | for (hash = 0; hash < sctp_ep_hashsize; hash++) { | ||
174 | head = &sctp_ep_hashtable[hash]; | ||
175 | read_lock(&head->lock); | ||
176 | for (epb = head->chain; epb; epb = epb->next) { | ||
177 | ep = sctp_ep(epb); | ||
178 | sk = epb->sk; | ||
179 | seq_printf(seq, "%8p %8p %-3d %-3d %-4d %-5d ", ep, sk, | ||
180 | sctp_sk(sk)->type, sk->sk_state, hash, | ||
181 | epb->bind_addr.port); | ||
182 | sctp_seq_dump_local_addrs(seq, epb); | ||
183 | seq_printf(seq, "\n"); | ||
184 | } | ||
185 | read_unlock(&head->lock); | ||
186 | } | ||
187 | |||
188 | return 0; | ||
189 | } | ||
190 | |||
191 | /* Initialize the seq file operations for 'eps' object. */ | ||
192 | static int sctp_eps_seq_open(struct inode *inode, struct file *file) | ||
193 | { | ||
194 | return single_open(file, sctp_eps_seq_show, NULL); | ||
195 | } | ||
196 | |||
197 | static struct file_operations sctp_eps_seq_fops = { | ||
198 | .open = sctp_eps_seq_open, | ||
199 | .read = seq_read, | ||
200 | .llseek = seq_lseek, | ||
201 | .release = single_release, | ||
202 | }; | ||
203 | |||
204 | /* Set up the proc fs entry for 'eps' object. */ | ||
205 | int __init sctp_eps_proc_init(void) | ||
206 | { | ||
207 | struct proc_dir_entry *p; | ||
208 | |||
209 | p = create_proc_entry("eps", S_IRUGO, proc_net_sctp); | ||
210 | if (!p) | ||
211 | return -ENOMEM; | ||
212 | |||
213 | p->proc_fops = &sctp_eps_seq_fops; | ||
214 | |||
215 | return 0; | ||
216 | } | ||
217 | |||
218 | /* Cleanup the proc fs entry for 'eps' object. */ | ||
219 | void sctp_eps_proc_exit(void) | ||
220 | { | ||
221 | remove_proc_entry("eps", proc_net_sctp); | ||
222 | } | ||
223 | |||
224 | /* Display sctp associations (/proc/net/sctp/assocs). */ | ||
225 | static int sctp_assocs_seq_show(struct seq_file *seq, void *v) | ||
226 | { | ||
227 | struct sctp_hashbucket *head; | ||
228 | struct sctp_ep_common *epb; | ||
229 | struct sctp_association *assoc; | ||
230 | struct sock *sk; | ||
231 | int hash; | ||
232 | |||
233 | seq_printf(seq, " ASSOC SOCK STY SST ST HBKT LPORT RPORT " | ||
234 | "LADDRS <-> RADDRS\n"); | ||
235 | for (hash = 0; hash < sctp_assoc_hashsize; hash++) { | ||
236 | head = &sctp_assoc_hashtable[hash]; | ||
237 | read_lock(&head->lock); | ||
238 | for (epb = head->chain; epb; epb = epb->next) { | ||
239 | assoc = sctp_assoc(epb); | ||
240 | sk = epb->sk; | ||
241 | seq_printf(seq, | ||
242 | "%8p %8p %-3d %-3d %-2d %-4d %-5d %-5d ", | ||
243 | assoc, sk, sctp_sk(sk)->type, sk->sk_state, | ||
244 | assoc->state, hash, epb->bind_addr.port, | ||
245 | assoc->peer.port); | ||
246 | sctp_seq_dump_local_addrs(seq, epb); | ||
247 | seq_printf(seq, "<-> "); | ||
248 | sctp_seq_dump_remote_addrs(seq, assoc); | ||
249 | seq_printf(seq, "\n"); | ||
250 | } | ||
251 | read_unlock(&head->lock); | ||
252 | } | ||
253 | |||
254 | return 0; | ||
255 | } | ||
256 | |||
257 | /* Initialize the seq file operations for 'assocs' object. */ | ||
258 | static int sctp_assocs_seq_open(struct inode *inode, struct file *file) | ||
259 | { | ||
260 | return single_open(file, sctp_assocs_seq_show, NULL); | ||
261 | } | ||
262 | |||
263 | static struct file_operations sctp_assocs_seq_fops = { | ||
264 | .open = sctp_assocs_seq_open, | ||
265 | .read = seq_read, | ||
266 | .llseek = seq_lseek, | ||
267 | .release = single_release, | ||
268 | }; | ||
269 | |||
270 | /* Set up the proc fs entry for 'assocs' object. */ | ||
271 | int __init sctp_assocs_proc_init(void) | ||
272 | { | ||
273 | struct proc_dir_entry *p; | ||
274 | |||
275 | p = create_proc_entry("assocs", S_IRUGO, proc_net_sctp); | ||
276 | if (!p) | ||
277 | return -ENOMEM; | ||
278 | |||
279 | p->proc_fops = &sctp_assocs_seq_fops; | ||
280 | |||
281 | return 0; | ||
282 | } | ||
283 | |||
284 | /* Cleanup the proc fs entry for 'assocs' object. */ | ||
285 | void sctp_assocs_proc_exit(void) | ||
286 | { | ||
287 | remove_proc_entry("assocs", proc_net_sctp); | ||
288 | } | ||
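The entries registered here are ordinary seq_files, so they can be consumed like any text file. A small userspace reader for /proc/net/sctp/snmp (it simply assumes the module is loaded so that the entry exists) could look like:

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/net/sctp/snmp", "r");

	if (!f) {
		perror("/proc/net/sctp/snmp");
		return 1;
	}
	/* Each line is "<MIB name>\t<counter>". */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}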
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c new file mode 100644 index 000000000000..b9813cf3d91c --- /dev/null +++ b/net/sctp/protocol.c | |||
@@ -0,0 +1,1240 @@ | |||
1 | /* SCTP kernel reference Implementation | ||
2 | * (C) Copyright IBM Corp. 2001, 2004 | ||
3 | * Copyright (c) 1999-2000 Cisco, Inc. | ||
4 | * Copyright (c) 1999-2001 Motorola, Inc. | ||
5 | * Copyright (c) 2001 Intel Corp. | ||
6 | * Copyright (c) 2001 Nokia, Inc. | ||
7 | * Copyright (c) 2001 La Monte H.P. Yarroll | ||
8 | * | ||
9 | * This file is part of the SCTP kernel reference Implementation | ||
10 | * | ||
11 | * Initialization/cleanup for SCTP protocol support. | ||
12 | * | ||
13 | * The SCTP reference implementation is free software; | ||
14 | * you can redistribute it and/or modify it under the terms of | ||
15 | * the GNU General Public License as published by | ||
16 | * the Free Software Foundation; either version 2, or (at your option) | ||
17 | * any later version. | ||
18 | * | ||
19 | * The SCTP reference implementation is distributed in the hope that it | ||
20 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
21 | * ************************ | ||
22 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
23 | * See the GNU General Public License for more details. | ||
24 | * | ||
25 | * You should have received a copy of the GNU General Public License | ||
26 | * along with GNU CC; see the file COPYING. If not, write to | ||
27 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
28 | * Boston, MA 02111-1307, USA. | ||
29 | * | ||
30 | * Please send any bug reports or fixes you make to the | ||
31 | * email address(es): | ||
32 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
33 | * | ||
34 | * Or submit a bug report through the following website: | ||
35 | * http://www.sf.net/projects/lksctp | ||
36 | * | ||
37 | * Written or modified by: | ||
38 | * La Monte H.P. Yarroll <piggy@acm.org> | ||
39 | * Karl Knutson <karl@athena.chicago.il.us> | ||
40 | * Jon Grimm <jgrimm@us.ibm.com> | ||
41 | * Sridhar Samudrala <sri@us.ibm.com> | ||
42 | * Daisy Chang <daisyc@us.ibm.com> | ||
43 | * Ardelle Fan <ardelle.fan@intel.com> | ||
44 | * | ||
45 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
46 | * be incorporated into the next SCTP release. | ||
47 | */ | ||
48 | |||
49 | #include <linux/module.h> | ||
50 | #include <linux/init.h> | ||
51 | #include <linux/netdevice.h> | ||
52 | #include <linux/inetdevice.h> | ||
53 | #include <linux/seq_file.h> | ||
54 | #include <net/protocol.h> | ||
55 | #include <net/ip.h> | ||
56 | #include <net/ipv6.h> | ||
57 | #include <net/sctp/sctp.h> | ||
58 | #include <net/addrconf.h> | ||
59 | #include <net/inet_common.h> | ||
60 | #include <net/inet_ecn.h> | ||
61 | |||
62 | /* Global data structures. */ | ||
63 | struct sctp_globals sctp_globals; | ||
64 | struct proc_dir_entry *proc_net_sctp; | ||
65 | DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics); | ||
66 | |||
67 | struct idr sctp_assocs_id; | ||
68 | DEFINE_SPINLOCK(sctp_assocs_id_lock); | ||
69 | |||
70 | /* This is the global socket data structure used for responding to | ||
71 | * the Out-of-the-blue (OOTB) packets. A control sock will be created | ||
72 | * for this socket at the initialization time. | ||
73 | */ | ||
74 | static struct socket *sctp_ctl_socket; | ||
75 | |||
76 | static struct sctp_pf *sctp_pf_inet6_specific; | ||
77 | static struct sctp_pf *sctp_pf_inet_specific; | ||
78 | static struct sctp_af *sctp_af_v4_specific; | ||
79 | static struct sctp_af *sctp_af_v6_specific; | ||
80 | |||
81 | kmem_cache_t *sctp_chunk_cachep; | ||
82 | kmem_cache_t *sctp_bucket_cachep; | ||
83 | |||
84 | extern int sctp_snmp_proc_init(void); | ||
85 | extern void sctp_snmp_proc_exit(void); | ||
86 | extern int sctp_eps_proc_init(void); | ||
87 | extern void sctp_eps_proc_exit(void); | ||
88 | extern int sctp_assocs_proc_init(void); | ||
89 | extern void sctp_assocs_proc_exit(void); | ||
90 | |||
91 | /* Return the address of the control sock. */ | ||
92 | struct sock *sctp_get_ctl_sock(void) | ||
93 | { | ||
94 | return sctp_ctl_socket->sk; | ||
95 | } | ||
96 | |||
97 | /* Set up the proc fs entry for the SCTP protocol. */ | ||
98 | static __init int sctp_proc_init(void) | ||
99 | { | ||
100 | if (!proc_net_sctp) { | ||
101 | struct proc_dir_entry *ent; | ||
102 | ent = proc_mkdir("net/sctp", NULL); | ||
103 | if (ent) { | ||
104 | ent->owner = THIS_MODULE; | ||
105 | proc_net_sctp = ent; | ||
106 | } else | ||
107 | goto out_nomem; | ||
108 | } | ||
109 | |||
110 | if (sctp_snmp_proc_init()) | ||
111 | goto out_nomem; | ||
112 | if (sctp_eps_proc_init()) | ||
113 | goto out_nomem; | ||
114 | if (sctp_assocs_proc_init()) | ||
115 | goto out_nomem; | ||
116 | |||
117 | return 0; | ||
118 | |||
119 | out_nomem: | ||
120 | return -ENOMEM; | ||
121 | } | ||
122 | |||
123 | /* Clean up the proc fs entry for the SCTP protocol. | ||
124 | * Note: Do not make this __exit as it is used in the init error | ||
125 | * path. | ||
126 | */ | ||
127 | static void sctp_proc_exit(void) | ||
128 | { | ||
129 | sctp_snmp_proc_exit(); | ||
130 | sctp_eps_proc_exit(); | ||
131 | sctp_assocs_proc_exit(); | ||
132 | |||
133 | if (proc_net_sctp) { | ||
134 | proc_net_sctp = NULL; | ||
135 | remove_proc_entry("net/sctp", NULL); | ||
136 | } | ||
137 | } | ||
138 | |||
139 | /* Private helper to extract ipv4 addresses and stash them in | ||
140 | * the protocol structure. | ||
141 | */ | ||
142 | static void sctp_v4_copy_addrlist(struct list_head *addrlist, | ||
143 | struct net_device *dev) | ||
144 | { | ||
145 | struct in_device *in_dev; | ||
146 | struct in_ifaddr *ifa; | ||
147 | struct sctp_sockaddr_entry *addr; | ||
148 | |||
149 | rcu_read_lock(); | ||
150 | if ((in_dev = __in_dev_get(dev)) == NULL) { | ||
151 | rcu_read_unlock(); | ||
152 | return; | ||
153 | } | ||
154 | |||
155 | for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) { | ||
156 | /* Add the address to the local list. */ | ||
157 | addr = t_new(struct sctp_sockaddr_entry, GFP_ATOMIC); | ||
158 | if (addr) { | ||
159 | addr->a.v4.sin_family = AF_INET; | ||
160 | addr->a.v4.sin_port = 0; | ||
161 | addr->a.v4.sin_addr.s_addr = ifa->ifa_local; | ||
162 | list_add_tail(&addr->list, addrlist); | ||
163 | } | ||
164 | } | ||
165 | |||
166 | rcu_read_unlock(); | ||
167 | } | ||
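For comparison, a userspace program that wants the same information (every IPv4 address configured on the machine) would typically use getifaddrs() rather than walking in_dev->ifa_list; the sketch below is such an analogue, not the kernel mechanism.

#include <stdio.h>
#include <ifaddrs.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(void)
{
	struct ifaddrs *ifap, *ifa;
	char buf[INET_ADDRSTRLEN];

	if (getifaddrs(&ifap) < 0) {
		perror("getifaddrs");
		return 1;
	}
	/* Walk every interface and print its IPv4 addresses, much as the
	 * kernel helper walks the address list of each net_device.
	 */
	for (ifa = ifap; ifa; ifa = ifa->ifa_next) {
		if (!ifa->ifa_addr || ifa->ifa_addr->sa_family != AF_INET)
			continue;
		inet_ntop(AF_INET,
			  &((struct sockaddr_in *)ifa->ifa_addr)->sin_addr,
			  buf, sizeof(buf));
		printf("%-8s %s\n", ifa->ifa_name, buf);
	}
	freeifaddrs(ifap);
	return 0;
}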
168 | |||
169 | /* Extract our IP addresses from the system and stash them in the | ||
170 | * protocol structure. | ||
171 | */ | ||
172 | static void __sctp_get_local_addr_list(void) | ||
173 | { | ||
174 | struct net_device *dev; | ||
175 | struct list_head *pos; | ||
176 | struct sctp_af *af; | ||
177 | |||
178 | read_lock(&dev_base_lock); | ||
179 | for (dev = dev_base; dev; dev = dev->next) { | ||
180 | __list_for_each(pos, &sctp_address_families) { | ||
181 | af = list_entry(pos, struct sctp_af, list); | ||
182 | af->copy_addrlist(&sctp_local_addr_list, dev); | ||
183 | } | ||
184 | } | ||
185 | read_unlock(&dev_base_lock); | ||
186 | } | ||
187 | |||
188 | static void sctp_get_local_addr_list(void) | ||
189 | { | ||
190 | unsigned long flags; | ||
191 | |||
192 | sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags); | ||
193 | __sctp_get_local_addr_list(); | ||
194 | sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, flags); | ||
195 | } | ||
196 | |||
197 | /* Free the existing local addresses. */ | ||
198 | static void __sctp_free_local_addr_list(void) | ||
199 | { | ||
200 | struct sctp_sockaddr_entry *addr; | ||
201 | struct list_head *pos, *temp; | ||
202 | |||
203 | list_for_each_safe(pos, temp, &sctp_local_addr_list) { | ||
204 | addr = list_entry(pos, struct sctp_sockaddr_entry, list); | ||
205 | list_del(pos); | ||
206 | kfree(addr); | ||
207 | } | ||
208 | } | ||
209 | |||
210 | /* Free the existing local addresses. */ | ||
211 | static void sctp_free_local_addr_list(void) | ||
212 | { | ||
213 | unsigned long flags; | ||
214 | |||
215 | sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags); | ||
216 | __sctp_free_local_addr_list(); | ||
217 | sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, flags); | ||
218 | } | ||
219 | |||
220 | /* Copy the local addresses which are valid for 'scope' into 'bp'. */ | ||
221 | int sctp_copy_local_addr_list(struct sctp_bind_addr *bp, sctp_scope_t scope, | ||
222 | int gfp, int copy_flags) | ||
223 | { | ||
224 | struct sctp_sockaddr_entry *addr; | ||
225 | int error = 0; | ||
226 | struct list_head *pos; | ||
227 | unsigned long flags; | ||
228 | |||
229 | sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags); | ||
230 | list_for_each(pos, &sctp_local_addr_list) { | ||
231 | addr = list_entry(pos, struct sctp_sockaddr_entry, list); | ||
232 | if (sctp_in_scope(&addr->a, scope)) { | ||
233 | /* Now that the address is in scope, check to see if | ||
234 | * the address type is really supported by the local | ||
235 | * sock as well as the remote peer. | ||
236 | */ | ||
237 | if ((((AF_INET == addr->a.sa.sa_family) && | ||
238 | (copy_flags & SCTP_ADDR4_PEERSUPP))) || | ||
239 | (((AF_INET6 == addr->a.sa.sa_family) && | ||
240 | (copy_flags & SCTP_ADDR6_ALLOWED) && | ||
241 | (copy_flags & SCTP_ADDR6_PEERSUPP)))) { | ||
242 | error = sctp_add_bind_addr(bp, &addr->a, | ||
243 | GFP_ATOMIC); | ||
244 | if (error) | ||
245 | goto end_copy; | ||
246 | } | ||
247 | } | ||
248 | } | ||
249 | |||
250 | end_copy: | ||
251 | sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, flags); | ||
252 | return error; | ||
253 | } | ||
254 | |||
255 | /* Initialize a sctp_addr from an incoming skb. */ | ||
256 | static void sctp_v4_from_skb(union sctp_addr *addr, struct sk_buff *skb, | ||
257 | int is_saddr) | ||
258 | { | ||
259 | void *from; | ||
260 | __u16 *port; | ||
261 | struct sctphdr *sh; | ||
262 | |||
263 | port = &addr->v4.sin_port; | ||
264 | addr->v4.sin_family = AF_INET; | ||
265 | |||
266 | sh = (struct sctphdr *) skb->h.raw; | ||
267 | if (is_saddr) { | ||
268 | *port = ntohs(sh->source); | ||
269 | from = &skb->nh.iph->saddr; | ||
270 | } else { | ||
271 | *port = ntohs(sh->dest); | ||
272 | from = &skb->nh.iph->daddr; | ||
273 | } | ||
274 | memcpy(&addr->v4.sin_addr.s_addr, from, sizeof(struct in_addr)); | ||
275 | } | ||
276 | |||
277 | /* Initialize an sctp_addr from a socket. */ | ||
278 | static void sctp_v4_from_sk(union sctp_addr *addr, struct sock *sk) | ||
279 | { | ||
280 | addr->v4.sin_family = AF_INET; | ||
281 | addr->v4.sin_port = inet_sk(sk)->num; | ||
282 | addr->v4.sin_addr.s_addr = inet_sk(sk)->rcv_saddr; | ||
283 | } | ||
284 | |||
285 | /* Initialize sk->sk_rcv_saddr from sctp_addr. */ | ||
286 | static void sctp_v4_to_sk_saddr(union sctp_addr *addr, struct sock *sk) | ||
287 | { | ||
288 | inet_sk(sk)->rcv_saddr = addr->v4.sin_addr.s_addr; | ||
289 | } | ||
290 | |||
291 | /* Initialize sk->sk_daddr from sctp_addr. */ | ||
292 | static void sctp_v4_to_sk_daddr(union sctp_addr *addr, struct sock *sk) | ||
293 | { | ||
294 | inet_sk(sk)->daddr = addr->v4.sin_addr.s_addr; | ||
295 | } | ||
296 | |||
297 | /* Initialize a sctp_addr from an address parameter. */ | ||
298 | static void sctp_v4_from_addr_param(union sctp_addr *addr, | ||
299 | union sctp_addr_param *param, | ||
300 | __u16 port, int iif) | ||
301 | { | ||
302 | addr->v4.sin_family = AF_INET; | ||
303 | addr->v4.sin_port = port; | ||
304 | addr->v4.sin_addr.s_addr = param->v4.addr.s_addr; | ||
305 | } | ||
306 | |||
307 | /* Initialize an address parameter from a sctp_addr and return the length | ||
308 | * of the address parameter. | ||
309 | */ | ||
310 | static int sctp_v4_to_addr_param(const union sctp_addr *addr, | ||
311 | union sctp_addr_param *param) | ||
312 | { | ||
313 | int length = sizeof(sctp_ipv4addr_param_t); | ||
314 | |||
315 | param->v4.param_hdr.type = SCTP_PARAM_IPV4_ADDRESS; | ||
316 | param->v4.param_hdr.length = ntohs(length); | ||
317 | param->v4.addr.s_addr = addr->v4.sin_addr.s_addr; | ||
318 | |||
319 | return length; | ||
320 | } | ||
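The structure being filled in is the fixed 8-byte IPv4 Address Parameter TLV of RFC 2960 (type 5, length 8, then the address). A standalone sketch of the same wire encoding follows; struct ipv4_addr_param is a hypothetical stand-in for the kernel's sctp_ipv4addr_param_t, used only so the example is self-contained.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Hypothetical on-the-wire layout of the IPv4 Address Parameter:
 * Type = 5, Length = 8, then the 4-byte address.
 */
struct ipv4_addr_param {
	uint16_t type;
	uint16_t length;
	uint32_t addr;
} __attribute__((packed));

int main(void)
{
	struct ipv4_addr_param p;
	struct in_addr a;
	unsigned char buf[8];
	size_t i;

	inet_pton(AF_INET, "192.0.2.1", &a);	/* example address */

	p.type = htons(5);			/* IPv4 Address Parameter */
	p.length = htons(sizeof(p));		/* always 8 bytes */
	p.addr = a.s_addr;			/* already network order */

	memcpy(buf, &p, sizeof(p));
	for (i = 0; i < sizeof(buf); i++)
		printf("%02x ", buf[i]);
	printf("\n");				/* 00 05 00 08 c0 00 02 01 */
	return 0;
}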
321 | |||
322 | /* Initialize a sctp_addr from a dst_entry. */ | ||
323 | static void sctp_v4_dst_saddr(union sctp_addr *saddr, struct dst_entry *dst, | ||
324 | unsigned short port) | ||
325 | { | ||
326 | struct rtable *rt = (struct rtable *)dst; | ||
327 | saddr->v4.sin_family = AF_INET; | ||
328 | saddr->v4.sin_port = port; | ||
329 | saddr->v4.sin_addr.s_addr = rt->rt_src; | ||
330 | } | ||
331 | |||
332 | /* Compare two addresses exactly. */ | ||
333 | static int sctp_v4_cmp_addr(const union sctp_addr *addr1, | ||
334 | const union sctp_addr *addr2) | ||
335 | { | ||
336 | if (addr1->sa.sa_family != addr2->sa.sa_family) | ||
337 | return 0; | ||
338 | if (addr1->v4.sin_port != addr2->v4.sin_port) | ||
339 | return 0; | ||
340 | if (addr1->v4.sin_addr.s_addr != addr2->v4.sin_addr.s_addr) | ||
341 | return 0; | ||
342 | |||
343 | return 1; | ||
344 | } | ||
345 | |||
346 | /* Initialize addr struct to INADDR_ANY. */ | ||
347 | static void sctp_v4_inaddr_any(union sctp_addr *addr, unsigned short port) | ||
348 | { | ||
349 | addr->v4.sin_family = AF_INET; | ||
350 | addr->v4.sin_addr.s_addr = INADDR_ANY; | ||
351 | addr->v4.sin_port = port; | ||
352 | } | ||
353 | |||
354 | /* Is this a wildcard address? */ | ||
355 | static int sctp_v4_is_any(const union sctp_addr *addr) | ||
356 | { | ||
357 | return INADDR_ANY == addr->v4.sin_addr.s_addr; | ||
358 | } | ||
359 | |||
360 | /* This function checks if the address is a valid address to be used for | ||
361 | * SCTP binding. | ||
362 | * | ||
363 | * Output: | ||
364 | * Return 0 - If the address is a non-unicast or an illegal address. | ||
365 | * Return 1 - If the address is a unicast. | ||
366 | */ | ||
367 | static int sctp_v4_addr_valid(union sctp_addr *addr, struct sctp_sock *sp) | ||
368 | { | ||
369 | /* Is this a non-unicast address or an unusable SCTP address? */ | ||
370 | if (IS_IPV4_UNUSABLE_ADDRESS(&addr->v4.sin_addr.s_addr)) | ||
371 | return 0; | ||
372 | |||
373 | return 1; | ||
374 | } | ||
375 | |||
376 | /* Should this be available for binding? */ | ||
377 | static int sctp_v4_available(union sctp_addr *addr, struct sctp_sock *sp) | ||
378 | { | ||
379 | int ret = inet_addr_type(addr->v4.sin_addr.s_addr); | ||
380 | |||
381 | /* FIXME: ip_nonlocal_bind sysctl support. */ | ||
382 | |||
383 | if (addr->v4.sin_addr.s_addr != INADDR_ANY && ret != RTN_LOCAL) | ||
384 | return 0; | ||
385 | return 1; | ||
386 | } | ||
387 | |||
388 | /* Checking the loopback, private and other address scopes as defined in | ||
389 | * RFC 1918. The IPv4 scoping is based on the draft for SCTP IPv4 | ||
390 | * scoping <draft-stewart-tsvwg-sctp-ipv4-00.txt>. | ||
391 | * | ||
392 | * Level 0 - unusable SCTP addresses | ||
393 | * Level 1 - loopback address | ||
394 | * Level 2 - link-local addresses | ||
395 | * Level 3 - private addresses. | ||
396 | * Level 4 - global addresses | ||
397 | * For INIT and INIT-ACK address list, let L be the level of | ||
398 | * the requested destination address, sender and receiver | ||
399 | * SHOULD include all of its addresses with level greater | ||
400 | * than or equal to L. | ||
401 | */ | ||
402 | static sctp_scope_t sctp_v4_scope(union sctp_addr *addr) | ||
403 | { | ||
404 | sctp_scope_t retval; | ||
405 | |||
406 | /* Should IPv4 scoping be a sysctl configurable option | ||
407 | * so users can turn it off (default on) for certain | ||
408 | * unconventional networking environments? | ||
409 | */ | ||
410 | |||
411 | /* Check for unusable SCTP addresses. */ | ||
412 | if (IS_IPV4_UNUSABLE_ADDRESS(&addr->v4.sin_addr.s_addr)) { | ||
413 | retval = SCTP_SCOPE_UNUSABLE; | ||
414 | } else if (LOOPBACK(addr->v4.sin_addr.s_addr)) { | ||
415 | retval = SCTP_SCOPE_LOOPBACK; | ||
416 | } else if (IS_IPV4_LINK_ADDRESS(&addr->v4.sin_addr.s_addr)) { | ||
417 | retval = SCTP_SCOPE_LINK; | ||
418 | } else if (IS_IPV4_PRIVATE_ADDRESS(&addr->v4.sin_addr.s_addr)) { | ||
419 | retval = SCTP_SCOPE_PRIVATE; | ||
420 | } else { | ||
421 | retval = SCTP_SCOPE_GLOBAL; | ||
422 | } | ||
423 | |||
424 | return retval; | ||
425 | } | ||
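The five levels can be demonstrated with plain dotted-quad classification. The sketch below is only an approximation of the kernel macros: "unusable" is reduced to the all-zeros and broadcast addresses, and the link-local and private ranges are the usual 169.254.0.0/16 and RFC 1918 blocks.

#include <stdio.h>
#include <arpa/inet.h>

static const char *scope_of(const char *dotted)
{
	struct in_addr a;
	unsigned long ip;

	if (inet_pton(AF_INET, dotted, &a) != 1)
		return "invalid";
	ip = ntohl(a.s_addr);

	if (ip == 0 || ip == 0xffffffffUL)
		return "unusable";		/* level 0 (simplified) */
	if ((ip >> 24) == 127)
		return "loopback";		/* level 1 */
	if ((ip >> 16) == 0xa9fe)		/* 169.254.0.0/16 */
		return "link-local";		/* level 2 */
	if ((ip >> 24) == 10 ||
	    (ip >> 20) == 0xac1 ||		/* 172.16.0.0/12 */
	    (ip >> 16) == 0xc0a8)		/* 192.168.0.0/16 */
		return "private";		/* level 3 */
	return "global";			/* level 4 */
}

int main(void)
{
	const char *samples[] = { "127.0.0.1", "169.254.1.1",
				  "10.1.2.3", "172.20.0.5", "8.8.8.8" };
	int i;

	for (i = 0; i < 5; i++)
		printf("%-12s -> %s\n", samples[i], scope_of(samples[i]));
	return 0;
}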
426 | |||
427 | /* Returns a valid dst cache entry for the given source and destination ip | ||
428 | * addresses. If an association is passed, tries to get a dst entry with a | ||
429 | * source address that matches an address in the bind address list. | ||
430 | */ | ||
431 | static struct dst_entry *sctp_v4_get_dst(struct sctp_association *asoc, | ||
432 | union sctp_addr *daddr, | ||
433 | union sctp_addr *saddr) | ||
434 | { | ||
435 | struct rtable *rt; | ||
436 | struct flowi fl; | ||
437 | struct sctp_bind_addr *bp; | ||
438 | rwlock_t *addr_lock; | ||
439 | struct sctp_sockaddr_entry *laddr; | ||
440 | struct list_head *pos; | ||
441 | struct dst_entry *dst = NULL; | ||
442 | union sctp_addr dst_saddr; | ||
443 | |||
444 | memset(&fl, 0x0, sizeof(struct flowi)); | ||
445 | fl.fl4_dst = daddr->v4.sin_addr.s_addr; | ||
446 | fl.proto = IPPROTO_SCTP; | ||
447 | if (asoc) { | ||
448 | fl.fl4_tos = RT_CONN_FLAGS(asoc->base.sk); | ||
449 | fl.oif = asoc->base.sk->sk_bound_dev_if; | ||
450 | } | ||
451 | if (saddr) | ||
452 | fl.fl4_src = saddr->v4.sin_addr.s_addr; | ||
453 | |||
454 | SCTP_DEBUG_PRINTK("%s: DST:%u.%u.%u.%u, SRC:%u.%u.%u.%u - ", | ||
455 | __FUNCTION__, NIPQUAD(fl.fl4_dst), | ||
456 | NIPQUAD(fl.fl4_src)); | ||
457 | |||
458 | if (!ip_route_output_key(&rt, &fl)) { | ||
459 | dst = &rt->u.dst; | ||
460 | } | ||
461 | |||
462 | /* If there is no association or if a source address is passed, no | ||
463 | * more validation is required. | ||
464 | */ | ||
465 | if (!asoc || saddr) | ||
466 | goto out; | ||
467 | |||
468 | bp = &asoc->base.bind_addr; | ||
469 | addr_lock = &asoc->base.addr_lock; | ||
470 | |||
471 | if (dst) { | ||
472 | /* Walk through the bind address list and look for a bind | ||
473 | * address that matches the source address of the returned dst. | ||
474 | */ | ||
475 | sctp_read_lock(addr_lock); | ||
476 | list_for_each(pos, &bp->address_list) { | ||
477 | laddr = list_entry(pos, struct sctp_sockaddr_entry, | ||
478 | list); | ||
479 | sctp_v4_dst_saddr(&dst_saddr, dst, bp->port); | ||
480 | if (sctp_v4_cmp_addr(&dst_saddr, &laddr->a)) | ||
481 | goto out_unlock; | ||
482 | } | ||
483 | sctp_read_unlock(addr_lock); | ||
484 | |||
485 | /* None of the bound addresses match the source address of the | ||
486 | * dst. So release it. | ||
487 | */ | ||
488 | dst_release(dst); | ||
489 | dst = NULL; | ||
490 | } | ||
491 | |||
492 | /* Walk through the bind address list and try to get a dst that | ||
493 | * matches a bind address as the source address. | ||
494 | */ | ||
495 | sctp_read_lock(addr_lock); | ||
496 | list_for_each(pos, &bp->address_list) { | ||
497 | laddr = list_entry(pos, struct sctp_sockaddr_entry, list); | ||
498 | |||
499 | if (AF_INET == laddr->a.sa.sa_family) { | ||
500 | fl.fl4_src = laddr->a.v4.sin_addr.s_addr; | ||
501 | if (!ip_route_output_key(&rt, &fl)) { | ||
502 | dst = &rt->u.dst; | ||
503 | goto out_unlock; | ||
504 | } | ||
505 | } | ||
506 | } | ||
507 | |||
508 | out_unlock: | ||
509 | sctp_read_unlock(addr_lock); | ||
510 | out: | ||
511 | if (dst) | ||
512 | SCTP_DEBUG_PRINTK("rt_dst:%u.%u.%u.%u, rt_src:%u.%u.%u.%u\n", | ||
513 | NIPQUAD(rt->rt_dst), NIPQUAD(rt->rt_src)); | ||
514 | else | ||
515 | SCTP_DEBUG_PRINTK("NO ROUTE\n"); | ||
516 | |||
517 | return dst; | ||
518 | } | ||
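The source selection order used here (keep the route's preferred source only if it is also a bound address, otherwise re-route with each bound address as an explicit source) can be reduced to a few lines of list logic. The sketch below is purely illustrative: no routing is performed at all, and the fallback simply takes the first bound address where the real code would take the first one that still yields a route.

#include <stdio.h>
#include <string.h>

#define NBIND 2

/* Prefer the source the default route suggests if it is also bound,
 * otherwise fall back to a bound address (routing simulated away).
 */
static const char *pick_source(const char *route_src,
			       const char *bind[], int nbind)
{
	int i;

	for (i = 0; i < nbind; i++)
		if (strcmp(route_src, bind[i]) == 0)
			return route_src;	/* route source is bound: keep it */

	return nbind ? bind[0] : NULL;		/* else try bound addresses in turn */
}

int main(void)
{
	const char *bind[NBIND] = { "10.0.0.2", "192.168.1.2" };

	printf("%s\n", pick_source("10.0.0.2", bind, NBIND));	/* bound: kept */
	printf("%s\n", pick_source("172.16.0.9", bind, NBIND));	/* not bound: fallback */
	return 0;
}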
519 | |||
520 | /* For v4, the source address is cached in the route entry (dst), so there | ||
521 | * is no need to cache it separately; simply copy it out of the rtable. | ||
522 | */ | ||
523 | static void sctp_v4_get_saddr(struct sctp_association *asoc, | ||
524 | struct dst_entry *dst, | ||
525 | union sctp_addr *daddr, | ||
526 | union sctp_addr *saddr) | ||
527 | { | ||
528 | struct rtable *rt = (struct rtable *)dst; | ||
529 | |||
530 | if (rt) { | ||
531 | saddr->v4.sin_family = AF_INET; | ||
532 | saddr->v4.sin_port = asoc->base.bind_addr.port; | ||
533 | saddr->v4.sin_addr.s_addr = rt->rt_src; | ||
534 | } | ||
535 | } | ||
536 | |||
537 | /* What interface did this skb arrive on? */ | ||
538 | static int sctp_v4_skb_iif(const struct sk_buff *skb) | ||
539 | { | ||
540 | return ((struct rtable *)skb->dst)->rt_iif; | ||
541 | } | ||
542 | |||
543 | /* Was this packet marked by Explicit Congestion Notification? */ | ||
544 | static int sctp_v4_is_ce(const struct sk_buff *skb) | ||
545 | { | ||
546 | return INET_ECN_is_ce(skb->nh.iph->tos); | ||
547 | } | ||
548 | |||
549 | /* Create and initialize a new sk for the socket returned by accept(). */ | ||
550 | static struct sock *sctp_v4_create_accept_sk(struct sock *sk, | ||
551 | struct sctp_association *asoc) | ||
552 | { | ||
553 | struct inet_sock *inet = inet_sk(sk); | ||
554 | struct inet_sock *newinet; | ||
555 | struct sock *newsk = sk_alloc(PF_INET, GFP_KERNEL, sk->sk_prot, 1); | ||
556 | |||
557 | if (!newsk) | ||
558 | goto out; | ||
559 | |||
560 | sock_init_data(NULL, newsk); | ||
561 | |||
562 | newsk->sk_type = SOCK_STREAM; | ||
563 | |||
564 | newsk->sk_no_check = sk->sk_no_check; | ||
565 | newsk->sk_reuse = sk->sk_reuse; | ||
566 | newsk->sk_shutdown = sk->sk_shutdown; | ||
567 | |||
568 | newsk->sk_destruct = inet_sock_destruct; | ||
569 | newsk->sk_family = PF_INET; | ||
570 | newsk->sk_protocol = IPPROTO_SCTP; | ||
571 | newsk->sk_backlog_rcv = sk->sk_prot->backlog_rcv; | ||
572 | sock_reset_flag(newsk, SOCK_ZAPPED); | ||
573 | |||
574 | newinet = inet_sk(newsk); | ||
575 | |||
576 | /* Initialize sk's sport, dport, rcv_saddr and daddr for | ||
577 | * getsockname() and getpeername() | ||
578 | */ | ||
579 | newinet->sport = inet->sport; | ||
580 | newinet->saddr = inet->saddr; | ||
581 | newinet->rcv_saddr = inet->rcv_saddr; | ||
582 | newinet->dport = htons(asoc->peer.port); | ||
583 | newinet->daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr; | ||
584 | newinet->pmtudisc = inet->pmtudisc; | ||
585 | newinet->id = 0; | ||
586 | |||
587 | newinet->uc_ttl = -1; | ||
588 | newinet->mc_loop = 1; | ||
589 | newinet->mc_ttl = 1; | ||
590 | newinet->mc_index = 0; | ||
591 | newinet->mc_list = NULL; | ||
592 | |||
593 | #ifdef INET_REFCNT_DEBUG | ||
594 | atomic_inc(&inet_sock_nr); | ||
595 | #endif | ||
596 | |||
597 | if (newsk->sk_prot->init(newsk)) { | ||
598 | sk_common_release(newsk); | ||
599 | newsk = NULL; | ||
600 | } | ||
601 | |||
602 | out: | ||
603 | return newsk; | ||
604 | } | ||
605 | |||
606 | /* Map address, empty for v4 family */ | ||
607 | static void sctp_v4_addr_v4map(struct sctp_sock *sp, union sctp_addr *addr) | ||
608 | { | ||
609 | /* Empty */ | ||
610 | } | ||
611 | |||
612 | /* Dump the v4 addr to the seq file. */ | ||
613 | static void sctp_v4_seq_dump_addr(struct seq_file *seq, union sctp_addr *addr) | ||
614 | { | ||
615 | seq_printf(seq, "%d.%d.%d.%d ", NIPQUAD(addr->v4.sin_addr)); | ||
616 | } | ||
617 | |||
618 | /* Event handler for inet address addition/deletion events. | ||
619 | * Basically, whenever there is an event, we re-build our local address list. | ||
620 | */ | ||
621 | int sctp_inetaddr_event(struct notifier_block *this, unsigned long ev, | ||
622 | void *ptr) | ||
623 | { | ||
624 | unsigned long flags; | ||
625 | |||
626 | sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags); | ||
627 | __sctp_free_local_addr_list(); | ||
628 | __sctp_get_local_addr_list(); | ||
629 | sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, flags); | ||
630 | |||
631 | return NOTIFY_DONE; | ||
632 | } | ||
633 | |||
634 | /* | ||
635 | * Initialize the control inode/socket with a control endpoint data | ||
636 | * structure. This endpoint is reserved exclusively for the OOTB processing. | ||
637 | */ | ||
638 | static int sctp_ctl_sock_init(void) | ||
639 | { | ||
640 | int err; | ||
641 | sa_family_t family; | ||
642 | |||
643 | if (sctp_get_pf_specific(PF_INET6)) | ||
644 | family = PF_INET6; | ||
645 | else | ||
646 | family = PF_INET; | ||
647 | |||
648 | err = sock_create_kern(family, SOCK_SEQPACKET, IPPROTO_SCTP, | ||
649 | &sctp_ctl_socket); | ||
650 | if (err < 0) { | ||
651 | printk(KERN_ERR | ||
652 | "SCTP: Failed to create the SCTP control socket.\n"); | ||
653 | return err; | ||
654 | } | ||
655 | sctp_ctl_socket->sk->sk_allocation = GFP_ATOMIC; | ||
656 | inet_sk(sctp_ctl_socket->sk)->uc_ttl = -1; | ||
657 | |||
658 | return 0; | ||
659 | } | ||
660 | |||
661 | /* Register address family specific functions. */ | ||
662 | int sctp_register_af(struct sctp_af *af) | ||
663 | { | ||
664 | switch (af->sa_family) { | ||
665 | case AF_INET: | ||
666 | if (sctp_af_v4_specific) | ||
667 | return 0; | ||
668 | sctp_af_v4_specific = af; | ||
669 | break; | ||
670 | case AF_INET6: | ||
671 | if (sctp_af_v6_specific) | ||
672 | return 0; | ||
673 | sctp_af_v6_specific = af; | ||
674 | break; | ||
675 | default: | ||
676 | return 0; | ||
677 | } | ||
678 | |||
679 | INIT_LIST_HEAD(&af->list); | ||
680 | list_add_tail(&af->list, &sctp_address_families); | ||
681 | return 1; | ||
682 | } | ||
683 | |||
684 | /* Get the table of functions for manipulating a particular address | ||
685 | * family. | ||
686 | */ | ||
687 | struct sctp_af *sctp_get_af_specific(sa_family_t family) | ||
688 | { | ||
689 | switch (family) { | ||
690 | case AF_INET: | ||
691 | return sctp_af_v4_specific; | ||
692 | case AF_INET6: | ||
693 | return sctp_af_v6_specific; | ||
694 | default: | ||
695 | return NULL; | ||
696 | } | ||
697 | } | ||
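Taken together, sctp_register_af() and sctp_get_af_specific() are a small "ops table keyed by address family" pattern. Stripped of the SCTP specifics it reduces to the standalone sketch below; the names (my_af_ops, register_af, get_af) are invented for the illustration.

#include <stdio.h>
#include <stddef.h>
#include <sys/socket.h>	/* AF_INET, AF_INET6, sa_family_t */

struct my_af_ops {
	sa_family_t sa_family;
	int (*addr_len)(void);
};

static struct my_af_ops *v4_ops;
static struct my_af_ops *v6_ops;

/* Register at most one ops table per family; 1 on success, 0 otherwise. */
static int register_af(struct my_af_ops *ops)
{
	switch (ops->sa_family) {
	case AF_INET:
		if (v4_ops)
			return 0;
		v4_ops = ops;
		return 1;
	case AF_INET6:
		if (v6_ops)
			return 0;
		v6_ops = ops;
		return 1;
	default:
		return 0;
	}
}

static struct my_af_ops *get_af(sa_family_t family)
{
	switch (family) {
	case AF_INET:  return v4_ops;
	case AF_INET6: return v6_ops;
	default:       return NULL;
	}
}

static int v4_len(void) { return 4; }

int main(void)
{
	static struct my_af_ops v4 = { AF_INET, v4_len };
	struct my_af_ops *ops;

	register_af(&v4);
	ops = get_af(AF_INET);
	printf("AF_INET registered: %s, addr_len=%d\n",
	       ops ? "yes" : "no", ops ? ops->addr_len() : -1);
	return 0;
}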
698 | |||
699 | /* Common code to initialize an AF_INET msg_name. */ | ||
700 | static void sctp_inet_msgname(char *msgname, int *addr_len) | ||
701 | { | ||
702 | struct sockaddr_in *sin; | ||
703 | |||
704 | sin = (struct sockaddr_in *)msgname; | ||
705 | *addr_len = sizeof(struct sockaddr_in); | ||
706 | sin->sin_family = AF_INET; | ||
707 | memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); | ||
708 | } | ||
709 | |||
710 | /* Copy the peer's primary address as the msg_name. */ | ||
711 | static void sctp_inet_event_msgname(struct sctp_ulpevent *event, char *msgname, | ||
712 | int *addr_len) | ||
713 | { | ||
714 | struct sockaddr_in *sin, *sinfrom; | ||
715 | |||
716 | if (msgname) { | ||
717 | struct sctp_association *asoc; | ||
718 | |||
719 | asoc = event->asoc; | ||
720 | sctp_inet_msgname(msgname, addr_len); | ||
721 | sin = (struct sockaddr_in *)msgname; | ||
722 | sinfrom = &asoc->peer.primary_addr.v4; | ||
723 | sin->sin_port = htons(asoc->peer.port); | ||
724 | sin->sin_addr.s_addr = sinfrom->sin_addr.s_addr; | ||
725 | } | ||
726 | } | ||
727 | |||
728 | /* Initialize and copy out a msgname from an inbound skb. */ | ||
729 | static void sctp_inet_skb_msgname(struct sk_buff *skb, char *msgname, int *len) | ||
730 | { | ||
731 | struct sctphdr *sh; | ||
732 | struct sockaddr_in *sin; | ||
733 | |||
734 | if (msgname) { | ||
735 | sctp_inet_msgname(msgname, len); | ||
736 | sin = (struct sockaddr_in *)msgname; | ||
737 | sh = (struct sctphdr *)skb->h.raw; | ||
738 | sin->sin_port = sh->source; | ||
739 | sin->sin_addr.s_addr = skb->nh.iph->saddr; | ||
740 | } | ||
741 | } | ||
742 | |||
743 | /* Do we support this AF? */ | ||
744 | static int sctp_inet_af_supported(sa_family_t family, struct sctp_sock *sp) | ||
745 | { | ||
746 | /* PF_INET only supports AF_INET addresses. */ | ||
747 | return (AF_INET == family); | ||
748 | } | ||
749 | |||
750 | /* Address matching with wildcards allowed. */ | ||
751 | static int sctp_inet_cmp_addr(const union sctp_addr *addr1, | ||
752 | const union sctp_addr *addr2, | ||
753 | struct sctp_sock *opt) | ||
754 | { | ||
755 | /* PF_INET only supports AF_INET addresses. */ | ||
756 | if (addr1->sa.sa_family != addr2->sa.sa_family) | ||
757 | return 0; | ||
758 | if (INADDR_ANY == addr1->v4.sin_addr.s_addr || | ||
759 | INADDR_ANY == addr2->v4.sin_addr.s_addr) | ||
760 | return 1; | ||
761 | if (addr1->v4.sin_addr.s_addr == addr2->v4.sin_addr.s_addr) | ||
762 | return 1; | ||
763 | |||
764 | return 0; | ||
765 | } | ||
766 | |||
767 | /* Verify that provided sockaddr looks bindable. Common verification has | ||
768 | * already been taken care of. | ||
769 | */ | ||
770 | static int sctp_inet_bind_verify(struct sctp_sock *opt, union sctp_addr *addr) | ||
771 | { | ||
772 | return sctp_v4_available(addr, opt); | ||
773 | } | ||
774 | |||
775 | /* Verify that sockaddr looks sendable. Common verification has already | ||
776 | * been taken care of. | ||
777 | */ | ||
778 | static int sctp_inet_send_verify(struct sctp_sock *opt, union sctp_addr *addr) | ||
779 | { | ||
780 | return 1; | ||
781 | } | ||
782 | |||
783 | /* Fill in Supported Address Type information for INIT and INIT-ACK | ||
784 | * chunks. Returns number of addresses supported. | ||
785 | */ | ||
786 | static int sctp_inet_supported_addrs(const struct sctp_sock *opt, | ||
787 | __u16 *types) | ||
788 | { | ||
789 | types[0] = SCTP_PARAM_IPV4_ADDRESS; | ||
790 | return 1; | ||
791 | } | ||
792 | |||
793 | /* Wrapper routine that calls the ip transmit routine. */ | ||
794 | static inline int sctp_v4_xmit(struct sk_buff *skb, | ||
795 | struct sctp_transport *transport, int ipfragok) | ||
796 | { | ||
797 | SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, " | ||
798 | "src:%u.%u.%u.%u, dst:%u.%u.%u.%u\n", | ||
799 | __FUNCTION__, skb, skb->len, | ||
800 | NIPQUAD(((struct rtable *)skb->dst)->rt_src), | ||
801 | NIPQUAD(((struct rtable *)skb->dst)->rt_dst)); | ||
802 | |||
803 | SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS); | ||
804 | return ip_queue_xmit(skb, ipfragok); | ||
805 | } | ||
806 | |||
807 | static struct sctp_af sctp_ipv4_specific; | ||
808 | |||
809 | static struct sctp_pf sctp_pf_inet = { | ||
810 | .event_msgname = sctp_inet_event_msgname, | ||
811 | .skb_msgname = sctp_inet_skb_msgname, | ||
812 | .af_supported = sctp_inet_af_supported, | ||
813 | .cmp_addr = sctp_inet_cmp_addr, | ||
814 | .bind_verify = sctp_inet_bind_verify, | ||
815 | .send_verify = sctp_inet_send_verify, | ||
816 | .supported_addrs = sctp_inet_supported_addrs, | ||
817 | .create_accept_sk = sctp_v4_create_accept_sk, | ||
818 | .addr_v4map = sctp_v4_addr_v4map, | ||
819 | .af = &sctp_ipv4_specific, | ||
820 | }; | ||
821 | |||
822 | /* Notifier for inetaddr addition/deletion events. */ | ||
823 | static struct notifier_block sctp_inetaddr_notifier = { | ||
824 | .notifier_call = sctp_inetaddr_event, | ||
825 | }; | ||
826 | |||
827 | /* Socket operations. */ | ||
828 | static struct proto_ops inet_seqpacket_ops = { | ||
829 | .family = PF_INET, | ||
830 | .owner = THIS_MODULE, | ||
831 | .release = inet_release, /* Needs to be wrapped... */ | ||
832 | .bind = inet_bind, | ||
833 | .connect = inet_dgram_connect, | ||
834 | .socketpair = sock_no_socketpair, | ||
835 | .accept = inet_accept, | ||
836 | .getname = inet_getname, /* Semantics are different. */ | ||
837 | .poll = sctp_poll, | ||
838 | .ioctl = inet_ioctl, | ||
839 | .listen = sctp_inet_listen, | ||
840 | .shutdown = inet_shutdown, /* Looks harmless. */ | ||
841 | .setsockopt = sock_common_setsockopt, /* SOL_IP IP_OPTIONS is a problem. */ | ||
842 | .getsockopt = sock_common_getsockopt, | ||
843 | .sendmsg = inet_sendmsg, | ||
844 | .recvmsg = sock_common_recvmsg, | ||
845 | .mmap = sock_no_mmap, | ||
846 | .sendpage = sock_no_sendpage, | ||
847 | }; | ||
848 | |||
849 | /* Registration with AF_INET family. */ | ||
850 | static struct inet_protosw sctp_seqpacket_protosw = { | ||
851 | .type = SOCK_SEQPACKET, | ||
852 | .protocol = IPPROTO_SCTP, | ||
853 | .prot = &sctp_prot, | ||
854 | .ops = &inet_seqpacket_ops, | ||
855 | .capability = -1, | ||
856 | .no_check = 0, | ||
857 | .flags = SCTP_PROTOSW_FLAG | ||
858 | }; | ||
859 | static struct inet_protosw sctp_stream_protosw = { | ||
860 | .type = SOCK_STREAM, | ||
861 | .protocol = IPPROTO_SCTP, | ||
862 | .prot = &sctp_prot, | ||
863 | .ops = &inet_seqpacket_ops, | ||
864 | .capability = -1, | ||
865 | .no_check = 0, | ||
866 | .flags = SCTP_PROTOSW_FLAG | ||
867 | }; | ||
868 | |||
869 | /* Register with IP layer. */ | ||
870 | static struct net_protocol sctp_protocol = { | ||
871 | .handler = sctp_rcv, | ||
872 | .err_handler = sctp_v4_err, | ||
873 | .no_policy = 1, | ||
874 | }; | ||
875 | |||
876 | /* IPv4 address related functions. */ | ||
877 | static struct sctp_af sctp_ipv4_specific = { | ||
878 | .sctp_xmit = sctp_v4_xmit, | ||
879 | .setsockopt = ip_setsockopt, | ||
880 | .getsockopt = ip_getsockopt, | ||
881 | .get_dst = sctp_v4_get_dst, | ||
882 | .get_saddr = sctp_v4_get_saddr, | ||
883 | .copy_addrlist = sctp_v4_copy_addrlist, | ||
884 | .from_skb = sctp_v4_from_skb, | ||
885 | .from_sk = sctp_v4_from_sk, | ||
886 | .to_sk_saddr = sctp_v4_to_sk_saddr, | ||
887 | .to_sk_daddr = sctp_v4_to_sk_daddr, | ||
888 | .from_addr_param= sctp_v4_from_addr_param, | ||
889 | .to_addr_param = sctp_v4_to_addr_param, | ||
890 | .dst_saddr = sctp_v4_dst_saddr, | ||
891 | .cmp_addr = sctp_v4_cmp_addr, | ||
892 | .addr_valid = sctp_v4_addr_valid, | ||
893 | .inaddr_any = sctp_v4_inaddr_any, | ||
894 | .is_any = sctp_v4_is_any, | ||
895 | .available = sctp_v4_available, | ||
896 | .scope = sctp_v4_scope, | ||
897 | .skb_iif = sctp_v4_skb_iif, | ||
898 | .is_ce = sctp_v4_is_ce, | ||
899 | .seq_dump_addr = sctp_v4_seq_dump_addr, | ||
900 | .net_header_len = sizeof(struct iphdr), | ||
901 | .sockaddr_len = sizeof(struct sockaddr_in), | ||
902 | .sa_family = AF_INET, | ||
903 | }; | ||
904 | |||
905 | struct sctp_pf *sctp_get_pf_specific(sa_family_t family) | ||
906 | { | ||
907 | switch (family) { | ||
908 | case PF_INET: | ||
909 | return sctp_pf_inet_specific; | ||
910 | case PF_INET6: | ||
911 | return sctp_pf_inet6_specific; | ||
912 | default: | ||
913 | return NULL; | ||
914 | } | ||
915 | } | ||
916 | |||
917 | /* Register the PF specific function table. */ | ||
918 | int sctp_register_pf(struct sctp_pf *pf, sa_family_t family) | ||
919 | { | ||
920 | switch (family) { | ||
921 | case PF_INET: | ||
922 | if (sctp_pf_inet_specific) | ||
923 | return 0; | ||
924 | sctp_pf_inet_specific = pf; | ||
925 | break; | ||
926 | case PF_INET6: | ||
927 | if (sctp_pf_inet6_specific) | ||
928 | return 0; | ||
929 | sctp_pf_inet6_specific = pf; | ||
930 | break; | ||
931 | default: | ||
932 | return 0; | ||
933 | } | ||
934 | return 1; | ||
935 | } | ||
936 | |||
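sctp_register_pf() is a write-once hook: the first table registered for a family sticks, later attempts return 0, and success returns 1. A sketch of the expected caller side, with a hypothetical my_pf table and error value (the IPv6 side registers its own table along these lines from sctp_v6_init() in ipv6.c):

    /* Hypothetical protocol-family table; the ops are elided here. */
    static struct sctp_pf my_pf = { /* ... */ };

    if (!sctp_register_pf(&my_pf, PF_INET6))
            return -EINVAL;    /* family unknown or already claimed */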
937 | static int __init init_sctp_mibs(void) | ||
938 | { | ||
939 | sctp_statistics[0] = alloc_percpu(struct sctp_mib); | ||
940 | if (!sctp_statistics[0]) | ||
941 | return -ENOMEM; | ||
942 | sctp_statistics[1] = alloc_percpu(struct sctp_mib); | ||
943 | if (!sctp_statistics[1]) { | ||
944 | free_percpu(sctp_statistics[0]); | ||
945 | return -ENOMEM; | ||
946 | } | ||
947 | return 0; | ||
948 | |||
949 | } | ||
950 | |||
951 | static void cleanup_sctp_mibs(void) | ||
952 | { | ||
953 | free_percpu(sctp_statistics[0]); | ||
954 | free_percpu(sctp_statistics[1]); | ||
955 | } | ||
956 | |||
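Two percpu copies are allocated because, as with the other SNMP MIBs in the stack, one copy is updated from BH/softirq context and the other from process context; reading a counter means summing both copies across all possible CPUs. A simplified sketch of that read side, modelled on the fold_field() helper in net/sctp/proc.c (treat the field access as illustrative):

    unsigned long sum = 0;
    int cpu;

    for (cpu = 0; cpu < NR_CPUS; cpu++) {
            if (!cpu_possible(cpu))
                    continue;
            sum += per_cpu_ptr(sctp_statistics[0], cpu)->mibs[field];
            sum += per_cpu_ptr(sctp_statistics[1], cpu)->mibs[field];
    }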
957 | /* Initialize the universe into something sensible. */ | ||
958 | SCTP_STATIC __init int sctp_init(void) | ||
959 | { | ||
960 | int i; | ||
961 | int status = -EINVAL; | ||
962 | unsigned long goal; | ||
963 | int order; | ||
964 | |||
965 | /* SCTP_DEBUG sanity check. */ | ||
966 | if (!sctp_sanity_check()) | ||
967 | goto out; | ||
968 | |||
969 | status = proto_register(&sctp_prot, 1); | ||
970 | if (status) | ||
971 | goto out; | ||
972 | |||
973 | /* Add SCTP to inet_protos hash table. */ | ||
974 | status = -EAGAIN; | ||
975 | if (inet_add_protocol(&sctp_protocol, IPPROTO_SCTP) < 0) | ||
976 | goto err_add_protocol; | ||
977 | |||
978 | 	/* Add SCTP (TCP and UDP style) to the inetsw linked list. */ | ||
979 | inet_register_protosw(&sctp_seqpacket_protosw); | ||
980 | inet_register_protosw(&sctp_stream_protosw); | ||
981 | |||
982 | 	/* Allocate the cache pools. */ | ||
983 | status = -ENOBUFS; | ||
984 | sctp_bucket_cachep = kmem_cache_create("sctp_bind_bucket", | ||
985 | sizeof(struct sctp_bind_bucket), | ||
986 | 0, SLAB_HWCACHE_ALIGN, | ||
987 | NULL, NULL); | ||
988 | |||
989 | if (!sctp_bucket_cachep) | ||
990 | goto err_bucket_cachep; | ||
991 | |||
992 | sctp_chunk_cachep = kmem_cache_create("sctp_chunk", | ||
993 | sizeof(struct sctp_chunk), | ||
994 | 0, SLAB_HWCACHE_ALIGN, | ||
995 | NULL, NULL); | ||
996 | if (!sctp_chunk_cachep) | ||
997 | goto err_chunk_cachep; | ||
998 | |||
999 | /* Allocate and initialise sctp mibs. */ | ||
1000 | status = init_sctp_mibs(); | ||
1001 | if (status) | ||
1002 | goto err_init_mibs; | ||
1003 | |||
1004 | /* Initialize proc fs directory. */ | ||
1005 | status = sctp_proc_init(); | ||
1006 | if (status) | ||
1007 | goto err_init_proc; | ||
1008 | |||
1009 | /* Initialize object count debugging. */ | ||
1010 | sctp_dbg_objcnt_init(); | ||
1011 | |||
1012 | /* Initialize the SCTP specific PF functions. */ | ||
1013 | sctp_register_pf(&sctp_pf_inet, PF_INET); | ||
1014 | /* | ||
1015 | * 14. Suggested SCTP Protocol Parameter Values | ||
1016 | */ | ||
1017 | /* The following protocol parameters are RECOMMENDED: */ | ||
1018 | /* RTO.Initial - 3 seconds */ | ||
1019 | sctp_rto_initial = SCTP_RTO_INITIAL; | ||
1020 | /* RTO.Min - 1 second */ | ||
1021 | sctp_rto_min = SCTP_RTO_MIN; | ||
1022 | /* RTO.Max - 60 seconds */ | ||
1023 | sctp_rto_max = SCTP_RTO_MAX; | ||
1024 | /* RTO.Alpha - 1/8 */ | ||
1025 | sctp_rto_alpha = SCTP_RTO_ALPHA; | ||
1026 | /* RTO.Beta - 1/4 */ | ||
1027 | sctp_rto_beta = SCTP_RTO_BETA; | ||
1028 | |||
1029 | /* Valid.Cookie.Life - 60 seconds */ | ||
1030 | sctp_valid_cookie_life = 60 * HZ; | ||
1031 | |||
1032 | /* Whether Cookie Preservative is enabled(1) or not(0) */ | ||
1033 | sctp_cookie_preserve_enable = 1; | ||
1034 | |||
1035 | /* Max.Burst - 4 */ | ||
1036 | sctp_max_burst = SCTP_MAX_BURST; | ||
1037 | |||
1038 | /* Association.Max.Retrans - 10 attempts | ||
1039 | * Path.Max.Retrans - 5 attempts (per destination address) | ||
1040 | * Max.Init.Retransmits - 8 attempts | ||
1041 | */ | ||
1042 | sctp_max_retrans_association = 10; | ||
1043 | sctp_max_retrans_path = 5; | ||
1044 | sctp_max_retrans_init = 8; | ||
1045 | |||
1046 | /* HB.interval - 30 seconds */ | ||
1047 | sctp_hb_interval = 30 * HZ; | ||
1048 | |||
1049 | /* Implementation specific variables. */ | ||
1050 | |||
1051 | /* Initialize default stream count setup information. */ | ||
1052 | sctp_max_instreams = SCTP_DEFAULT_INSTREAMS; | ||
1053 | sctp_max_outstreams = SCTP_DEFAULT_OUTSTREAMS; | ||
1054 | |||
1055 | /* Initialize handle used for association ids. */ | ||
1056 | idr_init(&sctp_assocs_id); | ||
1057 | |||
1058 | /* Size and allocate the association hash table. | ||
1059 | * The methodology is similar to that of the tcp hash tables. | ||
1060 | */ | ||
1061 | if (num_physpages >= (128 * 1024)) | ||
1062 | goal = num_physpages >> (22 - PAGE_SHIFT); | ||
1063 | else | ||
1064 | goal = num_physpages >> (24 - PAGE_SHIFT); | ||
1065 | |||
1066 | for (order = 0; (1UL << order) < goal; order++) | ||
1067 | ; | ||
1068 | |||
1069 | do { | ||
1070 | sctp_assoc_hashsize = (1UL << order) * PAGE_SIZE / | ||
1071 | sizeof(struct sctp_hashbucket); | ||
1072 | if ((sctp_assoc_hashsize > (64 * 1024)) && order > 0) | ||
1073 | continue; | ||
1074 | sctp_assoc_hashtable = (struct sctp_hashbucket *) | ||
1075 | __get_free_pages(GFP_ATOMIC, order); | ||
1076 | } while (!sctp_assoc_hashtable && --order > 0); | ||
1077 | if (!sctp_assoc_hashtable) { | ||
1078 | printk(KERN_ERR "SCTP: Failed association hash alloc.\n"); | ||
1079 | status = -ENOMEM; | ||
1080 | goto err_ahash_alloc; | ||
1081 | } | ||
1082 | for (i = 0; i < sctp_assoc_hashsize; i++) { | ||
1083 | rwlock_init(&sctp_assoc_hashtable[i].lock); | ||
1084 | sctp_assoc_hashtable[i].chain = NULL; | ||
1085 | } | ||
1086 | |||
1087 | /* Allocate and initialize the endpoint hash table. */ | ||
1088 | sctp_ep_hashsize = 64; | ||
1089 | sctp_ep_hashtable = (struct sctp_hashbucket *) | ||
1090 | kmalloc(64 * sizeof(struct sctp_hashbucket), GFP_KERNEL); | ||
1091 | if (!sctp_ep_hashtable) { | ||
1092 | printk(KERN_ERR "SCTP: Failed endpoint_hash alloc.\n"); | ||
1093 | status = -ENOMEM; | ||
1094 | goto err_ehash_alloc; | ||
1095 | } | ||
1096 | for (i = 0; i < sctp_ep_hashsize; i++) { | ||
1097 | rwlock_init(&sctp_ep_hashtable[i].lock); | ||
1098 | sctp_ep_hashtable[i].chain = NULL; | ||
1099 | } | ||
1100 | |||
1101 | /* Allocate and initialize the SCTP port hash table. */ | ||
1102 | do { | ||
1103 | sctp_port_hashsize = (1UL << order) * PAGE_SIZE / | ||
1104 | sizeof(struct sctp_bind_hashbucket); | ||
1105 | if ((sctp_port_hashsize > (64 * 1024)) && order > 0) | ||
1106 | continue; | ||
1107 | sctp_port_hashtable = (struct sctp_bind_hashbucket *) | ||
1108 | __get_free_pages(GFP_ATOMIC, order); | ||
1109 | } while (!sctp_port_hashtable && --order > 0); | ||
1110 | if (!sctp_port_hashtable) { | ||
1111 | printk(KERN_ERR "SCTP: Failed bind hash alloc."); | ||
1112 | status = -ENOMEM; | ||
1113 | goto err_bhash_alloc; | ||
1114 | } | ||
1115 | for (i = 0; i < sctp_port_hashsize; i++) { | ||
1116 | spin_lock_init(&sctp_port_hashtable[i].lock); | ||
1117 | sctp_port_hashtable[i].chain = NULL; | ||
1118 | } | ||
1119 | |||
1120 | spin_lock_init(&sctp_port_alloc_lock); | ||
1121 | sctp_port_rover = sysctl_local_port_range[0] - 1; | ||
1122 | |||
1123 | printk(KERN_INFO "SCTP: Hash tables configured " | ||
1124 | "(established %d bind %d)\n", | ||
1125 | sctp_assoc_hashsize, sctp_port_hashsize); | ||
1126 | |||
1127 | /* Disable ADDIP by default. */ | ||
1128 | sctp_addip_enable = 0; | ||
1129 | |||
1130 | /* Enable PR-SCTP by default. */ | ||
1131 | sctp_prsctp_enable = 1; | ||
1132 | |||
1133 | sctp_sysctl_register(); | ||
1134 | |||
1135 | INIT_LIST_HEAD(&sctp_address_families); | ||
1136 | sctp_register_af(&sctp_ipv4_specific); | ||
1137 | |||
1138 | status = sctp_v6_init(); | ||
1139 | if (status) | ||
1140 | goto err_v6_init; | ||
1141 | |||
1142 | /* Initialize the control inode/socket for handling OOTB packets. */ | ||
1143 | if ((status = sctp_ctl_sock_init())) { | ||
1144 | printk (KERN_ERR | ||
1145 | "SCTP: Failed to initialize the SCTP control sock.\n"); | ||
1146 | goto err_ctl_sock_init; | ||
1147 | } | ||
1148 | |||
1149 | /* Initialize the local address list. */ | ||
1150 | INIT_LIST_HEAD(&sctp_local_addr_list); | ||
1151 | spin_lock_init(&sctp_local_addr_lock); | ||
1152 | |||
1153 | /* Register notifier for inet address additions/deletions. */ | ||
1154 | register_inetaddr_notifier(&sctp_inetaddr_notifier); | ||
1155 | |||
1156 | sctp_get_local_addr_list(); | ||
1157 | |||
1158 | __unsafe(THIS_MODULE); | ||
1159 | status = 0; | ||
1160 | out: | ||
1161 | return status; | ||
1162 | err_ctl_sock_init: | ||
1163 | 	sctp_v6_exit(); | ||
1164 | err_v6_init: | ||
1165 | 	sctp_sysctl_unregister(); | ||
1166 | 	list_del(&sctp_ipv4_specific.list); | ||
1167 | 	free_pages((unsigned long)sctp_port_hashtable, | ||
1168 | 		   get_order(sctp_port_hashsize * | ||
1169 | 			     sizeof(struct sctp_bind_hashbucket))); | ||
1170 | err_bhash_alloc: | ||
1171 | 	kfree(sctp_ep_hashtable); | ||
1172 | err_ehash_alloc: | ||
1173 | 	free_pages((unsigned long)sctp_assoc_hashtable, | ||
1174 | 		   get_order(sctp_assoc_hashsize * | ||
1175 | 			     sizeof(struct sctp_hashbucket))); | ||
1176 | err_ahash_alloc: | ||
1177 | 	sctp_dbg_objcnt_exit(); | ||
1178 | err_init_proc: | ||
1179 | 	sctp_proc_exit(); | ||
1180 | 	cleanup_sctp_mibs(); | ||
1181 | err_init_mibs: | ||
1182 | 	kmem_cache_destroy(sctp_chunk_cachep); | ||
1183 | err_chunk_cachep: | ||
1184 | 	kmem_cache_destroy(sctp_bucket_cachep); | ||
1185 | err_bucket_cachep: | ||
1186 | 	inet_del_protocol(&sctp_protocol, IPPROTO_SCTP); | ||
1187 | 	inet_unregister_protosw(&sctp_seqpacket_protosw); | ||
1188 | 	inet_unregister_protosw(&sctp_stream_protosw); | ||
1189 | err_add_protocol: | ||
1190 | 	proto_unregister(&sctp_prot); | ||
1191 | 	goto out; | ||
1192 | } | ||
1193 | |||
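The error unwinding above uses the standard kernel goto idiom: a failure jumps to the label matching the last step that succeeded and falls through the remaining labels, so each path tears down exactly what was set up, in reverse order. A minimal sketch of the pattern in isolation (setup_a/setup_b/teardown_a are hypothetical helpers, not part of this file):

    static int example_init(void)
    {
            int err;

            err = setup_a();
            if (err)
                    goto out;

            err = setup_b();
            if (err)
                    goto err_undo_a;        /* undo only what succeeded */

            return 0;

    err_undo_a:
            teardown_a();
    out:
            return err;
    }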
1194 | /* Exit handler for the SCTP protocol. */ | ||
1195 | SCTP_STATIC __exit void sctp_exit(void) | ||
1196 | { | ||
1197 | /* BUG. This should probably do something useful like clean | ||
1198 | * up all the remaining associations and all that memory. | ||
1199 | */ | ||
1200 | |||
1201 | /* Unregister notifier for inet address additions/deletions. */ | ||
1202 | unregister_inetaddr_notifier(&sctp_inetaddr_notifier); | ||
1203 | |||
1204 | /* Free the local address list. */ | ||
1205 | sctp_free_local_addr_list(); | ||
1206 | |||
1207 | /* Free the control endpoint. */ | ||
1208 | sock_release(sctp_ctl_socket); | ||
1209 | |||
1210 | sctp_v6_exit(); | ||
1211 | sctp_sysctl_unregister(); | ||
1212 | list_del(&sctp_ipv4_specific.list); | ||
1213 | |||
1214 | free_pages((unsigned long)sctp_assoc_hashtable, | ||
1215 | get_order(sctp_assoc_hashsize * | ||
1216 | sizeof(struct sctp_hashbucket))); | ||
1217 | kfree(sctp_ep_hashtable); | ||
1218 | free_pages((unsigned long)sctp_port_hashtable, | ||
1219 | get_order(sctp_port_hashsize * | ||
1220 | sizeof(struct sctp_bind_hashbucket))); | ||
1221 | |||
1222 | kmem_cache_destroy(sctp_chunk_cachep); | ||
1223 | kmem_cache_destroy(sctp_bucket_cachep); | ||
1224 | |||
1225 | sctp_dbg_objcnt_exit(); | ||
1226 | sctp_proc_exit(); | ||
1227 | cleanup_sctp_mibs(); | ||
1228 | |||
1229 | inet_del_protocol(&sctp_protocol, IPPROTO_SCTP); | ||
1230 | inet_unregister_protosw(&sctp_seqpacket_protosw); | ||
1231 | inet_unregister_protosw(&sctp_stream_protosw); | ||
1232 | proto_unregister(&sctp_prot); | ||
1233 | } | ||
1234 | |||
1235 | module_init(sctp_init); | ||
1236 | module_exit(sctp_exit); | ||
1237 | |||
1238 | MODULE_AUTHOR("Linux Kernel SCTP developers <lksctp-developers@lists.sourceforge.net>"); | ||
1239 | MODULE_DESCRIPTION("Support for the SCTP protocol (RFC2960)"); | ||
1240 | MODULE_LICENSE("GPL"); | ||
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c new file mode 100644 index 000000000000..1db12cc18cf7 --- /dev/null +++ b/net/sctp/sm_make_chunk.c | |||
@@ -0,0 +1,2766 @@ | |||
1 | /* SCTP kernel reference Implementation | ||
2 | * (C) Copyright IBM Corp. 2001, 2004 | ||
3 | * Copyright (c) 1999-2000 Cisco, Inc. | ||
4 | * Copyright (c) 1999-2001 Motorola, Inc. | ||
5 | * Copyright (c) 2001-2002 Intel Corp. | ||
6 | * | ||
7 | * This file is part of the SCTP kernel reference Implementation | ||
8 | * | ||
9 | * These functions work with the state functions in sctp_sm_statefuns.c | ||
10 | * to implement the state operations. These functions implement the | ||
11 | * steps which require modifying existing data structures. | ||
12 | * | ||
13 | * The SCTP reference implementation is free software; | ||
14 | * you can redistribute it and/or modify it under the terms of | ||
15 | * the GNU General Public License as published by | ||
16 | * the Free Software Foundation; either version 2, or (at your option) | ||
17 | * any later version. | ||
18 | * | ||
19 | * The SCTP reference implementation is distributed in the hope that it | ||
20 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
21 | * ************************ | ||
22 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
23 | * See the GNU General Public License for more details. | ||
24 | * | ||
25 | * You should have received a copy of the GNU General Public License | ||
26 | * along with GNU CC; see the file COPYING. If not, write to | ||
27 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
28 | * Boston, MA 02111-1307, USA. | ||
29 | * | ||
30 | * Please send any bug reports or fixes you make to the | ||
31 | * email address(es): | ||
32 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
33 | * | ||
34 | * Or submit a bug report through the following website: | ||
35 | * http://www.sf.net/projects/lksctp | ||
36 | * | ||
37 | * Written or modified by: | ||
38 | * La Monte H.P. Yarroll <piggy@acm.org> | ||
39 | * Karl Knutson <karl@athena.chicago.il.us> | ||
40 | * C. Robin <chris@hundredacre.ac.uk> | ||
41 | * Jon Grimm <jgrimm@us.ibm.com> | ||
42 | * Xingang Guo <xingang.guo@intel.com> | ||
43 | * Dajiang Zhang <dajiang.zhang@nokia.com> | ||
44 | * Sridhar Samudrala <sri@us.ibm.com> | ||
45 | * Daisy Chang <daisyc@us.ibm.com> | ||
46 | * Ardelle Fan <ardelle.fan@intel.com> | ||
47 | * Kevin Gao <kevin.gao@intel.com> | ||
48 | * | ||
49 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
50 | * be incorporated into the next SCTP release. | ||
51 | */ | ||
52 | |||
53 | #include <linux/types.h> | ||
54 | #include <linux/kernel.h> | ||
55 | #include <linux/ip.h> | ||
56 | #include <linux/ipv6.h> | ||
57 | #include <linux/net.h> | ||
58 | #include <linux/inet.h> | ||
59 | #include <asm/scatterlist.h> | ||
60 | #include <linux/crypto.h> | ||
61 | #include <net/sock.h> | ||
62 | |||
63 | #include <linux/skbuff.h> | ||
64 | #include <linux/random.h> /* for get_random_bytes */ | ||
65 | #include <net/sctp/sctp.h> | ||
66 | #include <net/sctp/sm.h> | ||
67 | |||
68 | extern kmem_cache_t *sctp_chunk_cachep; | ||
69 | |||
70 | SCTP_STATIC | ||
71 | struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc, | ||
72 | __u8 type, __u8 flags, int paylen); | ||
73 | static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep, | ||
74 | const struct sctp_association *asoc, | ||
75 | const struct sctp_chunk *init_chunk, | ||
76 | int *cookie_len, | ||
77 | const __u8 *raw_addrs, int addrs_len); | ||
78 | static int sctp_process_param(struct sctp_association *asoc, | ||
79 | union sctp_params param, | ||
80 | const union sctp_addr *peer_addr, | ||
81 | int gfp); | ||
82 | |||
83 | /* What was the inbound interface for this chunk? */ | ||
84 | int sctp_chunk_iif(const struct sctp_chunk *chunk) | ||
85 | { | ||
86 | struct sctp_af *af; | ||
87 | int iif = 0; | ||
88 | |||
89 | af = sctp_get_af_specific(ipver2af(chunk->skb->nh.iph->version)); | ||
90 | if (af) | ||
91 | iif = af->skb_iif(chunk->skb); | ||
92 | |||
93 | return iif; | ||
94 | } | ||
95 | |||
96 | /* RFC 2960 3.3.2 Initiation (INIT) (1) | ||
97 | * | ||
98 | * Note 2: The ECN capable field is reserved for future use of | ||
99 | * Explicit Congestion Notification. | ||
100 | */ | ||
101 | static const struct sctp_paramhdr ecap_param = { | ||
102 | SCTP_PARAM_ECN_CAPABLE, | ||
103 | __constant_htons(sizeof(struct sctp_paramhdr)), | ||
104 | }; | ||
105 | static const struct sctp_paramhdr prsctp_param = { | ||
106 | SCTP_PARAM_FWD_TSN_SUPPORT, | ||
107 | __constant_htons(sizeof(struct sctp_paramhdr)), | ||
108 | }; | ||
109 | |||
110 | /* A helper to initialize an op error inside a | ||
111 | * provided chunk, as most cause codes will be embedded inside an | ||
112 | * abort chunk. | ||
113 | */ | ||
114 | void sctp_init_cause(struct sctp_chunk *chunk, __u16 cause_code, | ||
115 | const void *payload, size_t paylen) | ||
116 | { | ||
117 | sctp_errhdr_t err; | ||
118 | int padlen; | ||
119 | __u16 len; | ||
120 | |||
121 | /* Cause code constants are now defined in network order. */ | ||
122 | err.cause = cause_code; | ||
123 | len = sizeof(sctp_errhdr_t) + paylen; | ||
124 | padlen = len % 4; | ||
125 | err.length = htons(len); | ||
126 | len += padlen; | ||
127 | sctp_addto_chunk(chunk, sizeof(sctp_errhdr_t), &err); | ||
128 | chunk->subh.err_hdr = sctp_addto_chunk(chunk, paylen, payload); | ||
129 | } | ||
130 | |||
131 | /* 3.3.2 Initiation (INIT) (1) | ||
132 | * | ||
133 | * This chunk is used to initiate a SCTP association between two | ||
134 | * endpoints. The format of the INIT chunk is shown below: | ||
135 | * | ||
136 | * 0 1 2 3 | ||
137 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | ||
138 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
139 | * | Type = 1 | Chunk Flags | Chunk Length | | ||
140 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
141 | * | Initiate Tag | | ||
142 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
143 | * | Advertised Receiver Window Credit (a_rwnd) | | ||
144 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
145 | * | Number of Outbound Streams | Number of Inbound Streams | | ||
146 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
147 | * | Initial TSN | | ||
148 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
149 | * \ \ | ||
150 | * / Optional/Variable-Length Parameters / | ||
151 | * \ \ | ||
152 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
153 | * | ||
154 | * | ||
155 | * The INIT chunk contains the following parameters. Unless otherwise | ||
156 | * noted, each parameter MUST only be included once in the INIT chunk. | ||
157 | * | ||
158 | * Fixed Parameters Status | ||
159 | * ---------------------------------------------- | ||
160 | * Initiate Tag Mandatory | ||
161 | * Advertised Receiver Window Credit Mandatory | ||
162 | * Number of Outbound Streams Mandatory | ||
163 | * Number of Inbound Streams Mandatory | ||
164 | * Initial TSN Mandatory | ||
165 | * | ||
166 | * Variable Parameters Status Type Value | ||
167 | * ------------------------------------------------------------- | ||
168 | * IPv4 Address (Note 1) Optional 5 | ||
169 | * IPv6 Address (Note 1) Optional 6 | ||
170 | * Cookie Preservative Optional 9 | ||
171 | * Reserved for ECN Capable (Note 2) Optional 32768 (0x8000) | ||
172 | * Host Name Address (Note 3) Optional 11 | ||
173 | * Supported Address Types (Note 4) Optional 12 | ||
174 | */ | ||
175 | struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc, | ||
176 | const struct sctp_bind_addr *bp, | ||
177 | int gfp, int vparam_len) | ||
178 | { | ||
179 | sctp_inithdr_t init; | ||
180 | union sctp_params addrs; | ||
181 | size_t chunksize; | ||
182 | struct sctp_chunk *retval = NULL; | ||
183 | int num_types, addrs_len = 0; | ||
184 | struct sctp_sock *sp; | ||
185 | sctp_supported_addrs_param_t sat; | ||
186 | __u16 types[2]; | ||
187 | sctp_adaption_ind_param_t aiparam; | ||
188 | |||
189 | /* RFC 2960 3.3.2 Initiation (INIT) (1) | ||
190 | * | ||
191 | * Note 1: The INIT chunks can contain multiple addresses that | ||
192 | * can be IPv4 and/or IPv6 in any combination. | ||
193 | */ | ||
194 | retval = NULL; | ||
195 | |||
196 | /* Convert the provided bind address list to raw format. */ | ||
197 | addrs = sctp_bind_addrs_to_raw(bp, &addrs_len, gfp); | ||
198 | |||
199 | init.init_tag = htonl(asoc->c.my_vtag); | ||
200 | init.a_rwnd = htonl(asoc->rwnd); | ||
201 | init.num_outbound_streams = htons(asoc->c.sinit_num_ostreams); | ||
202 | init.num_inbound_streams = htons(asoc->c.sinit_max_instreams); | ||
203 | init.initial_tsn = htonl(asoc->c.initial_tsn); | ||
204 | |||
205 | /* How many address types are needed? */ | ||
206 | sp = sctp_sk(asoc->base.sk); | ||
207 | num_types = sp->pf->supported_addrs(sp, types); | ||
208 | |||
209 | chunksize = sizeof(init) + addrs_len + SCTP_SAT_LEN(num_types); | ||
210 | chunksize += sizeof(ecap_param); | ||
211 | if (sctp_prsctp_enable) | ||
212 | chunksize += sizeof(prsctp_param); | ||
213 | chunksize += sizeof(aiparam); | ||
214 | chunksize += vparam_len; | ||
215 | |||
216 | /* RFC 2960 3.3.2 Initiation (INIT) (1) | ||
217 | * | ||
218 | * Note 3: An INIT chunk MUST NOT contain more than one Host | ||
219 | * Name address parameter. Moreover, the sender of the INIT | ||
220 | * MUST NOT combine any other address types with the Host Name | ||
221 | * address in the INIT. The receiver of INIT MUST ignore any | ||
222 | * other address types if the Host Name address parameter is | ||
223 | * present in the received INIT chunk. | ||
224 | * | ||
225 | * PLEASE DO NOT FIXME [This version does not support Host Name.] | ||
226 | */ | ||
227 | |||
228 | retval = sctp_make_chunk(asoc, SCTP_CID_INIT, 0, chunksize); | ||
229 | if (!retval) | ||
230 | goto nodata; | ||
231 | |||
232 | retval->subh.init_hdr = | ||
233 | sctp_addto_chunk(retval, sizeof(init), &init); | ||
234 | retval->param_hdr.v = | ||
235 | sctp_addto_chunk(retval, addrs_len, addrs.v); | ||
236 | |||
237 | /* RFC 2960 3.3.2 Initiation (INIT) (1) | ||
238 | * | ||
239 | * Note 4: This parameter, when present, specifies all the | ||
240 | * address types the sending endpoint can support. The absence | ||
241 | * of this parameter indicates that the sending endpoint can | ||
242 | * support any address type. | ||
243 | */ | ||
244 | sat.param_hdr.type = SCTP_PARAM_SUPPORTED_ADDRESS_TYPES; | ||
245 | sat.param_hdr.length = htons(SCTP_SAT_LEN(num_types)); | ||
246 | sctp_addto_chunk(retval, sizeof(sat), &sat); | ||
247 | sctp_addto_chunk(retval, num_types * sizeof(__u16), &types); | ||
248 | |||
249 | sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param); | ||
250 | if (sctp_prsctp_enable) | ||
251 | sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param); | ||
252 | aiparam.param_hdr.type = SCTP_PARAM_ADAPTION_LAYER_IND; | ||
253 | aiparam.param_hdr.length = htons(sizeof(aiparam)); | ||
254 | aiparam.adaption_ind = htonl(sp->adaption_ind); | ||
255 | sctp_addto_chunk(retval, sizeof(aiparam), &aiparam); | ||
256 | nodata: | ||
257 | if (addrs.v) | ||
258 | kfree(addrs.v); | ||
259 | return retval; | ||
260 | } | ||
261 | |||
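For orientation, the state machine builds an INIT roughly as follows when an association is started; this is only a sketch of the call shape (the actual call sites live in sm_statefuns.c and hand the chunk to the side-effect processor rather than sending it directly):

    struct sctp_chunk *repl;

    repl = sctp_make_init(asoc, &asoc->base.bind_addr, GFP_ATOMIC, 0);
    if (!repl)
            return SCTP_DISPOSITION_NOMEM;  /* how the state functions bail out */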
262 | struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc, | ||
263 | const struct sctp_chunk *chunk, | ||
264 | int gfp, int unkparam_len) | ||
265 | { | ||
266 | sctp_inithdr_t initack; | ||
267 | struct sctp_chunk *retval; | ||
268 | union sctp_params addrs; | ||
269 | int addrs_len; | ||
270 | sctp_cookie_param_t *cookie; | ||
271 | int cookie_len; | ||
272 | size_t chunksize; | ||
273 | sctp_adaption_ind_param_t aiparam; | ||
274 | |||
275 | retval = NULL; | ||
276 | |||
277 | /* Note: there may be no addresses to embed. */ | ||
278 | addrs = sctp_bind_addrs_to_raw(&asoc->base.bind_addr, &addrs_len, gfp); | ||
279 | |||
280 | initack.init_tag = htonl(asoc->c.my_vtag); | ||
281 | initack.a_rwnd = htonl(asoc->rwnd); | ||
282 | initack.num_outbound_streams = htons(asoc->c.sinit_num_ostreams); | ||
283 | initack.num_inbound_streams = htons(asoc->c.sinit_max_instreams); | ||
284 | initack.initial_tsn = htonl(asoc->c.initial_tsn); | ||
285 | |||
286 | /* FIXME: We really ought to build the cookie right | ||
287 | * into the packet instead of allocating more fresh memory. | ||
288 | */ | ||
289 | cookie = sctp_pack_cookie(asoc->ep, asoc, chunk, &cookie_len, | ||
290 | addrs.v, addrs_len); | ||
291 | if (!cookie) | ||
292 | goto nomem_cookie; | ||
293 | |||
294 | 	/* Calculate the total size of the allocation, including the | ||
295 | 	 * reserved space for reporting unknown parameters if specified. | ||
296 | */ | ||
297 | chunksize = sizeof(initack) + addrs_len + cookie_len + unkparam_len; | ||
298 | |||
299 | /* Tell peer that we'll do ECN only if peer advertised such cap. */ | ||
300 | if (asoc->peer.ecn_capable) | ||
301 | chunksize += sizeof(ecap_param); | ||
302 | |||
303 | /* Tell peer that we'll do PR-SCTP only if peer advertised. */ | ||
304 | if (asoc->peer.prsctp_capable) | ||
305 | chunksize += sizeof(prsctp_param); | ||
306 | |||
307 | chunksize += sizeof(aiparam); | ||
308 | |||
309 | /* Now allocate and fill out the chunk. */ | ||
310 | retval = sctp_make_chunk(asoc, SCTP_CID_INIT_ACK, 0, chunksize); | ||
311 | if (!retval) | ||
312 | goto nomem_chunk; | ||
313 | |||
314 | /* Per the advice in RFC 2960 6.4, send this reply to | ||
315 | * the source of the INIT packet. | ||
316 | */ | ||
317 | retval->transport = chunk->transport; | ||
318 | retval->subh.init_hdr = | ||
319 | sctp_addto_chunk(retval, sizeof(initack), &initack); | ||
320 | retval->param_hdr.v = sctp_addto_chunk(retval, addrs_len, addrs.v); | ||
321 | sctp_addto_chunk(retval, cookie_len, cookie); | ||
322 | if (asoc->peer.ecn_capable) | ||
323 | sctp_addto_chunk(retval, sizeof(ecap_param), &ecap_param); | ||
324 | if (asoc->peer.prsctp_capable) | ||
325 | sctp_addto_chunk(retval, sizeof(prsctp_param), &prsctp_param); | ||
326 | |||
327 | aiparam.param_hdr.type = SCTP_PARAM_ADAPTION_LAYER_IND; | ||
328 | aiparam.param_hdr.length = htons(sizeof(aiparam)); | ||
329 | aiparam.adaption_ind = htonl(sctp_sk(asoc->base.sk)->adaption_ind); | ||
330 | sctp_addto_chunk(retval, sizeof(aiparam), &aiparam); | ||
331 | |||
332 | /* We need to remove the const qualifier at this point. */ | ||
333 | retval->asoc = (struct sctp_association *) asoc; | ||
334 | |||
335 | /* RFC 2960 6.4 Multi-homed SCTP Endpoints | ||
336 | * | ||
337 | * An endpoint SHOULD transmit reply chunks (e.g., SACK, | ||
338 | 	 * HEARTBEAT ACK, etc.) to the same destination transport | ||
339 | * address from which it received the DATA or control chunk | ||
340 | * to which it is replying. | ||
341 | * | ||
342 | * [INIT ACK back to where the INIT came from.] | ||
343 | */ | ||
344 | if (chunk) | ||
345 | retval->transport = chunk->transport; | ||
346 | |||
347 | nomem_chunk: | ||
348 | kfree(cookie); | ||
349 | nomem_cookie: | ||
350 | if (addrs.v) | ||
351 | kfree(addrs.v); | ||
352 | return retval; | ||
353 | } | ||
354 | |||
355 | /* 3.3.11 Cookie Echo (COOKIE ECHO) (10): | ||
356 | * | ||
357 | * This chunk is used only during the initialization of an association. | ||
358 | * It is sent by the initiator of an association to its peer to complete | ||
359 | * the initialization process. This chunk MUST precede any DATA chunk | ||
360 | * sent within the association, but MAY be bundled with one or more DATA | ||
361 | * chunks in the same packet. | ||
362 | * | ||
363 | * 0 1 2 3 | ||
364 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | ||
365 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
366 | * | Type = 10 |Chunk Flags | Length | | ||
367 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
368 | * / Cookie / | ||
369 | * \ \ | ||
370 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
371 | * | ||
372 | * Chunk Flags: 8 bit | ||
373 | * | ||
374 | * Set to zero on transmit and ignored on receipt. | ||
375 | * | ||
376 | * Length: 16 bits (unsigned integer) | ||
377 | * | ||
378 | * Set to the size of the chunk in bytes, including the 4 bytes of | ||
379 | * the chunk header and the size of the Cookie. | ||
380 | * | ||
381 | * Cookie: variable size | ||
382 | * | ||
383 | * This field must contain the exact cookie received in the | ||
384 | * State Cookie parameter from the previous INIT ACK. | ||
385 | * | ||
386 | * An implementation SHOULD make the cookie as small as possible | ||
387 | * to insure interoperability. | ||
388 | */ | ||
389 | struct sctp_chunk *sctp_make_cookie_echo(const struct sctp_association *asoc, | ||
390 | const struct sctp_chunk *chunk) | ||
391 | { | ||
392 | struct sctp_chunk *retval; | ||
393 | void *cookie; | ||
394 | int cookie_len; | ||
395 | |||
396 | cookie = asoc->peer.cookie; | ||
397 | cookie_len = asoc->peer.cookie_len; | ||
398 | |||
399 | /* Build a cookie echo chunk. */ | ||
400 | retval = sctp_make_chunk(asoc, SCTP_CID_COOKIE_ECHO, 0, cookie_len); | ||
401 | if (!retval) | ||
402 | goto nodata; | ||
403 | retval->subh.cookie_hdr = | ||
404 | sctp_addto_chunk(retval, cookie_len, cookie); | ||
405 | |||
406 | /* RFC 2960 6.4 Multi-homed SCTP Endpoints | ||
407 | * | ||
408 | * An endpoint SHOULD transmit reply chunks (e.g., SACK, | ||
409 | 	 * HEARTBEAT ACK, etc.) to the same destination transport | ||
410 | 	 * address from which it received the DATA or control chunk | ||
411 | * to which it is replying. | ||
412 | * | ||
413 | * [COOKIE ECHO back to where the INIT ACK came from.] | ||
414 | */ | ||
415 | if (chunk) | ||
416 | retval->transport = chunk->transport; | ||
417 | |||
418 | nodata: | ||
419 | return retval; | ||
420 | } | ||
421 | |||
422 | /* 3.3.12 Cookie Acknowledgement (COOKIE ACK) (11): | ||
423 | * | ||
424 | * This chunk is used only during the initialization of an | ||
425 | * association. It is used to acknowledge the receipt of a COOKIE | ||
426 | * ECHO chunk. This chunk MUST precede any DATA or SACK chunk sent | ||
427 | * within the association, but MAY be bundled with one or more DATA | ||
428 | * chunks or SACK chunk in the same SCTP packet. | ||
429 | * | ||
430 | * 0 1 2 3 | ||
431 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | ||
432 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
433 | * | Type = 11 |Chunk Flags | Length = 4 | | ||
434 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
435 | * | ||
436 | * Chunk Flags: 8 bits | ||
437 | * | ||
438 | * Set to zero on transmit and ignored on receipt. | ||
439 | */ | ||
440 | struct sctp_chunk *sctp_make_cookie_ack(const struct sctp_association *asoc, | ||
441 | const struct sctp_chunk *chunk) | ||
442 | { | ||
443 | struct sctp_chunk *retval; | ||
444 | |||
445 | retval = sctp_make_chunk(asoc, SCTP_CID_COOKIE_ACK, 0, 0); | ||
446 | |||
447 | /* RFC 2960 6.4 Multi-homed SCTP Endpoints | ||
448 | * | ||
449 | * An endpoint SHOULD transmit reply chunks (e.g., SACK, | ||
450 | 	 * HEARTBEAT ACK, etc.) to the same destination transport | ||
451 | 	 * address from which it received the DATA or control chunk | ||
452 | * to which it is replying. | ||
453 | * | ||
454 | * [COOKIE ACK back to where the COOKIE ECHO came from.] | ||
455 | */ | ||
456 | if (retval && chunk) | ||
457 | retval->transport = chunk->transport; | ||
458 | |||
459 | return retval; | ||
460 | } | ||
461 | |||
462 | /* | ||
463 | * Appendix A: Explicit Congestion Notification: | ||
464 | * CWR: | ||
465 | * | ||
466 | * RFC 2481 details a specific bit for a sender to send in the header of | ||
467 | * its next outbound TCP segment to indicate to its peer that it has | ||
468 | * reduced its congestion window. This is termed the CWR bit. For | ||
469 | * SCTP the same indication is made by including the CWR chunk. | ||
470 | * This chunk contains one data element, i.e. the TSN number that | ||
471 | * was sent in the ECNE chunk. This element represents the lowest | ||
472 | * TSN number in the datagram that was originally marked with the | ||
473 | * CE bit. | ||
474 | * | ||
475 | * 0 1 2 3 | ||
476 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | ||
477 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
478 | * | Chunk Type=13 | Flags=00000000| Chunk Length = 8 | | ||
479 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
480 | * | Lowest TSN Number | | ||
481 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
482 | * | ||
483 | * Note: The CWR is considered a Control chunk. | ||
484 | */ | ||
485 | struct sctp_chunk *sctp_make_cwr(const struct sctp_association *asoc, | ||
486 | const __u32 lowest_tsn, | ||
487 | const struct sctp_chunk *chunk) | ||
488 | { | ||
489 | struct sctp_chunk *retval; | ||
490 | sctp_cwrhdr_t cwr; | ||
491 | |||
492 | cwr.lowest_tsn = htonl(lowest_tsn); | ||
493 | retval = sctp_make_chunk(asoc, SCTP_CID_ECN_CWR, 0, | ||
494 | sizeof(sctp_cwrhdr_t)); | ||
495 | |||
496 | if (!retval) | ||
497 | goto nodata; | ||
498 | |||
499 | retval->subh.ecn_cwr_hdr = | ||
500 | sctp_addto_chunk(retval, sizeof(cwr), &cwr); | ||
501 | |||
502 | /* RFC 2960 6.4 Multi-homed SCTP Endpoints | ||
503 | * | ||
504 | * An endpoint SHOULD transmit reply chunks (e.g., SACK, | ||
505 | 	 * HEARTBEAT ACK, etc.) to the same destination transport | ||
506 | 	 * address from which it received the DATA or control chunk | ||
507 | * to which it is replying. | ||
508 | * | ||
509 | * [Report a reduced congestion window back to where the ECNE | ||
510 | * came from.] | ||
511 | */ | ||
512 | if (chunk) | ||
513 | retval->transport = chunk->transport; | ||
514 | |||
515 | nodata: | ||
516 | return retval; | ||
517 | } | ||
518 | |||
519 | /* Make an ECNE chunk. This is a congestion experienced report. */ | ||
520 | struct sctp_chunk *sctp_make_ecne(const struct sctp_association *asoc, | ||
521 | const __u32 lowest_tsn) | ||
522 | { | ||
523 | struct sctp_chunk *retval; | ||
524 | sctp_ecnehdr_t ecne; | ||
525 | |||
526 | ecne.lowest_tsn = htonl(lowest_tsn); | ||
527 | retval = sctp_make_chunk(asoc, SCTP_CID_ECN_ECNE, 0, | ||
528 | sizeof(sctp_ecnehdr_t)); | ||
529 | if (!retval) | ||
530 | goto nodata; | ||
531 | retval->subh.ecne_hdr = | ||
532 | sctp_addto_chunk(retval, sizeof(ecne), &ecne); | ||
533 | |||
534 | nodata: | ||
535 | return retval; | ||
536 | } | ||
537 | |||
538 | /* Make a DATA chunk for the given association from the provided | ||
539 | * parameters. However, do not populate the data payload. | ||
540 | */ | ||
541 | struct sctp_chunk *sctp_make_datafrag_empty(struct sctp_association *asoc, | ||
542 | const struct sctp_sndrcvinfo *sinfo, | ||
543 | int data_len, __u8 flags, __u16 ssn) | ||
544 | { | ||
545 | struct sctp_chunk *retval; | ||
546 | struct sctp_datahdr dp; | ||
547 | int chunk_len; | ||
548 | |||
549 | /* We assign the TSN as LATE as possible, not here when | ||
550 | * creating the chunk. | ||
551 | */ | ||
552 | dp.tsn = 0; | ||
553 | dp.stream = htons(sinfo->sinfo_stream); | ||
554 | dp.ppid = sinfo->sinfo_ppid; | ||
555 | |||
556 | /* Set the flags for an unordered send. */ | ||
557 | if (sinfo->sinfo_flags & MSG_UNORDERED) { | ||
558 | flags |= SCTP_DATA_UNORDERED; | ||
559 | dp.ssn = 0; | ||
560 | } else | ||
561 | dp.ssn = htons(ssn); | ||
562 | |||
563 | chunk_len = sizeof(dp) + data_len; | ||
564 | retval = sctp_make_chunk(asoc, SCTP_CID_DATA, flags, chunk_len); | ||
565 | if (!retval) | ||
566 | goto nodata; | ||
567 | |||
568 | retval->subh.data_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp); | ||
569 | memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo)); | ||
570 | |||
571 | nodata: | ||
572 | return retval; | ||
573 | } | ||
574 | |||
575 | /* Create a selective acknowledgement (SACK) for the given | ||
576 | * association. This reports on which TSN's we've seen to date, | ||
577 | * including duplicates and gaps. | ||
578 | */ | ||
579 | struct sctp_chunk *sctp_make_sack(const struct sctp_association *asoc) | ||
580 | { | ||
581 | struct sctp_chunk *retval; | ||
582 | struct sctp_sackhdr sack; | ||
583 | int len; | ||
584 | __u32 ctsn; | ||
585 | __u16 num_gabs, num_dup_tsns; | ||
586 | struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map; | ||
587 | |||
588 | ctsn = sctp_tsnmap_get_ctsn(map); | ||
589 | SCTP_DEBUG_PRINTK("sackCTSNAck sent: 0x%x.\n", ctsn); | ||
590 | |||
591 | /* How much room is needed in the chunk? */ | ||
592 | num_gabs = sctp_tsnmap_num_gabs(map); | ||
593 | num_dup_tsns = sctp_tsnmap_num_dups(map); | ||
594 | |||
595 | /* Initialize the SACK header. */ | ||
596 | sack.cum_tsn_ack = htonl(ctsn); | ||
597 | sack.a_rwnd = htonl(asoc->a_rwnd); | ||
598 | sack.num_gap_ack_blocks = htons(num_gabs); | ||
599 | sack.num_dup_tsns = htons(num_dup_tsns); | ||
600 | |||
601 | len = sizeof(sack) | ||
602 | + sizeof(struct sctp_gap_ack_block) * num_gabs | ||
603 | + sizeof(__u32) * num_dup_tsns; | ||
604 | |||
605 | /* Create the chunk. */ | ||
606 | retval = sctp_make_chunk(asoc, SCTP_CID_SACK, 0, len); | ||
607 | if (!retval) | ||
608 | goto nodata; | ||
609 | |||
610 | /* RFC 2960 6.4 Multi-homed SCTP Endpoints | ||
611 | * | ||
612 | * An endpoint SHOULD transmit reply chunks (e.g., SACK, | ||
613 | * HEARTBEAT ACK, etc.) to the same destination transport | ||
614 | * address from which it received the DATA or control chunk to | ||
615 | * which it is replying. This rule should also be followed if | ||
616 | * the endpoint is bundling DATA chunks together with the | ||
617 | * reply chunk. | ||
618 | * | ||
619 | * However, when acknowledging multiple DATA chunks received | ||
620 | * in packets from different source addresses in a single | ||
621 | * SACK, the SACK chunk may be transmitted to one of the | ||
622 | * destination transport addresses from which the DATA or | ||
623 | * control chunks being acknowledged were received. | ||
624 | * | ||
625 | * [BUG: We do not implement the following paragraph. | ||
626 | * Perhaps we should remember the last transport we used for a | ||
627 | * SACK and avoid that (if possible) if we have seen any | ||
628 | * duplicates. --piggy] | ||
629 | * | ||
630 | * When a receiver of a duplicate DATA chunk sends a SACK to a | ||
631 | * multi- homed endpoint it MAY be beneficial to vary the | ||
632 | * destination address and not use the source address of the | ||
633 | * DATA chunk. The reason being that receiving a duplicate | ||
634 | * from a multi-homed endpoint might indicate that the return | ||
635 | * path (as specified in the source address of the DATA chunk) | ||
636 | * for the SACK is broken. | ||
637 | * | ||
638 | * [Send to the address from which we last received a DATA chunk.] | ||
639 | */ | ||
640 | retval->transport = asoc->peer.last_data_from; | ||
641 | |||
642 | retval->subh.sack_hdr = | ||
643 | sctp_addto_chunk(retval, sizeof(sack), &sack); | ||
644 | |||
645 | /* Add the gap ack block information. */ | ||
646 | if (num_gabs) | ||
647 | sctp_addto_chunk(retval, sizeof(__u32) * num_gabs, | ||
648 | sctp_tsnmap_get_gabs(map)); | ||
649 | |||
650 | /* Add the duplicate TSN information. */ | ||
651 | if (num_dup_tsns) | ||
652 | sctp_addto_chunk(retval, sizeof(__u32) * num_dup_tsns, | ||
653 | sctp_tsnmap_get_dups(map)); | ||
654 | |||
655 | nodata: | ||
656 | return retval; | ||
657 | } | ||
658 | |||
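A quick worked example of the length computation above, assuming the on-wire sizes from RFC 2960 (a 12-byte SACK-specific header, 4 bytes per gap ack block, 4 bytes per duplicate TSN): with 2 gap ack blocks and 1 duplicate TSN the variable part is 12 + 2*4 + 1*4 = 24 bytes, and sctp_make_chunk() then adds the 4-byte chunk header on top of that.

    /* Sketch only: mirrors the len calculation in sctp_make_sack(). */
    size_t sack_len = sizeof(struct sctp_sackhdr)              /* 12 */
                    + 2 * sizeof(struct sctp_gap_ack_block)    /*  8 */
                    + 1 * sizeof(__u32);                       /*  4 */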
659 | /* Make a SHUTDOWN chunk. */ | ||
660 | struct sctp_chunk *sctp_make_shutdown(const struct sctp_association *asoc, | ||
661 | const struct sctp_chunk *chunk) | ||
662 | { | ||
663 | struct sctp_chunk *retval; | ||
664 | sctp_shutdownhdr_t shut; | ||
665 | __u32 ctsn; | ||
666 | |||
667 | ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); | ||
668 | shut.cum_tsn_ack = htonl(ctsn); | ||
669 | |||
670 | retval = sctp_make_chunk(asoc, SCTP_CID_SHUTDOWN, 0, | ||
671 | sizeof(sctp_shutdownhdr_t)); | ||
672 | if (!retval) | ||
673 | goto nodata; | ||
674 | |||
675 | retval->subh.shutdown_hdr = | ||
676 | sctp_addto_chunk(retval, sizeof(shut), &shut); | ||
677 | |||
678 | if (chunk) | ||
679 | retval->transport = chunk->transport; | ||
680 | nodata: | ||
681 | return retval; | ||
682 | } | ||
683 | |||
684 | struct sctp_chunk *sctp_make_shutdown_ack(const struct sctp_association *asoc, | ||
685 | const struct sctp_chunk *chunk) | ||
686 | { | ||
687 | struct sctp_chunk *retval; | ||
688 | |||
689 | retval = sctp_make_chunk(asoc, SCTP_CID_SHUTDOWN_ACK, 0, 0); | ||
690 | |||
691 | /* RFC 2960 6.4 Multi-homed SCTP Endpoints | ||
692 | * | ||
693 | * An endpoint SHOULD transmit reply chunks (e.g., SACK, | ||
694 | 	 * HEARTBEAT ACK, etc.) to the same destination transport | ||
695 | 	 * address from which it received the DATA or control chunk | ||
696 | * to which it is replying. | ||
697 | * | ||
698 | * [ACK back to where the SHUTDOWN came from.] | ||
699 | */ | ||
700 | if (retval && chunk) | ||
701 | retval->transport = chunk->transport; | ||
702 | |||
703 | return retval; | ||
704 | } | ||
705 | |||
706 | struct sctp_chunk *sctp_make_shutdown_complete( | ||
707 | const struct sctp_association *asoc, | ||
708 | const struct sctp_chunk *chunk) | ||
709 | { | ||
710 | struct sctp_chunk *retval; | ||
711 | __u8 flags = 0; | ||
712 | |||
713 | /* Maybe set the T-bit if we have no association. */ | ||
714 | flags |= asoc ? 0 : SCTP_CHUNK_FLAG_T; | ||
715 | |||
716 | retval = sctp_make_chunk(asoc, SCTP_CID_SHUTDOWN_COMPLETE, flags, 0); | ||
717 | |||
718 | /* RFC 2960 6.4 Multi-homed SCTP Endpoints | ||
719 | * | ||
720 | * An endpoint SHOULD transmit reply chunks (e.g., SACK, | ||
721 | 	 * HEARTBEAT ACK, etc.) to the same destination transport | ||
722 | 	 * address from which it received the DATA or control chunk | ||
723 | * to which it is replying. | ||
724 | * | ||
725 | * [Report SHUTDOWN COMPLETE back to where the SHUTDOWN ACK | ||
726 | * came from.] | ||
727 | */ | ||
728 | if (retval && chunk) | ||
729 | retval->transport = chunk->transport; | ||
730 | |||
731 | return retval; | ||
732 | } | ||
733 | |||
734 | /* Create an ABORT. Note that we set the T bit if we have no | ||
735 | * association. | ||
736 | */ | ||
737 | struct sctp_chunk *sctp_make_abort(const struct sctp_association *asoc, | ||
738 | const struct sctp_chunk *chunk, | ||
739 | const size_t hint) | ||
740 | { | ||
741 | struct sctp_chunk *retval; | ||
742 | __u8 flags = 0; | ||
743 | |||
744 | /* Maybe set the T-bit if we have no association. */ | ||
745 | flags |= asoc ? 0 : SCTP_CHUNK_FLAG_T; | ||
746 | |||
747 | retval = sctp_make_chunk(asoc, SCTP_CID_ABORT, flags, hint); | ||
748 | |||
749 | /* RFC 2960 6.4 Multi-homed SCTP Endpoints | ||
750 | * | ||
751 | * An endpoint SHOULD transmit reply chunks (e.g., SACK, | ||
752 | 	 * HEARTBEAT ACK, etc.) to the same destination transport | ||
753 | 	 * address from which it received the DATA or control chunk | ||
754 | * to which it is replying. | ||
755 | * | ||
756 | * [ABORT back to where the offender came from.] | ||
757 | */ | ||
758 | if (retval && chunk) | ||
759 | retval->transport = chunk->transport; | ||
760 | |||
761 | return retval; | ||
762 | } | ||
763 | |||
764 | /* Helper to create ABORT with a NO_USER_DATA error. */ | ||
765 | struct sctp_chunk *sctp_make_abort_no_data( | ||
766 | const struct sctp_association *asoc, | ||
767 | const struct sctp_chunk *chunk, __u32 tsn) | ||
768 | { | ||
769 | struct sctp_chunk *retval; | ||
770 | __u32 payload; | ||
771 | |||
772 | retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) | ||
773 | + sizeof(tsn)); | ||
774 | |||
775 | if (!retval) | ||
776 | goto no_mem; | ||
777 | |||
778 | /* Put the tsn back into network byte order. */ | ||
779 | payload = htonl(tsn); | ||
780 | sctp_init_cause(retval, SCTP_ERROR_NO_DATA, (const void *)&payload, | ||
781 | sizeof(payload)); | ||
782 | |||
783 | /* RFC 2960 6.4 Multi-homed SCTP Endpoints | ||
784 | * | ||
785 | * An endpoint SHOULD transmit reply chunks (e.g., SACK, | ||
786 | 	 * HEARTBEAT ACK, etc.) to the same destination transport | ||
787 | 	 * address from which it received the DATA or control chunk | ||
788 | * to which it is replying. | ||
789 | * | ||
790 | * [ABORT back to where the offender came from.] | ||
791 | */ | ||
792 | if (chunk) | ||
793 | retval->transport = chunk->transport; | ||
794 | |||
795 | no_mem: | ||
796 | return retval; | ||
797 | } | ||
798 | |||
799 | /* Helper to create ABORT with a SCTP_ERROR_USER_ABORT error. */ | ||
800 | struct sctp_chunk *sctp_make_abort_user(const struct sctp_association *asoc, | ||
801 | const struct sctp_chunk *chunk, | ||
802 | const struct msghdr *msg) | ||
803 | { | ||
804 | struct sctp_chunk *retval; | ||
805 | void *payload = NULL, *payoff; | ||
806 | size_t paylen = 0; | ||
807 | struct iovec *iov = NULL; | ||
808 | int iovlen = 0; | ||
809 | |||
810 | if (msg) { | ||
811 | iov = msg->msg_iov; | ||
812 | iovlen = msg->msg_iovlen; | ||
813 | paylen = get_user_iov_size(iov, iovlen); | ||
814 | } | ||
815 | |||
816 | retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen); | ||
817 | if (!retval) | ||
818 | goto err_chunk; | ||
819 | |||
820 | if (paylen) { | ||
821 | /* Put the msg_iov together into payload. */ | ||
822 | payload = kmalloc(paylen, GFP_ATOMIC); | ||
823 | if (!payload) | ||
824 | goto err_payload; | ||
825 | payoff = payload; | ||
826 | |||
827 | for (; iovlen > 0; --iovlen) { | ||
828 | if (copy_from_user(payoff, iov->iov_base,iov->iov_len)) | ||
829 | goto err_copy; | ||
830 | payoff += iov->iov_len; | ||
831 | iov++; | ||
832 | } | ||
833 | } | ||
834 | |||
835 | sctp_init_cause(retval, SCTP_ERROR_USER_ABORT, payload, paylen); | ||
836 | |||
837 | if (paylen) | ||
838 | kfree(payload); | ||
839 | |||
840 | return retval; | ||
841 | |||
842 | err_copy: | ||
843 | kfree(payload); | ||
844 | err_payload: | ||
845 | sctp_chunk_free(retval); | ||
846 | retval = NULL; | ||
847 | err_chunk: | ||
848 | return retval; | ||
849 | } | ||
850 | |||
851 | /* Make an ABORT chunk with a PROTOCOL VIOLATION cause code. */ | ||
852 | struct sctp_chunk *sctp_make_abort_violation( | ||
853 | const struct sctp_association *asoc, | ||
854 | const struct sctp_chunk *chunk, | ||
855 | const __u8 *payload, | ||
856 | const size_t paylen) | ||
857 | { | ||
858 | struct sctp_chunk *retval; | ||
859 | struct sctp_paramhdr phdr; | ||
860 | |||
861 | retval = sctp_make_abort(asoc, chunk, sizeof(sctp_errhdr_t) + paylen | ||
862 | + sizeof(sctp_chunkhdr_t)); | ||
863 | if (!retval) | ||
864 | goto end; | ||
865 | |||
866 | sctp_init_cause(retval, SCTP_ERROR_PROTO_VIOLATION, payload, paylen); | ||
867 | |||
868 | phdr.type = htons(chunk->chunk_hdr->type); | ||
869 | phdr.length = chunk->chunk_hdr->length; | ||
870 | sctp_addto_chunk(retval, sizeof(sctp_paramhdr_t), &phdr); | ||
871 | |||
872 | end: | ||
873 | return retval; | ||
874 | } | ||
875 | |||
876 | /* Make a HEARTBEAT chunk. */ | ||
877 | struct sctp_chunk *sctp_make_heartbeat(const struct sctp_association *asoc, | ||
878 | const struct sctp_transport *transport, | ||
879 | const void *payload, const size_t paylen) | ||
880 | { | ||
881 | struct sctp_chunk *retval = sctp_make_chunk(asoc, SCTP_CID_HEARTBEAT, | ||
882 | 0, paylen); | ||
883 | |||
884 | if (!retval) | ||
885 | goto nodata; | ||
886 | |||
887 | /* Cast away the 'const', as this is just telling the chunk | ||
888 | * what transport it belongs to. | ||
889 | */ | ||
890 | retval->transport = (struct sctp_transport *) transport; | ||
891 | retval->subh.hbs_hdr = sctp_addto_chunk(retval, paylen, payload); | ||
892 | |||
893 | nodata: | ||
894 | return retval; | ||
895 | } | ||
896 | |||
897 | struct sctp_chunk *sctp_make_heartbeat_ack(const struct sctp_association *asoc, | ||
898 | const struct sctp_chunk *chunk, | ||
899 | const void *payload, const size_t paylen) | ||
900 | { | ||
901 | struct sctp_chunk *retval; | ||
902 | |||
903 | retval = sctp_make_chunk(asoc, SCTP_CID_HEARTBEAT_ACK, 0, paylen); | ||
904 | if (!retval) | ||
905 | goto nodata; | ||
906 | |||
907 | retval->subh.hbs_hdr = sctp_addto_chunk(retval, paylen, payload); | ||
908 | |||
909 | /* RFC 2960 6.4 Multi-homed SCTP Endpoints | ||
910 | * | ||
911 | * An endpoint SHOULD transmit reply chunks (e.g., SACK, | ||
912 | 	 * HEARTBEAT ACK, etc.) to the same destination transport | ||
913 | 	 * address from which it received the DATA or control chunk | ||
914 | * to which it is replying. | ||
915 | * | ||
916 | * [HBACK back to where the HEARTBEAT came from.] | ||
917 | */ | ||
918 | if (chunk) | ||
919 | retval->transport = chunk->transport; | ||
920 | |||
921 | nodata: | ||
922 | return retval; | ||
923 | } | ||
924 | |||
925 | /* Create an Operation Error chunk with the specified space reserved. | ||
926 | * This routine can be used for containing multiple causes in the chunk. | ||
927 | */ | ||
928 | static struct sctp_chunk *sctp_make_op_error_space( | ||
929 | const struct sctp_association *asoc, | ||
930 | const struct sctp_chunk *chunk, | ||
931 | size_t size) | ||
932 | { | ||
933 | struct sctp_chunk *retval; | ||
934 | |||
935 | retval = sctp_make_chunk(asoc, SCTP_CID_ERROR, 0, | ||
936 | sizeof(sctp_errhdr_t) + size); | ||
937 | if (!retval) | ||
938 | goto nodata; | ||
939 | |||
940 | /* RFC 2960 6.4 Multi-homed SCTP Endpoints | ||
941 | * | ||
942 | * An endpoint SHOULD transmit reply chunks (e.g., SACK, | ||
943 | * HEARTBEAT ACK, etc.) to the same destination transport | ||
944 | * address from which it received the DATA or control chunk | ||
945 | * to which it is replying. | ||
946 | * | ||
947 | */ | ||
948 | if (chunk) | ||
949 | retval->transport = chunk->transport; | ||
950 | |||
951 | nodata: | ||
952 | return retval; | ||
953 | } | ||
954 | |||
955 | /* Create an Operation Error chunk. */ | ||
956 | struct sctp_chunk *sctp_make_op_error(const struct sctp_association *asoc, | ||
957 | const struct sctp_chunk *chunk, | ||
958 | __u16 cause_code, const void *payload, | ||
959 | size_t paylen) | ||
960 | { | ||
961 | struct sctp_chunk *retval; | ||
962 | |||
963 | retval = sctp_make_op_error_space(asoc, chunk, paylen); | ||
964 | if (!retval) | ||
965 | goto nodata; | ||
966 | |||
967 | sctp_init_cause(retval, cause_code, payload, paylen); | ||
968 | |||
969 | nodata: | ||
970 | return retval; | ||
971 | } | ||
972 | |||
973 | /******************************************************************** | ||
974 | * 2nd Level Abstractions | ||
975 | ********************************************************************/ | ||
976 | |||
977 | /* Turn an skb into a chunk. | ||
978 | * FIXME: Eventually move the structure directly inside the skb->cb[]. | ||
979 | */ | ||
980 | struct sctp_chunk *sctp_chunkify(struct sk_buff *skb, | ||
981 | const struct sctp_association *asoc, | ||
982 | struct sock *sk) | ||
983 | { | ||
984 | struct sctp_chunk *retval; | ||
985 | |||
986 | retval = kmem_cache_alloc(sctp_chunk_cachep, SLAB_ATOMIC); | ||
987 | |||
988 | if (!retval) | ||
989 | goto nodata; | ||
990 | memset(retval, 0, sizeof(struct sctp_chunk)); | ||
991 | |||
992 | if (!sk) { | ||
993 | SCTP_DEBUG_PRINTK("chunkifying skb %p w/o an sk\n", skb); | ||
994 | } | ||
995 | |||
996 | retval->skb = skb; | ||
997 | retval->asoc = (struct sctp_association *)asoc; | ||
998 | retval->resent = 0; | ||
999 | retval->has_tsn = 0; | ||
1000 | retval->has_ssn = 0; | ||
1001 | retval->rtt_in_progress = 0; | ||
1002 | retval->sent_at = 0; | ||
1003 | retval->singleton = 1; | ||
1004 | retval->end_of_packet = 0; | ||
1005 | retval->ecn_ce_done = 0; | ||
1006 | retval->pdiscard = 0; | ||
1007 | |||
1008 | /* sctpimpguide-05.txt Section 2.8.2 | ||
1009 | * M1) Each time a new DATA chunk is transmitted | ||
1010 | * set the 'TSN.Missing.Report' count for that TSN to 0. The | ||
1011 | * 'TSN.Missing.Report' count will be used to determine missing chunks | ||
1012 | * and when to fast retransmit. | ||
1013 | */ | ||
1014 | retval->tsn_missing_report = 0; | ||
1015 | retval->tsn_gap_acked = 0; | ||
1016 | retval->fast_retransmit = 0; | ||
1017 | |||
1018 | /* If this is a fragmented message, track all fragments | ||
1019 | * of the message (for SEND_FAILED). | ||
1020 | */ | ||
1021 | retval->msg = NULL; | ||
1022 | |||
1023 | /* Polish the bead hole. */ | ||
1024 | INIT_LIST_HEAD(&retval->transmitted_list); | ||
1025 | INIT_LIST_HEAD(&retval->frag_list); | ||
1026 | SCTP_DBG_OBJCNT_INC(chunk); | ||
1027 | atomic_set(&retval->refcnt, 1); | ||
1028 | |||
1029 | nodata: | ||
1030 | return retval; | ||
1031 | } | ||
1032 | |||
1033 | /* Set chunk->source and dest based on the IP header in chunk->skb. */ | ||
1034 | void sctp_init_addrs(struct sctp_chunk *chunk, union sctp_addr *src, | ||
1035 | union sctp_addr *dest) | ||
1036 | { | ||
1037 | memcpy(&chunk->source, src, sizeof(union sctp_addr)); | ||
1038 | memcpy(&chunk->dest, dest, sizeof(union sctp_addr)); | ||
1039 | } | ||
1040 | |||
1041 | /* Extract the source address from a chunk. */ | ||
1042 | const union sctp_addr *sctp_source(const struct sctp_chunk *chunk) | ||
1043 | { | ||
1044 | /* If we have a known transport, use that. */ | ||
1045 | if (chunk->transport) { | ||
1046 | return &chunk->transport->ipaddr; | ||
1047 | } else { | ||
1048 | /* Otherwise, extract it from the IP header. */ | ||
1049 | return &chunk->source; | ||
1050 | } | ||
1051 | } | ||
1052 | |||
1053 | /* Create a new chunk, setting the type and flags headers from the | ||
1054 | * arguments, reserving enough space for a 'paylen' byte payload. | ||
1055 | */ | ||
1056 | SCTP_STATIC | ||
1057 | struct sctp_chunk *sctp_make_chunk(const struct sctp_association *asoc, | ||
1058 | __u8 type, __u8 flags, int paylen) | ||
1059 | { | ||
1060 | struct sctp_chunk *retval; | ||
1061 | sctp_chunkhdr_t *chunk_hdr; | ||
1062 | struct sk_buff *skb; | ||
1063 | struct sock *sk; | ||
1064 | |||
1065 | 	/* No need to allocate link-layer (LL) headroom here; this is only a chunk. */ | ||
1066 | skb = alloc_skb(WORD_ROUND(sizeof(sctp_chunkhdr_t) + paylen), | ||
1067 | GFP_ATOMIC); | ||
1068 | if (!skb) | ||
1069 | goto nodata; | ||
1070 | |||
1071 | /* Make room for the chunk header. */ | ||
1072 | chunk_hdr = (sctp_chunkhdr_t *)skb_put(skb, sizeof(sctp_chunkhdr_t)); | ||
1073 | chunk_hdr->type = type; | ||
1074 | chunk_hdr->flags = flags; | ||
1075 | chunk_hdr->length = htons(sizeof(sctp_chunkhdr_t)); | ||
1076 | |||
1077 | sk = asoc ? asoc->base.sk : NULL; | ||
1078 | retval = sctp_chunkify(skb, asoc, sk); | ||
1079 | if (!retval) { | ||
1080 | kfree_skb(skb); | ||
1081 | goto nodata; | ||
1082 | } | ||
1083 | |||
1084 | retval->chunk_hdr = chunk_hdr; | ||
1085 | retval->chunk_end = ((__u8 *)chunk_hdr) + sizeof(struct sctp_chunkhdr); | ||
1086 | |||
1087 | /* Set the skb to the belonging sock for accounting. */ | ||
1088 | skb->sk = sk; | ||
1089 | |||
1090 | return retval; | ||
1091 | nodata: | ||
1092 | return NULL; | ||
1093 | } | ||
1094 | |||
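WORD_ROUND() above rounds the allocation up to a 4-byte boundary, since RFC 2960 requires every chunk to be padded to a multiple of four bytes. Assuming the usual definition from the SCTP headers, it is just bit masking:

    /* Round a length up to the next multiple of 4 (sketch of the macro). */
    #define WORD_ROUND(s) (((s) + 3) & ~3)

    /* e.g. WORD_ROUND(5) == 8, WORD_ROUND(8) == 8 */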
1095 | |||
1096 | /* Release the memory occupied by a chunk. */ | ||
1097 | static void sctp_chunk_destroy(struct sctp_chunk *chunk) | ||
1098 | { | ||
1099 | /* Free the chunk skb data and the SCTP_chunk stub itself. */ | ||
1100 | dev_kfree_skb(chunk->skb); | ||
1101 | |||
1102 | SCTP_DBG_OBJCNT_DEC(chunk); | ||
1103 | kmem_cache_free(sctp_chunk_cachep, chunk); | ||
1104 | } | ||
1105 | |||
1106 | /* Possibly, free the chunk. */ | ||
1107 | void sctp_chunk_free(struct sctp_chunk *chunk) | ||
1108 | { | ||
1109 | /* Make sure that we are not on any list. */ | ||
1110 | skb_unlink((struct sk_buff *) chunk); | ||
1111 | list_del_init(&chunk->transmitted_list); | ||
1112 | |||
1113 | /* Release our reference on the message tracker. */ | ||
1114 | if (chunk->msg) | ||
1115 | sctp_datamsg_put(chunk->msg); | ||
1116 | |||
1117 | sctp_chunk_put(chunk); | ||
1118 | } | ||
1119 | |||
1120 | /* Grab a reference to the chunk. */ | ||
1121 | void sctp_chunk_hold(struct sctp_chunk *ch) | ||
1122 | { | ||
1123 | atomic_inc(&ch->refcnt); | ||
1124 | } | ||
1125 | |||
1126 | /* Release a reference to the chunk. */ | ||
1127 | void sctp_chunk_put(struct sctp_chunk *ch) | ||
1128 | { | ||
1129 | if (atomic_dec_and_test(&ch->refcnt)) | ||
1130 | sctp_chunk_destroy(ch); | ||
1131 | } | ||
1132 | |||
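The hold/put pair above is the usual reference-counting discipline: a chunk starts with a refcount of 1 (set in sctp_chunkify()), queues and other long-lived users take their own reference, and the chunk is destroyed only when the last reference is dropped. A sketch of a caller that wants to keep a chunk alive across some work of its own:

    sctp_chunk_hold(chunk);
    /* ... inspect chunk->chunk_hdr / chunk->skb without owning a queue ... */
    sctp_chunk_put(chunk);      /* frees the chunk if this was the last reference */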
1133 | /* Append bytes to the end of a chunk. Will panic if chunk is not big | ||
1134 | * enough. | ||
1135 | */ | ||
1136 | void *sctp_addto_chunk(struct sctp_chunk *chunk, int len, const void *data) | ||
1137 | { | ||
1138 | void *target; | ||
1139 | void *padding; | ||
1140 | int chunklen = ntohs(chunk->chunk_hdr->length); | ||
1141 | int padlen = chunklen % 4; | ||
1142 | |||
1143 | padding = skb_put(chunk->skb, padlen); | ||
1144 | target = skb_put(chunk->skb, len); | ||
1145 | |||
1146 | memset(padding, 0, padlen); | ||
1147 | memcpy(target, data, len); | ||
1148 | |||
1149 | /* Adjust the chunk length field. */ | ||
1150 | chunk->chunk_hdr->length = htons(chunklen + padlen + len); | ||
1151 | chunk->chunk_end = chunk->skb->tail; | ||
1152 | |||
1153 | return target; | ||
1154 | } | ||
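
As a usage sketch only (not code from this file): the make/addto pair above is the pattern the rest of this file uses to build chunks, for example sctp_make_asconf() further down. The parameter struct and the helper name here are hypothetical.

/* Hypothetical 8-byte parameter used only for this sketch. */
struct example_param {
	__u16 type;
	__u16 length;
	__u32 value;
};

static struct sctp_chunk *sctp_make_example_chunk(
				const struct sctp_association *asoc,
				const struct example_param *p)
{
	struct sctp_chunk *retval;

	/* Reserve room for the payload; sctp_make_chunk() writes the
	 * chunk header and rounds the allocation to a 4-byte boundary.
	 */
	retval = sctp_make_chunk(asoc, SCTP_CID_ERROR, 0, sizeof(*p));
	if (!retval)
		return NULL;

	/* Appending the body also grows chunk_hdr->length. */
	sctp_addto_chunk(retval, sizeof(*p), p);
	return retval;
}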
1155 | |||
1156 | /* Append bytes from user space to the end of a chunk. Will panic if | ||
1157 | * chunk is not big enough. | ||
1158 | * Returns a kernel err value. | ||
1159 | */ | ||
1160 | int sctp_user_addto_chunk(struct sctp_chunk *chunk, int off, int len, | ||
1161 | struct iovec *data) | ||
1162 | { | ||
1163 | __u8 *target; | ||
1164 | int err = 0; | ||
1165 | |||
1166 | /* Make room in chunk for data. */ | ||
1167 | target = skb_put(chunk->skb, len); | ||
1168 | |||
1169 | /* Copy data (whole iovec) into chunk */ | ||
1170 | if ((err = memcpy_fromiovecend(target, data, off, len))) | ||
1171 | goto out; | ||
1172 | |||
1173 | /* Adjust the chunk length field. */ | ||
1174 | chunk->chunk_hdr->length = | ||
1175 | htons(ntohs(chunk->chunk_hdr->length) + len); | ||
1176 | chunk->chunk_end = chunk->skb->tail; | ||
1177 | |||
1178 | out: | ||
1179 | return err; | ||
1180 | } | ||
1181 | |||
1182 | /* Helper function to assign an SSN if needed. This assumes that both | ||
1183 | * the data_hdr and association have already been assigned. | ||
1184 | */ | ||
1185 | void sctp_chunk_assign_ssn(struct sctp_chunk *chunk) | ||
1186 | { | ||
1187 | __u16 ssn; | ||
1188 | __u16 sid; | ||
1189 | |||
1190 | if (chunk->has_ssn) | ||
1191 | return; | ||
1192 | |||
1193 | /* This is the last possible instant to assign an SSN. */ | ||
1194 | if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) { | ||
1195 | ssn = 0; | ||
1196 | } else { | ||
1197 | sid = htons(chunk->subh.data_hdr->stream); | ||
1198 | if (chunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG) | ||
1199 | ssn = sctp_ssn_next(&chunk->asoc->ssnmap->out, sid); | ||
1200 | else | ||
1201 | ssn = sctp_ssn_peek(&chunk->asoc->ssnmap->out, sid); | ||
1202 | ssn = htons(ssn); | ||
1203 | } | ||
1204 | |||
1205 | chunk->subh.data_hdr->ssn = ssn; | ||
1206 | chunk->has_ssn = 1; | ||
1207 | } | ||
1208 | |||
1209 | /* Helper function to assign a TSN if needed. This assumes that both | ||
1210 | * the data_hdr and association have already been assigned. | ||
1211 | */ | ||
1212 | void sctp_chunk_assign_tsn(struct sctp_chunk *chunk) | ||
1213 | { | ||
1214 | if (!chunk->has_tsn) { | ||
1215 | /* This is the last possible instant to | ||
1216 | * assign a TSN. | ||
1217 | */ | ||
1218 | chunk->subh.data_hdr->tsn = | ||
1219 | htonl(sctp_association_get_next_tsn(chunk->asoc)); | ||
1220 | chunk->has_tsn = 1; | ||
1221 | } | ||
1222 | } | ||
1223 | |||
1224 | /* Create a CLOSED association to use with an incoming packet. */ | ||
1225 | struct sctp_association *sctp_make_temp_asoc(const struct sctp_endpoint *ep, | ||
1226 | struct sctp_chunk *chunk, int gfp) | ||
1227 | { | ||
1228 | struct sctp_association *asoc; | ||
1229 | struct sk_buff *skb; | ||
1230 | sctp_scope_t scope; | ||
1231 | struct sctp_af *af; | ||
1232 | |||
1233 | /* Create the bare association. */ | ||
1234 | scope = sctp_scope(sctp_source(chunk)); | ||
1235 | asoc = sctp_association_new(ep, ep->base.sk, scope, gfp); | ||
1236 | if (!asoc) | ||
1237 | goto nodata; | ||
1238 | asoc->temp = 1; | ||
1239 | skb = chunk->skb; | ||
1240 | /* Create an entry for the source address of the packet. */ | ||
1241 | af = sctp_get_af_specific(ipver2af(skb->nh.iph->version)); | ||
1242 | if (unlikely(!af)) | ||
1243 | goto fail; | ||
1244 | af->from_skb(&asoc->c.peer_addr, skb, 1); | ||
1245 | nodata: | ||
1246 | return asoc; | ||
1247 | |||
1248 | fail: | ||
1249 | sctp_association_free(asoc); | ||
1250 | return NULL; | ||
1251 | } | ||
1252 | |||
1253 | /* Build a cookie representing asoc. | ||
1254 | * This INCLUDES the param header needed to put the cookie in the INIT ACK. | ||
1255 | */ | ||
1256 | static sctp_cookie_param_t *sctp_pack_cookie(const struct sctp_endpoint *ep, | ||
1257 | const struct sctp_association *asoc, | ||
1258 | const struct sctp_chunk *init_chunk, | ||
1259 | int *cookie_len, | ||
1260 | const __u8 *raw_addrs, int addrs_len) | ||
1261 | { | ||
1262 | sctp_cookie_param_t *retval; | ||
1263 | struct sctp_signed_cookie *cookie; | ||
1264 | struct scatterlist sg; | ||
1265 | int headersize, bodysize; | ||
1266 | unsigned int keylen; | ||
1267 | char *key; | ||
1268 | |||
1269 | headersize = sizeof(sctp_paramhdr_t) + SCTP_SECRET_SIZE; | ||
1270 | bodysize = sizeof(struct sctp_cookie) | ||
1271 | + ntohs(init_chunk->chunk_hdr->length) + addrs_len; | ||
1272 | |||
1273 | /* Pad out the cookie to a multiple of SCTP_COOKIE_MULTIPLE to make | ||
1274 | * the signature functions simpler to write. | ||
1275 | */ | ||
1276 | if (bodysize % SCTP_COOKIE_MULTIPLE) | ||
1277 | bodysize += SCTP_COOKIE_MULTIPLE | ||
1278 | - (bodysize % SCTP_COOKIE_MULTIPLE); | ||
1279 | *cookie_len = headersize + bodysize; | ||
1280 | |||
1281 | retval = (sctp_cookie_param_t *)kmalloc(*cookie_len, GFP_ATOMIC); | ||
1282 | |||
1283 | if (!retval) { | ||
1284 | *cookie_len = 0; | ||
1285 | goto nodata; | ||
1286 | } | ||
1287 | |||
1288 | /* Clear this memory since we are sending this data structure | ||
1289 | * out on the network. | ||
1290 | */ | ||
1291 | memset(retval, 0x00, *cookie_len); | ||
1292 | cookie = (struct sctp_signed_cookie *) retval->body; | ||
1293 | |||
1294 | /* Set up the parameter header. */ | ||
1295 | retval->p.type = SCTP_PARAM_STATE_COOKIE; | ||
1296 | retval->p.length = htons(*cookie_len); | ||
1297 | |||
1298 | /* Copy the cookie part of the association itself. */ | ||
1299 | cookie->c = asoc->c; | ||
1300 | /* Save the raw address list length in the cookie. */ | ||
1301 | cookie->c.raw_addr_list_len = addrs_len; | ||
1302 | |||
1303 | /* Remember PR-SCTP capability. */ | ||
1304 | cookie->c.prsctp_capable = asoc->peer.prsctp_capable; | ||
1305 | |||
1306 | /* Save adaption indication in the cookie. */ | ||
1307 | cookie->c.adaption_ind = asoc->peer.adaption_ind; | ||
1308 | |||
1309 | /* Set an expiration time for the cookie. */ | ||
1310 | do_gettimeofday(&cookie->c.expiration); | ||
1311 | TIMEVAL_ADD(asoc->cookie_life, cookie->c.expiration); | ||
1312 | |||
1313 | /* Copy the peer's init packet. */ | ||
1314 | memcpy(&cookie->c.peer_init[0], init_chunk->chunk_hdr, | ||
1315 | ntohs(init_chunk->chunk_hdr->length)); | ||
1316 | |||
1317 | /* Copy the raw local address list of the association. */ | ||
1318 | memcpy((__u8 *)&cookie->c.peer_init[0] + | ||
1319 | ntohs(init_chunk->chunk_hdr->length), raw_addrs, addrs_len); | ||
1320 | |||
1321 | if (sctp_sk(ep->base.sk)->hmac) { | ||
1322 | /* Sign the message. */ | ||
1323 | sg.page = virt_to_page(&cookie->c); | ||
1324 | sg.offset = (unsigned long)(&cookie->c) % PAGE_SIZE; | ||
1325 | sg.length = bodysize; | ||
1326 | keylen = SCTP_SECRET_SIZE; | ||
1327 | key = (char *)ep->secret_key[ep->current_key]; | ||
1328 | |||
1329 | sctp_crypto_hmac(sctp_sk(ep->base.sk)->hmac, key, &keylen, | ||
1330 | &sg, 1, cookie->signature); | ||
1331 | } | ||
1332 | |||
1333 | nodata: | ||
1334 | return retval; | ||
1335 | } | ||
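
For orientation, a rough picture of the cookie parameter laid out by the function above. The field order follows the code; the SECRET and COOKIE_MULTIPLE sizes are left symbolic rather than restated.

/*
 *   +------------------------------+ <- retval->p (sctp_paramhdr_t)
 *   | type   = SCTP_PARAM_STATE_COOKIE
 *   | length = headersize + bodysize
 *   +------------------------------+ <- retval->body
 *   | HMAC signature               |   SCTP_SECRET_SIZE bytes
 *   +------------------------------+ <- cookie->c, start of signed region
 *   | struct sctp_cookie           |
 *   | copy of the peer's INIT      |
 *   | raw local address list       |
 *   | zero padding                 |   up to a SCTP_COOKIE_MULTIPLE boundary
 *   +------------------------------+
 */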
1336 | |||
1337 | /* Unpack the cookie from COOKIE ECHO chunk, recreating the association. */ | ||
1338 | struct sctp_association *sctp_unpack_cookie( | ||
1339 | const struct sctp_endpoint *ep, | ||
1340 | const struct sctp_association *asoc, | ||
1341 | struct sctp_chunk *chunk, int gfp, | ||
1342 | int *error, struct sctp_chunk **errp) | ||
1343 | { | ||
1344 | struct sctp_association *retval = NULL; | ||
1345 | struct sctp_signed_cookie *cookie; | ||
1346 | struct sctp_cookie *bear_cookie; | ||
1347 | int headersize, bodysize, fixed_size; | ||
1348 | __u8 digest[SCTP_SIGNATURE_SIZE]; | ||
1349 | struct scatterlist sg; | ||
1350 | unsigned int keylen, len; | ||
1351 | char *key; | ||
1352 | sctp_scope_t scope; | ||
1353 | struct sk_buff *skb = chunk->skb; | ||
1354 | |||
1355 | headersize = sizeof(sctp_chunkhdr_t) + SCTP_SECRET_SIZE; | ||
1356 | bodysize = ntohs(chunk->chunk_hdr->length) - headersize; | ||
1357 | fixed_size = headersize + sizeof(struct sctp_cookie); | ||
1358 | |||
1359 | /* Verify that the chunk looks like it even has a cookie. | ||
1360 | * There must be enough room for our cookie and our peer's | ||
1361 | * INIT chunk. | ||
1362 | */ | ||
1363 | len = ntohs(chunk->chunk_hdr->length); | ||
1364 | if (len < fixed_size + sizeof(struct sctp_chunkhdr)) | ||
1365 | goto malformed; | ||
1366 | |||
1367 | /* Verify that the cookie has been padded out. */ | ||
1368 | if (bodysize % SCTP_COOKIE_MULTIPLE) | ||
1369 | goto malformed; | ||
1370 | |||
1371 | /* Process the cookie. */ | ||
1372 | cookie = chunk->subh.cookie_hdr; | ||
1373 | bear_cookie = &cookie->c; | ||
1374 | |||
1375 | if (!sctp_sk(ep->base.sk)->hmac) | ||
1376 | goto no_hmac; | ||
1377 | |||
1378 | /* Check the signature. */ | ||
1379 | keylen = SCTP_SECRET_SIZE; | ||
1380 | sg.page = virt_to_page(bear_cookie); | ||
1381 | sg.offset = (unsigned long)(bear_cookie) % PAGE_SIZE; | ||
1382 | sg.length = bodysize; | ||
1383 | key = (char *)ep->secret_key[ep->current_key]; | ||
1384 | |||
1385 | memset(digest, 0x00, sizeof(digest)); | ||
1386 | sctp_crypto_hmac(sctp_sk(ep->base.sk)->hmac, key, &keylen, &sg, | ||
1387 | 1, digest); | ||
1388 | |||
1389 | if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) { | ||
1390 | /* Try the previous key. */ | ||
1391 | key = (char *)ep->secret_key[ep->last_key]; | ||
1392 | memset(digest, 0x00, sizeof(digest)); | ||
1393 | sctp_crypto_hmac(sctp_sk(ep->base.sk)->hmac, key, &keylen, | ||
1394 | &sg, 1, digest); | ||
1395 | |||
1396 | if (memcmp(digest, cookie->signature, SCTP_SIGNATURE_SIZE)) { | ||
1397 | /* Yikes! Still bad signature! */ | ||
1398 | *error = -SCTP_IERROR_BAD_SIG; | ||
1399 | goto fail; | ||
1400 | } | ||
1401 | } | ||
1402 | |||
1403 | no_hmac: | ||
1404 | /* IG Section 2.35.2: | ||
1405 | * 3) Compare the port numbers and the verification tag contained | ||
1406 | * within the COOKIE ECHO chunk to the actual port numbers and the | ||
1407 | * verification tag within the SCTP common header of the received | ||
1408 | * packet. If these values do not match the packet MUST be silently | ||
1409 | * discarded, | ||
1410 | */ | ||
1411 | if (ntohl(chunk->sctp_hdr->vtag) != bear_cookie->my_vtag) { | ||
1412 | *error = -SCTP_IERROR_BAD_TAG; | ||
1413 | goto fail; | ||
1414 | } | ||
1415 | |||
1416 | if (ntohs(chunk->sctp_hdr->source) != bear_cookie->peer_addr.v4.sin_port || | ||
1417 | ntohs(chunk->sctp_hdr->dest) != bear_cookie->my_port) { | ||
1418 | *error = -SCTP_IERROR_BAD_PORTS; | ||
1419 | goto fail; | ||
1420 | } | ||
1421 | |||
1422 | /* Check to see if the cookie is stale. If there is already | ||
1423 | * an association, there is no need to check the cookie's expiration | ||
1424 | * for the init-collision case of a lost COOKIE ACK. | ||
1425 | */ | ||
1426 | if (!asoc && tv_lt(bear_cookie->expiration, skb->stamp)) { | ||
1427 | __u16 len; | ||
1428 | /* | ||
1429 | * Section 3.3.10.3 Stale Cookie Error (3) | ||
1430 | * | ||
1431 | * Cause of error | ||
1432 | * --------------- | ||
1433 | * Stale Cookie Error: Indicates the receipt of a valid State | ||
1434 | * Cookie that has expired. | ||
1435 | */ | ||
1436 | len = ntohs(chunk->chunk_hdr->length); | ||
1437 | *errp = sctp_make_op_error_space(asoc, chunk, len); | ||
1438 | if (*errp) { | ||
1439 | suseconds_t usecs = (skb->stamp.tv_sec - | ||
1440 | bear_cookie->expiration.tv_sec) * 1000000L + | ||
1441 | skb->stamp.tv_usec - | ||
1442 | bear_cookie->expiration.tv_usec; | ||
1443 | |||
1444 | usecs = htonl(usecs); | ||
1445 | sctp_init_cause(*errp, SCTP_ERROR_STALE_COOKIE, | ||
1446 | &usecs, sizeof(usecs)); | ||
1447 | *error = -SCTP_IERROR_STALE_COOKIE; | ||
1448 | } else | ||
1449 | *error = -SCTP_IERROR_NOMEM; | ||
1450 | |||
1451 | goto fail; | ||
1452 | } | ||
1453 | |||
1454 | /* Make a new base association. */ | ||
1455 | scope = sctp_scope(sctp_source(chunk)); | ||
1456 | retval = sctp_association_new(ep, ep->base.sk, scope, gfp); | ||
1457 | if (!retval) { | ||
1458 | *error = -SCTP_IERROR_NOMEM; | ||
1459 | goto fail; | ||
1460 | } | ||
1461 | |||
1462 | /* Set up our peer's port number. */ | ||
1463 | retval->peer.port = ntohs(chunk->sctp_hdr->source); | ||
1464 | |||
1465 | /* Populate the association from the cookie. */ | ||
1466 | memcpy(&retval->c, bear_cookie, sizeof(*bear_cookie)); | ||
1467 | |||
1468 | if (sctp_assoc_set_bind_addr_from_cookie(retval, bear_cookie, | ||
1469 | GFP_ATOMIC) < 0) { | ||
1470 | *error = -SCTP_IERROR_NOMEM; | ||
1471 | goto fail; | ||
1472 | } | ||
1473 | |||
1474 | /* Also, add the destination address. */ | ||
1475 | if (list_empty(&retval->base.bind_addr.address_list)) { | ||
1476 | sctp_add_bind_addr(&retval->base.bind_addr, &chunk->dest, | ||
1477 | GFP_ATOMIC); | ||
1478 | } | ||
1479 | |||
1480 | retval->next_tsn = retval->c.initial_tsn; | ||
1481 | retval->ctsn_ack_point = retval->next_tsn - 1; | ||
1482 | retval->addip_serial = retval->c.initial_tsn; | ||
1483 | retval->adv_peer_ack_point = retval->ctsn_ack_point; | ||
1484 | retval->peer.prsctp_capable = retval->c.prsctp_capable; | ||
1485 | retval->peer.adaption_ind = retval->c.adaption_ind; | ||
1486 | |||
1487 | /* The INIT stuff will be done by the side effects. */ | ||
1488 | return retval; | ||
1489 | |||
1490 | fail: | ||
1491 | if (retval) | ||
1492 | sctp_association_free(retval); | ||
1493 | |||
1494 | return NULL; | ||
1495 | |||
1496 | malformed: | ||
1497 | /* Yikes! The packet is either corrupt or deliberately | ||
1498 | * malformed. | ||
1499 | */ | ||
1500 | *error = -SCTP_IERROR_MALFORMED; | ||
1501 | goto fail; | ||
1502 | } | ||
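
The stale-cookie age reported in the error cause is plain timeval arithmetic. A standalone rerun of that computation with made-up timestamps (the values are illustrative only):

#include <stdio.h>
#include <sys/time.h>

int main(void)
{
	/* Hypothetical packet timestamp and cookie expiration time. */
	struct timeval stamp   = { .tv_sec = 1000, .tv_usec = 250000 };
	struct timeval expires = { .tv_sec =  999, .tv_usec = 900000 };
	long usecs;

	/* Same formula as the STALE_COOKIE branch above. */
	usecs = (stamp.tv_sec - expires.tv_sec) * 1000000L +
		stamp.tv_usec - expires.tv_usec;

	printf("cookie is stale by %ld usec\n", usecs);	/* 350000 */
	return 0;
}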
1503 | |||
1504 | /******************************************************************** | ||
1505 | * 3rd Level Abstractions | ||
1506 | ********************************************************************/ | ||
1507 | |||
1508 | struct __sctp_missing { | ||
1509 | __u32 num_missing; | ||
1510 | __u16 type; | ||
1511 | } __attribute__((packed)); | ||
1512 | |||
1513 | /* | ||
1514 | * Report a missing mandatory parameter. | ||
1515 | */ | ||
1516 | static int sctp_process_missing_param(const struct sctp_association *asoc, | ||
1517 | sctp_param_t paramtype, | ||
1518 | struct sctp_chunk *chunk, | ||
1519 | struct sctp_chunk **errp) | ||
1520 | { | ||
1521 | struct __sctp_missing report; | ||
1522 | __u16 len; | ||
1523 | |||
1524 | len = WORD_ROUND(sizeof(report)); | ||
1525 | |||
1526 | /* Make an ERROR chunk, preparing enough room for | ||
1527 | * returning multiple unknown parameters. | ||
1528 | */ | ||
1529 | if (!*errp) | ||
1530 | *errp = sctp_make_op_error_space(asoc, chunk, len); | ||
1531 | |||
1532 | if (*errp) { | ||
1533 | report.num_missing = htonl(1); | ||
1534 | report.type = paramtype; | ||
1535 | sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, | ||
1536 | &report, sizeof(report)); | ||
1537 | } | ||
1538 | |||
1539 | /* Stop processing this chunk. */ | ||
1540 | return 0; | ||
1541 | } | ||
1542 | |||
1543 | /* Report an Invalid Mandatory Parameter. */ | ||
1544 | static int sctp_process_inv_mandatory(const struct sctp_association *asoc, | ||
1545 | struct sctp_chunk *chunk, | ||
1546 | struct sctp_chunk **errp) | ||
1547 | { | ||
1548 | /* Invalid Mandatory Parameter Error has no payload. */ | ||
1549 | |||
1550 | if (!*errp) | ||
1551 | *errp = sctp_make_op_error_space(asoc, chunk, 0); | ||
1552 | |||
1553 | if (*errp) | ||
1554 | sctp_init_cause(*errp, SCTP_ERROR_INV_PARAM, NULL, 0); | ||
1555 | |||
1556 | /* Stop processing this chunk. */ | ||
1557 | return 0; | ||
1558 | } | ||
1559 | |||
1560 | static int sctp_process_inv_paramlength(const struct sctp_association *asoc, | ||
1561 | struct sctp_paramhdr *param, | ||
1562 | const struct sctp_chunk *chunk, | ||
1563 | struct sctp_chunk **errp) | ||
1564 | { | ||
1565 | char error[] = "The following parameter had invalid length:"; | ||
1566 | size_t payload_len = WORD_ROUND(sizeof(error)) + | ||
1567 | sizeof(sctp_paramhdr_t); | ||
1568 | |||
1569 | |||
1570 | /* Create an error chunk and fill it in with our payload. */ | ||
1571 | if (!*errp) | ||
1572 | *errp = sctp_make_op_error_space(asoc, chunk, payload_len); | ||
1573 | |||
1574 | if (*errp) { | ||
1575 | sctp_init_cause(*errp, SCTP_ERROR_PROTO_VIOLATION, error, | ||
1576 | sizeof(error)); | ||
1577 | sctp_addto_chunk(*errp, sizeof(sctp_paramhdr_t), param); | ||
1578 | } | ||
1579 | |||
1580 | return 0; | ||
1581 | } | ||
1582 | |||
1583 | |||
1584 | /* Do not attempt to handle the HOST_NAME parameter. However, do | ||
1585 | * send back an indicator to the peer. | ||
1586 | */ | ||
1587 | static int sctp_process_hn_param(const struct sctp_association *asoc, | ||
1588 | union sctp_params param, | ||
1589 | struct sctp_chunk *chunk, | ||
1590 | struct sctp_chunk **errp) | ||
1591 | { | ||
1592 | __u16 len = ntohs(param.p->length); | ||
1593 | |||
1594 | /* Make an ERROR chunk. */ | ||
1595 | if (!*errp) | ||
1596 | *errp = sctp_make_op_error_space(asoc, chunk, len); | ||
1597 | |||
1598 | if (*errp) | ||
1599 | sctp_init_cause(*errp, SCTP_ERROR_DNS_FAILED, | ||
1600 | param.v, len); | ||
1601 | |||
1602 | /* Stop processing this chunk. */ | ||
1603 | return 0; | ||
1604 | } | ||
1605 | |||
1606 | /* RFC 2960 Section 3.2.1 & the Implementers Guide 2.2. | ||
1607 | * | ||
1608 | * The Parameter Types are encoded such that the | ||
1609 | * highest-order two bits specify the action that must be | ||
1610 | * taken if the processing endpoint does not recognize the | ||
1611 | * Parameter Type. | ||
1612 | * | ||
1613 | * 00 - Stop processing this SCTP chunk and discard it, | ||
1614 | * do not process any further chunks within it. | ||
1615 | * | ||
1616 | * 01 - Stop processing this SCTP chunk and discard it, | ||
1617 | * do not process any further chunks within it, and report | ||
1618 | * the unrecognized parameter in an 'Unrecognized | ||
1619 | * Parameter Type' (in either an ERROR or in the INIT ACK). | ||
1620 | * | ||
1621 | * 10 - Skip this parameter and continue processing. | ||
1622 | * | ||
1623 | * 11 - Skip this parameter and continue processing but | ||
1624 | * report the unrecognized parameter in an | ||
1625 | * 'Unrecognized Parameter Type' (in either an ERROR or in | ||
1626 | * the INIT ACK). | ||
1627 | * | ||
1628 | * Return value: | ||
1629 | * 0 - discard the chunk | ||
1630 | * 1 - continue with the chunk | ||
1631 | */ | ||
1632 | static int sctp_process_unk_param(const struct sctp_association *asoc, | ||
1633 | union sctp_params param, | ||
1634 | struct sctp_chunk *chunk, | ||
1635 | struct sctp_chunk **errp) | ||
1636 | { | ||
1637 | int retval = 1; | ||
1638 | |||
1639 | switch (param.p->type & SCTP_PARAM_ACTION_MASK) { | ||
1640 | case SCTP_PARAM_ACTION_DISCARD: | ||
1641 | retval = 0; | ||
1642 | break; | ||
1643 | case SCTP_PARAM_ACTION_DISCARD_ERR: | ||
1644 | retval = 0; | ||
1645 | /* Make an ERROR chunk, preparing enough room for | ||
1646 | * returning multiple unknown parameters. | ||
1647 | */ | ||
1648 | if (NULL == *errp) | ||
1649 | *errp = sctp_make_op_error_space(asoc, chunk, | ||
1650 | ntohs(chunk->chunk_hdr->length)); | ||
1651 | |||
1652 | if (*errp) | ||
1653 | sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM, | ||
1654 | param.v, | ||
1655 | WORD_ROUND(ntohs(param.p->length))); | ||
1656 | |||
1657 | break; | ||
1658 | case SCTP_PARAM_ACTION_SKIP: | ||
1659 | break; | ||
1660 | case SCTP_PARAM_ACTION_SKIP_ERR: | ||
1661 | /* Make an ERROR chunk, preparing enough room for | ||
1662 | * returning multiple unknown parameters. | ||
1663 | */ | ||
1664 | if (NULL == *errp) | ||
1665 | *errp = sctp_make_op_error_space(asoc, chunk, | ||
1666 | ntohs(chunk->chunk_hdr->length)); | ||
1667 | |||
1668 | if (*errp) { | ||
1669 | sctp_init_cause(*errp, SCTP_ERROR_UNKNOWN_PARAM, | ||
1670 | param.v, | ||
1671 | WORD_ROUND(ntohs(param.p->length))); | ||
1672 | } else { | ||
1673 | /* If there is no memory for generating the ERROR | ||
1674 | * report as specified, an ABORT will be triggered | ||
1675 | * to the peer and the association won't be | ||
1676 | * established. | ||
1677 | */ | ||
1678 | retval = 0; | ||
1679 | } | ||
1680 | |||
1681 | break; | ||
1682 | default: | ||
1683 | break; | ||
1684 | } | ||
1685 | |||
1686 | return retval; | ||
1687 | } | ||
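
The switch above acts on the two high-order bits of the parameter type, as described in the comment block before it. A host-byte-order illustration of that decode (the kernel code masks the network-order type field; the 0x4abc value is made up):

#include <stdio.h>

#define PARAM_ACTION_MASK	 0xc000
#define PARAM_ACTION_DISCARD	 0x0000	/* 00: discard chunk             */
#define PARAM_ACTION_DISCARD_ERR 0x4000	/* 01: discard chunk and report  */
#define PARAM_ACTION_SKIP	 0x8000	/* 10: skip parameter            */
#define PARAM_ACTION_SKIP_ERR	 0xc000	/* 11: skip parameter and report */

int main(void)
{
	unsigned int type = 0x4abc;	/* hypothetical unknown parameter */

	switch (type & PARAM_ACTION_MASK) {
	case PARAM_ACTION_DISCARD:
		printf("stop processing and discard the chunk\n");
		break;
	case PARAM_ACTION_DISCARD_ERR:
		printf("discard the chunk and report the parameter\n");
		break;
	case PARAM_ACTION_SKIP:
		printf("skip the parameter and continue\n");
		break;
	case PARAM_ACTION_SKIP_ERR:
		printf("skip the parameter but report it\n");
		break;
	}
	return 0;
}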
1688 | |||
1689 | /* Find unrecognized parameters in the chunk. | ||
1690 | * Return values: | ||
1691 | * 0 - discard the chunk | ||
1692 | * 1 - continue with the chunk | ||
1693 | */ | ||
1694 | static int sctp_verify_param(const struct sctp_association *asoc, | ||
1695 | union sctp_params param, | ||
1696 | sctp_cid_t cid, | ||
1697 | struct sctp_chunk *chunk, | ||
1698 | struct sctp_chunk **err_chunk) | ||
1699 | { | ||
1700 | int retval = 1; | ||
1701 | |||
1702 | /* FIXME - This routine is not looking at each parameter per the | ||
1703 | * chunk type, i.e., unrecognized parameters should be further | ||
1704 | * identified based on the chunk id. | ||
1705 | */ | ||
1706 | |||
1707 | switch (param.p->type) { | ||
1708 | case SCTP_PARAM_IPV4_ADDRESS: | ||
1709 | case SCTP_PARAM_IPV6_ADDRESS: | ||
1710 | case SCTP_PARAM_COOKIE_PRESERVATIVE: | ||
1711 | case SCTP_PARAM_SUPPORTED_ADDRESS_TYPES: | ||
1712 | case SCTP_PARAM_STATE_COOKIE: | ||
1713 | case SCTP_PARAM_HEARTBEAT_INFO: | ||
1714 | case SCTP_PARAM_UNRECOGNIZED_PARAMETERS: | ||
1715 | case SCTP_PARAM_ECN_CAPABLE: | ||
1716 | case SCTP_PARAM_ADAPTION_LAYER_IND: | ||
1717 | break; | ||
1718 | |||
1719 | case SCTP_PARAM_HOST_NAME_ADDRESS: | ||
1720 | /* Tell the peer, we won't support this param. */ | ||
1721 | return sctp_process_hn_param(asoc, param, chunk, err_chunk); | ||
1722 | case SCTP_PARAM_FWD_TSN_SUPPORT: | ||
1723 | if (sctp_prsctp_enable) | ||
1724 | break; | ||
1725 | /* Fall Through */ | ||
1726 | default: | ||
1727 | SCTP_DEBUG_PRINTK("Unrecognized param: %d for chunk %d.\n", | ||
1728 | ntohs(param.p->type), cid); | ||
1729 | return sctp_process_unk_param(asoc, param, chunk, err_chunk); | ||
1730 | |||
1731 | break; | ||
1732 | } | ||
1733 | return retval; | ||
1734 | } | ||
1735 | |||
1736 | /* Verify the INIT packet before we process it. */ | ||
1737 | int sctp_verify_init(const struct sctp_association *asoc, | ||
1738 | sctp_cid_t cid, | ||
1739 | sctp_init_chunk_t *peer_init, | ||
1740 | struct sctp_chunk *chunk, | ||
1741 | struct sctp_chunk **errp) | ||
1742 | { | ||
1743 | union sctp_params param; | ||
1744 | int has_cookie = 0; | ||
1745 | |||
1746 | /* Verify stream values are non-zero. */ | ||
1747 | if ((0 == peer_init->init_hdr.num_outbound_streams) || | ||
1748 | (0 == peer_init->init_hdr.num_inbound_streams)) { | ||
1749 | |||
1750 | sctp_process_inv_mandatory(asoc, chunk, errp); | ||
1751 | return 0; | ||
1752 | } | ||
1753 | |||
1754 | /* Check for missing mandatory parameters. */ | ||
1755 | sctp_walk_params(param, peer_init, init_hdr.params) { | ||
1756 | |||
1757 | if (SCTP_PARAM_STATE_COOKIE == param.p->type) | ||
1758 | has_cookie = 1; | ||
1759 | |||
1760 | } /* for (loop through all parameters) */ | ||
1761 | |||
1762 | /* There is a possibility that a parameter length was bad and | ||
1763 | * in that case we would have stopped walking the parameters. | ||
1764 | * The current param.p would point at the bad one. | ||
1765 | * Current consensus on the mailing list is to generate a PROTOCOL | ||
1766 | * VIOLATION error. We build the ERROR chunk here and let the normal | ||
1767 | * error handling code build and send the packet. | ||
1768 | */ | ||
1769 | if (param.v < (void*)chunk->chunk_end - sizeof(sctp_paramhdr_t)) { | ||
1770 | sctp_process_inv_paramlength(asoc, param.p, chunk, errp); | ||
1771 | return 0; | ||
1772 | } | ||
1773 | |||
1774 | /* The only missing mandatory param possible today is | ||
1775 | * the state cookie for an INIT-ACK chunk. | ||
1776 | */ | ||
1777 | if ((SCTP_CID_INIT_ACK == cid) && !has_cookie) { | ||
1778 | sctp_process_missing_param(asoc, SCTP_PARAM_STATE_COOKIE, | ||
1779 | chunk, errp); | ||
1780 | return 0; | ||
1781 | } | ||
1782 | |||
1783 | /* Find unrecognized parameters. */ | ||
1784 | |||
1785 | sctp_walk_params(param, peer_init, init_hdr.params) { | ||
1786 | |||
1787 | if (!sctp_verify_param(asoc, param, cid, chunk, errp)) { | ||
1788 | if (SCTP_PARAM_HOST_NAME_ADDRESS == param.p->type) | ||
1789 | return 0; | ||
1790 | else | ||
1791 | return 1; | ||
1792 | } | ||
1793 | |||
1794 | } /* for (loop through all parameters) */ | ||
1795 | |||
1796 | return 1; | ||
1797 | } | ||
1798 | |||
1799 | /* Unpack the parameters in an INIT packet into an association. | ||
1800 | * Returns 0 on failure, else success. | ||
1801 | * FIXME: This is an association method. | ||
1802 | */ | ||
1803 | int sctp_process_init(struct sctp_association *asoc, sctp_cid_t cid, | ||
1804 | const union sctp_addr *peer_addr, | ||
1805 | sctp_init_chunk_t *peer_init, int gfp) | ||
1806 | { | ||
1807 | union sctp_params param; | ||
1808 | struct sctp_transport *transport; | ||
1809 | struct list_head *pos, *temp; | ||
1810 | char *cookie; | ||
1811 | |||
1812 | /* We must include the address that the INIT packet came from. | ||
1813 | * This is the only address that matters for an INIT packet. | ||
1814 | * When processing a COOKIE ECHO, we retrieve the source address | ||
1815 | * of the INIT from the cookie. | ||
1816 | */ | ||
1817 | |||
1818 | /* This implementation defaults to making the first transport | ||
1819 | * added as the primary transport. The source address seems to | ||
1820 | * be a better choice than any of the embedded addresses. | ||
1821 | */ | ||
1822 | if (peer_addr) | ||
1823 | if(!sctp_assoc_add_peer(asoc, peer_addr, gfp)) | ||
1824 | goto nomem; | ||
1825 | |||
1826 | /* Process the initialization parameters. */ | ||
1827 | |||
1828 | sctp_walk_params(param, peer_init, init_hdr.params) { | ||
1829 | |||
1830 | if (!sctp_process_param(asoc, param, peer_addr, gfp)) | ||
1831 | goto clean_up; | ||
1832 | } | ||
1833 | |||
1834 | /* The fixed INIT headers are always in network byte | ||
1835 | * order. | ||
1836 | */ | ||
1837 | asoc->peer.i.init_tag = | ||
1838 | ntohl(peer_init->init_hdr.init_tag); | ||
1839 | asoc->peer.i.a_rwnd = | ||
1840 | ntohl(peer_init->init_hdr.a_rwnd); | ||
1841 | asoc->peer.i.num_outbound_streams = | ||
1842 | ntohs(peer_init->init_hdr.num_outbound_streams); | ||
1843 | asoc->peer.i.num_inbound_streams = | ||
1844 | ntohs(peer_init->init_hdr.num_inbound_streams); | ||
1845 | asoc->peer.i.initial_tsn = | ||
1846 | ntohl(peer_init->init_hdr.initial_tsn); | ||
1847 | |||
1848 | /* Apply the upper bounds for output streams based on peer's | ||
1849 | * number of inbound streams. | ||
1850 | */ | ||
1851 | if (asoc->c.sinit_num_ostreams > | ||
1852 | ntohs(peer_init->init_hdr.num_inbound_streams)) { | ||
1853 | asoc->c.sinit_num_ostreams = | ||
1854 | ntohs(peer_init->init_hdr.num_inbound_streams); | ||
1855 | } | ||
1856 | |||
1857 | if (asoc->c.sinit_max_instreams > | ||
1858 | ntohs(peer_init->init_hdr.num_outbound_streams)) { | ||
1859 | asoc->c.sinit_max_instreams = | ||
1860 | ntohs(peer_init->init_hdr.num_outbound_streams); | ||
1861 | } | ||
1862 | |||
1863 | /* Copy Initiation tag from INIT to VT_peer in cookie. */ | ||
1864 | asoc->c.peer_vtag = asoc->peer.i.init_tag; | ||
1865 | |||
1866 | /* Peer Rwnd : Current calculated value of the peer's rwnd. */ | ||
1867 | asoc->peer.rwnd = asoc->peer.i.a_rwnd; | ||
1868 | |||
1869 | /* Copy cookie in case we need to resend COOKIE-ECHO. */ | ||
1870 | cookie = asoc->peer.cookie; | ||
1871 | if (cookie) { | ||
1872 | asoc->peer.cookie = kmalloc(asoc->peer.cookie_len, gfp); | ||
1873 | if (!asoc->peer.cookie) | ||
1874 | goto clean_up; | ||
1875 | memcpy(asoc->peer.cookie, cookie, asoc->peer.cookie_len); | ||
1876 | } | ||
1877 | |||
1878 | /* RFC 2960 7.2.1 The initial value of ssthresh MAY be arbitrarily | ||
1879 | * high (for example, implementations MAY use the size of the receiver | ||
1880 | * advertised window). | ||
1881 | */ | ||
1882 | list_for_each(pos, &asoc->peer.transport_addr_list) { | ||
1883 | transport = list_entry(pos, struct sctp_transport, transports); | ||
1884 | transport->ssthresh = asoc->peer.i.a_rwnd; | ||
1885 | } | ||
1886 | |||
1887 | /* Set up the TSN tracking pieces. */ | ||
1888 | sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_SIZE, | ||
1889 | asoc->peer.i.initial_tsn); | ||
1890 | |||
1891 | /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number | ||
1892 | * | ||
1893 | * The stream sequence number in all the streams shall start | ||
1894 | * from 0 when the association is established. Also, when the | ||
1895 | * stream sequence number reaches the value 65535 the next | ||
1896 | * stream sequence number shall be set to 0. | ||
1897 | */ | ||
1898 | |||
1899 | /* Allocate storage for the negotiated streams if it is not a temporary | ||
1900 | * association. */ | ||
1901 | if (!asoc->temp) { | ||
1902 | int assoc_id; | ||
1903 | int error; | ||
1904 | |||
1905 | asoc->ssnmap = sctp_ssnmap_new(asoc->c.sinit_max_instreams, | ||
1906 | asoc->c.sinit_num_ostreams, gfp); | ||
1907 | if (!asoc->ssnmap) | ||
1908 | goto clean_up; | ||
1909 | |||
1910 | retry: | ||
1911 | if (unlikely(!idr_pre_get(&sctp_assocs_id, gfp))) | ||
1912 | goto clean_up; | ||
1913 | spin_lock_bh(&sctp_assocs_id_lock); | ||
1914 | error = idr_get_new_above(&sctp_assocs_id, (void *)asoc, 1, | ||
1915 | &assoc_id); | ||
1916 | spin_unlock_bh(&sctp_assocs_id_lock); | ||
1917 | if (error == -EAGAIN) | ||
1918 | goto retry; | ||
1919 | else if (error) | ||
1920 | goto clean_up; | ||
1921 | |||
1922 | asoc->assoc_id = (sctp_assoc_t) assoc_id; | ||
1923 | } | ||
1924 | |||
1925 | /* ADDIP Section 4.1 ASCONF Chunk Procedures | ||
1926 | * | ||
1927 | * When an endpoint has an ASCONF signaled change to be sent to the | ||
1928 | * remote endpoint it should do the following: | ||
1929 | * ... | ||
1930 | * A2) A serial number should be assigned to the Chunk. The serial | ||
1931 | * number should be a monotonically increasing number. All serial | ||
1932 | * numbers are defined to be initialized at the start of the | ||
1933 | * association to the same value as the Initial TSN. | ||
1934 | */ | ||
1935 | asoc->peer.addip_serial = asoc->peer.i.initial_tsn - 1; | ||
1936 | return 1; | ||
1937 | |||
1938 | clean_up: | ||
1939 | /* Release the transport structures. */ | ||
1940 | list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { | ||
1941 | transport = list_entry(pos, struct sctp_transport, transports); | ||
1942 | list_del_init(pos); | ||
1943 | sctp_transport_free(transport); | ||
1944 | } | ||
1945 | nomem: | ||
1946 | return 0; | ||
1947 | } | ||
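
The stream-count capping in the function above is a simple minimum in each direction. A standalone rerun with invented counts:

#include <stdio.h>

int main(void)
{
	/* Hypothetical local requests and peer INIT values. */
	unsigned int sinit_num_ostreams = 10, sinit_max_instreams = 10;
	unsigned int peer_inbound = 4, peer_outbound = 6;

	/* Outbound streams are bounded by the peer's inbound capacity,
	 * and vice versa, exactly as in sctp_process_init().
	 */
	if (sinit_num_ostreams > peer_inbound)
		sinit_num_ostreams = peer_inbound;
	if (sinit_max_instreams > peer_outbound)
		sinit_max_instreams = peer_outbound;

	printf("negotiated: %u outbound, %u inbound\n",
	       sinit_num_ostreams, sinit_max_instreams);	/* 4, 6 */
	return 0;
}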
1948 | |||
1949 | |||
1950 | /* Update asoc with the option described in param. | ||
1951 | * | ||
1952 | * RFC2960 3.3.2.1 Optional/Variable Length Parameters in INIT | ||
1953 | * | ||
1954 | * asoc is the association to update. | ||
1955 | * param is the variable length parameter to use for update. | ||
1956 | * cid tells us if this is an INIT, INIT ACK or COOKIE ECHO. | ||
1957 | * If the current packet is an INIT we want to minimize the amount of | ||
1958 | * work we do. In particular, we should not build transport | ||
1959 | * structures for the addresses. | ||
1960 | */ | ||
1961 | static int sctp_process_param(struct sctp_association *asoc, | ||
1962 | union sctp_params param, | ||
1963 | const union sctp_addr *peer_addr, | ||
1964 | int gfp) | ||
1965 | { | ||
1966 | union sctp_addr addr; | ||
1967 | int i; | ||
1968 | __u16 sat; | ||
1969 | int retval = 1; | ||
1970 | sctp_scope_t scope; | ||
1971 | time_t stale; | ||
1972 | struct sctp_af *af; | ||
1973 | |||
1974 | /* We maintain all INIT parameters in network byte order all the | ||
1975 | * time. This allows us to not worry about whether the parameters | ||
1976 | * came from a fresh INIT, an INIT ACK, or were stored in a cookie. | ||
1977 | */ | ||
1978 | switch (param.p->type) { | ||
1979 | case SCTP_PARAM_IPV6_ADDRESS: | ||
1980 | if (PF_INET6 != asoc->base.sk->sk_family) | ||
1981 | break; | ||
1982 | /* Fall through. */ | ||
1983 | case SCTP_PARAM_IPV4_ADDRESS: | ||
1984 | af = sctp_get_af_specific(param_type2af(param.p->type)); | ||
1985 | af->from_addr_param(&addr, param.addr, asoc->peer.port, 0); | ||
1986 | scope = sctp_scope(peer_addr); | ||
1987 | if (sctp_in_scope(&addr, scope)) | ||
1988 | if (!sctp_assoc_add_peer(asoc, &addr, gfp)) | ||
1989 | return 0; | ||
1990 | break; | ||
1991 | |||
1992 | case SCTP_PARAM_COOKIE_PRESERVATIVE: | ||
1993 | if (!sctp_cookie_preserve_enable) | ||
1994 | break; | ||
1995 | |||
1996 | stale = ntohl(param.life->lifespan_increment); | ||
1997 | |||
1998 | /* The Suggested Cookie Life-Span Increment's unit is msec | ||
1999 | * (1/1000 sec). | ||
2000 | */ | ||
2001 | asoc->cookie_life.tv_sec += stale / 1000; | ||
2002 | asoc->cookie_life.tv_usec += (stale % 1000) * 1000; | ||
2003 | break; | ||
2004 | |||
2005 | case SCTP_PARAM_HOST_NAME_ADDRESS: | ||
2006 | SCTP_DEBUG_PRINTK("unimplemented SCTP_HOST_NAME_ADDRESS\n"); | ||
2007 | break; | ||
2008 | |||
2009 | case SCTP_PARAM_SUPPORTED_ADDRESS_TYPES: | ||
2010 | /* Turn off the default values first so we'll know which | ||
2011 | * ones are really set by the peer. | ||
2012 | */ | ||
2013 | asoc->peer.ipv4_address = 0; | ||
2014 | asoc->peer.ipv6_address = 0; | ||
2015 | |||
2016 | /* Cycle through address types; avoid divide by 0. */ | ||
2017 | sat = ntohs(param.p->length) - sizeof(sctp_paramhdr_t); | ||
2018 | if (sat) | ||
2019 | sat /= sizeof(__u16); | ||
2020 | |||
2021 | for (i = 0; i < sat; ++i) { | ||
2022 | switch (param.sat->types[i]) { | ||
2023 | case SCTP_PARAM_IPV4_ADDRESS: | ||
2024 | asoc->peer.ipv4_address = 1; | ||
2025 | break; | ||
2026 | |||
2027 | case SCTP_PARAM_IPV6_ADDRESS: | ||
2028 | asoc->peer.ipv6_address = 1; | ||
2029 | break; | ||
2030 | |||
2031 | case SCTP_PARAM_HOST_NAME_ADDRESS: | ||
2032 | asoc->peer.hostname_address = 1; | ||
2033 | break; | ||
2034 | |||
2035 | default: /* Just ignore anything else. */ | ||
2036 | break; | ||
2037 | }; | ||
2038 | } | ||
2039 | break; | ||
2040 | |||
2041 | case SCTP_PARAM_STATE_COOKIE: | ||
2042 | asoc->peer.cookie_len = | ||
2043 | ntohs(param.p->length) - sizeof(sctp_paramhdr_t); | ||
2044 | asoc->peer.cookie = param.cookie->body; | ||
2045 | break; | ||
2046 | |||
2047 | case SCTP_PARAM_HEARTBEAT_INFO: | ||
2048 | /* Would be odd to receive, but it causes no problems. */ | ||
2049 | break; | ||
2050 | |||
2051 | case SCTP_PARAM_UNRECOGNIZED_PARAMETERS: | ||
2052 | /* Rejected during verify stage. */ | ||
2053 | break; | ||
2054 | |||
2055 | case SCTP_PARAM_ECN_CAPABLE: | ||
2056 | asoc->peer.ecn_capable = 1; | ||
2057 | break; | ||
2058 | |||
2059 | case SCTP_PARAM_ADAPTION_LAYER_IND: | ||
2060 | asoc->peer.adaption_ind = param.aind->adaption_ind; | ||
2061 | break; | ||
2062 | |||
2063 | case SCTP_PARAM_FWD_TSN_SUPPORT: | ||
2064 | if (sctp_prsctp_enable) { | ||
2065 | asoc->peer.prsctp_capable = 1; | ||
2066 | break; | ||
2067 | } | ||
2068 | /* Fall Through */ | ||
2069 | default: | ||
2070 | /* Any unrecognized parameters should have been caught | ||
2071 | * and handled by sctp_verify_param() which should be | ||
2072 | * called prior to this routine. Simply log the error | ||
2073 | * here. | ||
2074 | */ | ||
2075 | SCTP_DEBUG_PRINTK("Ignoring param: %d for association %p.\n", | ||
2076 | ntohs(param.p->type), asoc); | ||
2077 | break; | ||
2078 | }; | ||
2079 | |||
2080 | return retval; | ||
2081 | } | ||
2082 | |||
2083 | /* Select a new verification tag. */ | ||
2084 | __u32 sctp_generate_tag(const struct sctp_endpoint *ep) | ||
2085 | { | ||
2086 | /* I believe that this random number generator complies with RFC1750. | ||
2087 | * A tag of 0 is reserved for special cases (e.g. INIT). | ||
2088 | */ | ||
2089 | __u32 x; | ||
2090 | |||
2091 | do { | ||
2092 | get_random_bytes(&x, sizeof(__u32)); | ||
2093 | } while (x == 0); | ||
2094 | |||
2095 | return x; | ||
2096 | } | ||
2097 | |||
2098 | /* Select an initial TSN to send during startup. */ | ||
2099 | __u32 sctp_generate_tsn(const struct sctp_endpoint *ep) | ||
2100 | { | ||
2101 | __u32 retval; | ||
2102 | |||
2103 | get_random_bytes(&retval, sizeof(__u32)); | ||
2104 | return retval; | ||
2105 | } | ||
2106 | |||
2107 | /* | ||
2108 | * ADDIP 3.1.1 Address Configuration Change Chunk (ASCONF) | ||
2109 | * 0 1 2 3 | ||
2110 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | ||
2111 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
2112 | * | Type = 0xC1 | Chunk Flags | Chunk Length | | ||
2113 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
2114 | * | Serial Number | | ||
2115 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
2116 | * | Address Parameter | | ||
2117 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
2118 | * | ASCONF Parameter #1 | | ||
2119 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
2120 | * \ \ | ||
2121 | * / .... / | ||
2122 | * \ \ | ||
2123 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
2124 | * | ASCONF Parameter #N | | ||
2125 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
2126 | * | ||
2127 | * The Address Parameter and the other parameters are not wrapped by this function. | ||
2128 | */ | ||
2129 | static struct sctp_chunk *sctp_make_asconf(struct sctp_association *asoc, | ||
2130 | union sctp_addr *addr, | ||
2131 | int vparam_len) | ||
2132 | { | ||
2133 | sctp_addiphdr_t asconf; | ||
2134 | struct sctp_chunk *retval; | ||
2135 | int length = sizeof(asconf) + vparam_len; | ||
2136 | union sctp_addr_param addrparam; | ||
2137 | int addrlen; | ||
2138 | struct sctp_af *af = sctp_get_af_specific(addr->v4.sin_family); | ||
2139 | |||
2140 | addrlen = af->to_addr_param(addr, &addrparam); | ||
2141 | if (!addrlen) | ||
2142 | return NULL; | ||
2143 | length += addrlen; | ||
2144 | |||
2145 | /* Create the chunk. */ | ||
2146 | retval = sctp_make_chunk(asoc, SCTP_CID_ASCONF, 0, length); | ||
2147 | if (!retval) | ||
2148 | return NULL; | ||
2149 | |||
2150 | asconf.serial = htonl(asoc->addip_serial++); | ||
2151 | |||
2152 | retval->subh.addip_hdr = | ||
2153 | sctp_addto_chunk(retval, sizeof(asconf), &asconf); | ||
2154 | retval->param_hdr.v = | ||
2155 | sctp_addto_chunk(retval, addrlen, &addrparam); | ||
2156 | |||
2157 | return retval; | ||
2158 | } | ||
2159 | |||
2160 | /* ADDIP | ||
2161 | * 3.2.1 Add IP Address | ||
2162 | * 0 1 2 3 | ||
2163 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | ||
2164 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
2165 | * | Type = 0xC001 | Length = Variable | | ||
2166 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
2167 | * | ASCONF-Request Correlation ID | | ||
2168 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
2169 | * | Address Parameter | | ||
2170 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
2171 | * | ||
2172 | * 3.2.2 Delete IP Address | ||
2173 | * 0 1 2 3 | ||
2174 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | ||
2175 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
2176 | * | Type = 0xC002 | Length = Variable | | ||
2177 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
2178 | * | ASCONF-Request Correlation ID | | ||
2179 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
2180 | * | Address Parameter | | ||
2181 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
2182 | * | ||
2183 | */ | ||
2184 | struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *asoc, | ||
2185 | union sctp_addr *laddr, | ||
2186 | struct sockaddr *addrs, | ||
2187 | int addrcnt, | ||
2188 | __u16 flags) | ||
2189 | { | ||
2190 | sctp_addip_param_t param; | ||
2191 | struct sctp_chunk *retval; | ||
2192 | union sctp_addr_param addr_param; | ||
2193 | union sctp_addr *addr; | ||
2194 | void *addr_buf; | ||
2195 | struct sctp_af *af; | ||
2196 | int paramlen = sizeof(param); | ||
2197 | int addr_param_len = 0; | ||
2198 | int totallen = 0; | ||
2199 | int i; | ||
2200 | |||
2201 | /* Get total length of all the address parameters. */ | ||
2202 | addr_buf = addrs; | ||
2203 | for (i = 0; i < addrcnt; i++) { | ||
2204 | addr = (union sctp_addr *)addr_buf; | ||
2205 | af = sctp_get_af_specific(addr->v4.sin_family); | ||
2206 | addr_param_len = af->to_addr_param(addr, &addr_param); | ||
2207 | |||
2208 | totallen += paramlen; | ||
2209 | totallen += addr_param_len; | ||
2210 | |||
2211 | addr_buf += af->sockaddr_len; | ||
2212 | } | ||
2213 | |||
2214 | /* Create an asconf chunk with the required length. */ | ||
2215 | retval = sctp_make_asconf(asoc, laddr, totallen); | ||
2216 | if (!retval) | ||
2217 | return NULL; | ||
2218 | |||
2219 | /* Add the address parameters to the asconf chunk. */ | ||
2220 | addr_buf = addrs; | ||
2221 | for (i = 0; i < addrcnt; i++) { | ||
2222 | addr = (union sctp_addr *)addr_buf; | ||
2223 | af = sctp_get_af_specific(addr->v4.sin_family); | ||
2224 | addr_param_len = af->to_addr_param(addr, &addr_param); | ||
2225 | param.param_hdr.type = flags; | ||
2226 | param.param_hdr.length = htons(paramlen + addr_param_len); | ||
2227 | param.crr_id = i; | ||
2228 | |||
2229 | sctp_addto_chunk(retval, paramlen, ¶m); | ||
2230 | sctp_addto_chunk(retval, addr_param_len, &addr_param); | ||
2231 | |||
2232 | addr_buf += af->sockaddr_len; | ||
2233 | } | ||
2234 | return retval; | ||
2235 | } | ||
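
The length bookkeeping above sums one sctp_addip_param_t plus one address TLV per address. A worked total for a single IPv4 address; the byte counts are the conventional TLV sizes and are stated here as assumptions, not read from this file:

#include <stdio.h>

int main(void)
{
	int paramlen = 8;	/* assumed sizeof(sctp_addip_param_t)      */
	int addr_param_len = 8;	/* assumed IPv4 address parameter TLV size */
	int addrcnt = 1;	/* one address being added or deleted      */

	int totallen = addrcnt * (paramlen + addr_param_len);

	printf("vparam_len passed to sctp_make_asconf(): %d\n", totallen);
	return 0;
}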
2236 | |||
2237 | /* ADDIP | ||
2238 | * 3.2.4 Set Primary IP Address | ||
2239 | * 0 1 2 3 | ||
2240 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | ||
2241 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
2242 | * | Type =0xC004 | Length = Variable | | ||
2243 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
2244 | * | ASCONF-Request Correlation ID | | ||
2245 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
2246 | * | Address Parameter | | ||
2247 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
2248 | * | ||
2249 | * Create an ASCONF chunk with Set Primary IP address parameter. | ||
2250 | */ | ||
2251 | struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc, | ||
2252 | union sctp_addr *addr) | ||
2253 | { | ||
2254 | sctp_addip_param_t param; | ||
2255 | struct sctp_chunk *retval; | ||
2256 | int len = sizeof(param); | ||
2257 | union sctp_addr_param addrparam; | ||
2258 | int addrlen; | ||
2259 | struct sctp_af *af = sctp_get_af_specific(addr->v4.sin_family); | ||
2260 | |||
2261 | addrlen = af->to_addr_param(addr, &addrparam); | ||
2262 | if (!addrlen) | ||
2263 | return NULL; | ||
2264 | len += addrlen; | ||
2265 | |||
2266 | /* Create the chunk and add the asconf header. */ | ||
2267 | retval = sctp_make_asconf(asoc, addr, len); | ||
2268 | if (!retval) | ||
2269 | return NULL; | ||
2270 | |||
2271 | param.param_hdr.type = SCTP_PARAM_SET_PRIMARY; | ||
2272 | param.param_hdr.length = htons(len); | ||
2273 | param.crr_id = 0; | ||
2274 | |||
2275 | sctp_addto_chunk(retval, sizeof(param), ¶m); | ||
2276 | sctp_addto_chunk(retval, addrlen, &addrparam); | ||
2277 | |||
2278 | return retval; | ||
2279 | } | ||
2280 | |||
2281 | /* ADDIP 3.1.2 Address Configuration Acknowledgement Chunk (ASCONF-ACK) | ||
2282 | * 0 1 2 3 | ||
2283 | * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 | ||
2284 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
2285 | * | Type = 0x80 | Chunk Flags | Chunk Length | | ||
2286 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
2287 | * | Serial Number | | ||
2288 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
2289 | * | ASCONF Parameter Response#1 | | ||
2290 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
2291 | * \ \ | ||
2292 | * / .... / | ||
2293 | * \ \ | ||
2294 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
2295 | * | ASCONF Parameter Response#N | | ||
2296 | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ||
2297 | * | ||
2298 | * Create an ASCONF_ACK chunk with enough space for the parameter responses. | ||
2299 | */ | ||
2300 | static struct sctp_chunk *sctp_make_asconf_ack(const struct sctp_association *asoc, | ||
2301 | __u32 serial, int vparam_len) | ||
2302 | { | ||
2303 | sctp_addiphdr_t asconf; | ||
2304 | struct sctp_chunk *retval; | ||
2305 | int length = sizeof(asconf) + vparam_len; | ||
2306 | |||
2307 | /* Create the chunk. */ | ||
2308 | retval = sctp_make_chunk(asoc, SCTP_CID_ASCONF_ACK, 0, length); | ||
2309 | if (!retval) | ||
2310 | return NULL; | ||
2311 | |||
2312 | asconf.serial = htonl(serial); | ||
2313 | |||
2314 | retval->subh.addip_hdr = | ||
2315 | sctp_addto_chunk(retval, sizeof(asconf), &asconf); | ||
2316 | |||
2317 | return retval; | ||
2318 | } | ||
2319 | |||
2320 | /* Add response parameters to an ASCONF_ACK chunk. */ | ||
2321 | static void sctp_add_asconf_response(struct sctp_chunk *chunk, __u32 crr_id, | ||
2322 | __u16 err_code, sctp_addip_param_t *asconf_param) | ||
2323 | { | ||
2324 | sctp_addip_param_t ack_param; | ||
2325 | sctp_errhdr_t err_param; | ||
2326 | int asconf_param_len = 0; | ||
2327 | int err_param_len = 0; | ||
2328 | __u16 response_type; | ||
2329 | |||
2330 | if (SCTP_ERROR_NO_ERROR == err_code) { | ||
2331 | response_type = SCTP_PARAM_SUCCESS_REPORT; | ||
2332 | } else { | ||
2333 | response_type = SCTP_PARAM_ERR_CAUSE; | ||
2334 | err_param_len = sizeof(err_param); | ||
2335 | if (asconf_param) | ||
2336 | asconf_param_len = | ||
2337 | ntohs(asconf_param->param_hdr.length); | ||
2338 | } | ||
2339 | |||
2340 | /* Add Success Indication or Error Cause Indication parameter. */ | ||
2341 | ack_param.param_hdr.type = response_type; | ||
2342 | ack_param.param_hdr.length = htons(sizeof(ack_param) + | ||
2343 | err_param_len + | ||
2344 | asconf_param_len); | ||
2345 | ack_param.crr_id = crr_id; | ||
2346 | sctp_addto_chunk(chunk, sizeof(ack_param), &ack_param); | ||
2347 | |||
2348 | if (SCTP_ERROR_NO_ERROR == err_code) | ||
2349 | return; | ||
2350 | |||
2351 | /* Add Error Cause parameter. */ | ||
2352 | err_param.cause = err_code; | ||
2353 | err_param.length = htons(err_param_len + asconf_param_len); | ||
2354 | sctp_addto_chunk(chunk, err_param_len, &err_param); | ||
2355 | |||
2356 | /* Add the failed TLV copied from ASCONF chunk. */ | ||
2357 | if (asconf_param) | ||
2358 | sctp_addto_chunk(chunk, asconf_param_len, asconf_param); | ||
2359 | } | ||
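
A sketch of one failed-parameter response as appended by the function above; the field order follows the code and the lengths come from its htons() sums.

/*
 *   sctp_addip_param_t   type   = SCTP_PARAM_ERR_CAUSE
 *                        length = ack_param + errhdr + failed TLV
 *                        crr_id = correlation id echoed from the request
 *   sctp_errhdr_t        cause  = err_code
 *                        length = errhdr + failed TLV
 *   failed TLV           verbatim copy of the offending ASCONF parameter
 */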
2360 | |||
2361 | /* Process an ASCONF parameter. */ | ||
2362 | static __u16 sctp_process_asconf_param(struct sctp_association *asoc, | ||
2363 | struct sctp_chunk *asconf, | ||
2364 | sctp_addip_param_t *asconf_param) | ||
2365 | { | ||
2366 | struct sctp_transport *peer; | ||
2367 | struct sctp_af *af; | ||
2368 | union sctp_addr addr; | ||
2369 | struct list_head *pos; | ||
2370 | union sctp_addr_param *addr_param; | ||
2371 | |||
2372 | addr_param = (union sctp_addr_param *) | ||
2373 | ((void *)asconf_param + sizeof(sctp_addip_param_t)); | ||
2374 | |||
2375 | af = sctp_get_af_specific(param_type2af(addr_param->v4.param_hdr.type)); | ||
2376 | if (unlikely(!af)) | ||
2377 | return SCTP_ERROR_INV_PARAM; | ||
2378 | |||
2379 | af->from_addr_param(&addr, addr_param, asoc->peer.port, 0); | ||
2380 | switch (asconf_param->param_hdr.type) { | ||
2381 | case SCTP_PARAM_ADD_IP: | ||
2382 | /* ADDIP 4.3 D9) If an endpoint receives an ADD IP address | ||
2383 | * request and does not have the local resources to add this | ||
2384 | * new address to the association, it MUST return an Error | ||
2385 | * Cause TLV set to the new error code 'Operation Refused | ||
2386 | * Due to Resource Shortage'. | ||
2387 | */ | ||
2388 | |||
2389 | peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC); | ||
2390 | if (!peer) | ||
2391 | return SCTP_ERROR_RSRC_LOW; | ||
2392 | |||
2393 | /* Start the heartbeat timer. */ | ||
2394 | if (!mod_timer(&peer->hb_timer, sctp_transport_timeout(peer))) | ||
2395 | sctp_transport_hold(peer); | ||
2396 | break; | ||
2397 | case SCTP_PARAM_DEL_IP: | ||
2398 | /* ADDIP 4.3 D7) If a request is received to delete the | ||
2399 | * last remaining IP address of a peer endpoint, the receiver | ||
2400 | * MUST send an Error Cause TLV with the error cause set to the | ||
2401 | * new error code 'Request to Delete Last Remaining IP Address'. | ||
2402 | */ | ||
2403 | pos = asoc->peer.transport_addr_list.next; | ||
2404 | if (pos->next == &asoc->peer.transport_addr_list) | ||
2405 | return SCTP_ERROR_DEL_LAST_IP; | ||
2406 | |||
2407 | /* ADDIP 4.3 D8) If a request is received to delete an IP | ||
2408 | * address which is also the source address of the IP packet | ||
2409 | * which contained the ASCONF chunk, the receiver MUST reject | ||
2410 | * this request. To reject the request the receiver MUST send | ||
2411 | * an Error Cause TLV set to the new error code 'Request to | ||
2412 | * Delete Source IP Address' | ||
2413 | */ | ||
2414 | if (sctp_cmp_addr_exact(sctp_source(asconf), &addr)) | ||
2415 | return SCTP_ERROR_DEL_SRC_IP; | ||
2416 | |||
2417 | sctp_assoc_del_peer(asoc, &addr); | ||
2418 | break; | ||
2419 | case SCTP_PARAM_SET_PRIMARY: | ||
2420 | peer = sctp_assoc_lookup_paddr(asoc, &addr); | ||
2421 | if (!peer) | ||
2422 | return SCTP_ERROR_INV_PARAM; | ||
2423 | |||
2424 | sctp_assoc_set_primary(asoc, peer); | ||
2425 | break; | ||
2426 | default: | ||
2427 | return SCTP_ERROR_INV_PARAM; | ||
2428 | break; | ||
2429 | } | ||
2430 | |||
2431 | return SCTP_ERROR_NO_ERROR; | ||
2432 | } | ||
2433 | |||
2434 | /* Process an incoming ASCONF chunk with the next expected serial no. and | ||
2435 | * return an ASCONF_ACK chunk to be sent in response. | ||
2436 | */ | ||
2437 | struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc, | ||
2438 | struct sctp_chunk *asconf) | ||
2439 | { | ||
2440 | sctp_addiphdr_t *hdr; | ||
2441 | union sctp_addr_param *addr_param; | ||
2442 | sctp_addip_param_t *asconf_param; | ||
2443 | struct sctp_chunk *asconf_ack; | ||
2444 | |||
2445 | __u16 err_code; | ||
2446 | int length = 0; | ||
2447 | int chunk_len = asconf->skb->len; | ||
2448 | __u32 serial; | ||
2449 | int all_param_pass = 1; | ||
2450 | |||
2451 | hdr = (sctp_addiphdr_t *)asconf->skb->data; | ||
2452 | serial = ntohl(hdr->serial); | ||
2453 | |||
2454 | /* Skip the addiphdr and store a pointer to the address parameter. */ | ||
2455 | length = sizeof(sctp_addiphdr_t); | ||
2456 | addr_param = (union sctp_addr_param *)(asconf->skb->data + length); | ||
2457 | chunk_len -= length; | ||
2458 | |||
2459 | /* Skip the address parameter and store a pointer to the first | ||
2460 | * asconf parameter. | ||
2461 | */ | ||
2462 | length = ntohs(addr_param->v4.param_hdr.length); | ||
2463 | asconf_param = (sctp_addip_param_t *)((void *)addr_param + length); | ||
2464 | chunk_len -= length; | ||
2465 | |||
2466 | /* Create an ASCONF_ACK chunk. | ||
2467 | * Based on the parameter definitions, we know that the total size of | ||
2468 | * the ASCONF_ACK parameters is at most twice that of the ASCONF | ||
2469 | * parameters. | ||
2470 | */ | ||
2471 | asconf_ack = sctp_make_asconf_ack(asoc, serial, chunk_len * 2); | ||
2472 | if (!asconf_ack) | ||
2473 | goto done; | ||
2474 | |||
2475 | /* Process the TLVs contained within the ASCONF chunk. */ | ||
2476 | while (chunk_len > 0) { | ||
2477 | err_code = sctp_process_asconf_param(asoc, asconf, | ||
2478 | asconf_param); | ||
2479 | /* ADDIP 4.1 A7) | ||
2480 | * If an error response is received for a TLV parameter, | ||
2481 | * all TLVs with no response before the failed TLV are | ||
2482 | * considered successful if not reported. All TLVs after | ||
2483 | * the failed response are considered unsuccessful unless | ||
2484 | * a specific success indication is present for the parameter. | ||
2485 | */ | ||
2486 | if (SCTP_ERROR_NO_ERROR != err_code) | ||
2487 | all_param_pass = 0; | ||
2488 | |||
2489 | if (!all_param_pass) | ||
2490 | sctp_add_asconf_response(asconf_ack, | ||
2491 | asconf_param->crr_id, err_code, | ||
2492 | asconf_param); | ||
2493 | |||
2494 | /* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add | ||
2495 | * an IP address sends an 'Out of Resource' in its response, it | ||
2496 | * MUST also fail any subsequent add or delete requests bundled | ||
2497 | * in the ASCONF. | ||
2498 | */ | ||
2499 | if (SCTP_ERROR_RSRC_LOW == err_code) | ||
2500 | goto done; | ||
2501 | |||
2502 | /* Move to the next ASCONF param. */ | ||
2503 | length = ntohs(asconf_param->param_hdr.length); | ||
2504 | asconf_param = (sctp_addip_param_t *)((void *)asconf_param + | ||
2505 | length); | ||
2506 | chunk_len -= length; | ||
2507 | } | ||
2508 | |||
2509 | done: | ||
2510 | asoc->peer.addip_serial++; | ||
2511 | |||
2512 | /* If we are sending a new ASCONF_ACK, hold a reference to it in the assoc | ||
2513 | * after freeing the reference to the old ASCONF_ACK, if any. | ||
2514 | */ | ||
2515 | if (asconf_ack) { | ||
2516 | if (asoc->addip_last_asconf_ack) | ||
2517 | sctp_chunk_free(asoc->addip_last_asconf_ack); | ||
2518 | |||
2519 | sctp_chunk_hold(asconf_ack); | ||
2520 | asoc->addip_last_asconf_ack = asconf_ack; | ||
2521 | } | ||
2522 | |||
2523 | return asconf_ack; | ||
2524 | } | ||
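
The loop above steps through TLVs by adding each parameter's own length to the cursor. A self-contained userspace version of that walk over a made-up two-parameter buffer (types and lengths chosen arbitrarily):

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

struct tlv_hdr {		/* shaped like sctp_paramhdr_t            */
	unsigned short type;
	unsigned short length;	/* covers header + value, network order   */
};

int main(void)
{
	unsigned char buf[16] = { 0 };
	struct tlv_hdr h;
	int off = 0, remaining = sizeof(buf);

	/* Two hypothetical 8-byte parameters. */
	h.type = htons(0xc001); h.length = htons(8); memcpy(buf, &h, sizeof(h));
	h.type = htons(0xc002); h.length = htons(8); memcpy(buf + 8, &h, sizeof(h));

	while (remaining > 0) {
		memcpy(&h, buf + off, sizeof(h));
		printf("param type 0x%04x, length %u\n",
		       (unsigned)ntohs(h.type), (unsigned)ntohs(h.length));
		off += ntohs(h.length);	/* same stride as the kernel loop */
		remaining -= ntohs(h.length);
	}
	return 0;
}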
2525 | |||
2526 | /* Process an ASCONF parameter that has been successfully acked. */ | ||
2527 | static int sctp_asconf_param_success(struct sctp_association *asoc, | ||
2528 | sctp_addip_param_t *asconf_param) | ||
2529 | { | ||
2530 | struct sctp_af *af; | ||
2531 | union sctp_addr addr; | ||
2532 | struct sctp_bind_addr *bp = &asoc->base.bind_addr; | ||
2533 | union sctp_addr_param *addr_param; | ||
2534 | struct list_head *pos; | ||
2535 | struct sctp_transport *transport; | ||
2536 | int retval = 0; | ||
2537 | |||
2538 | addr_param = (union sctp_addr_param *) | ||
2539 | ((void *)asconf_param + sizeof(sctp_addip_param_t)); | ||
2540 | |||
2541 | /* We have checked the packet before, so we do not check again. */ | ||
2542 | af = sctp_get_af_specific(param_type2af(addr_param->v4.param_hdr.type)); | ||
2543 | af->from_addr_param(&addr, addr_param, bp->port, 0); | ||
2544 | |||
2545 | switch (asconf_param->param_hdr.type) { | ||
2546 | case SCTP_PARAM_ADD_IP: | ||
2547 | sctp_local_bh_disable(); | ||
2548 | sctp_write_lock(&asoc->base.addr_lock); | ||
2549 | retval = sctp_add_bind_addr(bp, &addr, GFP_ATOMIC); | ||
2550 | sctp_write_unlock(&asoc->base.addr_lock); | ||
2551 | sctp_local_bh_enable(); | ||
2552 | break; | ||
2553 | case SCTP_PARAM_DEL_IP: | ||
2554 | sctp_local_bh_disable(); | ||
2555 | sctp_write_lock(&asoc->base.addr_lock); | ||
2556 | retval = sctp_del_bind_addr(bp, &addr); | ||
2557 | sctp_write_unlock(&asoc->base.addr_lock); | ||
2558 | sctp_local_bh_enable(); | ||
2559 | list_for_each(pos, &asoc->peer.transport_addr_list) { | ||
2560 | transport = list_entry(pos, struct sctp_transport, | ||
2561 | transports); | ||
2562 | sctp_transport_route(transport, NULL, | ||
2563 | sctp_sk(asoc->base.sk)); | ||
2564 | } | ||
2565 | break; | ||
2566 | default: | ||
2567 | break; | ||
2568 | } | ||
2569 | |||
2570 | return retval; | ||
2571 | } | ||
2572 | |||
2573 | /* Get the corresponding ASCONF response error code from the ASCONF_ACK chunk | ||
2574 | * for the given asconf parameter. If there is no response for this parameter, | ||
2575 | * return the error code based on the third argument 'no_err'. | ||
2576 | * ADDIP 4.1 | ||
2577 | * A7) If an error response is received for a TLV parameter, all TLVs with no | ||
2578 | * response before the failed TLV are considered successful if not reported. | ||
2579 | * All TLVs after the failed response are considered unsuccessful unless a | ||
2580 | * specific success indication is present for the parameter. | ||
2581 | */ | ||
2582 | static __u16 sctp_get_asconf_response(struct sctp_chunk *asconf_ack, | ||
2583 | sctp_addip_param_t *asconf_param, | ||
2584 | int no_err) | ||
2585 | { | ||
2586 | sctp_addip_param_t *asconf_ack_param; | ||
2587 | sctp_errhdr_t *err_param; | ||
2588 | int length; | ||
2589 | int asconf_ack_len = asconf_ack->skb->len; | ||
2590 | __u16 err_code; | ||
2591 | |||
2592 | if (no_err) | ||
2593 | err_code = SCTP_ERROR_NO_ERROR; | ||
2594 | else | ||
2595 | err_code = SCTP_ERROR_REQ_REFUSED; | ||
2596 | |||
2597 | /* Skip the addiphdr from the asconf_ack chunk and store a pointer to | ||
2598 | * the first asconf_ack parameter. | ||
2599 | */ | ||
2600 | length = sizeof(sctp_addiphdr_t); | ||
2601 | asconf_ack_param = (sctp_addip_param_t *)(asconf_ack->skb->data + | ||
2602 | length); | ||
2603 | asconf_ack_len -= length; | ||
2604 | |||
2605 | while (asconf_ack_len > 0) { | ||
2606 | if (asconf_ack_param->crr_id == asconf_param->crr_id) { | ||
2607 | switch(asconf_ack_param->param_hdr.type) { | ||
2608 | case SCTP_PARAM_SUCCESS_REPORT: | ||
2609 | return SCTP_ERROR_NO_ERROR; | ||
2610 | case SCTP_PARAM_ERR_CAUSE: | ||
2611 | length = sizeof(sctp_addip_param_t); | ||
2612 | err_param = (sctp_errhdr_t *) | ||
2613 | ((void *)asconf_ack_param + length); | ||
2614 | asconf_ack_len -= length; | ||
2615 | if (asconf_ack_len > 0) | ||
2616 | return err_param->cause; | ||
2617 | else | ||
2618 | return SCTP_ERROR_INV_PARAM; | ||
2619 | break; | ||
2620 | default: | ||
2621 | return SCTP_ERROR_INV_PARAM; | ||
2622 | } | ||
2623 | } | ||
2624 | |||
2625 | length = ntohs(asconf_ack_param->param_hdr.length); | ||
2626 | asconf_ack_param = (sctp_addip_param_t *) | ||
2627 | ((void *)asconf_ack_param + length); | ||
2628 | asconf_ack_len -= length; | ||
2629 | } | ||
2630 | |||
2631 | return err_code; | ||
2632 | } | ||
2633 | |||
2634 | /* Process an incoming ASCONF_ACK chunk against the cached last ASCONF chunk. */ | ||
2635 | int sctp_process_asconf_ack(struct sctp_association *asoc, | ||
2636 | struct sctp_chunk *asconf_ack) | ||
2637 | { | ||
2638 | struct sctp_chunk *asconf = asoc->addip_last_asconf; | ||
2639 | union sctp_addr_param *addr_param; | ||
2640 | sctp_addip_param_t *asconf_param; | ||
2641 | int length = 0; | ||
2642 | int asconf_len = asconf->skb->len; | ||
2643 | int all_param_pass = 0; | ||
2644 | int no_err = 1; | ||
2645 | int retval = 0; | ||
2646 | __u16 err_code = SCTP_ERROR_NO_ERROR; | ||
2647 | |||
2648 | /* Skip the chunkhdr and addiphdr from the last asconf sent and store | ||
2649 | * a pointer to address parameter. | ||
2650 | */ | ||
2651 | length = sizeof(sctp_addip_chunk_t); | ||
2652 | addr_param = (union sctp_addr_param *)(asconf->skb->data + length); | ||
2653 | asconf_len -= length; | ||
2654 | |||
2655 | /* Skip the address parameter in the last asconf sent and store a | ||
2656 | * pointer to the first asconf parameter. | ||
2657 | */ | ||
2658 | length = ntohs(addr_param->v4.param_hdr.length); | ||
2659 | asconf_param = (sctp_addip_param_t *)((void *)addr_param + length); | ||
2660 | asconf_len -= length; | ||
2661 | |||
2662 | /* ADDIP 4.1 | ||
2663 | * A8) If there is no response(s) to specific TLV parameter(s), and no | ||
2664 | * failures are indicated, then all request(s) are considered | ||
2665 | * successful. | ||
2666 | */ | ||
2667 | if (asconf_ack->skb->len == sizeof(sctp_addiphdr_t)) | ||
2668 | all_param_pass = 1; | ||
2669 | |||
2670 | /* Process the TLVs contained in the last sent ASCONF chunk. */ | ||
2671 | while (asconf_len > 0) { | ||
2672 | if (all_param_pass) | ||
2673 | err_code = SCTP_ERROR_NO_ERROR; | ||
2674 | else { | ||
2675 | err_code = sctp_get_asconf_response(asconf_ack, | ||
2676 | asconf_param, | ||
2677 | no_err); | ||
2678 | if (no_err && (SCTP_ERROR_NO_ERROR != err_code)) | ||
2679 | no_err = 0; | ||
2680 | } | ||
2681 | |||
2682 | switch (err_code) { | ||
2683 | case SCTP_ERROR_NO_ERROR: | ||
2684 | retval = sctp_asconf_param_success(asoc, asconf_param); | ||
2685 | break; | ||
2686 | |||
2687 | case SCTP_ERROR_RSRC_LOW: | ||
2688 | retval = 1; | ||
2689 | break; | ||
2690 | |||
2691 | case SCTP_ERROR_INV_PARAM: | ||
2692 | /* Disable sending this type of asconf parameter in | ||
2693 | * future. | ||
2694 | */ | ||
2695 | asoc->peer.addip_disabled_mask |= | ||
2696 | asconf_param->param_hdr.type; | ||
2697 | break; | ||
2698 | |||
2699 | case SCTP_ERROR_REQ_REFUSED: | ||
2700 | case SCTP_ERROR_DEL_LAST_IP: | ||
2701 | case SCTP_ERROR_DEL_SRC_IP: | ||
2702 | default: | ||
2703 | break; | ||
2704 | } | ||
2705 | |||
2706 | /* Skip the processed asconf parameter and move to the next | ||
2707 | * one. | ||
2708 | */ | ||
2709 | length = ntohs(asconf_param->param_hdr.length); | ||
2710 | asconf_param = (sctp_addip_param_t *)((void *)asconf_param + | ||
2711 | length); | ||
2712 | asconf_len -= length; | ||
2713 | } | ||
2714 | |||
2715 | /* Free the cached last sent asconf chunk. */ | ||
2716 | sctp_chunk_free(asconf); | ||
2717 | asoc->addip_last_asconf = NULL; | ||
2718 | |||
2719 | /* Send the next asconf chunk from the addip chunk queue. */ | ||
2720 | asconf = (struct sctp_chunk *)__skb_dequeue(&asoc->addip_chunks); | ||
2721 | if (asconf) { | ||
2722 | /* Hold the chunk until an ASCONF_ACK is received. */ | ||
2723 | sctp_chunk_hold(asconf); | ||
2724 | if (sctp_primitive_ASCONF(asoc, asconf)) | ||
2725 | sctp_chunk_free(asconf); | ||
2726 | else | ||
2727 | asoc->addip_last_asconf = asconf; | ||
2728 | } | ||
2729 | |||
2730 | return retval; | ||
2731 | } | ||
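
Both sctp_get_asconf_response() and sctp_process_asconf_ack() above walk SCTP TLV parameters the same way: advance a pointer by the network-order length field of the current parameter and decrement a remaining-byte counter. The following is a minimal userspace sketch of that walk, not kernel code; the struct and function names (tlv_hdr, walk_tlvs) are invented for illustration, and it adds a length sanity check that the kernel loops rely on earlier packet verification to guarantee.

    /* Standalone sketch of an SCTP-style TLV walk: each parameter starts
     * with a 16-bit type and a 16-bit length (network byte order) that
     * covers the header itself.
     */
    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    struct tlv_hdr {
            uint16_t type;
            uint16_t length;        /* includes this header, network order */
    };

    static void walk_tlvs(const uint8_t *buf, int remaining)
    {
            const struct tlv_hdr *p = (const struct tlv_hdr *)buf;

            while (remaining >= (int)sizeof(*p)) {
                    int len = ntohs(p->length);

                    if (len < (int)sizeof(*p) || len > remaining)
                            break;  /* malformed parameter, stop walking */
                    printf("param type %u, length %d\n", ntohs(p->type), len);
                    p = (const struct tlv_hdr *)((const uint8_t *)p + len);
                    remaining -= len;
            }
    }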
2732 | |||
2733 | /* Make a FWD TSN chunk. */ | ||
2734 | struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc, | ||
2735 | __u32 new_cum_tsn, size_t nstreams, | ||
2736 | struct sctp_fwdtsn_skip *skiplist) | ||
2737 | { | ||
2738 | struct sctp_chunk *retval = NULL; | ||
2739 | struct sctp_fwdtsn_chunk *ftsn_chunk; | ||
2740 | struct sctp_fwdtsn_hdr ftsn_hdr; | ||
2741 | struct sctp_fwdtsn_skip skip; | ||
2742 | size_t hint; | ||
2743 | int i; | ||
2744 | |||
2745 | hint = (nstreams + 1) * sizeof(__u32); | ||
2746 | |||
2747 | /* Maybe set the T-bit if we have no association. */ | ||
2748 | retval = sctp_make_chunk(asoc, SCTP_CID_FWD_TSN, 0, hint); | ||
2749 | |||
2750 | if (!retval) | ||
2751 | return NULL; | ||
2752 | |||
2753 | ftsn_chunk = (struct sctp_fwdtsn_chunk *)retval->subh.fwdtsn_hdr; | ||
2754 | |||
2755 | ftsn_hdr.new_cum_tsn = htonl(new_cum_tsn); | ||
2756 | retval->subh.fwdtsn_hdr = | ||
2757 | sctp_addto_chunk(retval, sizeof(ftsn_hdr), &ftsn_hdr); | ||
2758 | |||
2759 | for (i = 0; i < nstreams; i++) { | ||
2760 | skip.stream = skiplist[i].stream; | ||
2761 | skip.ssn = skiplist[i].ssn; | ||
2762 | sctp_addto_chunk(retval, sizeof(skip), &skip); | ||
2763 | } | ||
2764 | |||
2765 | return retval; | ||
2766 | } | ||
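
The size hint in sctp_make_fwdtsn() above, (nstreams + 1) * sizeof(__u32), reflects the FWD-TSN value layout: a 4-byte new cumulative TSN followed by one 4-byte (stream, ssn) skip entry per skipped stream. A standalone illustration of that arithmetic, not kernel code:

    #include <stdint.h>
    #include <stdio.h>

    struct fwdtsn_skip { uint16_t stream, ssn; };  /* 4 bytes per entry */

    int main(void)
    {
            size_t nstreams = 3;
            /* 4 bytes for the new cumulative TSN plus 4 per skip entry,
             * i.e. (nstreams + 1) * sizeof(uint32_t).
             */
            size_t hint = sizeof(uint32_t) + nstreams * sizeof(struct fwdtsn_skip);

            printf("FWD-TSN value needs %zu bytes for %zu skipped streams\n",
                   hint, nstreams);
            return 0;
    }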
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c new file mode 100644 index 000000000000..f65fa441952f --- /dev/null +++ b/net/sctp/sm_sideeffect.c | |||
@@ -0,0 +1,1395 @@ | |||
1 | /* SCTP kernel reference Implementation | ||
2 | * (C) Copyright IBM Corp. 2001, 2004 | ||
3 | * Copyright (c) 1999 Cisco, Inc. | ||
4 | * Copyright (c) 1999-2001 Motorola, Inc. | ||
5 | * | ||
6 | * This file is part of the SCTP kernel reference Implementation | ||
7 | * | ||
8 | * These functions work with the state functions in sctp_sm_statefuns.c | ||
9 | * to implement the state operations. These functions implement the | ||
10 | * steps which require modifying existing data structures. | ||
11 | * | ||
12 | * The SCTP reference implementation is free software; | ||
13 | * you can redistribute it and/or modify it under the terms of | ||
14 | * the GNU General Public License as published by | ||
15 | * the Free Software Foundation; either version 2, or (at your option) | ||
16 | * any later version. | ||
17 | * | ||
18 | * The SCTP reference implementation is distributed in the hope that it | ||
19 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
20 | * ************************ | ||
21 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
22 | * See the GNU General Public License for more details. | ||
23 | * | ||
24 | * You should have received a copy of the GNU General Public License | ||
25 | * along with GNU CC; see the file COPYING. If not, write to | ||
26 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
27 | * Boston, MA 02111-1307, USA. | ||
28 | * | ||
29 | * Please send any bug reports or fixes you make to the | ||
30 | * email address(es): | ||
31 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
32 | * | ||
33 | * Or submit a bug report through the following website: | ||
34 | * http://www.sf.net/projects/lksctp | ||
35 | * | ||
36 | * Written or modified by: | ||
37 | * La Monte H.P. Yarroll <piggy@acm.org> | ||
38 | * Karl Knutson <karl@athena.chicago.il.us> | ||
39 | * Jon Grimm <jgrimm@austin.ibm.com> | ||
40 | * Hui Huang <hui.huang@nokia.com> | ||
41 | * Dajiang Zhang <dajiang.zhang@nokia.com> | ||
42 | * Daisy Chang <daisyc@us.ibm.com> | ||
43 | * Sridhar Samudrala <sri@us.ibm.com> | ||
44 | * Ardelle Fan <ardelle.fan@intel.com> | ||
45 | * | ||
46 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
47 | * be incorporated into the next SCTP release. | ||
48 | */ | ||
49 | |||
50 | #include <linux/skbuff.h> | ||
51 | #include <linux/types.h> | ||
52 | #include <linux/socket.h> | ||
53 | #include <linux/ip.h> | ||
54 | #include <net/sock.h> | ||
55 | #include <net/sctp/sctp.h> | ||
56 | #include <net/sctp/sm.h> | ||
57 | |||
58 | static int sctp_cmd_interpreter(sctp_event_t event_type, | ||
59 | sctp_subtype_t subtype, | ||
60 | sctp_state_t state, | ||
61 | struct sctp_endpoint *ep, | ||
62 | struct sctp_association *asoc, | ||
63 | void *event_arg, | ||
64 | sctp_disposition_t status, | ||
65 | sctp_cmd_seq_t *commands, | ||
66 | int gfp); | ||
67 | static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, | ||
68 | sctp_state_t state, | ||
69 | struct sctp_endpoint *ep, | ||
70 | struct sctp_association *asoc, | ||
71 | void *event_arg, | ||
72 | sctp_disposition_t status, | ||
73 | sctp_cmd_seq_t *commands, | ||
74 | int gfp); | ||
75 | |||
76 | /******************************************************************** | ||
77 | * Helper functions | ||
78 | ********************************************************************/ | ||
79 | |||
80 | /* A helper function for delayed processing of INET ECN CE bit. */ | ||
81 | static void sctp_do_ecn_ce_work(struct sctp_association *asoc, | ||
82 | __u32 lowest_tsn) | ||
83 | { | ||
84 | /* Save the TSN away for comparison when we receive CWR */ | ||
85 | |||
86 | asoc->last_ecne_tsn = lowest_tsn; | ||
87 | asoc->need_ecne = 1; | ||
88 | } | ||
89 | |||
90 | /* Helper function for delayed processing of SCTP ECNE chunk. */ | ||
91 | /* RFC 2960 Appendix A | ||
92 | * | ||
93 | * RFC 2481 details a specific bit for a sender to send in | ||
94 | * the header of its next outbound TCP segment to indicate to | ||
95 | * its peer that it has reduced its congestion window. This | ||
96 | * is termed the CWR bit. For SCTP the same indication is made | ||
97 | * by including the CWR chunk. This chunk contains one data | ||
98 | * element, i.e. the TSN number that was sent in the ECNE chunk. | ||
99 | * This element represents the lowest TSN number in the datagram | ||
100 | * that was originally marked with the CE bit. | ||
101 | */ | ||
102 | static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc, | ||
103 | __u32 lowest_tsn, | ||
104 | struct sctp_chunk *chunk) | ||
105 | { | ||
106 | struct sctp_chunk *repl; | ||
107 | |||
108 | /* Our previously transmitted packet ran into some congestion | ||
109 | * so we should take action by reducing cwnd and ssthresh | ||
110 | * and then ACK our peer that we've done so by | ||
111 | * sending a CWR. | ||
112 | */ | ||
113 | |||
114 | /* First, try to determine if we want to actually lower | ||
115 | * our cwnd variables. Only lower them if the ECNE looks more | ||
116 | * recent than the last response. | ||
117 | */ | ||
118 | if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) { | ||
119 | struct sctp_transport *transport; | ||
120 | |||
121 | /* Find which transport's congestion variables | ||
122 | * need to be adjusted. | ||
123 | */ | ||
124 | transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn); | ||
125 | |||
126 | /* Update the congestion variables. */ | ||
127 | if (transport) | ||
128 | sctp_transport_lower_cwnd(transport, | ||
129 | SCTP_LOWER_CWND_ECNE); | ||
130 | asoc->last_cwr_tsn = lowest_tsn; | ||
131 | } | ||
132 | |||
133 | /* Always try to quiet the other end. In case of lost CWR, | ||
134 | * resend last_cwr_tsn. | ||
135 | */ | ||
136 | repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk); | ||
137 | |||
138 | /* If we run out of memory, it will look like a lost CWR. We'll | ||
139 | * get back in sync eventually. | ||
140 | */ | ||
141 | return repl; | ||
142 | } | ||
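
The TSN_lt() test above compares 32-bit TSNs with serial-number arithmetic so the ordering stays correct across wrap-around. A standalone sketch of that comparison style (a signed-difference check in the spirit of TSN_lt(); not the kernel's actual definition):

    #include <assert.h>
    #include <stdint.h>

    /* 'a' is "less than" 'b' when the signed 32-bit difference is
     * negative, which keeps working across 2^32 wrap-around.
     */
    static int tsn_lt(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) < 0;
    }

    int main(void)
    {
            assert(tsn_lt(1, 2));
            assert(tsn_lt(0xfffffffeu, 3));  /* holds across the wrap */
            assert(!tsn_lt(5, 5));
            return 0;
    }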
143 | |||
144 | /* Helper function to do delayed processing of ECN CWR chunk. */ | ||
145 | static void sctp_do_ecn_cwr_work(struct sctp_association *asoc, | ||
146 | __u32 lowest_tsn) | ||
147 | { | ||
148 | /* Turn off ECNE getting auto-prepended to every outgoing | ||
149 | * packet | ||
150 | */ | ||
151 | asoc->need_ecne = 0; | ||
152 | } | ||
153 | |||
154 | /* Generate SACK if necessary. We call this at the end of a packet. */ | ||
155 | static int sctp_gen_sack(struct sctp_association *asoc, int force, | ||
156 | sctp_cmd_seq_t *commands) | ||
157 | { | ||
158 | __u32 ctsn, max_tsn_seen; | ||
159 | struct sctp_chunk *sack; | ||
160 | int error = 0; | ||
161 | |||
162 | if (force) | ||
163 | asoc->peer.sack_needed = 1; | ||
164 | |||
165 | ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); | ||
166 | max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map); | ||
167 | |||
168 | /* From 12.2 Parameters necessary per association (i.e. the TCB): | ||
169 | * | ||
170 | * Ack State : This flag indicates if the next received packet | ||
171 | * : is to be responded to with a SACK. ... | ||
172 | * : When DATA chunks are out of order, SACK's | ||
173 | * : are not delayed (see Section 6). | ||
174 | * | ||
175 | * [This is actually not mentioned in Section 6, but we | ||
176 | * implement it here anyway. --piggy] | ||
177 | */ | ||
178 | if (max_tsn_seen != ctsn) | ||
179 | asoc->peer.sack_needed = 1; | ||
180 | |||
181 | /* From 6.2 Acknowledgement on Reception of DATA Chunks: | ||
182 | * | ||
183 | * Section 4.2 of [RFC2581] SHOULD be followed. Specifically, | ||
184 | * an acknowledgement SHOULD be generated for at least every | ||
185 | * second packet (not every second DATA chunk) received, and | ||
186 | * SHOULD be generated within 200 ms of the arrival of any | ||
187 | * unacknowledged DATA chunk. ... | ||
188 | */ | ||
189 | if (!asoc->peer.sack_needed) { | ||
190 | /* We will need a SACK for the next packet. */ | ||
191 | asoc->peer.sack_needed = 1; | ||
192 | goto out; | ||
193 | } else { | ||
194 | if (asoc->a_rwnd > asoc->rwnd) | ||
195 | asoc->a_rwnd = asoc->rwnd; | ||
196 | sack = sctp_make_sack(asoc); | ||
197 | if (!sack) | ||
198 | goto nomem; | ||
199 | |||
200 | asoc->peer.sack_needed = 0; | ||
201 | |||
202 | error = sctp_outq_tail(&asoc->outqueue, sack); | ||
203 | |||
204 | /* Stop the SACK timer. */ | ||
205 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | ||
206 | SCTP_TO(SCTP_EVENT_TIMEOUT_SACK)); | ||
207 | } | ||
208 | out: | ||
209 | return error; | ||
210 | nomem: | ||
211 | error = -ENOMEM; | ||
212 | return error; | ||
213 | } | ||
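
The SACK pacing decision in sctp_gen_sack() boils down to: SACK immediately when forced or when data arrived out of order, otherwise SACK every second packet by "owing" one for the next arrival. A minimal sketch of that decision with invented state, not the kernel's data structures:

    #include <stdbool.h>

    struct sack_state { bool sack_needed; };

    /* Returns true when a SACK should be emitted for this packet. */
    static bool packet_wants_sack(struct sack_state *s, bool out_of_order,
                                  bool force)
    {
            if (force || out_of_order)
                    s->sack_needed = true;

            if (!s->sack_needed) {
                    s->sack_needed = true;  /* SACK the next packet instead */
                    return false;
            }
            s->sack_needed = false;
            return true;
    }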
214 | |||
215 | /* When the T3-RTX timer expires, it calls this function to create the | ||
216 | * relevant state machine event. | ||
217 | */ | ||
218 | void sctp_generate_t3_rtx_event(unsigned long peer) | ||
219 | { | ||
220 | int error; | ||
221 | struct sctp_transport *transport = (struct sctp_transport *) peer; | ||
222 | struct sctp_association *asoc = transport->asoc; | ||
223 | |||
224 | /* Check whether a task is in the sock. */ | ||
225 | |||
226 | sctp_bh_lock_sock(asoc->base.sk); | ||
227 | if (sock_owned_by_user(asoc->base.sk)) { | ||
228 | SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __FUNCTION__); | ||
229 | |||
230 | /* Try again later. */ | ||
231 | if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20))) | ||
232 | sctp_transport_hold(transport); | ||
233 | goto out_unlock; | ||
234 | } | ||
235 | |||
236 | /* Is this transport really dead and just waiting around for | ||
237 | * the timer to let go of the reference? | ||
238 | */ | ||
239 | if (transport->dead) | ||
240 | goto out_unlock; | ||
241 | |||
242 | /* Run through the state machine. */ | ||
243 | error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT, | ||
244 | SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX), | ||
245 | asoc->state, | ||
246 | asoc->ep, asoc, | ||
247 | transport, GFP_ATOMIC); | ||
248 | |||
249 | if (error) | ||
250 | asoc->base.sk->sk_err = -error; | ||
251 | |||
252 | out_unlock: | ||
253 | sctp_bh_unlock_sock(asoc->base.sk); | ||
254 | sctp_transport_put(transport); | ||
255 | } | ||
256 | |||
257 | /* This is a generic interface for producing timeout events. It works | ||
258 | * for timeouts which use the association as their parameter. | ||
259 | */ | ||
260 | static void sctp_generate_timeout_event(struct sctp_association *asoc, | ||
261 | sctp_event_timeout_t timeout_type) | ||
262 | { | ||
263 | int error = 0; | ||
264 | |||
265 | sctp_bh_lock_sock(asoc->base.sk); | ||
266 | if (sock_owned_by_user(asoc->base.sk)) { | ||
267 | SCTP_DEBUG_PRINTK("%s:Sock is busy: timer %d\n", | ||
268 | __FUNCTION__, | ||
269 | timeout_type); | ||
270 | |||
271 | /* Try again later. */ | ||
272 | if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20))) | ||
273 | sctp_association_hold(asoc); | ||
274 | goto out_unlock; | ||
275 | } | ||
276 | |||
277 | /* Is this association really dead and just waiting around for | ||
278 | * the timer to let go of the reference? | ||
279 | */ | ||
280 | if (asoc->base.dead) | ||
281 | goto out_unlock; | ||
282 | |||
283 | /* Run through the state machine. */ | ||
284 | error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT, | ||
285 | SCTP_ST_TIMEOUT(timeout_type), | ||
286 | asoc->state, asoc->ep, asoc, | ||
287 | (void *)timeout_type, GFP_ATOMIC); | ||
288 | |||
289 | if (error) | ||
290 | asoc->base.sk->sk_err = -error; | ||
291 | |||
292 | out_unlock: | ||
293 | sctp_bh_unlock_sock(asoc->base.sk); | ||
294 | sctp_association_put(asoc); | ||
295 | } | ||
296 | |||
297 | static void sctp_generate_t1_cookie_event(unsigned long data) | ||
298 | { | ||
299 | struct sctp_association *asoc = (struct sctp_association *) data; | ||
300 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE); | ||
301 | } | ||
302 | |||
303 | static void sctp_generate_t1_init_event(unsigned long data) | ||
304 | { | ||
305 | struct sctp_association *asoc = (struct sctp_association *) data; | ||
306 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT); | ||
307 | } | ||
308 | |||
309 | static void sctp_generate_t2_shutdown_event(unsigned long data) | ||
310 | { | ||
311 | struct sctp_association *asoc = (struct sctp_association *) data; | ||
312 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN); | ||
313 | } | ||
314 | |||
315 | static void sctp_generate_t4_rto_event(unsigned long data) | ||
316 | { | ||
317 | struct sctp_association *asoc = (struct sctp_association *) data; | ||
318 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO); | ||
319 | } | ||
320 | |||
321 | static void sctp_generate_t5_shutdown_guard_event(unsigned long data) | ||
322 | { | ||
323 | struct sctp_association *asoc = (struct sctp_association *)data; | ||
324 | sctp_generate_timeout_event(asoc, | ||
325 | SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD); | ||
326 | |||
327 | } /* sctp_generate_t5_shutdown_guard_event() */ | ||
328 | |||
329 | static void sctp_generate_autoclose_event(unsigned long data) | ||
330 | { | ||
331 | struct sctp_association *asoc = (struct sctp_association *) data; | ||
332 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE); | ||
333 | } | ||
334 | |||
335 | /* Generate a heartbeat event. If the sock is busy, reschedule. Make | ||
336 | * sure that the transport is still valid. | ||
337 | */ | ||
338 | void sctp_generate_heartbeat_event(unsigned long data) | ||
339 | { | ||
340 | int error = 0; | ||
341 | struct sctp_transport *transport = (struct sctp_transport *) data; | ||
342 | struct sctp_association *asoc = transport->asoc; | ||
343 | |||
344 | sctp_bh_lock_sock(asoc->base.sk); | ||
345 | if (sock_owned_by_user(asoc->base.sk)) { | ||
346 | SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __FUNCTION__); | ||
347 | |||
348 | /* Try again later. */ | ||
349 | if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20))) | ||
350 | sctp_transport_hold(transport); | ||
351 | goto out_unlock; | ||
352 | } | ||
353 | |||
354 | /* Is this structure just waiting around for us to actually | ||
355 | * get destroyed? | ||
356 | */ | ||
357 | if (transport->dead) | ||
358 | goto out_unlock; | ||
359 | |||
360 | error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT, | ||
361 | SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT), | ||
362 | asoc->state, asoc->ep, asoc, | ||
363 | transport, GFP_ATOMIC); | ||
364 | |||
365 | if (error) | ||
366 | asoc->base.sk->sk_err = -error; | ||
367 | |||
368 | out_unlock: | ||
369 | sctp_bh_unlock_sock(asoc->base.sk); | ||
370 | sctp_transport_put(transport); | ||
371 | } | ||
372 | |||
373 | /* Inject a SACK Timeout event into the state machine. */ | ||
374 | static void sctp_generate_sack_event(unsigned long data) | ||
375 | { | ||
376 | struct sctp_association *asoc = (struct sctp_association *) data; | ||
377 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK); | ||
378 | } | ||
379 | |||
380 | sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = { | ||
381 | NULL, | ||
382 | sctp_generate_t1_cookie_event, | ||
383 | sctp_generate_t1_init_event, | ||
384 | sctp_generate_t2_shutdown_event, | ||
385 | NULL, | ||
386 | sctp_generate_t4_rto_event, | ||
387 | sctp_generate_t5_shutdown_guard_event, | ||
388 | sctp_generate_heartbeat_event, | ||
389 | sctp_generate_sack_event, | ||
390 | sctp_generate_autoclose_event, | ||
391 | }; | ||
392 | |||
393 | |||
394 | /* RFC 2960 8.2 Path Failure Detection | ||
395 | * | ||
396 | * When its peer endpoint is multi-homed, an endpoint should keep an | ||
397 | * error counter for each of the destination transport addresses of the | ||
398 | * peer endpoint. | ||
399 | * | ||
400 | * Each time the T3-rtx timer expires on any address, or when a | ||
401 | * HEARTBEAT sent to an idle address is not acknowledged within a RTO, | ||
402 | * the error counter of that destination address will be incremented. | ||
403 | * When the value in the error counter exceeds the protocol parameter | ||
404 | * 'Path.Max.Retrans' of that destination address, the endpoint should | ||
405 | * mark the destination transport address as inactive, and a | ||
406 | * notification SHOULD be sent to the upper layer. | ||
407 | * | ||
408 | */ | ||
409 | static void sctp_do_8_2_transport_strike(struct sctp_association *asoc, | ||
410 | struct sctp_transport *transport) | ||
411 | { | ||
412 | /* The check for association's overall error counter exceeding the | ||
413 | * threshold is done in the state function. | ||
414 | */ | ||
415 | asoc->overall_error_count++; | ||
416 | |||
417 | if (transport->active && | ||
418 | (transport->error_count++ >= transport->max_retrans)) { | ||
419 | SCTP_DEBUG_PRINTK("transport_strike: transport " | ||
420 | "IP:%d.%d.%d.%d failed.\n", | ||
421 | NIPQUAD(transport->ipaddr.v4.sin_addr)); | ||
422 | sctp_assoc_control_transport(asoc, transport, | ||
423 | SCTP_TRANSPORT_DOWN, | ||
424 | SCTP_FAILED_THRESHOLD); | ||
425 | } | ||
426 | |||
427 | /* E2) For the destination address for which the timer | ||
428 | * expires, set RTO <- RTO * 2 ("back off the timer"). The | ||
429 | * maximum value discussed in rule C7 above (RTO.max) may be | ||
430 | * used to provide an upper bound to this doubling operation. | ||
431 | */ | ||
432 | transport->rto = min((transport->rto * 2), transport->asoc->rto_max); | ||
433 | } | ||
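
Rule E2 quoted above doubles the RTO on each strike and caps it at RTO.max, which is exactly the min(rto * 2, rto_max) at the end of sctp_do_8_2_transport_strike(). A standalone sketch of that back-off; the millisecond values are illustrative defaults, not taken from any particular configuration:

    #include <stdio.h>

    int main(void)
    {
            unsigned rto = 3000, rto_max = 60000;   /* ms */

            for (int strike = 1; strike <= 6; strike++) {
                    /* Double the RTO, clamped at RTO.max. */
                    rto = rto * 2 < rto_max ? rto * 2 : rto_max;
                    printf("strike %d: rto=%u ms\n", strike, rto);
            }
            return 0;
    }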
434 | |||
435 | /* Worker routine to handle INIT command failure. */ | ||
436 | static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands, | ||
437 | struct sctp_association *asoc, | ||
438 | unsigned error) | ||
439 | { | ||
440 | struct sctp_ulpevent *event; | ||
441 | |||
442 | event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC, | ||
443 | (__u16)error, 0, 0, | ||
444 | GFP_ATOMIC); | ||
445 | |||
446 | if (event) | ||
447 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, | ||
448 | SCTP_ULPEVENT(event)); | ||
449 | |||
450 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
451 | SCTP_STATE(SCTP_STATE_CLOSED)); | ||
452 | |||
453 | /* SEND_FAILED sent later when cleaning up the association. */ | ||
454 | asoc->outqueue.error = error; | ||
455 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); | ||
456 | } | ||
457 | |||
458 | /* Worker routine to handle SCTP_CMD_ASSOC_FAILED. */ | ||
459 | static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands, | ||
460 | struct sctp_association *asoc, | ||
461 | sctp_event_t event_type, | ||
462 | sctp_subtype_t subtype, | ||
463 | struct sctp_chunk *chunk, | ||
464 | unsigned error) | ||
465 | { | ||
466 | struct sctp_ulpevent *event; | ||
467 | |||
468 | /* Cancel any partial delivery in progress. */ | ||
469 | sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); | ||
470 | |||
471 | event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST, | ||
472 | (__u16)error, 0, 0, | ||
473 | GFP_ATOMIC); | ||
474 | if (event) | ||
475 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, | ||
476 | SCTP_ULPEVENT(event)); | ||
477 | |||
478 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
479 | SCTP_STATE(SCTP_STATE_CLOSED)); | ||
480 | |||
481 | /* Set sk_err to ECONNRESET on a 1-1 style socket. */ | ||
482 | if (!sctp_style(asoc->base.sk, UDP)) | ||
483 | asoc->base.sk->sk_err = ECONNRESET; | ||
484 | |||
485 | /* SEND_FAILED sent later when cleaning up the association. */ | ||
486 | asoc->outqueue.error = error; | ||
487 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); | ||
488 | } | ||
489 | |||
490 | /* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT | ||
491 | * inside the cookie. In reality, this is only used for INIT-ACK processing | ||
492 | * since all other cases use "temporary" associations and can do all | ||
493 | * their work in statefuns directly. | ||
494 | */ | ||
495 | static int sctp_cmd_process_init(sctp_cmd_seq_t *commands, | ||
496 | struct sctp_association *asoc, | ||
497 | struct sctp_chunk *chunk, | ||
498 | sctp_init_chunk_t *peer_init, int gfp) | ||
499 | { | ||
500 | int error; | ||
501 | |||
502 | /* We only process the init as a sideeffect in a single | ||
503 | * case. This is when we process the INIT-ACK. If we | ||
504 | * fail during INIT processing (due to malloc problems), | ||
505 | * just return the error and stop processing the stack. | ||
506 | */ | ||
507 | if (!sctp_process_init(asoc, chunk->chunk_hdr->type, | ||
508 | sctp_source(chunk), peer_init, gfp)) | ||
509 | error = -ENOMEM; | ||
510 | else | ||
511 | error = 0; | ||
512 | |||
513 | return error; | ||
514 | } | ||
515 | |||
516 | /* Helper function to break out starting up of heartbeat timers. */ | ||
517 | static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds, | ||
518 | struct sctp_association *asoc) | ||
519 | { | ||
520 | struct sctp_transport *t; | ||
521 | struct list_head *pos; | ||
522 | |||
523 | /* Start a heartbeat timer for each transport on the association. | ||
524 | * Hold a reference on the transport to make sure none of | ||
525 | * the needed data structures go away. | ||
526 | */ | ||
527 | list_for_each(pos, &asoc->peer.transport_addr_list) { | ||
528 | t = list_entry(pos, struct sctp_transport, transports); | ||
529 | |||
530 | if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) | ||
531 | sctp_transport_hold(t); | ||
532 | } | ||
533 | } | ||
534 | |||
535 | static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds, | ||
536 | struct sctp_association *asoc) | ||
537 | { | ||
538 | struct sctp_transport *t; | ||
539 | struct list_head *pos; | ||
540 | |||
541 | /* Stop all heartbeat timers. */ | ||
542 | |||
543 | list_for_each(pos, &asoc->peer.transport_addr_list) { | ||
544 | t = list_entry(pos, struct sctp_transport, transports); | ||
545 | if (del_timer(&t->hb_timer)) | ||
546 | sctp_transport_put(t); | ||
547 | } | ||
548 | } | ||
549 | |||
550 | /* Helper function to stop any pending T3-RTX timers */ | ||
551 | static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds, | ||
552 | struct sctp_association *asoc) | ||
553 | { | ||
554 | struct sctp_transport *t; | ||
555 | struct list_head *pos; | ||
556 | |||
557 | list_for_each(pos, &asoc->peer.transport_addr_list) { | ||
558 | t = list_entry(pos, struct sctp_transport, transports); | ||
559 | if (timer_pending(&t->T3_rtx_timer) && | ||
560 | del_timer(&t->T3_rtx_timer)) { | ||
561 | sctp_transport_put(t); | ||
562 | } | ||
563 | } | ||
564 | } | ||
565 | |||
566 | |||
567 | /* Helper function to update the heartbeat timer. */ | ||
568 | static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds, | ||
569 | struct sctp_association *asoc, | ||
570 | struct sctp_transport *t) | ||
571 | { | ||
572 | /* Update the heartbeat timer. */ | ||
573 | if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) | ||
574 | sctp_transport_hold(t); | ||
575 | } | ||
576 | |||
577 | /* Helper function to handle the reception of a HEARTBEAT ACK. */ | ||
578 | static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, | ||
579 | struct sctp_association *asoc, | ||
580 | struct sctp_transport *t, | ||
581 | struct sctp_chunk *chunk) | ||
582 | { | ||
583 | sctp_sender_hb_info_t *hbinfo; | ||
584 | |||
585 | /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the | ||
586 | * HEARTBEAT should clear the error counter of the destination | ||
587 | * transport address to which the HEARTBEAT was sent. | ||
588 | * The association's overall error count is also cleared. | ||
589 | */ | ||
590 | t->error_count = 0; | ||
591 | t->asoc->overall_error_count = 0; | ||
592 | |||
593 | /* Mark the destination transport address as active if it is not so | ||
594 | * marked. | ||
595 | */ | ||
596 | if (!t->active) | ||
597 | sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP, | ||
598 | SCTP_HEARTBEAT_SUCCESS); | ||
599 | |||
600 | /* The receiver of the HEARTBEAT ACK should also perform an | ||
601 | * RTT measurement for that destination transport address | ||
602 | * using the time value carried in the HEARTBEAT ACK chunk. | ||
603 | */ | ||
604 | hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data; | ||
605 | sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at)); | ||
606 | } | ||
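
The HEARTBEAT ACK handler above measures the RTT as (jiffies - hbinfo->sent_at) and feeds it to sctp_transport_update_rto(). RFC 2960 section 6.3.1 defines the SRTT/RTTVAR smoothing that such an update performs; the following is a hedged userspace sketch of that calculation (floating point and milliseconds for readability, not the kernel implementation):

    #include <math.h>
    #include <stdio.h>

    struct rto_state {
            double srtt, rttvar, rto;
            int first;
    };

    static void rto_update(struct rto_state *s, double rtt)
    {
            const double alpha = 0.125, beta = 0.25;  /* RTO.Alpha, RTO.Beta */

            if (s->first) {                 /* C2: first measurement */
                    s->srtt = rtt;
                    s->rttvar = rtt / 2;
                    s->first = 0;
            } else {                        /* C3: subsequent measurements */
                    s->rttvar = (1 - beta) * s->rttvar + beta * fabs(s->srtt - rtt);
                    s->srtt = (1 - alpha) * s->srtt + alpha * rtt;
            }
            s->rto = s->srtt + 4 * s->rttvar;  /* clamping to RTO.Min/Max omitted */
    }

    int main(void)
    {
            struct rto_state s = { .first = 1 };

            rto_update(&s, 120.0);
            rto_update(&s, 80.0);
            printf("srtt=%.1f ms rttvar=%.1f ms rto=%.1f ms\n",
                   s.srtt, s.rttvar, s.rto);
            return 0;
    }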
607 | |||
608 | /* Helper function to do a transport reset at the expiry of the heartbeat | ||
609 | * timer. | ||
610 | */ | ||
611 | static void sctp_cmd_transport_reset(sctp_cmd_seq_t *cmds, | ||
612 | struct sctp_association *asoc, | ||
613 | struct sctp_transport *t) | ||
614 | { | ||
615 | sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE); | ||
616 | |||
617 | /* Mark one strike against a transport. */ | ||
618 | sctp_do_8_2_transport_strike(asoc, t); | ||
619 | } | ||
620 | |||
621 | /* Helper function to process the SACK command. */ | ||
622 | static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds, | ||
623 | struct sctp_association *asoc, | ||
624 | struct sctp_sackhdr *sackh) | ||
625 | { | ||
626 | int err; | ||
627 | |||
628 | if (sctp_outq_sack(&asoc->outqueue, sackh)) { | ||
629 | /* There are no more TSNs awaiting SACK. */ | ||
630 | err = sctp_do_sm(SCTP_EVENT_T_OTHER, | ||
631 | SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN), | ||
632 | asoc->state, asoc->ep, asoc, NULL, | ||
633 | GFP_ATOMIC); | ||
634 | } else { | ||
635 | /* Windows may have opened, so we need | ||
636 | * to check if we have DATA to transmit | ||
637 | */ | ||
638 | err = sctp_outq_flush(&asoc->outqueue, 0); | ||
639 | } | ||
640 | |||
641 | return err; | ||
642 | } | ||
643 | |||
644 | /* Helper function to set the timeout value for T2-SHUTDOWN timer and to set | ||
645 | * the transport for a shutdown chunk. | ||
646 | */ | ||
647 | static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds, | ||
648 | struct sctp_association *asoc, | ||
649 | struct sctp_chunk *chunk) | ||
650 | { | ||
651 | struct sctp_transport *t; | ||
652 | |||
653 | t = sctp_assoc_choose_shutdown_transport(asoc); | ||
654 | asoc->shutdown_last_sent_to = t; | ||
655 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto; | ||
656 | chunk->transport = t; | ||
657 | } | ||
658 | |||
659 | /* Helper function to change the state of an association. */ | ||
660 | static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, | ||
661 | struct sctp_association *asoc, | ||
662 | sctp_state_t state) | ||
663 | { | ||
664 | struct sock *sk = asoc->base.sk; | ||
665 | |||
666 | asoc->state = state; | ||
667 | |||
668 | if (sctp_style(sk, TCP)) { | ||
669 | /* Change the sk->sk_state of a TCP-style socket that has | ||
670 | * successfully completed a connect() call. | ||
671 | */ | ||
672 | if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED)) | ||
673 | sk->sk_state = SCTP_SS_ESTABLISHED; | ||
674 | |||
675 | /* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */ | ||
676 | if (sctp_state(asoc, SHUTDOWN_RECEIVED) && | ||
677 | sctp_sstate(sk, ESTABLISHED)) | ||
678 | sk->sk_shutdown |= RCV_SHUTDOWN; | ||
679 | } | ||
680 | |||
681 | if (sctp_state(asoc, ESTABLISHED) || | ||
682 | sctp_state(asoc, CLOSED) || | ||
683 | sctp_state(asoc, SHUTDOWN_RECEIVED)) { | ||
684 | /* Wake up any processes waiting in the asoc's wait queue in | ||
685 | * sctp_wait_for_connect() or sctp_wait_for_sndbuf(). | ||
686 | */ | ||
687 | if (waitqueue_active(&asoc->wait)) | ||
688 | wake_up_interruptible(&asoc->wait); | ||
689 | |||
690 | /* Wake up any processes waiting in the sk's sleep queue of | ||
691 | * a TCP-style or UDP-style peeled-off socket in | ||
692 | * sctp_wait_for_accept() or sctp_wait_for_packet(). | ||
693 | * For a UDP-style socket, the waiters are woken up by the | ||
694 | * notifications. | ||
695 | */ | ||
696 | if (!sctp_style(sk, UDP)) | ||
697 | sk->sk_state_change(sk); | ||
698 | } | ||
699 | } | ||
700 | |||
701 | /* Helper function to delete an association. */ | ||
702 | static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds, | ||
703 | struct sctp_association *asoc) | ||
704 | { | ||
705 | struct sock *sk = asoc->base.sk; | ||
706 | |||
707 | /* If it is a non-temporary association belonging to a TCP-style | ||
708 | * listening socket that is not closed, do not free it so that accept() | ||
709 | * can pick it up later. | ||
710 | */ | ||
711 | if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) && | ||
712 | (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK)) | ||
713 | return; | ||
714 | |||
715 | sctp_unhash_established(asoc); | ||
716 | sctp_association_free(asoc); | ||
717 | } | ||
718 | |||
719 | /* | ||
720 | * ADDIP Section 4.1 ASCONF Chunk Procedures | ||
721 | * A4) Start a T-4 RTO timer, using the RTO value of the selected | ||
722 | * destination address (we use active path instead of primary path just | ||
723 | * because the primary path may be inactive). | ||
724 | */ | ||
725 | static void sctp_cmd_setup_t4(sctp_cmd_seq_t *cmds, | ||
726 | struct sctp_association *asoc, | ||
727 | struct sctp_chunk *chunk) | ||
728 | { | ||
729 | struct sctp_transport *t; | ||
730 | |||
731 | t = asoc->peer.active_path; | ||
732 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto; | ||
733 | chunk->transport = t; | ||
734 | } | ||
735 | |||
736 | /* Process an incoming Operation Error Chunk. */ | ||
737 | static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds, | ||
738 | struct sctp_association *asoc, | ||
739 | struct sctp_chunk *chunk) | ||
740 | { | ||
741 | struct sctp_operr_chunk *operr_chunk; | ||
742 | struct sctp_errhdr *err_hdr; | ||
743 | |||
744 | operr_chunk = (struct sctp_operr_chunk *)chunk->chunk_hdr; | ||
745 | err_hdr = &operr_chunk->err_hdr; | ||
746 | |||
747 | switch (err_hdr->cause) { | ||
748 | case SCTP_ERROR_UNKNOWN_CHUNK: | ||
749 | { | ||
750 | struct sctp_chunkhdr *unk_chunk_hdr; | ||
751 | |||
752 | unk_chunk_hdr = (struct sctp_chunkhdr *)err_hdr->variable; | ||
753 | switch (unk_chunk_hdr->type) { | ||
754 | /* ADDIP 4.1 A9) If the peer responds to an ASCONF with an | ||
755 | * ERROR chunk reporting that it did not recognize the ASCONF | ||
756 | * chunk type, the sender of the ASCONF MUST NOT send any | ||
757 | * further ASCONF chunks and MUST stop its T-4 timer. | ||
758 | */ | ||
759 | case SCTP_CID_ASCONF: | ||
760 | asoc->peer.asconf_capable = 0; | ||
761 | sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP, | ||
762 | SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); | ||
763 | break; | ||
764 | default: | ||
765 | break; | ||
766 | } | ||
767 | break; | ||
768 | } | ||
769 | default: | ||
770 | break; | ||
771 | } | ||
772 | } | ||
773 | |||
774 | /* Process variable FWDTSN chunk information. */ | ||
775 | static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq, | ||
776 | struct sctp_chunk *chunk) | ||
777 | { | ||
778 | struct sctp_fwdtsn_skip *skip; | ||
779 | /* Walk through all the skipped SSNs */ | ||
780 | sctp_walk_fwdtsn(skip, chunk) { | ||
781 | sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn)); | ||
782 | } | ||
783 | |||
784 | return; | ||
785 | } | ||
786 | |||
787 | /* Helper function to remove the association non-primary peer | ||
788 | * transports. | ||
789 | */ | ||
790 | static void sctp_cmd_del_non_primary(struct sctp_association *asoc) | ||
791 | { | ||
792 | struct sctp_transport *t; | ||
793 | struct list_head *pos; | ||
794 | struct list_head *temp; | ||
795 | |||
796 | list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { | ||
797 | t = list_entry(pos, struct sctp_transport, transports); | ||
798 | if (!sctp_cmp_addr_exact(&t->ipaddr, | ||
799 | &asoc->peer.primary_addr)) { | ||
800 | sctp_assoc_del_peer(asoc, &t->ipaddr); | ||
801 | } | ||
802 | } | ||
803 | |||
804 | return; | ||
805 | } | ||
806 | |||
807 | /* These three macros allow us to pull the debugging code out of the | ||
808 | * main flow of sctp_do_sm() to keep attention focused on the real | ||
809 | * functionality there. | ||
810 | */ | ||
811 | #define DEBUG_PRE \ | ||
812 | SCTP_DEBUG_PRINTK("sctp_do_sm prefn: " \ | ||
813 | "ep %p, %s, %s, asoc %p[%s], %s\n", \ | ||
814 | ep, sctp_evttype_tbl[event_type], \ | ||
815 | (*debug_fn)(subtype), asoc, \ | ||
816 | sctp_state_tbl[state], state_fn->name) | ||
817 | |||
818 | #define DEBUG_POST \ | ||
819 | SCTP_DEBUG_PRINTK("sctp_do_sm postfn: " \ | ||
820 | "asoc %p, status: %s\n", \ | ||
821 | asoc, sctp_status_tbl[status]) | ||
822 | |||
823 | #define DEBUG_POST_SFX \ | ||
824 | SCTP_DEBUG_PRINTK("sctp_do_sm post sfx: error %d, asoc %p[%s]\n", \ | ||
825 | error, asoc, \ | ||
826 | sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \ | ||
827 | sctp_assoc2id(asoc)))?asoc->state:SCTP_STATE_CLOSED]) | ||
828 | |||
829 | /* | ||
830 | * This is the master state machine processing function. | ||
831 | * | ||
832 | * If you want to understand all of lksctp, this is a | ||
833 | * good place to start. | ||
834 | */ | ||
835 | int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype, | ||
836 | sctp_state_t state, | ||
837 | struct sctp_endpoint *ep, | ||
838 | struct sctp_association *asoc, | ||
839 | void *event_arg, | ||
840 | int gfp) | ||
841 | { | ||
842 | sctp_cmd_seq_t commands; | ||
843 | const sctp_sm_table_entry_t *state_fn; | ||
844 | sctp_disposition_t status; | ||
845 | int error = 0; | ||
846 | typedef const char *(printfn_t)(sctp_subtype_t); | ||
847 | |||
848 | static printfn_t *table[] = { | ||
849 | NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname, | ||
850 | }; | ||
851 | printfn_t *debug_fn __attribute__ ((unused)) = table[event_type]; | ||
852 | |||
853 | /* Look up the state function, run it, and then process the | ||
854 | * side effects. These three steps are the heart of lksctp. | ||
855 | */ | ||
856 | state_fn = sctp_sm_lookup_event(event_type, state, subtype); | ||
857 | |||
858 | sctp_init_cmd_seq(&commands); | ||
859 | |||
860 | DEBUG_PRE; | ||
861 | status = (*state_fn->fn)(ep, asoc, subtype, event_arg, &commands); | ||
862 | DEBUG_POST; | ||
863 | |||
864 | error = sctp_side_effects(event_type, subtype, state, | ||
865 | ep, asoc, event_arg, status, | ||
866 | &commands, gfp); | ||
867 | DEBUG_POST_SFX; | ||
868 | |||
869 | return error; | ||
870 | } | ||
871 | |||
872 | #undef DEBUG_PRE | ||
873 | #undef DEBUG_POST | ||
874 | |||
875 | /***************************************************************** | ||
876 | * This is the master state function side effect processing function. | ||
877 | *****************************************************************/ | ||
878 | static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, | ||
879 | sctp_state_t state, | ||
880 | struct sctp_endpoint *ep, | ||
881 | struct sctp_association *asoc, | ||
882 | void *event_arg, | ||
883 | sctp_disposition_t status, | ||
884 | sctp_cmd_seq_t *commands, | ||
885 | int gfp) | ||
886 | { | ||
887 | int error; | ||
888 | |||
889 | /* FIXME - Most of the dispositions left today would be categorized | ||
890 | * as "exceptional" dispositions. For those dispositions, it | ||
891 | * may not be proper to run through any of the commands at all. | ||
892 | * For example, the command interpreter might be run only with | ||
893 | * disposition SCTP_DISPOSITION_CONSUME. | ||
894 | */ | ||
895 | if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state, | ||
896 | ep, asoc, | ||
897 | event_arg, status, | ||
898 | commands, gfp))) | ||
899 | goto bail; | ||
900 | |||
901 | switch (status) { | ||
902 | case SCTP_DISPOSITION_DISCARD: | ||
903 | SCTP_DEBUG_PRINTK("Ignored sctp protocol event - state %d, " | ||
904 | "event_type %d, event_id %d\n", | ||
905 | state, event_type, subtype.chunk); | ||
906 | break; | ||
907 | |||
908 | case SCTP_DISPOSITION_NOMEM: | ||
909 | /* We ran out of memory, so we need to discard this | ||
910 | * packet. | ||
911 | */ | ||
912 | /* BUG--we should now recover some memory, probably by | ||
913 | * reneging... | ||
914 | */ | ||
915 | error = -ENOMEM; | ||
916 | break; | ||
917 | |||
918 | case SCTP_DISPOSITION_DELETE_TCB: | ||
919 | /* This should now be a command. */ | ||
920 | break; | ||
921 | |||
922 | case SCTP_DISPOSITION_CONSUME: | ||
923 | case SCTP_DISPOSITION_ABORT: | ||
924 | /* | ||
925 | * We should no longer have much work to do here as the | ||
926 | * real work has been done as explicit commands above. | ||
927 | */ | ||
928 | break; | ||
929 | |||
930 | case SCTP_DISPOSITION_VIOLATION: | ||
931 | printk(KERN_ERR "sctp protocol violation state %d " | ||
932 | "chunkid %d\n", state, subtype.chunk); | ||
933 | break; | ||
934 | |||
935 | case SCTP_DISPOSITION_NOT_IMPL: | ||
936 | printk(KERN_WARNING "sctp unimplemented feature in state %d, " | ||
937 | "event_type %d, event_id %d\n", | ||
938 | state, event_type, subtype.chunk); | ||
939 | break; | ||
940 | |||
941 | case SCTP_DISPOSITION_BUG: | ||
942 | printk(KERN_ERR "sctp bug in state %d, " | ||
943 | "event_type %d, event_id %d\n", | ||
944 | state, event_type, subtype.chunk); | ||
945 | BUG(); | ||
946 | break; | ||
947 | |||
948 | default: | ||
949 | printk(KERN_ERR "sctp impossible disposition %d " | ||
950 | "in state %d, event_type %d, event_id %d\n", | ||
951 | status, state, event_type, subtype.chunk); | ||
952 | BUG(); | ||
953 | break; | ||
954 | }; | ||
955 | |||
956 | bail: | ||
957 | return error; | ||
958 | } | ||
959 | |||
960 | /******************************************************************** | ||
961 | * 2nd Level Abstractions | ||
962 | ********************************************************************/ | ||
963 | |||
964 | /* This is the side-effect interpreter. */ | ||
965 | static int sctp_cmd_interpreter(sctp_event_t event_type, | ||
966 | sctp_subtype_t subtype, | ||
967 | sctp_state_t state, | ||
968 | struct sctp_endpoint *ep, | ||
969 | struct sctp_association *asoc, | ||
970 | void *event_arg, | ||
971 | sctp_disposition_t status, | ||
972 | sctp_cmd_seq_t *commands, | ||
973 | int gfp) | ||
974 | { | ||
975 | int error = 0; | ||
976 | int force; | ||
977 | sctp_cmd_t *cmd; | ||
978 | struct sctp_chunk *new_obj; | ||
979 | struct sctp_chunk *chunk = NULL; | ||
980 | struct sctp_packet *packet; | ||
981 | struct list_head *pos; | ||
982 | struct timer_list *timer; | ||
983 | unsigned long timeout; | ||
984 | struct sctp_transport *t; | ||
985 | struct sctp_sackhdr sackh; | ||
986 | int local_cork = 0; | ||
987 | |||
988 | if (SCTP_EVENT_T_TIMEOUT != event_type) | ||
989 | chunk = (struct sctp_chunk *) event_arg; | ||
990 | |||
991 | /* Note: This whole file is a huge candidate for rework. | ||
992 | * For example, each command could either have its own handler, so | ||
993 | * the loop would look like: | ||
994 | * while (cmds) | ||
995 | * cmd->handle(x, y, z) | ||
996 | * --jgrimm | ||
997 | */ | ||
998 | while (NULL != (cmd = sctp_next_cmd(commands))) { | ||
999 | switch (cmd->verb) { | ||
1000 | case SCTP_CMD_NOP: | ||
1001 | /* Do nothing. */ | ||
1002 | break; | ||
1003 | |||
1004 | case SCTP_CMD_NEW_ASOC: | ||
1005 | /* Register a new association. */ | ||
1006 | if (local_cork) { | ||
1007 | sctp_outq_uncork(&asoc->outqueue); | ||
1008 | local_cork = 0; | ||
1009 | } | ||
1010 | asoc = cmd->obj.ptr; | ||
1011 | /* Register with the endpoint. */ | ||
1012 | sctp_endpoint_add_asoc(ep, asoc); | ||
1013 | sctp_hash_established(asoc); | ||
1014 | break; | ||
1015 | |||
1016 | case SCTP_CMD_UPDATE_ASSOC: | ||
1017 | sctp_assoc_update(asoc, cmd->obj.ptr); | ||
1018 | break; | ||
1019 | |||
1020 | case SCTP_CMD_PURGE_OUTQUEUE: | ||
1021 | sctp_outq_teardown(&asoc->outqueue); | ||
1022 | break; | ||
1023 | |||
1024 | case SCTP_CMD_DELETE_TCB: | ||
1025 | if (local_cork) { | ||
1026 | sctp_outq_uncork(&asoc->outqueue); | ||
1027 | local_cork = 0; | ||
1028 | } | ||
1029 | /* Delete the current association. */ | ||
1030 | sctp_cmd_delete_tcb(commands, asoc); | ||
1031 | asoc = NULL; | ||
1032 | break; | ||
1033 | |||
1034 | case SCTP_CMD_NEW_STATE: | ||
1035 | /* Enter a new state. */ | ||
1036 | sctp_cmd_new_state(commands, asoc, cmd->obj.state); | ||
1037 | break; | ||
1038 | |||
1039 | case SCTP_CMD_REPORT_TSN: | ||
1040 | /* Record the arrival of a TSN. */ | ||
1041 | sctp_tsnmap_mark(&asoc->peer.tsn_map, cmd->obj.u32); | ||
1042 | break; | ||
1043 | |||
1044 | case SCTP_CMD_REPORT_FWDTSN: | ||
1045 | /* Move the Cumulative TSN Ack ahead. */ | ||
1046 | sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32); | ||
1047 | |||
1048 | /* Abort any in progress partial delivery. */ | ||
1049 | sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); | ||
1050 | break; | ||
1051 | |||
1052 | case SCTP_CMD_PROCESS_FWDTSN: | ||
1053 | sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.ptr); | ||
1054 | break; | ||
1055 | |||
1056 | case SCTP_CMD_GEN_SACK: | ||
1057 | /* Generate a Selective ACK. | ||
1058 | * The argument tells us whether to just count | ||
1059 | * the packet and MAYBE generate a SACK, or | ||
1060 | * force a SACK out. | ||
1061 | */ | ||
1062 | force = cmd->obj.i32; | ||
1063 | error = sctp_gen_sack(asoc, force, commands); | ||
1064 | break; | ||
1065 | |||
1066 | case SCTP_CMD_PROCESS_SACK: | ||
1067 | /* Process an inbound SACK. */ | ||
1068 | error = sctp_cmd_process_sack(commands, asoc, | ||
1069 | cmd->obj.ptr); | ||
1070 | break; | ||
1071 | |||
1072 | case SCTP_CMD_GEN_INIT_ACK: | ||
1073 | /* Generate an INIT ACK chunk. */ | ||
1074 | new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC, | ||
1075 | 0); | ||
1076 | if (!new_obj) | ||
1077 | goto nomem; | ||
1078 | |||
1079 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | ||
1080 | SCTP_CHUNK(new_obj)); | ||
1081 | break; | ||
1082 | |||
1083 | case SCTP_CMD_PEER_INIT: | ||
1084 | /* Process a unified INIT from the peer. | ||
1085 | * Note: Only used during INIT-ACK processing. If | ||
1086 | * there is an error just return to the outer | ||
1087 | * layer which will bail. | ||
1088 | */ | ||
1089 | error = sctp_cmd_process_init(commands, asoc, chunk, | ||
1090 | cmd->obj.ptr, gfp); | ||
1091 | break; | ||
1092 | |||
1093 | case SCTP_CMD_GEN_COOKIE_ECHO: | ||
1094 | /* Generate a COOKIE ECHO chunk. */ | ||
1095 | new_obj = sctp_make_cookie_echo(asoc, chunk); | ||
1096 | if (!new_obj) { | ||
1097 | if (cmd->obj.ptr) | ||
1098 | sctp_chunk_free(cmd->obj.ptr); | ||
1099 | goto nomem; | ||
1100 | } | ||
1101 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | ||
1102 | SCTP_CHUNK(new_obj)); | ||
1103 | |||
1104 | /* If there is an ERROR chunk to be sent along with | ||
1105 | * the COOKIE_ECHO, send it, too. | ||
1106 | */ | ||
1107 | if (cmd->obj.ptr) | ||
1108 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | ||
1109 | SCTP_CHUNK(cmd->obj.ptr)); | ||
1110 | |||
1111 | /* FIXME - Eventually come up with a cleaner way to | ||
1112 | * enabling COOKIE-ECHO + DATA bundling during | ||
1113 | * multihoming stale cookie scenarios, the following | ||
1114 | * command plays with asoc->peer.retran_path to | ||
1115 | * avoid the problem of sending the COOKIE-ECHO and | ||
1116 | * DATA in different paths, which could result | ||
1117 | * in the association being ABORTed if the DATA chunk | ||
1118 | * is processed first by the server. Checking the | ||
1119 | * init error counter simply causes this command | ||
1120 | * to be executed only during failed attempts of | ||
1121 | * association establishment. | ||
1122 | */ | ||
1123 | if ((asoc->peer.retran_path != | ||
1124 | asoc->peer.primary_path) && | ||
1125 | (asoc->counters[SCTP_COUNTER_INIT_ERROR] > 0)) { | ||
1126 | sctp_add_cmd_sf(commands, | ||
1127 | SCTP_CMD_FORCE_PRIM_RETRAN, | ||
1128 | SCTP_NULL()); | ||
1129 | } | ||
1130 | |||
1131 | break; | ||
1132 | |||
1133 | case SCTP_CMD_GEN_SHUTDOWN: | ||
1134 | /* Generate SHUTDOWN when in SHUTDOWN_SENT state. | ||
1135 | * Reset error counts. | ||
1136 | */ | ||
1137 | asoc->overall_error_count = 0; | ||
1138 | |||
1139 | /* Generate a SHUTDOWN chunk. */ | ||
1140 | new_obj = sctp_make_shutdown(asoc, chunk); | ||
1141 | if (!new_obj) | ||
1142 | goto nomem; | ||
1143 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | ||
1144 | SCTP_CHUNK(new_obj)); | ||
1145 | break; | ||
1146 | |||
1147 | case SCTP_CMD_CHUNK_ULP: | ||
1148 | /* Send a chunk to the sockets layer. */ | ||
1149 | SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n", | ||
1150 | "chunk_up:", cmd->obj.ptr, | ||
1151 | "ulpq:", &asoc->ulpq); | ||
1152 | sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.ptr, | ||
1153 | GFP_ATOMIC); | ||
1154 | break; | ||
1155 | |||
1156 | case SCTP_CMD_EVENT_ULP: | ||
1157 | /* Send a notification to the sockets layer. */ | ||
1158 | SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n", | ||
1159 | "event_up:",cmd->obj.ptr, | ||
1160 | "ulpq:",&asoc->ulpq); | ||
1161 | sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ptr); | ||
1162 | break; | ||
1163 | |||
1164 | case SCTP_CMD_REPLY: | ||
1165 | /* If the caller has not already corked, do cork. */ | ||
1166 | if (!asoc->outqueue.cork) { | ||
1167 | sctp_outq_cork(&asoc->outqueue); | ||
1168 | local_cork = 1; | ||
1169 | } | ||
1170 | /* Send a chunk to our peer. */ | ||
1171 | error = sctp_outq_tail(&asoc->outqueue, cmd->obj.ptr); | ||
1172 | break; | ||
1173 | |||
1174 | case SCTP_CMD_SEND_PKT: | ||
1175 | /* Send a full packet to our peer. */ | ||
1176 | packet = cmd->obj.ptr; | ||
1177 | sctp_packet_transmit(packet); | ||
1178 | sctp_ootb_pkt_free(packet); | ||
1179 | break; | ||
1180 | |||
1181 | case SCTP_CMD_RETRAN: | ||
1182 | /* Mark a transport for retransmission. */ | ||
1183 | sctp_retransmit(&asoc->outqueue, cmd->obj.transport, | ||
1184 | SCTP_RTXR_T3_RTX); | ||
1185 | break; | ||
1186 | |||
1187 | case SCTP_CMD_TRANSMIT: | ||
1188 | /* Kick start transmission. */ | ||
1189 | error = sctp_outq_uncork(&asoc->outqueue); | ||
1190 | local_cork = 0; | ||
1191 | break; | ||
1192 | |||
1193 | case SCTP_CMD_ECN_CE: | ||
1194 | /* Do delayed CE processing. */ | ||
1195 | sctp_do_ecn_ce_work(asoc, cmd->obj.u32); | ||
1196 | break; | ||
1197 | |||
1198 | case SCTP_CMD_ECN_ECNE: | ||
1199 | /* Do delayed ECNE processing. */ | ||
1200 | new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32, | ||
1201 | chunk); | ||
1202 | if (new_obj) | ||
1203 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | ||
1204 | SCTP_CHUNK(new_obj)); | ||
1205 | break; | ||
1206 | |||
1207 | case SCTP_CMD_ECN_CWR: | ||
1208 | /* Do delayed CWR processing. */ | ||
1209 | sctp_do_ecn_cwr_work(asoc, cmd->obj.u32); | ||
1210 | break; | ||
1211 | |||
1212 | case SCTP_CMD_SETUP_T2: | ||
1213 | sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr); | ||
1214 | break; | ||
1215 | |||
1216 | case SCTP_CMD_TIMER_START: | ||
1217 | timer = &asoc->timers[cmd->obj.to]; | ||
1218 | timeout = asoc->timeouts[cmd->obj.to]; | ||
1219 | if (!timeout) | ||
1220 | BUG(); | ||
1221 | |||
1222 | timer->expires = jiffies + timeout; | ||
1223 | sctp_association_hold(asoc); | ||
1224 | add_timer(timer); | ||
1225 | break; | ||
1226 | |||
1227 | case SCTP_CMD_TIMER_RESTART: | ||
1228 | timer = &asoc->timers[cmd->obj.to]; | ||
1229 | timeout = asoc->timeouts[cmd->obj.to]; | ||
1230 | if (!mod_timer(timer, jiffies + timeout)) | ||
1231 | sctp_association_hold(asoc); | ||
1232 | break; | ||
1233 | |||
1234 | case SCTP_CMD_TIMER_STOP: | ||
1235 | timer = &asoc->timers[cmd->obj.to]; | ||
1236 | if (timer_pending(timer) && del_timer(timer)) | ||
1237 | sctp_association_put(asoc); | ||
1238 | break; | ||
1239 | |||
1240 | case SCTP_CMD_INIT_RESTART: | ||
1241 | /* Do the needed accounting and updates | ||
1242 | * associated with restarting an initialization | ||
1243 | * timer. | ||
1244 | */ | ||
1245 | asoc->counters[SCTP_COUNTER_INIT_ERROR]++; | ||
1246 | asoc->timeouts[cmd->obj.to] *= 2; | ||
1247 | if (asoc->timeouts[cmd->obj.to] > | ||
1248 | asoc->max_init_timeo) { | ||
1249 | asoc->timeouts[cmd->obj.to] = | ||
1250 | asoc->max_init_timeo; | ||
1251 | } | ||
1252 | |||
1253 | /* If we've sent any data bundled with | ||
1254 | * COOKIE-ECHO we need to resend. | ||
1255 | */ | ||
1256 | list_for_each(pos, &asoc->peer.transport_addr_list) { | ||
1257 | t = list_entry(pos, struct sctp_transport, | ||
1258 | transports); | ||
1259 | sctp_retransmit_mark(&asoc->outqueue, t, 0); | ||
1260 | } | ||
1261 | |||
1262 | sctp_add_cmd_sf(commands, | ||
1263 | SCTP_CMD_TIMER_RESTART, | ||
1264 | SCTP_TO(cmd->obj.to)); | ||
1265 | break; | ||
1266 | |||
1267 | case SCTP_CMD_INIT_FAILED: | ||
1268 | sctp_cmd_init_failed(commands, asoc, cmd->obj.u32); | ||
1269 | break; | ||
1270 | |||
1271 | case SCTP_CMD_ASSOC_FAILED: | ||
1272 | sctp_cmd_assoc_failed(commands, asoc, event_type, | ||
1273 | subtype, chunk, cmd->obj.u32); | ||
1274 | break; | ||
1275 | |||
1276 | case SCTP_CMD_COUNTER_INC: | ||
1277 | asoc->counters[cmd->obj.counter]++; | ||
1278 | break; | ||
1279 | |||
1280 | case SCTP_CMD_COUNTER_RESET: | ||
1281 | asoc->counters[cmd->obj.counter] = 0; | ||
1282 | break; | ||
1283 | |||
1284 | case SCTP_CMD_REPORT_DUP: | ||
1285 | sctp_tsnmap_mark_dup(&asoc->peer.tsn_map, | ||
1286 | cmd->obj.u32); | ||
1287 | break; | ||
1288 | |||
1289 | case SCTP_CMD_REPORT_BAD_TAG: | ||
1290 | SCTP_DEBUG_PRINTK("vtag mismatch!\n"); | ||
1291 | break; | ||
1292 | |||
1293 | case SCTP_CMD_STRIKE: | ||
1294 | /* Mark one strike against a transport. */ | ||
1295 | sctp_do_8_2_transport_strike(asoc, cmd->obj.transport); | ||
1296 | break; | ||
1297 | |||
1298 | case SCTP_CMD_TRANSPORT_RESET: | ||
1299 | t = cmd->obj.transport; | ||
1300 | sctp_cmd_transport_reset(commands, asoc, t); | ||
1301 | break; | ||
1302 | |||
1303 | case SCTP_CMD_TRANSPORT_ON: | ||
1304 | t = cmd->obj.transport; | ||
1305 | sctp_cmd_transport_on(commands, asoc, t, chunk); | ||
1306 | break; | ||
1307 | |||
1308 | case SCTP_CMD_HB_TIMERS_START: | ||
1309 | sctp_cmd_hb_timers_start(commands, asoc); | ||
1310 | break; | ||
1311 | |||
1312 | case SCTP_CMD_HB_TIMER_UPDATE: | ||
1313 | t = cmd->obj.transport; | ||
1314 | sctp_cmd_hb_timer_update(commands, asoc, t); | ||
1315 | break; | ||
1316 | |||
1317 | case SCTP_CMD_HB_TIMERS_STOP: | ||
1318 | sctp_cmd_hb_timers_stop(commands, asoc); | ||
1319 | break; | ||
1320 | |||
1321 | case SCTP_CMD_REPORT_ERROR: | ||
1322 | error = cmd->obj.error; | ||
1323 | break; | ||
1324 | |||
1325 | case SCTP_CMD_PROCESS_CTSN: | ||
1326 | /* Dummy up a SACK for processing. */ | ||
1327 | sackh.cum_tsn_ack = cmd->obj.u32; | ||
1328 | sackh.a_rwnd = 0; | ||
1329 | sackh.num_gap_ack_blocks = 0; | ||
1330 | sackh.num_dup_tsns = 0; | ||
1331 | sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, | ||
1332 | SCTP_SACKH(&sackh)); | ||
1333 | break; | ||
1334 | |||
1335 | case SCTP_CMD_DISCARD_PACKET: | ||
1336 | /* We need to discard the whole packet. */ | ||
1337 | chunk->pdiscard = 1; | ||
1338 | break; | ||
1339 | |||
1340 | case SCTP_CMD_RTO_PENDING: | ||
1341 | t = cmd->obj.transport; | ||
1342 | t->rto_pending = 1; | ||
1343 | break; | ||
1344 | |||
1345 | case SCTP_CMD_PART_DELIVER: | ||
1346 | sctp_ulpq_partial_delivery(&asoc->ulpq, cmd->obj.ptr, | ||
1347 | GFP_ATOMIC); | ||
1348 | break; | ||
1349 | |||
1350 | case SCTP_CMD_RENEGE: | ||
1351 | sctp_ulpq_renege(&asoc->ulpq, cmd->obj.ptr, | ||
1352 | GFP_ATOMIC); | ||
1353 | break; | ||
1354 | |||
1355 | case SCTP_CMD_SETUP_T4: | ||
1356 | sctp_cmd_setup_t4(commands, asoc, cmd->obj.ptr); | ||
1357 | break; | ||
1358 | |||
1359 | case SCTP_CMD_PROCESS_OPERR: | ||
1360 | sctp_cmd_process_operr(commands, asoc, chunk); | ||
1361 | break; | ||
1362 | case SCTP_CMD_CLEAR_INIT_TAG: | ||
1363 | asoc->peer.i.init_tag = 0; | ||
1364 | break; | ||
1365 | case SCTP_CMD_DEL_NON_PRIMARY: | ||
1366 | sctp_cmd_del_non_primary(asoc); | ||
1367 | break; | ||
1368 | case SCTP_CMD_T3_RTX_TIMERS_STOP: | ||
1369 | sctp_cmd_t3_rtx_timers_stop(commands, asoc); | ||
1370 | break; | ||
1371 | case SCTP_CMD_FORCE_PRIM_RETRAN: | ||
1372 | t = asoc->peer.retran_path; | ||
1373 | asoc->peer.retran_path = asoc->peer.primary_path; | ||
1374 | error = sctp_outq_uncork(&asoc->outqueue); | ||
1375 | local_cork = 0; | ||
1376 | asoc->peer.retran_path = t; | ||
1377 | break; | ||
1378 | default: | ||
1379 | printk(KERN_WARNING "Impossible command: %u, %p\n", | ||
1380 | cmd->verb, cmd->obj.ptr); | ||
1381 | break; | ||
1382 | } | ||
1383 | if (error) | ||
1384 | break; | ||
1385 | } | ||
1386 | |||
1387 | out: | ||
1388 | if (local_cork) | ||
1389 | sctp_outq_uncork(&asoc->outqueue); | ||
1390 | return error; | ||
1391 | nomem: | ||
1392 | error = -ENOMEM; | ||
1393 | goto out; | ||
1394 | } | ||
1395 | |||
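The interpreter ending above walks a queued command sequence, switching on each command's verb and reading its argument out of a small union, and leaves the loop as soon as a command reports an error. A stand-alone sketch of that verb-plus-union-argument pattern, using hypothetical simplified types rather than the kernel's sctp_cmd_seq_t machinery, could look like this:

#include <stdio.h>

/* Hypothetical, simplified stand-in for the kernel's command sequence:
 * each command carries a verb plus an argument, and the interpreter
 * runs commands in order until the queue is drained or a command
 * reports an error.
 */
enum verb { CMD_NOP, CMD_COUNTER_INC, CMD_REPORT_ERROR };

struct cmd {
	enum verb verb;
	int arg;
};

static int run_commands(const struct cmd *seq, int n, int *counter)
{
	int error = 0;
	int i;

	for (i = 0; i < n; i++) {
		switch (seq[i].verb) {
		case CMD_NOP:
			break;
		case CMD_COUNTER_INC:
			(*counter)++;
			break;
		case CMD_REPORT_ERROR:
			error = seq[i].arg;
			break;
		}
		if (error)
			break;	/* stop on the first reported error */
	}
	return error;
}

int main(void)
{
	struct cmd seq[] = {
		{ CMD_COUNTER_INC, 0 },
		{ CMD_REPORT_ERROR, -1 },
		{ CMD_COUNTER_INC, 0 },	/* never reached */
	};
	int counter = 0;
	int err = run_commands(seq, 3, &counter);

	printf("counter=%d err=%d\n", counter, err);	/* counter=1 err=-1 */
	return 0;
}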
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c new file mode 100644 index 000000000000..278c56a2d076 --- /dev/null +++ b/net/sctp/sm_statefuns.c | |||
@@ -0,0 +1,5238 @@ | |||
1 | /* SCTP kernel reference Implementation | ||
2 | * (C) Copyright IBM Corp. 2001, 2004 | ||
3 | * Copyright (c) 1999-2000 Cisco, Inc. | ||
4 | * Copyright (c) 1999-2001 Motorola, Inc. | ||
5 | * Copyright (c) 2001-2002 Intel Corp. | ||
6 | * Copyright (c) 2002 Nokia Corp. | ||
7 | * | ||
8 | * This file is part of the SCTP kernel reference Implementation | ||
9 | * | ||
10 | * This is part of the SCTP Linux Kernel Reference Implementation. | ||
11 | * | ||
12 | * These are the state functions for the state machine. | ||
13 | * | ||
14 | * The SCTP reference implementation is free software; | ||
15 | * you can redistribute it and/or modify it under the terms of | ||
16 | * the GNU General Public License as published by | ||
17 | * the Free Software Foundation; either version 2, or (at your option) | ||
18 | * any later version. | ||
19 | * | ||
20 | * The SCTP reference implementation is distributed in the hope that it | ||
21 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
22 | * ************************ | ||
23 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
24 | * See the GNU General Public License for more details. | ||
25 | * | ||
26 | * You should have received a copy of the GNU General Public License | ||
27 | * along with GNU CC; see the file COPYING. If not, write to | ||
28 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
29 | * Boston, MA 02111-1307, USA. | ||
30 | * | ||
31 | * Please send any bug reports or fixes you make to the | ||
32 | * email address(es): | ||
33 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
34 | * | ||
35 | * Or submit a bug report through the following website: | ||
36 | * http://www.sf.net/projects/lksctp | ||
37 | * | ||
38 | * Written or modified by: | ||
39 | * La Monte H.P. Yarroll <piggy@acm.org> | ||
40 | * Karl Knutson <karl@athena.chicago.il.us> | ||
41 | * Mathew Kotowsky <kotowsky@sctp.org> | ||
42 | * Sridhar Samudrala <samudrala@us.ibm.com> | ||
43 | * Jon Grimm <jgrimm@us.ibm.com> | ||
44 | * Hui Huang <hui.huang@nokia.com> | ||
45 | * Dajiang Zhang <dajiang.zhang@nokia.com> | ||
46 | * Daisy Chang <daisyc@us.ibm.com> | ||
47 | * Ardelle Fan <ardelle.fan@intel.com> | ||
48 | * Ryan Layer <rmlayer@us.ibm.com> | ||
49 | * Kevin Gao <kevin.gao@intel.com> | ||
50 | * | ||
51 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
52 | * be incorporated into the next SCTP release. | ||
53 | */ | ||
54 | |||
55 | #include <linux/types.h> | ||
56 | #include <linux/kernel.h> | ||
57 | #include <linux/ip.h> | ||
58 | #include <linux/ipv6.h> | ||
59 | #include <linux/net.h> | ||
60 | #include <linux/inet.h> | ||
61 | #include <net/sock.h> | ||
62 | #include <net/inet_ecn.h> | ||
63 | #include <linux/skbuff.h> | ||
64 | #include <net/sctp/sctp.h> | ||
65 | #include <net/sctp/sm.h> | ||
66 | #include <net/sctp/structs.h> | ||
67 | |||
68 | static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep, | ||
69 | const struct sctp_association *asoc, | ||
70 | struct sctp_chunk *chunk, | ||
71 | const void *payload, | ||
72 | size_t paylen); | ||
73 | static int sctp_eat_data(const struct sctp_association *asoc, | ||
74 | struct sctp_chunk *chunk, | ||
75 | sctp_cmd_seq_t *commands); | ||
76 | static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc, | ||
77 | const struct sctp_chunk *chunk); | ||
78 | static void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep, | ||
79 | const struct sctp_association *asoc, | ||
80 | const struct sctp_chunk *chunk, | ||
81 | sctp_cmd_seq_t *commands, | ||
82 | struct sctp_chunk *err_chunk); | ||
83 | static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep, | ||
84 | const struct sctp_association *asoc, | ||
85 | const sctp_subtype_t type, | ||
86 | void *arg, | ||
87 | sctp_cmd_seq_t *commands); | ||
88 | static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep, | ||
89 | const struct sctp_association *asoc, | ||
90 | const sctp_subtype_t type, | ||
91 | void *arg, | ||
92 | sctp_cmd_seq_t *commands); | ||
93 | static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk); | ||
94 | |||
95 | |||
96 | /* Small helper function that checks whether the chunk is at least | ||
97 | * 'required_length' bytes long. The 'required_length' argument | ||
98 | * is the size of the specific chunk type we are testing. | ||
99 | * Return Values: 1 = Valid length | ||
100 | * 0 = Invalid length | ||
101 | * | ||
102 | */ | ||
103 | static inline int | ||
104 | sctp_chunk_length_valid(struct sctp_chunk *chunk, | ||
105 | __u16 required_length) | ||
106 | { | ||
107 | __u16 chunk_length = ntohs(chunk->chunk_hdr->length); | ||
108 | |||
109 | if (unlikely(chunk_length < required_length)) | ||
110 | return 0; | ||
111 | |||
112 | return 1; | ||
113 | } | ||
114 | |||
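As a stand-alone illustration of the same bounds check, the sketch below uses a hypothetical miniature chunk header; like the real sctp_chunkhdr_t, it carries a big-endian length field that is compared against the minimum size of the chunk being validated:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical miniature chunk header; the length stays in network
 * byte order, as it is on the wire.
 */
struct mini_chunkhdr {
	uint8_t  type;
	uint8_t  flags;
	uint16_t length;	/* network byte order */
};

/* Same idea as sctp_chunk_length_valid(): 1 = valid, 0 = too short. */
static int chunk_length_valid(const struct mini_chunkhdr *hdr,
			      uint16_t required_length)
{
	return ntohs(hdr->length) >= required_length;
}

int main(void)
{
	struct mini_chunkhdr hdr = { .type = 1, .flags = 0,
				     .length = htons(20) };

	/* A 20-byte chunk satisfies a 16-byte minimum but not 24 bytes. */
	printf("%d %d\n", chunk_length_valid(&hdr, 16),
	       chunk_length_valid(&hdr, 24));
	return 0;
}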
115 | /********************************************************** | ||
116 | * These are the state functions for handling chunk events. | ||
117 | **********************************************************/ | ||
118 | |||
119 | /* | ||
120 | * Process the final SHUTDOWN COMPLETE. | ||
121 | * | ||
122 | * Section: 4 (C) (diagram), 9.2 | ||
123 | * Upon reception of the SHUTDOWN COMPLETE chunk the endpoint will verify | ||
124 | * that it is in SHUTDOWN-ACK-SENT state, if it is not the chunk should be | ||
125 | * discarded. If the endpoint is in the SHUTDOWN-ACK-SENT state the endpoint | ||
126 | * should stop the T2-shutdown timer and remove all knowledge of the | ||
127 | * association (and thus the association enters the CLOSED state). | ||
128 | * | ||
129 | * Verification Tag: 8.5.1(C) | ||
130 | * C) Rules for packet carrying SHUTDOWN COMPLETE: | ||
131 | * ... | ||
132 | * - The receiver of a SHUTDOWN COMPLETE shall accept the packet if the | ||
133 | * Verification Tag field of the packet matches its own tag OR it is | ||
134 | * set to its peer's tag and the T bit is set in the Chunk Flags. | ||
135 | * Otherwise, the receiver MUST silently discard the packet and take | ||
136 | * no further action. An endpoint MUST ignore the SHUTDOWN COMPLETE if | ||
137 | * it is not in the SHUTDOWN-ACK-SENT state. | ||
138 | * | ||
139 | * Inputs | ||
140 | * (endpoint, asoc, chunk) | ||
141 | * | ||
142 | * Outputs | ||
143 | * (asoc, reply_msg, msg_up, timers, counters) | ||
144 | * | ||
145 | * The return value is the disposition of the chunk. | ||
146 | */ | ||
147 | sctp_disposition_t sctp_sf_do_4_C(const struct sctp_endpoint *ep, | ||
148 | const struct sctp_association *asoc, | ||
149 | const sctp_subtype_t type, | ||
150 | void *arg, | ||
151 | sctp_cmd_seq_t *commands) | ||
152 | { | ||
153 | struct sctp_chunk *chunk = arg; | ||
154 | struct sctp_ulpevent *ev; | ||
155 | |||
156 | /* RFC 2960 6.10 Bundling | ||
157 | * | ||
158 | * An endpoint MUST NOT bundle INIT, INIT ACK or | ||
159 | * SHUTDOWN COMPLETE with any other chunks. | ||
160 | */ | ||
161 | if (!chunk->singleton) | ||
162 | return SCTP_DISPOSITION_VIOLATION; | ||
163 | |||
164 | if (!sctp_vtag_verify_either(chunk, asoc)) | ||
165 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
166 | |||
167 | /* RFC 2960 10.2 SCTP-to-ULP | ||
168 | * | ||
169 | * H) SHUTDOWN COMPLETE notification | ||
170 | * | ||
171 | * When SCTP completes the shutdown procedures (section 9.2) this | ||
172 | * notification is passed to the upper layer. | ||
173 | */ | ||
174 | ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_SHUTDOWN_COMP, | ||
175 | 0, 0, 0, GFP_ATOMIC); | ||
176 | if (!ev) | ||
177 | goto nomem; | ||
178 | |||
179 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); | ||
180 | |||
181 | /* Upon reception of the SHUTDOWN COMPLETE chunk the endpoint | ||
182 | * will verify that it is in SHUTDOWN-ACK-SENT state, if it is | ||
183 | * not the chunk should be discarded. If the endpoint is in | ||
184 | * the SHUTDOWN-ACK-SENT state the endpoint should stop the | ||
185 | * T2-shutdown timer and remove all knowledge of the | ||
186 | * association (and thus the association enters the CLOSED | ||
187 | * state). | ||
188 | */ | ||
189 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | ||
190 | SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); | ||
191 | |||
192 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | ||
193 | SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); | ||
194 | |||
195 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
196 | SCTP_STATE(SCTP_STATE_CLOSED)); | ||
197 | |||
198 | SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS); | ||
199 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | ||
200 | |||
201 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); | ||
202 | |||
203 | return SCTP_DISPOSITION_DELETE_TCB; | ||
204 | |||
205 | nomem: | ||
206 | return SCTP_DISPOSITION_NOMEM; | ||
207 | } | ||
208 | |||
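The 8.5.1(C) rule applied by sctp_sf_do_4_C() above can also be illustrated in isolation. The sketch below is only a reading of the rule as quoted in the comment; the helper name vtag_verify_either and its parameters are hypothetical, and the kernel's authoritative check lives in sctp_vtag_verify_either():

#include <stdint.h>
#include <stdio.h>

/* Hypothetical check: accept the packet if its Verification Tag matches
 * our own tag, or if the T bit is set and it matches the peer's tag.
 */
static int vtag_verify_either(uint32_t pkt_vtag, int t_bit,
			      uint32_t my_vtag, uint32_t peer_vtag)
{
	if (pkt_vtag == my_vtag)
		return 1;
	if (t_bit && pkt_vtag == peer_vtag)
		return 1;
	return 0;
}

int main(void)
{
	printf("%d\n", vtag_verify_either(0x1234, 0, 0x1234, 0x9999)); /* 1 */
	printf("%d\n", vtag_verify_either(0x9999, 1, 0x1234, 0x9999)); /* 1 */
	printf("%d\n", vtag_verify_either(0x9999, 0, 0x1234, 0x9999)); /* 0 */
	return 0;
}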
209 | /* | ||
210 | * Respond to a normal INIT chunk. | ||
211 | * We are the side that is being asked for an association. | ||
212 | * | ||
213 | * Section: 5.1 Normal Establishment of an Association, B | ||
214 | * B) "Z" shall respond immediately with an INIT ACK chunk. The | ||
215 | * destination IP address of the INIT ACK MUST be set to the source | ||
216 | * IP address of the INIT to which this INIT ACK is responding. In | ||
217 | * the response, besides filling in other parameters, "Z" must set the | ||
218 | * Verification Tag field to Tag_A, and also provide its own | ||
219 | * Verification Tag (Tag_Z) in the Initiate Tag field. | ||
220 | * | ||
221 | * Verification Tag: Must be 0. | ||
222 | * | ||
223 | * Inputs | ||
224 | * (endpoint, asoc, chunk) | ||
225 | * | ||
226 | * Outputs | ||
227 | * (asoc, reply_msg, msg_up, timers, counters) | ||
228 | * | ||
229 | * The return value is the disposition of the chunk. | ||
230 | */ | ||
231 | sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep, | ||
232 | const struct sctp_association *asoc, | ||
233 | const sctp_subtype_t type, | ||
234 | void *arg, | ||
235 | sctp_cmd_seq_t *commands) | ||
236 | { | ||
237 | struct sctp_chunk *chunk = arg; | ||
238 | struct sctp_chunk *repl; | ||
239 | struct sctp_association *new_asoc; | ||
240 | struct sctp_chunk *err_chunk; | ||
241 | struct sctp_packet *packet; | ||
242 | sctp_unrecognized_param_t *unk_param; | ||
243 | struct sock *sk; | ||
244 | int len; | ||
245 | |||
246 | /* 6.10 Bundling | ||
247 | * An endpoint MUST NOT bundle INIT, INIT ACK or | ||
248 | * SHUTDOWN COMPLETE with any other chunks. | ||
249 | * | ||
250 | * IG Section 2.11.2 | ||
251 | * Furthermore, we require that the receiver of an INIT chunk MUST | ||
252 | * enforce these rules by silently discarding an arriving packet | ||
253 | * with an INIT chunk that is bundled with other chunks. | ||
254 | */ | ||
255 | if (!chunk->singleton) | ||
256 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
257 | |||
258 | /* If the packet is an OOTB packet which is temporarily on the | ||
259 | * control endpoint, respond with an ABORT. | ||
260 | */ | ||
261 | if (ep == sctp_sk((sctp_get_ctl_sock()))->ep) | ||
262 | return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); | ||
263 | |||
264 | sk = ep->base.sk; | ||
265 | /* If the endpoint is not listening or if the number of associations | ||
266 | * on the TCP-style socket exceeds the max backlog, respond with an | ||
267 | * ABORT. | ||
268 | */ | ||
269 | if (!sctp_sstate(sk, LISTENING) || | ||
270 | (sctp_style(sk, TCP) && | ||
271 | sk_acceptq_is_full(sk))) | ||
272 | return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); | ||
273 | |||
274 | /* 3.1 A packet containing an INIT chunk MUST have a zero Verification | ||
275 | * Tag. | ||
276 | */ | ||
277 | if (chunk->sctp_hdr->vtag != 0) | ||
278 | return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); | ||
279 | |||
280 | /* Make sure that the INIT chunk has a valid length. | ||
281 | * Normally, this would cause an ABORT with a Protocol Violation | ||
282 | * error, but since we don't have an association, we'll | ||
283 | * just discard the packet. | ||
284 | */ | ||
285 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t))) | ||
286 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
287 | |||
288 | /* Verify the INIT chunk before processing it. */ | ||
289 | err_chunk = NULL; | ||
290 | if (!sctp_verify_init(asoc, chunk->chunk_hdr->type, | ||
291 | (sctp_init_chunk_t *)chunk->chunk_hdr, chunk, | ||
292 | &err_chunk)) { | ||
293 | /* This chunk contains a fatal error. It is to be discarded. | ||
294 | * Send an ABORT, with causes if there are any. | ||
295 | */ | ||
296 | if (err_chunk) { | ||
297 | packet = sctp_abort_pkt_new(ep, asoc, arg, | ||
298 | (__u8 *)(err_chunk->chunk_hdr) + | ||
299 | sizeof(sctp_chunkhdr_t), | ||
300 | ntohs(err_chunk->chunk_hdr->length) - | ||
301 | sizeof(sctp_chunkhdr_t)); | ||
302 | |||
303 | sctp_chunk_free(err_chunk); | ||
304 | |||
305 | if (packet) { | ||
306 | sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, | ||
307 | SCTP_PACKET(packet)); | ||
308 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); | ||
309 | return SCTP_DISPOSITION_CONSUME; | ||
310 | } else { | ||
311 | return SCTP_DISPOSITION_NOMEM; | ||
312 | } | ||
313 | } else { | ||
314 | return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, | ||
315 | commands); | ||
316 | } | ||
317 | } | ||
318 | |||
319 | /* Grab the INIT header. */ | ||
320 | chunk->subh.init_hdr = (sctp_inithdr_t *)chunk->skb->data; | ||
321 | |||
322 | /* Tag the variable length parameters. */ | ||
323 | chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(sctp_inithdr_t)); | ||
324 | |||
325 | new_asoc = sctp_make_temp_asoc(ep, chunk, GFP_ATOMIC); | ||
326 | if (!new_asoc) | ||
327 | goto nomem; | ||
328 | |||
329 | /* The call, sctp_process_init(), can fail on memory allocation. */ | ||
330 | if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, | ||
331 | sctp_source(chunk), | ||
332 | (sctp_init_chunk_t *)chunk->chunk_hdr, | ||
333 | GFP_ATOMIC)) | ||
334 | goto nomem_init; | ||
335 | |||
336 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); | ||
337 | |||
338 | /* B) "Z" shall respond immediately with an INIT ACK chunk. */ | ||
339 | |||
340 | /* If there are errors that need to be reported for unknown parameters, | ||
341 | * make sure to reserve enough room in the INIT ACK for them. | ||
342 | */ | ||
343 | len = 0; | ||
344 | if (err_chunk) | ||
345 | len = ntohs(err_chunk->chunk_hdr->length) - | ||
346 | sizeof(sctp_chunkhdr_t); | ||
347 | |||
348 | if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0) | ||
349 | goto nomem_ack; | ||
350 | |||
351 | repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len); | ||
352 | if (!repl) | ||
353 | goto nomem_ack; | ||
354 | |||
355 | /* If there are errors that need to be reported for unknown parameters, | ||
356 | * include them in the outgoing INIT ACK as "Unrecognized parameter" | ||
357 | * parameter. | ||
358 | */ | ||
359 | if (err_chunk) { | ||
360 | /* Get the "Unrecognized parameter" parameter(s) out of the | ||
361 | * ERROR chunk generated by sctp_verify_init(). Since the | ||
362 | * error cause code for "unknown parameter" and the | ||
363 | * "Unrecognized parameter" type is the same, we can | ||
364 | * construct the parameters in INIT ACK by copying the | ||
365 | * ERROR causes over. | ||
366 | */ | ||
367 | unk_param = (sctp_unrecognized_param_t *) | ||
368 | ((__u8 *)(err_chunk->chunk_hdr) + | ||
369 | sizeof(sctp_chunkhdr_t)); | ||
370 | /* Replace the cause code with the "Unrecognized parameter" | ||
371 | * parameter type. | ||
372 | */ | ||
373 | sctp_addto_chunk(repl, len, unk_param); | ||
374 | sctp_chunk_free(err_chunk); | ||
375 | } | ||
376 | |||
377 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); | ||
378 | |||
379 | /* | ||
380 | * Note: After sending out INIT ACK with the State Cookie parameter, | ||
381 | * "Z" MUST NOT allocate any resources, nor keep any states for the | ||
382 | * new association. Otherwise, "Z" will be vulnerable to resource | ||
383 | * attacks. | ||
384 | */ | ||
385 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); | ||
386 | |||
387 | return SCTP_DISPOSITION_DELETE_TCB; | ||
388 | |||
389 | nomem_ack: | ||
390 | if (err_chunk) | ||
391 | sctp_chunk_free(err_chunk); | ||
392 | nomem_init: | ||
393 | sctp_association_free(new_asoc); | ||
394 | nomem: | ||
395 | return SCTP_DISPOSITION_NOMEM; | ||
396 | } | ||
397 | |||
398 | /* | ||
399 | * Respond to a normal INIT ACK chunk. | ||
400 | * We are the side that is initiating the association. | ||
401 | * | ||
402 | * Section: 5.1 Normal Establishment of an Association, C | ||
403 | * C) Upon reception of the INIT ACK from "Z", "A" shall stop the T1-init | ||
404 | * timer and leave COOKIE-WAIT state. "A" shall then send the State | ||
405 | * Cookie received in the INIT ACK chunk in a COOKIE ECHO chunk, start | ||
406 | * the T1-cookie timer, and enter the COOKIE-ECHOED state. | ||
407 | * | ||
408 | * Note: The COOKIE ECHO chunk can be bundled with any pending outbound | ||
409 | * DATA chunks, but it MUST be the first chunk in the packet and | ||
410 | * until the COOKIE ACK is returned the sender MUST NOT send any | ||
411 | * other packets to the peer. | ||
412 | * | ||
413 | * Verification Tag: 3.3.3 | ||
414 | * If the value of the Initiate Tag in a received INIT ACK chunk is | ||
415 | * found to be 0, the receiver MUST treat it as an error and close the | ||
416 | * association by transmitting an ABORT. | ||
417 | * | ||
418 | * Inputs | ||
419 | * (endpoint, asoc, chunk) | ||
420 | * | ||
421 | * Outputs | ||
422 | * (asoc, reply_msg, msg_up, timers, counters) | ||
423 | * | ||
424 | * The return value is the disposition of the chunk. | ||
425 | */ | ||
426 | sctp_disposition_t sctp_sf_do_5_1C_ack(const struct sctp_endpoint *ep, | ||
427 | const struct sctp_association *asoc, | ||
428 | const sctp_subtype_t type, | ||
429 | void *arg, | ||
430 | sctp_cmd_seq_t *commands) | ||
431 | { | ||
432 | struct sctp_chunk *chunk = arg; | ||
433 | sctp_init_chunk_t *initchunk; | ||
434 | __u32 init_tag; | ||
435 | struct sctp_chunk *err_chunk; | ||
436 | struct sctp_packet *packet; | ||
437 | sctp_disposition_t ret; | ||
438 | |||
439 | if (!sctp_vtag_verify(chunk, asoc)) | ||
440 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
441 | |||
442 | /* Make sure that the INIT-ACK chunk has a valid length */ | ||
443 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_initack_chunk_t))) | ||
444 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
445 | commands); | ||
446 | /* 6.10 Bundling | ||
447 | * An endpoint MUST NOT bundle INIT, INIT ACK or | ||
448 | * SHUTDOWN COMPLETE with any other chunks. | ||
449 | */ | ||
450 | if (!chunk->singleton) | ||
451 | return SCTP_DISPOSITION_VIOLATION; | ||
452 | |||
453 | /* Grab the INIT header. */ | ||
454 | chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data; | ||
455 | |||
456 | init_tag = ntohl(chunk->subh.init_hdr->init_tag); | ||
457 | |||
458 | /* Verification Tag: 3.3.3 | ||
459 | * If the value of the Initiate Tag in a received INIT ACK | ||
460 | * chunk is found to be 0, the receiver MUST treat it as an | ||
461 | * error and close the association by transmitting an ABORT. | ||
462 | */ | ||
463 | if (!init_tag) { | ||
464 | struct sctp_chunk *reply = sctp_make_abort(asoc, chunk, 0); | ||
465 | if (!reply) | ||
466 | goto nomem; | ||
467 | |||
468 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); | ||
469 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
470 | SCTP_STATE(SCTP_STATE_CLOSED)); | ||
471 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | ||
472 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); | ||
473 | return SCTP_DISPOSITION_DELETE_TCB; | ||
474 | } | ||
475 | |||
476 | /* Verify the INIT chunk before processing it. */ | ||
477 | err_chunk = NULL; | ||
478 | if (!sctp_verify_init(asoc, chunk->chunk_hdr->type, | ||
479 | (sctp_init_chunk_t *)chunk->chunk_hdr, chunk, | ||
480 | &err_chunk)) { | ||
481 | |||
482 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | ||
483 | |||
484 | /* This chunk contains a fatal error. It is to be discarded. | ||
485 | * Send an ABORT, with causes if there are any. | ||
486 | */ | ||
487 | if (err_chunk) { | ||
488 | packet = sctp_abort_pkt_new(ep, asoc, arg, | ||
489 | (__u8 *)(err_chunk->chunk_hdr) + | ||
490 | sizeof(sctp_chunkhdr_t), | ||
491 | ntohs(err_chunk->chunk_hdr->length) - | ||
492 | sizeof(sctp_chunkhdr_t)); | ||
493 | |||
494 | sctp_chunk_free(err_chunk); | ||
495 | |||
496 | if (packet) { | ||
497 | sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, | ||
498 | SCTP_PACKET(packet)); | ||
499 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); | ||
500 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
501 | SCTP_STATE(SCTP_STATE_CLOSED)); | ||
502 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, | ||
503 | SCTP_NULL()); | ||
504 | return SCTP_DISPOSITION_CONSUME; | ||
505 | } else { | ||
506 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
507 | SCTP_STATE(SCTP_STATE_CLOSED)); | ||
508 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, | ||
509 | SCTP_NULL()); | ||
510 | return SCTP_DISPOSITION_NOMEM; | ||
511 | } | ||
512 | } else { | ||
513 | ret = sctp_sf_tabort_8_4_8(ep, asoc, type, arg, | ||
514 | commands); | ||
515 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
516 | SCTP_STATE(SCTP_STATE_CLOSED)); | ||
517 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, | ||
518 | SCTP_NULL()); | ||
519 | return ret; | ||
520 | } | ||
521 | } | ||
522 | |||
523 | /* Tag the variable length parameters. Note that we never | ||
524 | * convert the parameters in an INIT chunk. | ||
525 | */ | ||
526 | chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(sctp_inithdr_t)); | ||
527 | |||
528 | initchunk = (sctp_init_chunk_t *) chunk->chunk_hdr; | ||
529 | |||
530 | sctp_add_cmd_sf(commands, SCTP_CMD_PEER_INIT, | ||
531 | SCTP_PEER_INIT(initchunk)); | ||
532 | |||
533 | /* 5.1 C) "A" shall stop the T1-init timer and leave | ||
534 | * COOKIE-WAIT state. "A" shall then ... start the T1-cookie | ||
535 | * timer, and enter the COOKIE-ECHOED state. | ||
536 | */ | ||
537 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | ||
538 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); | ||
539 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, | ||
540 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); | ||
541 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
542 | SCTP_STATE(SCTP_STATE_COOKIE_ECHOED)); | ||
543 | |||
544 | /* 5.1 C) "A" shall then send the State Cookie received in the | ||
545 | * INIT ACK chunk in a COOKIE ECHO chunk, ... | ||
546 | */ | ||
547 | /* If there are any errors to report, send the ERROR chunk generated | ||
548 | * for unknown parameters as well. | ||
549 | */ | ||
550 | sctp_add_cmd_sf(commands, SCTP_CMD_GEN_COOKIE_ECHO, | ||
551 | SCTP_CHUNK(err_chunk)); | ||
552 | |||
553 | return SCTP_DISPOSITION_CONSUME; | ||
554 | |||
555 | nomem: | ||
556 | return SCTP_DISPOSITION_NOMEM; | ||
557 | } | ||
558 | |||
559 | /* | ||
560 | * Respond to a normal COOKIE ECHO chunk. | ||
561 | * We are the side that is being asked for an association. | ||
562 | * | ||
563 | * Section: 5.1 Normal Establishment of an Association, D | ||
564 | * D) Upon reception of the COOKIE ECHO chunk, Endpoint "Z" will reply | ||
565 | * with a COOKIE ACK chunk after building a TCB and moving to | ||
566 | * the ESTABLISHED state. A COOKIE ACK chunk may be bundled with | ||
567 | * any pending DATA chunks (and/or SACK chunks), but the COOKIE ACK | ||
568 | * chunk MUST be the first chunk in the packet. | ||
569 | * | ||
570 | * IMPLEMENTATION NOTE: An implementation may choose to send the | ||
571 | * Communication Up notification to the SCTP user upon reception | ||
572 | * of a valid COOKIE ECHO chunk. | ||
573 | * | ||
574 | * Verification Tag: 8.5.1 Exceptions in Verification Tag Rules | ||
575 | * D) Rules for packet carrying a COOKIE ECHO | ||
576 | * | ||
577 | * - When sending a COOKIE ECHO, the endpoint MUST use the value of the | ||
578 | * Initial Tag received in the INIT ACK. | ||
579 | * | ||
580 | * - The receiver of a COOKIE ECHO follows the procedures in Section 5. | ||
581 | * | ||
582 | * Inputs | ||
583 | * (endpoint, asoc, chunk) | ||
584 | * | ||
585 | * Outputs | ||
586 | * (asoc, reply_msg, msg_up, timers, counters) | ||
587 | * | ||
588 | * The return value is the disposition of the chunk. | ||
589 | */ | ||
590 | sctp_disposition_t sctp_sf_do_5_1D_ce(const struct sctp_endpoint *ep, | ||
591 | const struct sctp_association *asoc, | ||
592 | const sctp_subtype_t type, void *arg, | ||
593 | sctp_cmd_seq_t *commands) | ||
594 | { | ||
595 | struct sctp_chunk *chunk = arg; | ||
596 | struct sctp_association *new_asoc; | ||
597 | sctp_init_chunk_t *peer_init; | ||
598 | struct sctp_chunk *repl; | ||
599 | struct sctp_ulpevent *ev; | ||
600 | int error = 0; | ||
601 | struct sctp_chunk *err_chk_p; | ||
602 | |||
603 | /* If the packet is an OOTB packet which is temporarily on the | ||
604 | * control endpoint, respond with an ABORT. | ||
605 | */ | ||
606 | if (ep == sctp_sk((sctp_get_ctl_sock()))->ep) | ||
607 | return sctp_sf_ootb(ep, asoc, type, arg, commands); | ||
608 | |||
609 | /* Make sure that the COOKIE_ECHO chunk has a valid length. | ||
610 | * In this case, we check that we have enough for at least a | ||
611 | * chunk header. More detailed verification is done | ||
612 | * in sctp_unpack_cookie(). | ||
613 | */ | ||
614 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) | ||
615 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
616 | |||
617 | /* "Decode" the chunk. We have no optional parameters so we | ||
618 | * are in good shape. | ||
619 | */ | ||
620 | chunk->subh.cookie_hdr = | ||
621 | (struct sctp_signed_cookie *)chunk->skb->data; | ||
622 | skb_pull(chunk->skb, | ||
623 | ntohs(chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t)); | ||
624 | |||
625 | /* 5.1 D) Upon reception of the COOKIE ECHO chunk, Endpoint | ||
626 | * "Z" will reply with a COOKIE ACK chunk after building a TCB | ||
627 | * and moving to the ESTABLISHED state. | ||
628 | */ | ||
629 | new_asoc = sctp_unpack_cookie(ep, asoc, chunk, GFP_ATOMIC, &error, | ||
630 | &err_chk_p); | ||
631 | |||
632 | /* FIXME: | ||
633 | * If the re-build failed, what is the proper error path | ||
634 | * from here? | ||
635 | * | ||
636 | * [We should abort the association. --piggy] | ||
637 | */ | ||
638 | if (!new_asoc) { | ||
639 | /* FIXME: Several errors are possible. A bad cookie should | ||
640 | * be silently discarded, but think about logging it too. | ||
641 | */ | ||
642 | switch (error) { | ||
643 | case -SCTP_IERROR_NOMEM: | ||
644 | goto nomem; | ||
645 | |||
646 | case -SCTP_IERROR_STALE_COOKIE: | ||
647 | sctp_send_stale_cookie_err(ep, asoc, chunk, commands, | ||
648 | err_chk_p); | ||
649 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
650 | |||
651 | case -SCTP_IERROR_BAD_SIG: | ||
652 | default: | ||
653 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
654 | } | ||
655 | } | ||
656 | |||
657 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); | ||
658 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
659 | SCTP_STATE(SCTP_STATE_ESTABLISHED)); | ||
660 | SCTP_INC_STATS(SCTP_MIB_CURRESTAB); | ||
661 | SCTP_INC_STATS(SCTP_MIB_PASSIVEESTABS); | ||
662 | sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); | ||
663 | |||
664 | if (new_asoc->autoclose) | ||
665 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, | ||
666 | SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); | ||
667 | |||
668 | sctp_add_cmd_sf(commands, SCTP_CMD_TRANSMIT, SCTP_NULL()); | ||
669 | |||
670 | /* Re-building the bind address for the association is already | ||
671 | * done in sctp_unpack_cookie(). | ||
672 | */ | ||
673 | /* This is a brand-new association, so these are not yet side | ||
674 | * effects--it is safe to run them here. | ||
675 | */ | ||
676 | peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; | ||
677 | |||
678 | if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, | ||
679 | &chunk->subh.cookie_hdr->c.peer_addr, | ||
680 | peer_init, GFP_ATOMIC)) | ||
681 | goto nomem_init; | ||
682 | |||
683 | repl = sctp_make_cookie_ack(new_asoc, chunk); | ||
684 | if (!repl) | ||
685 | goto nomem_repl; | ||
686 | |||
687 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); | ||
688 | |||
689 | /* RFC 2960 5.1 Normal Establishment of an Association | ||
690 | * | ||
691 | * D) IMPLEMENTATION NOTE: An implementation may choose to | ||
692 | * send the Communication Up notification to the SCTP user | ||
693 | * upon reception of a valid COOKIE ECHO chunk. | ||
694 | */ | ||
695 | ev = sctp_ulpevent_make_assoc_change(new_asoc, 0, SCTP_COMM_UP, 0, | ||
696 | new_asoc->c.sinit_num_ostreams, | ||
697 | new_asoc->c.sinit_max_instreams, | ||
698 | GFP_ATOMIC); | ||
699 | if (!ev) | ||
700 | goto nomem_ev; | ||
701 | |||
702 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); | ||
703 | |||
704 | /* Sockets API Draft Section 5.3.1.6 | ||
705 | * When a peer sends an Adaptation Layer Indication parameter, SCTP | ||
706 | * delivers this notification to inform the application of the | ||
707 | * peer's requested adaptation layer. | ||
708 | */ | ||
709 | if (new_asoc->peer.adaption_ind) { | ||
710 | ev = sctp_ulpevent_make_adaption_indication(new_asoc, | ||
711 | GFP_ATOMIC); | ||
712 | if (!ev) | ||
713 | goto nomem_ev; | ||
714 | |||
715 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, | ||
716 | SCTP_ULPEVENT(ev)); | ||
717 | } | ||
718 | |||
719 | return SCTP_DISPOSITION_CONSUME; | ||
720 | |||
721 | nomem_ev: | ||
722 | sctp_chunk_free(repl); | ||
723 | nomem_repl: | ||
724 | nomem_init: | ||
725 | sctp_association_free(new_asoc); | ||
726 | nomem: | ||
727 | return SCTP_DISPOSITION_NOMEM; | ||
728 | } | ||
729 | |||
730 | /* | ||
731 | * Respond to a normal COOKIE ACK chunk. | ||
732 | * We are the side that initiated the association. | ||
733 | * | ||
734 | * RFC 2960 5.1 Normal Establishment of an Association | ||
735 | * | ||
736 | * E) Upon reception of the COOKIE ACK, endpoint "A" will move from the | ||
737 | * COOKIE-ECHOED state to the ESTABLISHED state, stopping the T1-cookie | ||
738 | * timer. It may also notify its ULP about the successful | ||
739 | * establishment of the association with a Communication Up | ||
740 | * notification (see Section 10). | ||
741 | * | ||
742 | * Verification Tag: | ||
743 | * Inputs | ||
744 | * (endpoint, asoc, chunk) | ||
745 | * | ||
746 | * Outputs | ||
747 | * (asoc, reply_msg, msg_up, timers, counters) | ||
748 | * | ||
749 | * The return value is the disposition of the chunk. | ||
750 | */ | ||
751 | sctp_disposition_t sctp_sf_do_5_1E_ca(const struct sctp_endpoint *ep, | ||
752 | const struct sctp_association *asoc, | ||
753 | const sctp_subtype_t type, void *arg, | ||
754 | sctp_cmd_seq_t *commands) | ||
755 | { | ||
756 | struct sctp_chunk *chunk = arg; | ||
757 | struct sctp_ulpevent *ev; | ||
758 | |||
759 | if (!sctp_vtag_verify(chunk, asoc)) | ||
760 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
761 | |||
762 | /* Verify that the chunk length for the COOKIE-ACK is OK. | ||
763 | * If we don't do this, any bundled chunks may be junked. | ||
764 | */ | ||
765 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) | ||
766 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
767 | commands); | ||
768 | |||
769 | /* Reset init error count upon receipt of COOKIE-ACK, | ||
770 | * to avoid problems with the management of this | ||
771 | * counter in stale cookie situations when a transition back | ||
772 | * from the COOKIE-ECHOED state to the COOKIE-WAIT | ||
773 | * state is performed. | ||
774 | */ | ||
775 | sctp_add_cmd_sf(commands, SCTP_CMD_COUNTER_RESET, | ||
776 | SCTP_COUNTER(SCTP_COUNTER_INIT_ERROR)); | ||
777 | |||
778 | /* RFC 2960 5.1 Normal Establishment of an Association | ||
779 | * | ||
780 | * E) Upon reception of the COOKIE ACK, endpoint "A" will move | ||
781 | * from the COOKIE-ECHOED state to the ESTABLISHED state, | ||
782 | * stopping the T1-cookie timer. | ||
783 | */ | ||
784 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | ||
785 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); | ||
786 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
787 | SCTP_STATE(SCTP_STATE_ESTABLISHED)); | ||
788 | SCTP_INC_STATS(SCTP_MIB_CURRESTAB); | ||
789 | SCTP_INC_STATS(SCTP_MIB_ACTIVEESTABS); | ||
790 | sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); | ||
791 | if (asoc->autoclose) | ||
792 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, | ||
793 | SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); | ||
794 | sctp_add_cmd_sf(commands, SCTP_CMD_TRANSMIT, SCTP_NULL()); | ||
795 | |||
796 | /* It may also notify its ULP about the successful | ||
797 | * establishment of the association with a Communication Up | ||
798 | * notification (see Section 10). | ||
799 | */ | ||
800 | ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_UP, | ||
801 | 0, asoc->c.sinit_num_ostreams, | ||
802 | asoc->c.sinit_max_instreams, | ||
803 | GFP_ATOMIC); | ||
804 | |||
805 | if (!ev) | ||
806 | goto nomem; | ||
807 | |||
808 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); | ||
809 | |||
810 | /* Sockets API Draft Section 5.3.1.6 | ||
811 | * When a peer sends an Adaptation Layer Indication parameter, SCTP | ||
812 | * delivers this notification to inform the application of the | ||
813 | * peer's requested adaptation layer. | ||
814 | */ | ||
815 | if (asoc->peer.adaption_ind) { | ||
816 | ev = sctp_ulpevent_make_adaption_indication(asoc, GFP_ATOMIC); | ||
817 | if (!ev) | ||
818 | goto nomem; | ||
819 | |||
820 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, | ||
821 | SCTP_ULPEVENT(ev)); | ||
822 | } | ||
823 | |||
824 | return SCTP_DISPOSITION_CONSUME; | ||
825 | nomem: | ||
826 | return SCTP_DISPOSITION_NOMEM; | ||
827 | } | ||
828 | |||
829 | /* Generate and sendout a heartbeat packet. */ | ||
830 | static sctp_disposition_t sctp_sf_heartbeat(const struct sctp_endpoint *ep, | ||
831 | const struct sctp_association *asoc, | ||
832 | const sctp_subtype_t type, | ||
833 | void *arg, | ||
834 | sctp_cmd_seq_t *commands) | ||
835 | { | ||
836 | struct sctp_transport *transport = (struct sctp_transport *) arg; | ||
837 | struct sctp_chunk *reply; | ||
838 | sctp_sender_hb_info_t hbinfo; | ||
839 | size_t paylen = 0; | ||
840 | |||
841 | hbinfo.param_hdr.type = SCTP_PARAM_HEARTBEAT_INFO; | ||
842 | hbinfo.param_hdr.length = htons(sizeof(sctp_sender_hb_info_t)); | ||
843 | hbinfo.daddr = transport->ipaddr; | ||
844 | hbinfo.sent_at = jiffies; | ||
845 | |||
846 | /* Send a heartbeat to our peer. */ | ||
847 | paylen = sizeof(sctp_sender_hb_info_t); | ||
848 | reply = sctp_make_heartbeat(asoc, transport, &hbinfo, paylen); | ||
849 | if (!reply) | ||
850 | return SCTP_DISPOSITION_NOMEM; | ||
851 | |||
852 | /* Set rto_pending indicating that an RTT measurement | ||
853 | * is started with this heartbeat chunk. | ||
854 | */ | ||
855 | sctp_add_cmd_sf(commands, SCTP_CMD_RTO_PENDING, | ||
856 | SCTP_TRANSPORT(transport)); | ||
857 | |||
858 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); | ||
859 | return SCTP_DISPOSITION_CONSUME; | ||
860 | } | ||
861 | |||
862 | /* Generate a HEARTBEAT packet on the given transport. */ | ||
863 | sctp_disposition_t sctp_sf_sendbeat_8_3(const struct sctp_endpoint *ep, | ||
864 | const struct sctp_association *asoc, | ||
865 | const sctp_subtype_t type, | ||
866 | void *arg, | ||
867 | sctp_cmd_seq_t *commands) | ||
868 | { | ||
869 | struct sctp_transport *transport = (struct sctp_transport *) arg; | ||
870 | |||
871 | if (asoc->overall_error_count > asoc->max_retrans) { | ||
872 | /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ | ||
873 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | ||
874 | SCTP_U32(SCTP_ERROR_NO_ERROR)); | ||
875 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | ||
876 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | ||
877 | return SCTP_DISPOSITION_DELETE_TCB; | ||
878 | } | ||
879 | |||
880 | /* Section 3.3.5. | ||
881 | * The Sender-specific Heartbeat Info field should normally include | ||
882 | * information about the sender's current time when this HEARTBEAT | ||
883 | * chunk is sent and the destination transport address to which this | ||
884 | * HEARTBEAT is sent (see Section 8.3). | ||
885 | */ | ||
886 | |||
887 | if (transport->hb_allowed) { | ||
888 | if (SCTP_DISPOSITION_NOMEM == | ||
889 | sctp_sf_heartbeat(ep, asoc, type, arg, | ||
890 | commands)) | ||
891 | return SCTP_DISPOSITION_NOMEM; | ||
892 | /* Set transport error counter and association error counter | ||
893 | * when sending heartbeat. | ||
894 | */ | ||
895 | sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_RESET, | ||
896 | SCTP_TRANSPORT(transport)); | ||
897 | } | ||
898 | sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMER_UPDATE, | ||
899 | SCTP_TRANSPORT(transport)); | ||
900 | |||
901 | return SCTP_DISPOSITION_CONSUME; | ||
902 | } | ||
903 | |||
904 | /* | ||
905 | * Process a heartbeat request. | ||
906 | * | ||
907 | * Section: 8.3 Path Heartbeat | ||
908 | * The receiver of the HEARTBEAT should immediately respond with a | ||
909 | * HEARTBEAT ACK that contains the Heartbeat Information field copied | ||
910 | * from the received HEARTBEAT chunk. | ||
911 | * | ||
912 | * Verification Tag: 8.5 Verification Tag [Normal verification] | ||
913 | * When receiving an SCTP packet, the endpoint MUST ensure that the | ||
914 | * value in the Verification Tag field of the received SCTP packet | ||
915 | * matches its own Tag. If the received Verification Tag value does not | ||
916 | * match the receiver's own tag value, the receiver shall silently | ||
917 | * discard the packet and shall not process it any further except for | ||
918 | * those cases listed in Section 8.5.1 below. | ||
919 | * | ||
920 | * Inputs | ||
921 | * (endpoint, asoc, chunk) | ||
922 | * | ||
923 | * Outputs | ||
924 | * (asoc, reply_msg, msg_up, timers, counters) | ||
925 | * | ||
926 | * The return value is the disposition of the chunk. | ||
927 | */ | ||
928 | sctp_disposition_t sctp_sf_beat_8_3(const struct sctp_endpoint *ep, | ||
929 | const struct sctp_association *asoc, | ||
930 | const sctp_subtype_t type, | ||
931 | void *arg, | ||
932 | sctp_cmd_seq_t *commands) | ||
933 | { | ||
934 | struct sctp_chunk *chunk = arg; | ||
935 | struct sctp_chunk *reply; | ||
936 | size_t paylen = 0; | ||
937 | |||
938 | if (!sctp_vtag_verify(chunk, asoc)) | ||
939 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
940 | |||
941 | /* Make sure that the HEARTBEAT chunk has a valid length. */ | ||
942 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_heartbeat_chunk_t))) | ||
943 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
944 | commands); | ||
945 | |||
946 | /* 8.3 The receiver of the HEARTBEAT should immediately | ||
947 | * respond with a HEARTBEAT ACK that contains the Heartbeat | ||
948 | * Information field copied from the received HEARTBEAT chunk. | ||
949 | */ | ||
950 | chunk->subh.hb_hdr = (sctp_heartbeathdr_t *) chunk->skb->data; | ||
951 | paylen = ntohs(chunk->chunk_hdr->length) - sizeof(sctp_chunkhdr_t); | ||
952 | skb_pull(chunk->skb, paylen); | ||
953 | |||
954 | reply = sctp_make_heartbeat_ack(asoc, chunk, | ||
955 | chunk->subh.hb_hdr, paylen); | ||
956 | if (!reply) | ||
957 | goto nomem; | ||
958 | |||
959 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); | ||
960 | return SCTP_DISPOSITION_CONSUME; | ||
961 | |||
962 | nomem: | ||
963 | return SCTP_DISPOSITION_NOMEM; | ||
964 | } | ||
965 | |||
966 | /* | ||
967 | * Process the returning HEARTBEAT ACK. | ||
968 | * | ||
969 | * Section: 8.3 Path Heartbeat | ||
970 | * Upon the receipt of the HEARTBEAT ACK, the sender of the HEARTBEAT | ||
971 | * should clear the error counter of the destination transport | ||
972 | * address to which the HEARTBEAT was sent, and mark the destination | ||
973 | * transport address as active if it is not so marked. The endpoint may | ||
974 | * optionally report to the upper layer when an inactive destination | ||
975 | * address is marked as active due to the reception of the latest | ||
976 | * HEARTBEAT ACK. The receiver of the HEARTBEAT ACK must also | ||
977 | * clear the association overall error count as well (as defined | ||
978 | * in section 8.1). | ||
979 | * | ||
980 | * The receiver of the HEARTBEAT ACK should also perform an RTT | ||
981 | * measurement for that destination transport address using the time | ||
982 | * value carried in the HEARTBEAT ACK chunk. | ||
983 | * | ||
984 | * Verification Tag: 8.5 Verification Tag [Normal verification] | ||
985 | * | ||
986 | * Inputs | ||
987 | * (endpoint, asoc, chunk) | ||
988 | * | ||
989 | * Outputs | ||
990 | * (asoc, reply_msg, msg_up, timers, counters) | ||
991 | * | ||
992 | * The return value is the disposition of the chunk. | ||
993 | */ | ||
994 | sctp_disposition_t sctp_sf_backbeat_8_3(const struct sctp_endpoint *ep, | ||
995 | const struct sctp_association *asoc, | ||
996 | const sctp_subtype_t type, | ||
997 | void *arg, | ||
998 | sctp_cmd_seq_t *commands) | ||
999 | { | ||
1000 | struct sctp_chunk *chunk = arg; | ||
1001 | union sctp_addr from_addr; | ||
1002 | struct sctp_transport *link; | ||
1003 | sctp_sender_hb_info_t *hbinfo; | ||
1004 | unsigned long max_interval; | ||
1005 | |||
1006 | if (!sctp_vtag_verify(chunk, asoc)) | ||
1007 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
1008 | |||
1009 | /* Make sure that the HEARTBEAT-ACK chunk has a valid length. */ | ||
1010 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_heartbeat_chunk_t))) | ||
1011 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
1012 | commands); | ||
1013 | |||
1014 | hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data; | ||
1015 | from_addr = hbinfo->daddr; | ||
1016 | link = sctp_assoc_lookup_paddr(asoc, &from_addr); | ||
1017 | |||
1018 | /* This should never happen, but let's log it if so. | ||
1019 | if (!link) { | ||
1020 | printk(KERN_WARNING | ||
1021 | "%s: Could not find address %d.%d.%d.%d\n", | ||
1022 | __FUNCTION__, NIPQUAD(from_addr.v4.sin_addr)); | ||
1023 | return SCTP_DISPOSITION_DISCARD; | ||
1024 | } | ||
1025 | |||
1026 | max_interval = link->hb_interval + link->rto; | ||
1027 | |||
1028 | /* Check if the timestamp looks valid. */ | ||
1029 | if (time_after(hbinfo->sent_at, jiffies) || | ||
1030 | time_after(jiffies, hbinfo->sent_at + max_interval)) { | ||
1031 | SCTP_DEBUG_PRINTK("%s: HEARTBEAT ACK with invalid timestamp " | ||
1032 | "received for transport: %p\n", | ||
1033 | __FUNCTION__, link); | ||
1034 | return SCTP_DISPOSITION_DISCARD; | ||
1035 | } | ||
1036 | |||
1037 | /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of | ||
1038 | * the HEARTBEAT should clear the error counter of the | ||
1039 | * destination transport address to which the HEARTBEAT was | ||
1040 | * sent and mark the destination transport address as active if | ||
1041 | * it is not so marked. | ||
1042 | */ | ||
1043 | sctp_add_cmd_sf(commands, SCTP_CMD_TRANSPORT_ON, SCTP_TRANSPORT(link)); | ||
1044 | |||
1045 | return SCTP_DISPOSITION_CONSUME; | ||
1046 | } | ||
1047 | |||
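The timestamp sanity check above accepts a HEARTBEAT ACK only if its sent_at value is not in the future and not older than hb_interval + rto. A stand-alone sketch of that window, using a hypothetical jiffies-style wrap-safe comparison (names such as time_is_after and hb_timestamp_ok are illustrative, not kernel APIs):

#include <stdint.h>
#include <stdio.h>

typedef uint32_t tick_t;

/* Hypothetical wrap-safe comparison: true if a is after b. */
static int time_is_after(tick_t a, tick_t b)
{
	return (int32_t)(b - a) < 0;
}

/* Accept a heartbeat timestamp only if it is not in the future and not
 * older than hb_interval + rto, mirroring the window checked above.
 */
static int hb_timestamp_ok(tick_t sent_at, tick_t now,
			   tick_t hb_interval, tick_t rto)
{
	tick_t max_interval = hb_interval + rto;

	if (time_is_after(sent_at, now))
		return 0;		/* claims to come from the future */
	if (time_is_after(now, sent_at + max_interval))
		return 0;		/* too old */
	return 1;
}

int main(void)
{
	printf("%d\n", hb_timestamp_ok(1000, 1500, 300, 400)); /* 1: inside the window */
	printf("%d\n", hb_timestamp_ok(1000, 1800, 300, 400)); /* 0: older than 700 ticks */
	return 0;
}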
1048 | /* Helper function to send out an abort for the restart | ||
1049 | * condition. | ||
1050 | */ | ||
1051 | static int sctp_sf_send_restart_abort(union sctp_addr *ssa, | ||
1052 | struct sctp_chunk *init, | ||
1053 | sctp_cmd_seq_t *commands) | ||
1054 | { | ||
1055 | int len; | ||
1056 | struct sctp_packet *pkt; | ||
1057 | union sctp_addr_param *addrparm; | ||
1058 | struct sctp_errhdr *errhdr; | ||
1059 | struct sctp_endpoint *ep; | ||
1060 | char buffer[sizeof(struct sctp_errhdr)+sizeof(union sctp_addr_param)]; | ||
1061 | struct sctp_af *af = sctp_get_af_specific(ssa->v4.sin_family); | ||
1062 | |||
1063 | /* Build the error on the stack. We are way too malloc crazy | ||
1064 | * throughout the code today. | ||
1065 | */ | ||
1066 | errhdr = (struct sctp_errhdr *)buffer; | ||
1067 | addrparm = (union sctp_addr_param *)errhdr->variable; | ||
1068 | |||
1069 | /* Copy into a parm format. */ | ||
1070 | len = af->to_addr_param(ssa, addrparm); | ||
1071 | len += sizeof(sctp_errhdr_t); | ||
1072 | |||
1073 | errhdr->cause = SCTP_ERROR_RESTART; | ||
1074 | errhdr->length = htons(len); | ||
1075 | |||
1076 | /* Assign to the control socket. */ | ||
1077 | ep = sctp_sk((sctp_get_ctl_sock()))->ep; | ||
1078 | |||
1079 | /* Association is NULL since this may be a restart attack and we | ||
1080 | * want to send back the attacker's vtag. | ||
1081 | */ | ||
1082 | pkt = sctp_abort_pkt_new(ep, NULL, init, errhdr, len); | ||
1083 | |||
1084 | if (!pkt) | ||
1085 | goto out; | ||
1086 | sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, SCTP_PACKET(pkt)); | ||
1087 | |||
1088 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); | ||
1089 | |||
1090 | /* Discard the rest of the inbound packet. */ | ||
1091 | sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL()); | ||
1092 | |||
1093 | out: | ||
1094 | /* Even if there is no memory, treat as a failure so | ||
1095 | * the packet will get dropped. | ||
1096 | */ | ||
1097 | return 0; | ||
1098 | } | ||
1099 | |||
1100 | /* A restart is occurring, check to make sure no new addresses | ||
1101 | * are being added as we may be under a takeover attack. | ||
1102 | */ | ||
1103 | static int sctp_sf_check_restart_addrs(const struct sctp_association *new_asoc, | ||
1104 | const struct sctp_association *asoc, | ||
1105 | struct sctp_chunk *init, | ||
1106 | sctp_cmd_seq_t *commands) | ||
1107 | { | ||
1108 | struct sctp_transport *new_addr, *addr; | ||
1109 | struct list_head *pos, *pos2; | ||
1110 | int found; | ||
1111 | |||
1112 | /* Implementor's Guide - Section 5.2.2 | ||
1113 | * ... | ||
1114 | * Before responding the endpoint MUST check to see if the | ||
1115 | * unexpected INIT adds new addresses to the association. If new | ||
1116 | * addresses are added to the association, the endpoint MUST respond | ||
1117 | * with an ABORT. | ||
1118 | */ | ||
1119 | |||
1120 | /* Search through all current addresses and make sure | ||
1121 | * we aren't adding any new ones. | ||
1122 | */ | ||
1123 | new_addr = NULL; | ||
1124 | found = 0; | ||
1125 | |||
1126 | list_for_each(pos, &new_asoc->peer.transport_addr_list) { | ||
1127 | new_addr = list_entry(pos, struct sctp_transport, transports); | ||
1128 | found = 0; | ||
1129 | list_for_each(pos2, &asoc->peer.transport_addr_list) { | ||
1130 | addr = list_entry(pos2, struct sctp_transport, | ||
1131 | transports); | ||
1132 | if (sctp_cmp_addr_exact(&new_addr->ipaddr, | ||
1133 | &addr->ipaddr)) { | ||
1134 | found = 1; | ||
1135 | break; | ||
1136 | } | ||
1137 | } | ||
1138 | if (!found) | ||
1139 | break; | ||
1140 | } | ||
1141 | |||
1142 | /* If a new address was added, ABORT the sender. */ | ||
1143 | if (!found && new_addr) { | ||
1144 | sctp_sf_send_restart_abort(&new_addr->ipaddr, init, commands); | ||
1145 | } | ||
1146 | |||
1147 | /* Return success if all addresses were found. */ | ||
1148 | return found; | ||
1149 | } | ||
1150 | |||
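The restart check above is a containment test: every transport address announced by the unexpected INIT must already be known to the existing association, otherwise an ABORT is sent back. A stand-alone sketch of the same test over plain arrays (hypothetical types and names, not the kernel's list walking helpers):

#include <stdio.h>
#include <string.h>

/* Hypothetical simplified address type for illustration. */
struct addr { unsigned char bytes[4]; };

static int addr_equal(const struct addr *a, const struct addr *b)
{
	return memcmp(a->bytes, b->bytes, sizeof(a->bytes)) == 0;
}

/* Return 1 only if every address in 'restart' already exists in 'known';
 * any new address means the restart must be rejected with an ABORT.
 */
static int restart_addrs_ok(const struct addr *restart, int nrestart,
			    const struct addr *known, int nknown)
{
	int i, j;

	for (i = 0; i < nrestart; i++) {
		int found = 0;

		for (j = 0; j < nknown; j++) {
			if (addr_equal(&restart[i], &known[j])) {
				found = 1;
				break;
			}
		}
		if (!found)
			return 0;	/* a new address was added */
	}
	return 1;
}

int main(void)
{
	struct addr known[] = { {{10,0,0,1}}, {{10,0,0,2}} };
	struct addr restart_same[] = { {{10,0,0,2}} };
	struct addr restart_new[] = { {{10,0,0,3}} };

	printf("%d %d\n",
	       restart_addrs_ok(restart_same, 1, known, 2),
	       restart_addrs_ok(restart_new, 1, known, 2));	/* 1 0 */
	return 0;
}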
1151 | /* Populate the verification/tie tags based on overlapping INIT | ||
1152 | * scenario. | ||
1153 | * | ||
1154 | * Note: Do not use in CLOSED or SHUTDOWN-ACK-SENT state. | ||
1155 | */ | ||
1156 | static void sctp_tietags_populate(struct sctp_association *new_asoc, | ||
1157 | const struct sctp_association *asoc) | ||
1158 | { | ||
1159 | switch (asoc->state) { | ||
1160 | |||
1161 | /* 5.2.1 INIT received in COOKIE-WAIT or COOKIE-ECHOED State */ | ||
1162 | |||
1163 | case SCTP_STATE_COOKIE_WAIT: | ||
1164 | new_asoc->c.my_vtag = asoc->c.my_vtag; | ||
1165 | new_asoc->c.my_ttag = asoc->c.my_vtag; | ||
1166 | new_asoc->c.peer_ttag = 0; | ||
1167 | break; | ||
1168 | |||
1169 | case SCTP_STATE_COOKIE_ECHOED: | ||
1170 | new_asoc->c.my_vtag = asoc->c.my_vtag; | ||
1171 | new_asoc->c.my_ttag = asoc->c.my_vtag; | ||
1172 | new_asoc->c.peer_ttag = asoc->c.peer_vtag; | ||
1173 | break; | ||
1174 | |||
1175 | /* 5.2.2 Unexpected INIT in States Other than CLOSED, COOKIE-ECHOED, | ||
1176 | * COOKIE-WAIT and SHUTDOWN-ACK-SENT | ||
1177 | */ | ||
1178 | default: | ||
1179 | new_asoc->c.my_ttag = asoc->c.my_vtag; | ||
1180 | new_asoc->c.peer_ttag = asoc->c.peer_vtag; | ||
1181 | break; | ||
1182 | } | ||
1183 | |||
1184 | /* Other parameters for the endpoint SHOULD be copied from the | ||
1185 | * existing parameters of the association (e.g. number of | ||
1186 | * outbound streams) into the INIT ACK and cookie. | ||
1187 | */ | ||
1188 | new_asoc->rwnd = asoc->rwnd; | ||
1189 | new_asoc->c.sinit_num_ostreams = asoc->c.sinit_num_ostreams; | ||
1190 | new_asoc->c.sinit_max_instreams = asoc->c.sinit_max_instreams; | ||
1191 | new_asoc->c.initial_tsn = asoc->c.initial_tsn; | ||
1192 | } | ||
1193 | |||
1194 | /* | ||
1195 | * Compare vtag/tietag values to determine unexpected COOKIE-ECHO | ||
1196 | * handling action. | ||
1197 | * | ||
1198 | * RFC 2960 5.2.4 Handle a COOKIE ECHO when a TCB exists. | ||
1199 | * | ||
1200 | * Returns value representing action to be taken. These action values | ||
1201 | * correspond to Action/Description values in RFC 2960, Table 2. | ||
1202 | */ | ||
1203 | static char sctp_tietags_compare(struct sctp_association *new_asoc, | ||
1204 | const struct sctp_association *asoc) | ||
1205 | { | ||
1206 | /* In this case, the peer may have restarted. */ | ||
1207 | if ((asoc->c.my_vtag != new_asoc->c.my_vtag) && | ||
1208 | (asoc->c.peer_vtag != new_asoc->c.peer_vtag) && | ||
1209 | (asoc->c.my_vtag == new_asoc->c.my_ttag) && | ||
1210 | (asoc->c.peer_vtag == new_asoc->c.peer_ttag)) | ||
1211 | return 'A'; | ||
1212 | |||
1213 | /* Collision case B. */ | ||
1214 | if ((asoc->c.my_vtag == new_asoc->c.my_vtag) && | ||
1215 | ((asoc->c.peer_vtag != new_asoc->c.peer_vtag) || | ||
1216 | (0 == asoc->c.peer_vtag))) { | ||
1217 | return 'B'; | ||
1218 | } | ||
1219 | |||
1220 | /* Collision case D. */ | ||
1221 | if ((asoc->c.my_vtag == new_asoc->c.my_vtag) && | ||
1222 | (asoc->c.peer_vtag == new_asoc->c.peer_vtag)) | ||
1223 | return 'D'; | ||
1224 | |||
1225 | /* Collision case C. */ | ||
1226 | if ((asoc->c.my_vtag != new_asoc->c.my_vtag) && | ||
1227 | (asoc->c.peer_vtag == new_asoc->c.peer_vtag) && | ||
1228 | (0 == new_asoc->c.my_ttag) && | ||
1229 | (0 == new_asoc->c.peer_ttag)) | ||
1230 | return 'C'; | ||
1231 | |||
1232 | /* No match to any of the special cases; discard this packet. */ | ||
1233 | return 'E'; | ||
1234 | } | ||
1235 | |||
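The tie-tag comparison can be exercised in isolation. The sketch below repeats the same case analysis on plain structs (hypothetical types and names; only the peer-restart case 'A' is demonstrated in main):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified tag record: just the fields the comparison
 * above reads (verification tags and tie-tags).
 */
struct tags {
	uint32_t my_vtag, peer_vtag, my_ttag, peer_ttag;
};

/* Same case analysis as sctp_tietags_compare(), on plain structs. */
static char tietags_compare(const struct tags *cur, const struct tags *newa)
{
	if (cur->my_vtag != newa->my_vtag && cur->peer_vtag != newa->peer_vtag &&
	    cur->my_vtag == newa->my_ttag && cur->peer_vtag == newa->peer_ttag)
		return 'A';			/* peer restart */
	if (cur->my_vtag == newa->my_vtag &&
	    (cur->peer_vtag != newa->peer_vtag || cur->peer_vtag == 0))
		return 'B';
	if (cur->my_vtag == newa->my_vtag && cur->peer_vtag == newa->peer_vtag)
		return 'D';
	if (cur->my_vtag != newa->my_vtag && cur->peer_vtag == newa->peer_vtag &&
	    newa->my_ttag == 0 && newa->peer_ttag == 0)
		return 'C';
	return 'E';				/* discard */
}

int main(void)
{
	struct tags cur = { .my_vtag = 10, .peer_vtag = 20 };
	struct tags restart = { .my_vtag = 11, .peer_vtag = 21,
				.my_ttag = 10, .peer_ttag = 20 };

	printf("%c\n", tietags_compare(&cur, &restart));	/* A */
	return 0;
}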
1236 | /* Common helper routine for both duplicate and simultaneous INIT | ||
1237 | * chunk handling. | ||
1238 | */ | ||
1239 | static sctp_disposition_t sctp_sf_do_unexpected_init( | ||
1240 | const struct sctp_endpoint *ep, | ||
1241 | const struct sctp_association *asoc, | ||
1242 | const sctp_subtype_t type, | ||
1243 | void *arg, sctp_cmd_seq_t *commands) | ||
1244 | { | ||
1245 | sctp_disposition_t retval; | ||
1246 | struct sctp_chunk *chunk = arg; | ||
1247 | struct sctp_chunk *repl; | ||
1248 | struct sctp_association *new_asoc; | ||
1249 | struct sctp_chunk *err_chunk; | ||
1250 | struct sctp_packet *packet; | ||
1251 | sctp_unrecognized_param_t *unk_param; | ||
1252 | int len; | ||
1253 | |||
1254 | /* 6.10 Bundling | ||
1255 | * An endpoint MUST NOT bundle INIT, INIT ACK or | ||
1256 | * SHUTDOWN COMPLETE with any other chunks. | ||
1257 | * | ||
1258 | * IG Section 2.11.2 | ||
1259 | * Furthermore, we require that the receiver of an INIT chunk MUST | ||
1260 | * enforce these rules by silently discarding an arriving packet | ||
1261 | * with an INIT chunk that is bundled with other chunks. | ||
1262 | */ | ||
1263 | if (!chunk->singleton) | ||
1264 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
1265 | |||
1266 | /* 3.1 A packet containing an INIT chunk MUST have a zero Verification | ||
1267 | * Tag. | ||
1268 | */ | ||
1269 | if (chunk->sctp_hdr->vtag != 0) | ||
1270 | return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); | ||
1271 | |||
1272 | /* Make sure that the INIT chunk has a valid length. | ||
1273 | * In this case, we generate a protocol violation since we have | ||
1274 | * an association established. | ||
1275 | */ | ||
1276 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_init_chunk_t))) | ||
1277 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
1278 | commands); | ||
1279 | /* Grab the INIT header. */ | ||
1280 | chunk->subh.init_hdr = (sctp_inithdr_t *) chunk->skb->data; | ||
1281 | |||
1282 | /* Tag the variable length parameters. */ | ||
1283 | chunk->param_hdr.v = skb_pull(chunk->skb, sizeof(sctp_inithdr_t)); | ||
1284 | |||
1285 | /* Verify the INIT chunk before processing it. */ | ||
1286 | err_chunk = NULL; | ||
1287 | if (!sctp_verify_init(asoc, chunk->chunk_hdr->type, | ||
1288 | (sctp_init_chunk_t *)chunk->chunk_hdr, chunk, | ||
1289 | &err_chunk)) { | ||
1290 | /* This chunk contains a fatal error. It is to be discarded. | ||
1291 | * Send an ABORT, with causes if there are any. | ||
1292 | */ | ||
1293 | if (err_chunk) { | ||
1294 | packet = sctp_abort_pkt_new(ep, asoc, arg, | ||
1295 | (__u8 *)(err_chunk->chunk_hdr) + | ||
1296 | sizeof(sctp_chunkhdr_t), | ||
1297 | ntohs(err_chunk->chunk_hdr->length) - | ||
1298 | sizeof(sctp_chunkhdr_t)); | ||
1299 | |||
1300 | if (packet) { | ||
1301 | sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, | ||
1302 | SCTP_PACKET(packet)); | ||
1303 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); | ||
1304 | retval = SCTP_DISPOSITION_CONSUME; | ||
1305 | } else { | ||
1306 | retval = SCTP_DISPOSITION_NOMEM; | ||
1307 | } | ||
1308 | goto cleanup; | ||
1309 | } else { | ||
1310 | return sctp_sf_tabort_8_4_8(ep, asoc, type, arg, | ||
1311 | commands); | ||
1312 | } | ||
1313 | } | ||
1314 | |||
1315 | /* | ||
1316 | * Other parameters for the endpoint SHOULD be copied from the | ||
1317 | * existing parameters of the association (e.g. number of | ||
1318 | * outbound streams) into the INIT ACK and cookie. | ||
1319 | * FIXME: We are copying parameters from the endpoint not the | ||
1320 | * association. | ||
1321 | */ | ||
1322 | new_asoc = sctp_make_temp_asoc(ep, chunk, GFP_ATOMIC); | ||
1323 | if (!new_asoc) | ||
1324 | goto nomem; | ||
1325 | |||
1326 | /* In the outbound INIT ACK the endpoint MUST copy its current | ||
1327 | * Verification Tag and peer's Verification Tag into a reserved | ||
1328 | * place (local tie-tag and peer tie-tag) within the state cookie. | ||
1329 | */ | ||
1330 | if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, | ||
1331 | sctp_source(chunk), | ||
1332 | (sctp_init_chunk_t *)chunk->chunk_hdr, | ||
1333 | GFP_ATOMIC)) { | ||
1334 | retval = SCTP_DISPOSITION_NOMEM; | ||
1335 | goto nomem_init; | ||
1336 | } | ||
1337 | |||
1338 | /* Make sure no new addresses are being added during the | ||
1339 | * restart. Do not do this check for COOKIE-WAIT state, | ||
1340 | * since there are no peer addresses to check against. | ||
1341 | * Upon return an ABORT will have been sent if needed. | ||
1342 | */ | ||
1343 | if (!sctp_state(asoc, COOKIE_WAIT)) { | ||
1344 | if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, | ||
1345 | commands)) { | ||
1346 | retval = SCTP_DISPOSITION_CONSUME; | ||
1347 | goto cleanup_asoc; | ||
1348 | } | ||
1349 | } | ||
1350 | |||
1351 | sctp_tietags_populate(new_asoc, asoc); | ||
1352 | |||
1353 | /* B) "Z" shall respond immediately with an INIT ACK chunk. */ | ||
1354 | |||
1355 | 	/* If there are errors that need to be reported for unknown parameters,	| ||
1356 | * make sure to reserve enough room in the INIT ACK for them. | ||
1357 | */ | ||
1358 | len = 0; | ||
1359 | if (err_chunk) { | ||
1360 | len = ntohs(err_chunk->chunk_hdr->length) - | ||
1361 | sizeof(sctp_chunkhdr_t); | ||
1362 | } | ||
1363 | |||
1364 | if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0) | ||
1365 | goto nomem; | ||
1366 | |||
1367 | repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len); | ||
1368 | if (!repl) | ||
1369 | goto nomem; | ||
1370 | |||
1371 | 	/* If there are errors that need to be reported for unknown parameters,	| ||
1372 | * include them in the outgoing INIT ACK as "Unrecognized parameter" | ||
1373 | * parameter. | ||
1374 | */ | ||
1375 | if (err_chunk) { | ||
1376 | /* Get the "Unrecognized parameter" parameter(s) out of the | ||
1377 | * ERROR chunk generated by sctp_verify_init(). Since the | ||
1378 | * error cause code for "unknown parameter" and the | ||
1379 | * "Unrecognized parameter" type is the same, we can | ||
1380 | * construct the parameters in INIT ACK by copying the | ||
1381 | * ERROR causes over. | ||
1382 | */ | ||
1383 | unk_param = (sctp_unrecognized_param_t *) | ||
1384 | ((__u8 *)(err_chunk->chunk_hdr) + | ||
1385 | sizeof(sctp_chunkhdr_t)); | ||
1386 | /* Replace the cause code with the "Unrecognized parameter" | ||
1387 | * parameter type. | ||
1388 | */ | ||
1389 | sctp_addto_chunk(repl, len, unk_param); | ||
1390 | } | ||
1391 | |||
1392 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); | ||
1393 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); | ||
1394 | |||
1395 | /* | ||
1396 | * Note: After sending out INIT ACK with the State Cookie parameter, | ||
1397 | * "Z" MUST NOT allocate any resources for this new association. | ||
1398 | * Otherwise, "Z" will be vulnerable to resource attacks. | ||
1399 | */ | ||
1400 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); | ||
1401 | retval = SCTP_DISPOSITION_CONSUME; | ||
1402 | |||
1403 | cleanup: | ||
1404 | if (err_chunk) | ||
1405 | sctp_chunk_free(err_chunk); | ||
1406 | return retval; | ||
1407 | nomem: | ||
1408 | retval = SCTP_DISPOSITION_NOMEM; | ||
1409 | goto cleanup; | ||
1410 | nomem_init: | ||
1411 | cleanup_asoc: | ||
1412 | sctp_association_free(new_asoc); | ||
1413 | goto cleanup; | ||
1414 | } | ||
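/* Editorial sketch, not part of the original file: why the ERROR causes built
 * by sctp_verify_init() above can be appended to the INIT ACK verbatim.  Both
 * the "Unrecognized Parameters" error cause and the "Unrecognized Parameter"
 * INIT ACK parameter are 16-bit type / 16-bit length TLVs, and RFC 2960
 * assigns both the value 8, so copying the cause bytes already yields a
 * well-formed parameter.  Standalone illustration with hypothetical names.
 */
#include <stdint.h>
#include <string.h>

struct tlv_hdr {                /* layout shared by error causes and parameters */
	uint16_t type;          /* cause code or parameter type (both 8 here)   */
	uint16_t length;        /* TLV length including this header, big-endian */
};

/* Append src_len bytes of "unknown parameter" causes to an INIT ACK buffer. */
static size_t copy_unrecognized_params(uint8_t *init_ack, size_t off,
				       const uint8_t *causes, size_t src_len)
{
	memcpy(init_ack + off, causes, src_len);  /* no type rewriting needed */
	return off + src_len;
}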
1415 | |||
1416 | /* | ||
1417 |  * Handle simultaneous INIT.	| ||
1418 | * This means we started an INIT and then we got an INIT request from | ||
1419 | * our peer. | ||
1420 | * | ||
1421 | * Section: 5.2.1 INIT received in COOKIE-WAIT or COOKIE-ECHOED State (Item B) | ||
1422 | * This usually indicates an initialization collision, i.e., each | ||
1423 | * endpoint is attempting, at about the same time, to establish an | ||
1424 | * association with the other endpoint. | ||
1425 | * | ||
1426 | * Upon receipt of an INIT in the COOKIE-WAIT or COOKIE-ECHOED state, an | ||
1427 | * endpoint MUST respond with an INIT ACK using the same parameters it | ||
1428 | * sent in its original INIT chunk (including its Verification Tag, | ||
1429 | * unchanged). These original parameters are combined with those from the | ||
1430 | * newly received INIT chunk. The endpoint shall also generate a State | ||
1431 | * Cookie with the INIT ACK. The endpoint uses the parameters sent in its | ||
1432 | * INIT to calculate the State Cookie. | ||
1433 | * | ||
1434 | * After that, the endpoint MUST NOT change its state, the T1-init | ||
1435 | * timer shall be left running and the corresponding TCB MUST NOT be | ||
1436 | * destroyed. The normal procedures for handling State Cookies when | ||
1437 | * a TCB exists will resolve the duplicate INITs to a single association. | ||
1438 | * | ||
1439 | * For an endpoint that is in the COOKIE-ECHOED state it MUST populate | ||
1440 | * its Tie-Tags with the Tag information of itself and its peer (see | ||
1441 | * section 5.2.2 for a description of the Tie-Tags). | ||
1442 | * | ||
1443 | * Verification Tag: Not explicit, but an INIT can not have a valid | ||
1444 | * verification tag, so we skip the check. | ||
1445 | * | ||
1446 | * Inputs | ||
1447 | * (endpoint, asoc, chunk) | ||
1448 | * | ||
1449 | * Outputs | ||
1450 | * (asoc, reply_msg, msg_up, timers, counters) | ||
1451 | * | ||
1452 | * The return value is the disposition of the chunk. | ||
1453 | */ | ||
1454 | sctp_disposition_t sctp_sf_do_5_2_1_siminit(const struct sctp_endpoint *ep, | ||
1455 | const struct sctp_association *asoc, | ||
1456 | const sctp_subtype_t type, | ||
1457 | void *arg, | ||
1458 | sctp_cmd_seq_t *commands) | ||
1459 | { | ||
1460 | 	/* Call helper to do the real work for both simultaneous and	| ||
1461 | * duplicate INIT chunk handling. | ||
1462 | */ | ||
1463 | return sctp_sf_do_unexpected_init(ep, asoc, type, arg, commands); | ||
1464 | } | ||
1465 | |||
1466 | /* | ||
1467 |  * Handle duplicate INIT messages. These are usually delayed	| ||
1468 |  * retransmissions.	| ||
1469 | * | ||
1470 | * Section: 5.2.2 Unexpected INIT in States Other than CLOSED, | ||
1471 | * COOKIE-ECHOED and COOKIE-WAIT | ||
1472 | * | ||
1473 | * Unless otherwise stated, upon reception of an unexpected INIT for | ||
1474 | * this association, the endpoint shall generate an INIT ACK with a | ||
1475 | * State Cookie. In the outbound INIT ACK the endpoint MUST copy its | ||
1476 | * current Verification Tag and peer's Verification Tag into a reserved | ||
1477 | * place within the state cookie. We shall refer to these locations as | ||
1478 | * the Peer's-Tie-Tag and the Local-Tie-Tag. The outbound SCTP packet | ||
1479 | * containing this INIT ACK MUST carry a Verification Tag value equal to | ||
1480 | * the Initiation Tag found in the unexpected INIT. And the INIT ACK | ||
1481 | * MUST contain a new Initiation Tag (randomly generated see Section | ||
1482 | * 5.3.1). Other parameters for the endpoint SHOULD be copied from the | ||
1483 | * existing parameters of the association (e.g. number of outbound | ||
1484 | * streams) into the INIT ACK and cookie. | ||
1485 | * | ||
1486 | * After sending out the INIT ACK, the endpoint shall take no further | ||
1487 | * actions, i.e., the existing association, including its current state, | ||
1488 | * and the corresponding TCB MUST NOT be changed. | ||
1489 | * | ||
1490 | * Note: Only when a TCB exists and the association is not in a COOKIE- | ||
1491 | * WAIT state are the Tie-Tags populated. For a normal association INIT | ||
1492 | * (i.e. the endpoint is in a COOKIE-WAIT state), the Tie-Tags MUST be | ||
1493 | * set to 0 (indicating that no previous TCB existed). The INIT ACK and | ||
1494 | * State Cookie are populated as specified in section 5.2.1. | ||
1495 | * | ||
1496 | * Verification Tag: Not specified, but an INIT has no way of knowing | ||
1497 | * what the verification tag could be, so we ignore it. | ||
1498 | * | ||
1499 | * Inputs | ||
1500 | * (endpoint, asoc, chunk) | ||
1501 | * | ||
1502 | * Outputs | ||
1503 | * (asoc, reply_msg, msg_up, timers, counters) | ||
1504 | * | ||
1505 | * The return value is the disposition of the chunk. | ||
1506 | */ | ||
1507 | sctp_disposition_t sctp_sf_do_5_2_2_dupinit(const struct sctp_endpoint *ep, | ||
1508 | const struct sctp_association *asoc, | ||
1509 | const sctp_subtype_t type, | ||
1510 | void *arg, | ||
1511 | sctp_cmd_seq_t *commands) | ||
1512 | { | ||
1513 | 	/* Call helper to do the real work for both simultaneous and	| ||
1514 | * duplicate INIT chunk handling. | ||
1515 | */ | ||
1516 | return sctp_sf_do_unexpected_init(ep, asoc, type, arg, commands); | ||
1517 | } | ||
1518 | |||
1519 | |||
1520 | |||
1521 | /* Unexpected COOKIE-ECHO handler for peer restart (Table 2, action 'A') | ||
1522 | * | ||
1523 | * Section 5.2.4 | ||
1524 | * A) In this case, the peer may have restarted. | ||
1525 | */ | ||
1526 | static sctp_disposition_t sctp_sf_do_dupcook_a(const struct sctp_endpoint *ep, | ||
1527 | const struct sctp_association *asoc, | ||
1528 | struct sctp_chunk *chunk, | ||
1529 | sctp_cmd_seq_t *commands, | ||
1530 | struct sctp_association *new_asoc) | ||
1531 | { | ||
1532 | sctp_init_chunk_t *peer_init; | ||
1533 | struct sctp_ulpevent *ev; | ||
1534 | struct sctp_chunk *repl; | ||
1535 | struct sctp_chunk *err; | ||
1536 | sctp_disposition_t disposition; | ||
1537 | |||
1538 | /* new_asoc is a brand-new association, so these are not yet | ||
1539 | * side effects--it is safe to run them here. | ||
1540 | */ | ||
1541 | peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; | ||
1542 | |||
1543 | if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, | ||
1544 | sctp_source(chunk), peer_init, | ||
1545 | GFP_ATOMIC)) | ||
1546 | goto nomem; | ||
1547 | |||
1548 | /* Make sure no new addresses are being added during the | ||
1549 | * restart. Though this is a pretty complicated attack | ||
1550 | * since you'd have to get inside the cookie. | ||
1551 | */ | ||
1552 | if (!sctp_sf_check_restart_addrs(new_asoc, asoc, chunk, commands)) { | ||
1553 | return SCTP_DISPOSITION_CONSUME; | ||
1554 | } | ||
1555 | |||
1556 | /* If the endpoint is in the SHUTDOWN-ACK-SENT state and recognizes | ||
1557 | * the peer has restarted (Action A), it MUST NOT setup a new | ||
1558 | * association but instead resend the SHUTDOWN ACK and send an ERROR | ||
1559 | * chunk with a "Cookie Received while Shutting Down" error cause to | ||
1560 | * its peer. | ||
1561 | */ | ||
1562 | if (sctp_state(asoc, SHUTDOWN_ACK_SENT)) { | ||
1563 | disposition = sctp_sf_do_9_2_reshutack(ep, asoc, | ||
1564 | SCTP_ST_CHUNK(chunk->chunk_hdr->type), | ||
1565 | chunk, commands); | ||
1566 | if (SCTP_DISPOSITION_NOMEM == disposition) | ||
1567 | goto nomem; | ||
1568 | |||
1569 | err = sctp_make_op_error(asoc, chunk, | ||
1570 | SCTP_ERROR_COOKIE_IN_SHUTDOWN, | ||
1571 | NULL, 0); | ||
1572 | if (err) | ||
1573 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | ||
1574 | SCTP_CHUNK(err)); | ||
1575 | |||
1576 | return SCTP_DISPOSITION_CONSUME; | ||
1577 | } | ||
1578 | |||
1579 | /* For now, fail any unsent/unacked data. Consider the optional | ||
1580 | 	 * choice of resending this data.	| ||
1581 | */ | ||
1582 | sctp_add_cmd_sf(commands, SCTP_CMD_PURGE_OUTQUEUE, SCTP_NULL()); | ||
1583 | |||
1584 | /* Update the content of current association. */ | ||
1585 | sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); | ||
1586 | |||
1587 | repl = sctp_make_cookie_ack(new_asoc, chunk); | ||
1588 | if (!repl) | ||
1589 | goto nomem; | ||
1590 | |||
1591 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); | ||
1592 | |||
1593 | /* Report association restart to upper layer. */ | ||
1594 | ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_RESTART, 0, | ||
1595 | new_asoc->c.sinit_num_ostreams, | ||
1596 | new_asoc->c.sinit_max_instreams, | ||
1597 | GFP_ATOMIC); | ||
1598 | if (!ev) | ||
1599 | goto nomem_ev; | ||
1600 | |||
1601 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); | ||
1602 | return SCTP_DISPOSITION_CONSUME; | ||
1603 | |||
1604 | nomem_ev: | ||
1605 | sctp_chunk_free(repl); | ||
1606 | nomem: | ||
1607 | return SCTP_DISPOSITION_NOMEM; | ||
1608 | } | ||
1609 | |||
1610 | /* Unexpected COOKIE-ECHO handler for setup collision (Table 2, action 'B') | ||
1611 | * | ||
1612 | * Section 5.2.4 | ||
1613 | * B) In this case, both sides may be attempting to start an association | ||
1614 | * at about the same time but the peer endpoint started its INIT | ||
1615 | * after responding to the local endpoint's INIT | ||
1616 | */ | ||
1617 | /* This case represents an initialization collision. */ | ||
1618 | static sctp_disposition_t sctp_sf_do_dupcook_b(const struct sctp_endpoint *ep, | ||
1619 | const struct sctp_association *asoc, | ||
1620 | struct sctp_chunk *chunk, | ||
1621 | sctp_cmd_seq_t *commands, | ||
1622 | struct sctp_association *new_asoc) | ||
1623 | { | ||
1624 | sctp_init_chunk_t *peer_init; | ||
1625 | struct sctp_ulpevent *ev; | ||
1626 | struct sctp_chunk *repl; | ||
1627 | |||
1628 | /* new_asoc is a brand-new association, so these are not yet | ||
1629 | * side effects--it is safe to run them here. | ||
1630 | */ | ||
1631 | peer_init = &chunk->subh.cookie_hdr->c.peer_init[0]; | ||
1632 | if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type, | ||
1633 | sctp_source(chunk), peer_init, | ||
1634 | GFP_ATOMIC)) | ||
1635 | goto nomem; | ||
1636 | |||
1637 | /* Update the content of current association. */ | ||
1638 | sctp_add_cmd_sf(commands, SCTP_CMD_UPDATE_ASSOC, SCTP_ASOC(new_asoc)); | ||
1639 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
1640 | SCTP_STATE(SCTP_STATE_ESTABLISHED)); | ||
1641 | SCTP_INC_STATS(SCTP_MIB_CURRESTAB); | ||
1642 | sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL()); | ||
1643 | |||
1644 | repl = sctp_make_cookie_ack(new_asoc, chunk); | ||
1645 | if (!repl) | ||
1646 | goto nomem; | ||
1647 | |||
1648 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); | ||
1649 | sctp_add_cmd_sf(commands, SCTP_CMD_TRANSMIT, SCTP_NULL()); | ||
1650 | |||
1651 | /* RFC 2960 5.1 Normal Establishment of an Association | ||
1652 | * | ||
1653 | * D) IMPLEMENTATION NOTE: An implementation may choose to | ||
1654 | * send the Communication Up notification to the SCTP user | ||
1655 | * upon reception of a valid COOKIE ECHO chunk. | ||
1656 | */ | ||
1657 | ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_UP, 0, | ||
1658 | new_asoc->c.sinit_num_ostreams, | ||
1659 | new_asoc->c.sinit_max_instreams, | ||
1660 | GFP_ATOMIC); | ||
1661 | if (!ev) | ||
1662 | goto nomem_ev; | ||
1663 | |||
1664 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); | ||
1665 | |||
1666 | /* Sockets API Draft Section 5.3.1.6 | ||
1667 | 	 * When a peer sends an Adaption Layer Indication parameter, SCTP	| ||
1668 | 	 * delivers this notification to inform the application of the	| ||
1669 | 	 * peer's requested adaption layer.	| ||
1670 | */ | ||
1671 | if (asoc->peer.adaption_ind) { | ||
1672 | ev = sctp_ulpevent_make_adaption_indication(asoc, GFP_ATOMIC); | ||
1673 | if (!ev) | ||
1674 | goto nomem_ev; | ||
1675 | |||
1676 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, | ||
1677 | SCTP_ULPEVENT(ev)); | ||
1678 | } | ||
1679 | |||
1680 | return SCTP_DISPOSITION_CONSUME; | ||
1681 | |||
1682 | nomem_ev: | ||
1683 | sctp_chunk_free(repl); | ||
1684 | nomem: | ||
1685 | return SCTP_DISPOSITION_NOMEM; | ||
1686 | } | ||
1687 | |||
1688 | /* Unexpected COOKIE-ECHO handler for setup collision (Table 2, action 'C') | ||
1689 | * | ||
1690 | * Section 5.2.4 | ||
1691 | * C) In this case, the local endpoint's cookie has arrived late. | ||
1692 | * Before it arrived, the local endpoint sent an INIT and received an | ||
1693 | * INIT-ACK and finally sent a COOKIE ECHO with the peer's same tag | ||
1694 | * but a new tag of its own. | ||
1695 | */ | ||
1696 | /* This case represents an initialization collision. */ | ||
1697 | static sctp_disposition_t sctp_sf_do_dupcook_c(const struct sctp_endpoint *ep, | ||
1698 | const struct sctp_association *asoc, | ||
1699 | struct sctp_chunk *chunk, | ||
1700 | sctp_cmd_seq_t *commands, | ||
1701 | struct sctp_association *new_asoc) | ||
1702 | { | ||
1703 | /* The cookie should be silently discarded. | ||
1704 | * The endpoint SHOULD NOT change states and should leave | ||
1705 | * any timers running. | ||
1706 | */ | ||
1707 | return SCTP_DISPOSITION_DISCARD; | ||
1708 | } | ||
1709 | |||
1710 | /* Unexpected COOKIE-ECHO handler lost chunk (Table 2, action 'D') | ||
1711 | * | ||
1712 | * Section 5.2.4 | ||
1713 | * | ||
1714 | * D) When both local and remote tags match the endpoint should always | ||
1715 | * enter the ESTABLISHED state, if it has not already done so. | ||
1716 | */ | ||
1717 | /* This case represents an initialization collision. */ | ||
1718 | static sctp_disposition_t sctp_sf_do_dupcook_d(const struct sctp_endpoint *ep, | ||
1719 | const struct sctp_association *asoc, | ||
1720 | struct sctp_chunk *chunk, | ||
1721 | sctp_cmd_seq_t *commands, | ||
1722 | struct sctp_association *new_asoc) | ||
1723 | { | ||
1724 | struct sctp_ulpevent *ev = NULL; | ||
1725 | struct sctp_chunk *repl; | ||
1726 | |||
1727 | /* Clarification from Implementor's Guide: | ||
1728 | * D) When both local and remote tags match the endpoint should | ||
1729 | * enter the ESTABLISHED state, if it is in the COOKIE-ECHOED state. | ||
1730 | * It should stop any cookie timer that may be running and send | ||
1731 | * a COOKIE ACK. | ||
1732 | */ | ||
1733 | |||
1734 | /* Don't accidentally move back into established state. */ | ||
1735 | if (asoc->state < SCTP_STATE_ESTABLISHED) { | ||
1736 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | ||
1737 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); | ||
1738 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
1739 | SCTP_STATE(SCTP_STATE_ESTABLISHED)); | ||
1740 | SCTP_INC_STATS(SCTP_MIB_CURRESTAB); | ||
1741 | sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, | ||
1742 | SCTP_NULL()); | ||
1743 | |||
1744 | /* RFC 2960 5.1 Normal Establishment of an Association | ||
1745 | * | ||
1746 | * D) IMPLEMENTATION NOTE: An implementation may choose | ||
1747 | * to send the Communication Up notification to the | ||
1748 | * SCTP user upon reception of a valid COOKIE | ||
1749 | * ECHO chunk. | ||
1750 | */ | ||
1751 | ev = sctp_ulpevent_make_assoc_change(new_asoc, 0, | ||
1752 | SCTP_COMM_UP, 0, | ||
1753 | new_asoc->c.sinit_num_ostreams, | ||
1754 | new_asoc->c.sinit_max_instreams, | ||
1755 | GFP_ATOMIC); | ||
1756 | if (!ev) | ||
1757 | goto nomem; | ||
1758 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, | ||
1759 | SCTP_ULPEVENT(ev)); | ||
1760 | |||
1761 | /* Sockets API Draft Section 5.3.1.6 | ||
1762 | 		 * When a peer sends an Adaption Layer Indication parameter,	| ||
1763 | 		 * SCTP delivers this notification to inform the application	| ||
1764 | 		 * of the peer's requested adaption layer.	| ||
1765 | */ | ||
1766 | if (new_asoc->peer.adaption_ind) { | ||
1767 | ev = sctp_ulpevent_make_adaption_indication(new_asoc, | ||
1768 | GFP_ATOMIC); | ||
1769 | if (!ev) | ||
1770 | goto nomem; | ||
1771 | |||
1772 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, | ||
1773 | SCTP_ULPEVENT(ev)); | ||
1774 | } | ||
1775 | } | ||
1776 | sctp_add_cmd_sf(commands, SCTP_CMD_TRANSMIT, SCTP_NULL()); | ||
1777 | |||
1778 | repl = sctp_make_cookie_ack(new_asoc, chunk); | ||
1779 | if (!repl) | ||
1780 | goto nomem; | ||
1781 | |||
1782 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); | ||
1783 | sctp_add_cmd_sf(commands, SCTP_CMD_TRANSMIT, SCTP_NULL()); | ||
1784 | |||
1785 | return SCTP_DISPOSITION_CONSUME; | ||
1786 | |||
1787 | nomem: | ||
1788 | if (ev) | ||
1789 | sctp_ulpevent_free(ev); | ||
1790 | return SCTP_DISPOSITION_NOMEM; | ||
1791 | } | ||
1792 | |||
1793 | /* | ||
1794 | * Handle a duplicate COOKIE-ECHO. This usually means a cookie-carrying | ||
1795 | * chunk was retransmitted and then delayed in the network. | ||
1796 | * | ||
1797 | * Section: 5.2.4 Handle a COOKIE ECHO when a TCB exists | ||
1798 | * | ||
1799 | * Verification Tag: None. Do cookie validation. | ||
1800 | * | ||
1801 | * Inputs | ||
1802 | * (endpoint, asoc, chunk) | ||
1803 | * | ||
1804 | * Outputs | ||
1805 | * (asoc, reply_msg, msg_up, timers, counters) | ||
1806 | * | ||
1807 | * The return value is the disposition of the chunk. | ||
1808 | */ | ||
1809 | sctp_disposition_t sctp_sf_do_5_2_4_dupcook(const struct sctp_endpoint *ep, | ||
1810 | const struct sctp_association *asoc, | ||
1811 | const sctp_subtype_t type, | ||
1812 | void *arg, | ||
1813 | sctp_cmd_seq_t *commands) | ||
1814 | { | ||
1815 | sctp_disposition_t retval; | ||
1816 | struct sctp_chunk *chunk = arg; | ||
1817 | struct sctp_association *new_asoc; | ||
1818 | int error = 0; | ||
1819 | char action; | ||
1820 | struct sctp_chunk *err_chk_p; | ||
1821 | |||
1822 | /* Make sure that the chunk has a valid length from the protocol | ||
1823 | * perspective. In this case check to make sure we have at least | ||
1824 | * enough for the chunk header. Cookie length verification is | ||
1825 | * done later. | ||
1826 | */ | ||
1827 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) | ||
1828 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
1829 | commands); | ||
1830 | |||
1831 | /* "Decode" the chunk. We have no optional parameters so we | ||
1832 | * are in good shape. | ||
1833 | */ | ||
1834 | chunk->subh.cookie_hdr = (struct sctp_signed_cookie *)chunk->skb->data; | ||
1835 | skb_pull(chunk->skb, ntohs(chunk->chunk_hdr->length) - | ||
1836 | sizeof(sctp_chunkhdr_t)); | ||
1837 | |||
1838 | /* In RFC 2960 5.2.4 3, if both Verification Tags in the State Cookie | ||
1839 | * of a duplicate COOKIE ECHO match the Verification Tags of the | ||
1840 | * current association, consider the State Cookie valid even if | ||
1841 | * the lifespan is exceeded. | ||
1842 | */ | ||
1843 | new_asoc = sctp_unpack_cookie(ep, asoc, chunk, GFP_ATOMIC, &error, | ||
1844 | &err_chk_p); | ||
1845 | |||
1846 | /* FIXME: | ||
1847 | * If the re-build failed, what is the proper error path | ||
1848 | * from here? | ||
1849 | * | ||
1850 | * [We should abort the association. --piggy] | ||
1851 | */ | ||
1852 | if (!new_asoc) { | ||
1853 | /* FIXME: Several errors are possible. A bad cookie should | ||
1854 | * be silently discarded, but think about logging it too. | ||
1855 | */ | ||
1856 | switch (error) { | ||
1857 | case -SCTP_IERROR_NOMEM: | ||
1858 | goto nomem; | ||
1859 | |||
1860 | case -SCTP_IERROR_STALE_COOKIE: | ||
1861 | sctp_send_stale_cookie_err(ep, asoc, chunk, commands, | ||
1862 | err_chk_p); | ||
1863 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
1864 | case -SCTP_IERROR_BAD_SIG: | ||
1865 | default: | ||
1866 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
1867 | 		}	| ||
1868 | } | ||
1869 | |||
1870 | /* Compare the tie_tag in cookie with the verification tag of | ||
1871 | * current association. | ||
1872 | */ | ||
1873 | action = sctp_tietags_compare(new_asoc, asoc); | ||
1874 | |||
1875 | switch (action) { | ||
1876 | case 'A': /* Association restart. */ | ||
1877 | retval = sctp_sf_do_dupcook_a(ep, asoc, chunk, commands, | ||
1878 | new_asoc); | ||
1879 | break; | ||
1880 | |||
1881 | case 'B': /* Collision case B. */ | ||
1882 | retval = sctp_sf_do_dupcook_b(ep, asoc, chunk, commands, | ||
1883 | new_asoc); | ||
1884 | break; | ||
1885 | |||
1886 | case 'C': /* Collision case C. */ | ||
1887 | retval = sctp_sf_do_dupcook_c(ep, asoc, chunk, commands, | ||
1888 | new_asoc); | ||
1889 | break; | ||
1890 | |||
1891 | case 'D': /* Collision case D. */ | ||
1892 | retval = sctp_sf_do_dupcook_d(ep, asoc, chunk, commands, | ||
1893 | new_asoc); | ||
1894 | break; | ||
1895 | |||
1896 | default: /* Discard packet for all others. */ | ||
1897 | retval = sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
1898 | break; | ||
1899 | 	}	| ||
1900 | |||
1901 | 	/* Delete the temporary new association. */	| ||
1902 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); | ||
1903 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); | ||
1904 | |||
1905 | return retval; | ||
1906 | |||
1907 | nomem: | ||
1908 | return SCTP_DISPOSITION_NOMEM; | ||
1909 | } | ||
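/* Editorial sketch, not part of the original file: the classification that
 * sctp_tietags_compare() performs for the switch above, following RFC 2960
 * Table 2 ("Handling of a COOKIE ECHO when a TCB exists").  Standalone
 * illustration with hypothetical names; the in-tree helper also covers the
 * "no tie-tag present" rows, which are omitted here.
 */
#include <stdint.h>

static char classify_dup_cookie(uint32_t my_vtag, uint32_t peer_vtag,
				uint32_t cookie_my_vtag,
				uint32_t cookie_peer_vtag,
				uint32_t local_tie_tag, uint32_t peer_tie_tag)
{
	int local_match = (cookie_my_vtag == my_vtag);
	int peer_match  = (cookie_peer_vtag == peer_vtag);

	if (!local_match && !peer_match &&
	    local_tie_tag == my_vtag && peer_tie_tag == peer_vtag)
		return 'A';	/* peer restarted */
	if (local_match && !peer_match)
		return 'B';	/* collision: peer never saw our INIT ACK tags */
	if (!local_match && peer_match && !local_tie_tag && !peer_tie_tag)
		return 'C';	/* our own cookie arrived late: discard */
	if (local_match && peer_match)
		return 'D';	/* tags already agree: ensure ESTABLISHED, COOKIE ACK */
	return 'E';		/* hypothetical "no action" marker: discard packet */
}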
1910 | |||
1911 | /* | ||
1912 | * Process an ABORT. (SHUTDOWN-PENDING state) | ||
1913 | * | ||
1914 | * See sctp_sf_do_9_1_abort(). | ||
1915 | */ | ||
1916 | sctp_disposition_t sctp_sf_shutdown_pending_abort( | ||
1917 | const struct sctp_endpoint *ep, | ||
1918 | const struct sctp_association *asoc, | ||
1919 | const sctp_subtype_t type, | ||
1920 | void *arg, | ||
1921 | sctp_cmd_seq_t *commands) | ||
1922 | { | ||
1923 | struct sctp_chunk *chunk = arg; | ||
1924 | |||
1925 | if (!sctp_vtag_verify_either(chunk, asoc)) | ||
1926 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
1927 | |||
1928 | /* Make sure that the ABORT chunk has a valid length. | ||
1929 | * Since this is an ABORT chunk, we have to discard it | ||
1930 | * because of the following text: | ||
1931 | * RFC 2960, Section 3.3.7 | ||
1932 | * If an endpoint receives an ABORT with a format error or for an | ||
1933 | * association that doesn't exist, it MUST silently discard it. | ||
1934 | 	 * Because the length is "invalid", we can't really discard just	| ||
1935 | 	 * this chunk, as we do not know its true length. So, to be safe, discard the	| ||
1936 | * packet. | ||
1937 | */ | ||
1938 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t))) | ||
1939 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
1940 | |||
1941 | /* Stop the T5-shutdown guard timer. */ | ||
1942 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | ||
1943 | SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); | ||
1944 | |||
1945 | return sctp_sf_do_9_1_abort(ep, asoc, type, arg, commands); | ||
1946 | } | ||
1947 | |||
1948 | /* | ||
1949 | * Process an ABORT. (SHUTDOWN-SENT state) | ||
1950 | * | ||
1951 | * See sctp_sf_do_9_1_abort(). | ||
1952 | */ | ||
1953 | sctp_disposition_t sctp_sf_shutdown_sent_abort(const struct sctp_endpoint *ep, | ||
1954 | const struct sctp_association *asoc, | ||
1955 | const sctp_subtype_t type, | ||
1956 | void *arg, | ||
1957 | sctp_cmd_seq_t *commands) | ||
1958 | { | ||
1959 | struct sctp_chunk *chunk = arg; | ||
1960 | |||
1961 | if (!sctp_vtag_verify_either(chunk, asoc)) | ||
1962 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
1963 | |||
1964 | /* Make sure that the ABORT chunk has a valid length. | ||
1965 | * Since this is an ABORT chunk, we have to discard it | ||
1966 | * because of the following text: | ||
1967 | * RFC 2960, Section 3.3.7 | ||
1968 | * If an endpoint receives an ABORT with a format error or for an | ||
1969 | * association that doesn't exist, it MUST silently discard it. | ||
1970 | 	 * Because the length is "invalid", we can't really discard just	| ||
1971 | 	 * this chunk, as we do not know its true length. So, to be safe, discard the	| ||
1972 | * packet. | ||
1973 | */ | ||
1974 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t))) | ||
1975 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
1976 | |||
1977 | /* Stop the T2-shutdown timer. */ | ||
1978 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | ||
1979 | SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); | ||
1980 | |||
1981 | /* Stop the T5-shutdown guard timer. */ | ||
1982 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | ||
1983 | SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); | ||
1984 | |||
1985 | return sctp_sf_do_9_1_abort(ep, asoc, type, arg, commands); | ||
1986 | } | ||
1987 | |||
1988 | /* | ||
1989 | * Process an ABORT. (SHUTDOWN-ACK-SENT state) | ||
1990 | * | ||
1991 | * See sctp_sf_do_9_1_abort(). | ||
1992 | */ | ||
1993 | sctp_disposition_t sctp_sf_shutdown_ack_sent_abort( | ||
1994 | const struct sctp_endpoint *ep, | ||
1995 | const struct sctp_association *asoc, | ||
1996 | const sctp_subtype_t type, | ||
1997 | void *arg, | ||
1998 | sctp_cmd_seq_t *commands) | ||
1999 | { | ||
2000 | /* The same T2 timer, so we should be able to use | ||
2001 | * common function with the SHUTDOWN-SENT state. | ||
2002 | */ | ||
2003 | return sctp_sf_shutdown_sent_abort(ep, asoc, type, arg, commands); | ||
2004 | } | ||
2005 | |||
2006 | /* | ||
2007 | * Handle an Error received in COOKIE_ECHOED state. | ||
2008 | * | ||
2009 |  * Only handle the Stale COOKIE Error; the other errors will	| ||
2010 | * be ignored. | ||
2011 | * | ||
2012 | * Inputs | ||
2013 | * (endpoint, asoc, chunk) | ||
2014 | * | ||
2015 | * Outputs | ||
2016 | * (asoc, reply_msg, msg_up, timers, counters) | ||
2017 | * | ||
2018 | * The return value is the disposition of the chunk. | ||
2019 | */ | ||
2020 | sctp_disposition_t sctp_sf_cookie_echoed_err(const struct sctp_endpoint *ep, | ||
2021 | const struct sctp_association *asoc, | ||
2022 | const sctp_subtype_t type, | ||
2023 | void *arg, | ||
2024 | sctp_cmd_seq_t *commands) | ||
2025 | { | ||
2026 | struct sctp_chunk *chunk = arg; | ||
2027 | sctp_errhdr_t *err; | ||
2028 | |||
2029 | if (!sctp_vtag_verify(chunk, asoc)) | ||
2030 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
2031 | |||
2032 | /* Make sure that the ERROR chunk has a valid length. | ||
2033 | * The parameter walking depends on this as well. | ||
2034 | */ | ||
2035 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t))) | ||
2036 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
2037 | commands); | ||
2038 | |||
2039 | /* Process the error here */ | ||
2040 | /* FUTURE FIXME: When PR-SCTP related and other optional | ||
2041 | * parms are emitted, this will have to change to handle multiple | ||
2042 | * errors. | ||
2043 | */ | ||
2044 | sctp_walk_errors(err, chunk->chunk_hdr) { | ||
2045 | if (SCTP_ERROR_STALE_COOKIE == err->cause) | ||
2046 | return sctp_sf_do_5_2_6_stale(ep, asoc, type, | ||
2047 | arg, commands); | ||
2048 | } | ||
2049 | |||
2050 | /* It is possible to have malformed error causes, and that | ||
2051 | * will cause us to end the walk early. However, since | ||
2052 | * we are discarding the packet, there should be no adverse | ||
2053 | 	 * effects.	| ||
2054 | */ | ||
2055 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
2056 | } | ||
2057 | |||
2058 | /* | ||
2059 | * Handle a Stale COOKIE Error | ||
2060 | * | ||
2061 | * Section: 5.2.6 Handle Stale COOKIE Error | ||
2062 | * If the association is in the COOKIE-ECHOED state, the endpoint may elect | ||
2063 | * one of the following three alternatives. | ||
2064 | * ... | ||
2065 | * 3) Send a new INIT chunk to the endpoint, adding a Cookie | ||
2066 | * Preservative parameter requesting an extension to the lifetime of | ||
2067 | * the State Cookie. When calculating the time extension, an | ||
2068 | * implementation SHOULD use the RTT information measured based on the | ||
2069 | * previous COOKIE ECHO / ERROR exchange, and should add no more | ||
2070 | * than 1 second beyond the measured RTT, due to long State Cookie | ||
2071 | * lifetimes making the endpoint more subject to a replay attack. | ||
2072 | * | ||
2073 | * Verification Tag: Not explicit, but safe to ignore. | ||
2074 | * | ||
2075 | * Inputs | ||
2076 | * (endpoint, asoc, chunk) | ||
2077 | * | ||
2078 | * Outputs | ||
2079 | * (asoc, reply_msg, msg_up, timers, counters) | ||
2080 | * | ||
2081 | * The return value is the disposition of the chunk. | ||
2082 | */ | ||
2083 | static sctp_disposition_t sctp_sf_do_5_2_6_stale(const struct sctp_endpoint *ep, | ||
2084 | const struct sctp_association *asoc, | ||
2085 | const sctp_subtype_t type, | ||
2086 | void *arg, | ||
2087 | sctp_cmd_seq_t *commands) | ||
2088 | { | ||
2089 | struct sctp_chunk *chunk = arg; | ||
2090 | time_t stale; | ||
2091 | sctp_cookie_preserve_param_t bht; | ||
2092 | sctp_errhdr_t *err; | ||
2093 | struct sctp_chunk *reply; | ||
2094 | struct sctp_bind_addr *bp; | ||
2095 | int attempts; | ||
2096 | |||
2097 | attempts = asoc->counters[SCTP_COUNTER_INIT_ERROR] + 1; | ||
2098 | |||
2099 | if (attempts >= asoc->max_init_attempts) { | ||
2100 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, | ||
2101 | SCTP_U32(SCTP_ERROR_STALE_COOKIE)); | ||
2102 | return SCTP_DISPOSITION_DELETE_TCB; | ||
2103 | } | ||
2104 | |||
2105 | err = (sctp_errhdr_t *)(chunk->skb->data); | ||
2106 | |||
2107 | /* When calculating the time extension, an implementation | ||
2108 | * SHOULD use the RTT information measured based on the | ||
2109 | * previous COOKIE ECHO / ERROR exchange, and should add no | ||
2110 | * more than 1 second beyond the measured RTT, due to long | ||
2111 | * State Cookie lifetimes making the endpoint more subject to | ||
2112 | * a replay attack. | ||
2113 | 	 * The Measure of Staleness is in usec (1/1000000 sec), while the	| ||
2114 | 	 * Suggested Cookie Life-span Increment is in msec	| ||
2115 | 	 * (1/1000 sec).	| ||
2116 | * In general, if you use the suggested cookie life, the value | ||
2117 | * found in the field of measure of staleness should be doubled | ||
2118 | * to give ample time to retransmit the new cookie and thus | ||
2119 | * yield a higher probability of success on the reattempt. | ||
2120 | */ | ||
2121 | stale = ntohl(*(suseconds_t *)((u8 *)err + sizeof(sctp_errhdr_t))); | ||
2122 | stale = (stale * 2) / 1000; | ||
2123 | |||
2124 | bht.param_hdr.type = SCTP_PARAM_COOKIE_PRESERVATIVE; | ||
2125 | bht.param_hdr.length = htons(sizeof(bht)); | ||
2126 | bht.lifespan_increment = htonl(stale); | ||
2127 | |||
2128 | /* Build that new INIT chunk. */ | ||
2129 | bp = (struct sctp_bind_addr *) &asoc->base.bind_addr; | ||
2130 | reply = sctp_make_init(asoc, bp, GFP_ATOMIC, sizeof(bht)); | ||
2131 | if (!reply) | ||
2132 | goto nomem; | ||
2133 | |||
2134 | sctp_addto_chunk(reply, sizeof(bht), &bht); | ||
2135 | |||
2136 | /* Clear peer's init_tag cached in assoc as we are sending a new INIT */ | ||
2137 | sctp_add_cmd_sf(commands, SCTP_CMD_CLEAR_INIT_TAG, SCTP_NULL()); | ||
2138 | |||
2139 | /* Stop pending T3-rtx and heartbeat timers */ | ||
2140 | sctp_add_cmd_sf(commands, SCTP_CMD_T3_RTX_TIMERS_STOP, SCTP_NULL()); | ||
2141 | sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL()); | ||
2142 | |||
2143 | /* Delete non-primary peer ip addresses since we are transitioning | ||
2144 | * back to the COOKIE-WAIT state | ||
2145 | */ | ||
2146 | sctp_add_cmd_sf(commands, SCTP_CMD_DEL_NON_PRIMARY, SCTP_NULL()); | ||
2147 | |||
2148 | /* If we've sent any data bundled with COOKIE-ECHO we will need to | ||
2149 | * resend | ||
2150 | */ | ||
2151 | sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN, | ||
2152 | SCTP_TRANSPORT(asoc->peer.primary_path)); | ||
2153 | |||
2154 | /* Cast away the const modifier, as we want to just | ||
2155 | 	 * rerun it through as a side effect.	| ||
2156 | */ | ||
2157 | sctp_add_cmd_sf(commands, SCTP_CMD_COUNTER_INC, | ||
2158 | SCTP_COUNTER(SCTP_COUNTER_INIT_ERROR)); | ||
2159 | |||
2160 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | ||
2161 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_COOKIE)); | ||
2162 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
2163 | SCTP_STATE(SCTP_STATE_COOKIE_WAIT)); | ||
2164 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, | ||
2165 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); | ||
2166 | |||
2167 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); | ||
2168 | |||
2169 | return SCTP_DISPOSITION_CONSUME; | ||
2170 | |||
2171 | nomem: | ||
2172 | return SCTP_DISPOSITION_NOMEM; | ||
2173 | } | ||
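/* Editorial sketch, not part of the original file: the lifespan arithmetic in
 * sctp_sf_do_5_2_6_stale() above.  The Measure of Staleness reported by the
 * peer is in usec, the Suggested Cookie Life-span Increment carried in the
 * Cookie Preservative parameter is in msec, and the staleness is doubled to
 * give the retransmitted cookie ample slack: a reported staleness of
 * 250000 usec becomes a 500 msec increment.
 */
#include <stdint.h>

static uint32_t lifespan_increment_msec(uint32_t staleness_usec)
{
	return (staleness_usec * 2) / 1000;	/* double, then usec -> msec */
}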
2174 | |||
2175 | /* | ||
2176 | * Process an ABORT. | ||
2177 | * | ||
2178 | * Section: 9.1 | ||
2179 | * After checking the Verification Tag, the receiving endpoint shall | ||
2180 | * remove the association from its record, and shall report the | ||
2181 | * termination to its upper layer. | ||
2182 | * | ||
2183 | * Verification Tag: 8.5.1 Exceptions in Verification Tag Rules | ||
2184 | * B) Rules for packet carrying ABORT: | ||
2185 | * | ||
2186 | * - The endpoint shall always fill in the Verification Tag field of the | ||
2187 | * outbound packet with the destination endpoint's tag value if it | ||
2188 | * is known. | ||
2189 | * | ||
2190 | * - If the ABORT is sent in response to an OOTB packet, the endpoint | ||
2191 | * MUST follow the procedure described in Section 8.4. | ||
2192 | * | ||
2193 | * - The receiver MUST accept the packet if the Verification Tag | ||
2194 | * matches either its own tag, OR the tag of its peer. Otherwise, the | ||
2195 | * receiver MUST silently discard the packet and take no further | ||
2196 | * action. | ||
2197 | * | ||
2198 | * Inputs | ||
2199 | * (endpoint, asoc, chunk) | ||
2200 | * | ||
2201 | * Outputs | ||
2202 | * (asoc, reply_msg, msg_up, timers, counters) | ||
2203 | * | ||
2204 | * The return value is the disposition of the chunk. | ||
2205 | */ | ||
2206 | sctp_disposition_t sctp_sf_do_9_1_abort(const struct sctp_endpoint *ep, | ||
2207 | const struct sctp_association *asoc, | ||
2208 | const sctp_subtype_t type, | ||
2209 | void *arg, | ||
2210 | sctp_cmd_seq_t *commands) | ||
2211 | { | ||
2212 | struct sctp_chunk *chunk = arg; | ||
2213 | unsigned len; | ||
2214 | __u16 error = SCTP_ERROR_NO_ERROR; | ||
2215 | |||
2216 | if (!sctp_vtag_verify_either(chunk, asoc)) | ||
2217 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
2218 | |||
2219 | /* Make sure that the ABORT chunk has a valid length. | ||
2220 | * Since this is an ABORT chunk, we have to discard it | ||
2221 | * because of the following text: | ||
2222 | * RFC 2960, Section 3.3.7 | ||
2223 | * If an endpoint receives an ABORT with a format error or for an | ||
2224 | * association that doesn't exist, it MUST silently discard it. | ||
2225 | 	 * Because the length is "invalid", we can't really discard just	| ||
2226 | 	 * this chunk, as we do not know its true length. So, to be safe, discard the	| ||
2227 | * packet. | ||
2228 | */ | ||
2229 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t))) | ||
2230 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
2231 | |||
2232 | /* See if we have an error cause code in the chunk. */ | ||
2233 | len = ntohs(chunk->chunk_hdr->length); | ||
2234 | if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) | ||
2235 | error = ((sctp_errhdr_t *)chunk->skb->data)->cause; | ||
2236 | |||
2237 | /* ASSOC_FAILED will DELETE_TCB. */ | ||
2238 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, SCTP_U32(error)); | ||
2239 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | ||
2240 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | ||
2241 | |||
2242 | return SCTP_DISPOSITION_ABORT; | ||
2243 | } | ||
2244 | |||
2245 | /* | ||
2246 | * Process an ABORT. (COOKIE-WAIT state) | ||
2247 | * | ||
2248 | * See sctp_sf_do_9_1_abort() above. | ||
2249 | */ | ||
2250 | sctp_disposition_t sctp_sf_cookie_wait_abort(const struct sctp_endpoint *ep, | ||
2251 | const struct sctp_association *asoc, | ||
2252 | const sctp_subtype_t type, | ||
2253 | void *arg, | ||
2254 | sctp_cmd_seq_t *commands) | ||
2255 | { | ||
2256 | struct sctp_chunk *chunk = arg; | ||
2257 | unsigned len; | ||
2258 | __u16 error = SCTP_ERROR_NO_ERROR; | ||
2259 | |||
2260 | if (!sctp_vtag_verify_either(chunk, asoc)) | ||
2261 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
2262 | |||
2263 | /* Make sure that the ABORT chunk has a valid length. | ||
2264 | * Since this is an ABORT chunk, we have to discard it | ||
2265 | * because of the following text: | ||
2266 | * RFC 2960, Section 3.3.7 | ||
2267 | * If an endpoint receives an ABORT with a format error or for an | ||
2268 | * association that doesn't exist, it MUST silently discard it. | ||
2269 | 	 * Because the length is "invalid", we can't really discard just	| ||
2270 | 	 * this chunk, as we do not know its true length. So, to be safe, discard the	| ||
2271 | * packet. | ||
2272 | */ | ||
2273 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_abort_chunk_t))) | ||
2274 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
2275 | |||
2276 | /* See if we have an error cause code in the chunk. */ | ||
2277 | len = ntohs(chunk->chunk_hdr->length); | ||
2278 | if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) | ||
2279 | error = ((sctp_errhdr_t *)chunk->skb->data)->cause; | ||
2280 | |||
2281 | sctp_stop_t1_and_abort(commands, error); | ||
2282 | return SCTP_DISPOSITION_ABORT; | ||
2283 | } | ||
2284 | |||
2285 | /* | ||
2286 | * Process an incoming ICMP as an ABORT. (COOKIE-WAIT state) | ||
2287 | */ | ||
2288 | sctp_disposition_t sctp_sf_cookie_wait_icmp_abort(const struct sctp_endpoint *ep, | ||
2289 | const struct sctp_association *asoc, | ||
2290 | const sctp_subtype_t type, | ||
2291 | void *arg, | ||
2292 | sctp_cmd_seq_t *commands) | ||
2293 | { | ||
2294 | sctp_stop_t1_and_abort(commands, SCTP_ERROR_NO_ERROR); | ||
2295 | return SCTP_DISPOSITION_ABORT; | ||
2296 | } | ||
2297 | |||
2298 | /* | ||
2299 | * Process an ABORT. (COOKIE-ECHOED state) | ||
2300 | */ | ||
2301 | sctp_disposition_t sctp_sf_cookie_echoed_abort(const struct sctp_endpoint *ep, | ||
2302 | const struct sctp_association *asoc, | ||
2303 | const sctp_subtype_t type, | ||
2304 | void *arg, | ||
2305 | sctp_cmd_seq_t *commands) | ||
2306 | { | ||
2307 | /* There is a single T1 timer, so we should be able to use | ||
2308 | * common function with the COOKIE-WAIT state. | ||
2309 | */ | ||
2310 | return sctp_sf_cookie_wait_abort(ep, asoc, type, arg, commands); | ||
2311 | } | ||
2312 | |||
2313 | /* | ||
2314 | * Stop T1 timer and abort association with "INIT failed". | ||
2315 | * | ||
2316 | * This is common code called by several sctp_sf_*_abort() functions above. | ||
2317 | */ | ||
2318 | void sctp_stop_t1_and_abort(sctp_cmd_seq_t *commands, __u16 error) | ||
2319 | { | ||
2320 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
2321 | SCTP_STATE(SCTP_STATE_CLOSED)); | ||
2322 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | ||
2323 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | ||
2324 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); | ||
2325 | /* CMD_INIT_FAILED will DELETE_TCB. */ | ||
2326 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, | ||
2327 | SCTP_U32(error)); | ||
2328 | } | ||
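/* Editorial sketch, not part of the original file: the command-queue pattern
 * that sctp_stop_t1_and_abort() and every other state function here follow.
 * A state function never mutates the (const) association directly; it appends
 * verbs such as NEW_STATE, TIMER_STOP or REPLY to a command sequence and
 * returns a disposition, and a separate side-effect interpreter applies the
 * queue afterwards.  Minimal standalone model with hypothetical names.
 */
#include <stddef.h>

enum verb { CMD_NEW_STATE, CMD_TIMER_STOP, CMD_REPLY, CMD_EVENT_ULP };

struct cmd     { enum verb verb; void *arg; };
struct cmd_seq { struct cmd cmds[16]; size_t n; };

static int cmd_add(struct cmd_seq *seq, enum verb verb, void *arg)
{
	if (seq->n >= 16)
		return -1;	/* full: the caller reports a NOMEM-style failure */
	seq->cmds[seq->n].verb = verb;
	seq->cmds[seq->n].arg  = arg;
	seq->n++;
	return 0;
}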
2329 | |||
2330 | /* | ||
2331 |  * sctp_sf_do_9_2_shutdown	| ||
2332 | * | ||
2333 | * Section: 9.2 | ||
2334 | * Upon the reception of the SHUTDOWN, the peer endpoint shall | ||
2335 | * - enter the SHUTDOWN-RECEIVED state, | ||
2336 | * | ||
2337 | * - stop accepting new data from its SCTP user | ||
2338 | * | ||
2339 | * - verify, by checking the Cumulative TSN Ack field of the chunk, | ||
2340 | * that all its outstanding DATA chunks have been received by the | ||
2341 | * SHUTDOWN sender. | ||
2342 | * | ||
2343 |  * Once an endpoint has reached the SHUTDOWN-RECEIVED state it MUST NOT	| ||
2344 | * send a SHUTDOWN in response to a ULP request. And should discard | ||
2345 | * subsequent SHUTDOWN chunks. | ||
2346 | * | ||
2347 | * If there are still outstanding DATA chunks left, the SHUTDOWN | ||
2348 | * receiver shall continue to follow normal data transmission | ||
2349 | * procedures defined in Section 6 until all outstanding DATA chunks | ||
2350 | * are acknowledged; however, the SHUTDOWN receiver MUST NOT accept | ||
2351 | * new data from its SCTP user. | ||
2352 | * | ||
2353 | * Verification Tag: 8.5 Verification Tag [Normal verification] | ||
2354 | * | ||
2355 | * Inputs | ||
2356 | * (endpoint, asoc, chunk) | ||
2357 | * | ||
2358 | * Outputs | ||
2359 | * (asoc, reply_msg, msg_up, timers, counters) | ||
2360 | * | ||
2361 | * The return value is the disposition of the chunk. | ||
2362 | */ | ||
2363 | sctp_disposition_t sctp_sf_do_9_2_shutdown(const struct sctp_endpoint *ep, | ||
2364 | const struct sctp_association *asoc, | ||
2365 | const sctp_subtype_t type, | ||
2366 | void *arg, | ||
2367 | sctp_cmd_seq_t *commands) | ||
2368 | { | ||
2369 | struct sctp_chunk *chunk = arg; | ||
2370 | sctp_shutdownhdr_t *sdh; | ||
2371 | sctp_disposition_t disposition; | ||
2372 | struct sctp_ulpevent *ev; | ||
2373 | |||
2374 | if (!sctp_vtag_verify(chunk, asoc)) | ||
2375 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
2376 | |||
2377 | /* Make sure that the SHUTDOWN chunk has a valid length. */ | ||
2378 | if (!sctp_chunk_length_valid(chunk, | ||
2379 | sizeof(struct sctp_shutdown_chunk_t))) | ||
2380 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
2381 | commands); | ||
2382 | |||
2383 | /* Convert the elaborate header. */ | ||
2384 | sdh = (sctp_shutdownhdr_t *)chunk->skb->data; | ||
2385 | skb_pull(chunk->skb, sizeof(sctp_shutdownhdr_t)); | ||
2386 | chunk->subh.shutdown_hdr = sdh; | ||
2387 | |||
2388 | /* Upon the reception of the SHUTDOWN, the peer endpoint shall | ||
2389 | * - enter the SHUTDOWN-RECEIVED state, | ||
2390 | * - stop accepting new data from its SCTP user | ||
2391 | * | ||
2392 | * [This is implicit in the new state.] | ||
2393 | */ | ||
2394 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
2395 | SCTP_STATE(SCTP_STATE_SHUTDOWN_RECEIVED)); | ||
2396 | disposition = SCTP_DISPOSITION_CONSUME; | ||
2397 | |||
2398 | if (sctp_outq_is_empty(&asoc->outqueue)) { | ||
2399 | disposition = sctp_sf_do_9_2_shutdown_ack(ep, asoc, type, | ||
2400 | arg, commands); | ||
2401 | } | ||
2402 | |||
2403 | if (SCTP_DISPOSITION_NOMEM == disposition) | ||
2404 | goto out; | ||
2405 | |||
2406 | /* - verify, by checking the Cumulative TSN Ack field of the | ||
2407 | * chunk, that all its outstanding DATA chunks have been | ||
2408 | * received by the SHUTDOWN sender. | ||
2409 | */ | ||
2410 | sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_CTSN, | ||
2411 | SCTP_U32(chunk->subh.shutdown_hdr->cum_tsn_ack)); | ||
2412 | |||
2413 | /* API 5.3.1.5 SCTP_SHUTDOWN_EVENT | ||
2414 | * When a peer sends a SHUTDOWN, SCTP delivers this notification to | ||
2415 | * inform the application that it should cease sending data. | ||
2416 | */ | ||
2417 | ev = sctp_ulpevent_make_shutdown_event(asoc, 0, GFP_ATOMIC); | ||
2418 | if (!ev) { | ||
2419 | disposition = SCTP_DISPOSITION_NOMEM; | ||
2420 | goto out; | ||
2421 | } | ||
2422 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); | ||
2423 | |||
2424 | out: | ||
2425 | return disposition; | ||
2426 | } | ||
2427 | |||
2428 | /* RFC 2960 9.2 | ||
2429 | * If an endpoint is in SHUTDOWN-ACK-SENT state and receives an INIT chunk | ||
2430 | * (e.g., if the SHUTDOWN COMPLETE was lost) with source and destination | ||
2431 | * transport addresses (either in the IP addresses or in the INIT chunk) | ||
2432 | * that belong to this association, it should discard the INIT chunk and | ||
2433 | * retransmit the SHUTDOWN ACK chunk. | ||
2434 | */ | ||
2435 | sctp_disposition_t sctp_sf_do_9_2_reshutack(const struct sctp_endpoint *ep, | ||
2436 | const struct sctp_association *asoc, | ||
2437 | const sctp_subtype_t type, | ||
2438 | void *arg, | ||
2439 | sctp_cmd_seq_t *commands) | ||
2440 | { | ||
2441 | struct sctp_chunk *chunk = (struct sctp_chunk *) arg; | ||
2442 | struct sctp_chunk *reply; | ||
2443 | |||
2444 | /* Since we are not going to really process this INIT, there | ||
2445 | 	 * is no point in verifying chunk boundaries. Just generate	| ||
2446 | * the SHUTDOWN ACK. | ||
2447 | */ | ||
2448 | reply = sctp_make_shutdown_ack(asoc, chunk); | ||
2449 | if (NULL == reply) | ||
2450 | goto nomem; | ||
2451 | |||
2452 | /* Set the transport for the SHUTDOWN ACK chunk and the timeout for | ||
2453 | * the T2-SHUTDOWN timer. | ||
2454 | */ | ||
2455 | sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply)); | ||
2456 | |||
2457 | /* and restart the T2-shutdown timer. */ | ||
2458 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, | ||
2459 | SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); | ||
2460 | |||
2461 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); | ||
2462 | |||
2463 | return SCTP_DISPOSITION_CONSUME; | ||
2464 | nomem: | ||
2465 | return SCTP_DISPOSITION_NOMEM; | ||
2466 | } | ||
2467 | |||
2468 | /* | ||
2469 | * sctp_sf_do_ecn_cwr | ||
2470 | * | ||
2471 | * Section: Appendix A: Explicit Congestion Notification | ||
2472 | * | ||
2473 | * CWR: | ||
2474 | * | ||
2475 | * RFC 2481 details a specific bit for a sender to send in the header of | ||
2476 | * its next outbound TCP segment to indicate to its peer that it has | ||
2477 | * reduced its congestion window. This is termed the CWR bit. For | ||
2478 | * SCTP the same indication is made by including the CWR chunk. | ||
2479 | * This chunk contains one data element, i.e. the TSN number that | ||
2480 | * was sent in the ECNE chunk. This element represents the lowest | ||
2481 | * TSN number in the datagram that was originally marked with the | ||
2482 | * CE bit. | ||
2483 | * | ||
2484 | * Verification Tag: 8.5 Verification Tag [Normal verification] | ||
2485 | * Inputs | ||
2486 | * (endpoint, asoc, chunk) | ||
2487 | * | ||
2488 | * Outputs | ||
2489 | * (asoc, reply_msg, msg_up, timers, counters) | ||
2490 | * | ||
2491 | * The return value is the disposition of the chunk. | ||
2492 | */ | ||
2493 | sctp_disposition_t sctp_sf_do_ecn_cwr(const struct sctp_endpoint *ep, | ||
2494 | const struct sctp_association *asoc, | ||
2495 | const sctp_subtype_t type, | ||
2496 | void *arg, | ||
2497 | sctp_cmd_seq_t *commands) | ||
2498 | { | ||
2499 | sctp_cwrhdr_t *cwr; | ||
2500 | struct sctp_chunk *chunk = arg; | ||
2501 | |||
2502 | if (!sctp_vtag_verify(chunk, asoc)) | ||
2503 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
2504 | |||
2505 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t))) | ||
2506 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
2507 | commands); | ||
2508 | |||
2509 | cwr = (sctp_cwrhdr_t *) chunk->skb->data; | ||
2510 | skb_pull(chunk->skb, sizeof(sctp_cwrhdr_t)); | ||
2511 | |||
2512 | cwr->lowest_tsn = ntohl(cwr->lowest_tsn); | ||
2513 | |||
2514 | /* Does this CWR ack the last sent congestion notification? */ | ||
2515 | if (TSN_lte(asoc->last_ecne_tsn, cwr->lowest_tsn)) { | ||
2516 | /* Stop sending ECNE. */ | ||
2517 | sctp_add_cmd_sf(commands, | ||
2518 | SCTP_CMD_ECN_CWR, | ||
2519 | SCTP_U32(cwr->lowest_tsn)); | ||
2520 | } | ||
2521 | return SCTP_DISPOSITION_CONSUME; | ||
2522 | } | ||
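/* Editorial sketch, not part of the original file: TSN_lte() used above is
 * serial arithmetic over the 32-bit TSN space, so the comparison keeps working
 * across wrap-around.  A minimal equivalent, assuming the usual "sign bit of
 * the unsigned difference" formulation:
 */
#include <stdint.h>

static int tsn_lte(uint32_t s, uint32_t t)
{
	/* s <= t when s == t or when (s - t) wraps into the upper half. */
	return (s == t) || ((s - t) & 0x80000000u);
}
/* A CWR whose lowest_tsn is at or past last_ecne_tsn therefore covers the
 * outstanding ECNE, and the association can stop echoing congestion.
 */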
2523 | |||
2524 | /* | ||
2525 | * sctp_sf_do_ecne | ||
2526 | * | ||
2527 | * Section: Appendix A: Explicit Congestion Notification | ||
2528 | * | ||
2529 | * ECN-Echo | ||
2530 | * | ||
2531 | * RFC 2481 details a specific bit for a receiver to send back in its | ||
2532 | * TCP acknowledgements to notify the sender of the Congestion | ||
2533 | * Experienced (CE) bit having arrived from the network. For SCTP this | ||
2534 | * same indication is made by including the ECNE chunk. This chunk | ||
2535 | * contains one data element, i.e. the lowest TSN associated with the IP | ||
2536 | * datagram marked with the CE bit..... | ||
2537 | * | ||
2538 | * Verification Tag: 8.5 Verification Tag [Normal verification] | ||
2539 | * Inputs | ||
2540 | * (endpoint, asoc, chunk) | ||
2541 | * | ||
2542 | * Outputs | ||
2543 | * (asoc, reply_msg, msg_up, timers, counters) | ||
2544 | * | ||
2545 | * The return value is the disposition of the chunk. | ||
2546 | */ | ||
2547 | sctp_disposition_t sctp_sf_do_ecne(const struct sctp_endpoint *ep, | ||
2548 | const struct sctp_association *asoc, | ||
2549 | const sctp_subtype_t type, | ||
2550 | void *arg, | ||
2551 | sctp_cmd_seq_t *commands) | ||
2552 | { | ||
2553 | sctp_ecnehdr_t *ecne; | ||
2554 | struct sctp_chunk *chunk = arg; | ||
2555 | |||
2556 | if (!sctp_vtag_verify(chunk, asoc)) | ||
2557 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
2558 | |||
2559 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_ecne_chunk_t))) | ||
2560 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
2561 | commands); | ||
2562 | |||
2563 | ecne = (sctp_ecnehdr_t *) chunk->skb->data; | ||
2564 | skb_pull(chunk->skb, sizeof(sctp_ecnehdr_t)); | ||
2565 | |||
2566 | /* If this is a newer ECNE than the last CWR packet we sent out */ | ||
2567 | sctp_add_cmd_sf(commands, SCTP_CMD_ECN_ECNE, | ||
2568 | SCTP_U32(ntohl(ecne->lowest_tsn))); | ||
2569 | |||
2570 | return SCTP_DISPOSITION_CONSUME; | ||
2571 | } | ||
2572 | |||
2573 | /* | ||
2574 | * Section: 6.2 Acknowledgement on Reception of DATA Chunks | ||
2575 | * | ||
2576 | * The SCTP endpoint MUST always acknowledge the reception of each valid | ||
2577 | * DATA chunk. | ||
2578 | * | ||
2579 | * The guidelines on delayed acknowledgement algorithm specified in | ||
2580 | * Section 4.2 of [RFC2581] SHOULD be followed. Specifically, an | ||
2581 | * acknowledgement SHOULD be generated for at least every second packet | ||
2582 | * (not every second DATA chunk) received, and SHOULD be generated within | ||
2583 | * 200 ms of the arrival of any unacknowledged DATA chunk. In some | ||
2584 | * situations it may be beneficial for an SCTP transmitter to be more | ||
2585 | * conservative than the algorithms detailed in this document allow. | ||
2586 | * However, an SCTP transmitter MUST NOT be more aggressive than the | ||
2587 | * following algorithms allow. | ||
2588 | * | ||
2589 | * A SCTP receiver MUST NOT generate more than one SACK for every | ||
2590 | * incoming packet, other than to update the offered window as the | ||
2591 | * receiving application consumes new data. | ||
2592 | * | ||
2593 | * Verification Tag: 8.5 Verification Tag [Normal verification] | ||
2594 | * | ||
2595 | * Inputs | ||
2596 | * (endpoint, asoc, chunk) | ||
2597 | * | ||
2598 | * Outputs | ||
2599 | * (asoc, reply_msg, msg_up, timers, counters) | ||
2600 | * | ||
2601 | * The return value is the disposition of the chunk. | ||
2602 | */ | ||
2603 | sctp_disposition_t sctp_sf_eat_data_6_2(const struct sctp_endpoint *ep, | ||
2604 | const struct sctp_association *asoc, | ||
2605 | const sctp_subtype_t type, | ||
2606 | void *arg, | ||
2607 | sctp_cmd_seq_t *commands) | ||
2608 | { | ||
2609 | struct sctp_chunk *chunk = arg; | ||
2610 | int error; | ||
2611 | |||
2612 | if (!sctp_vtag_verify(chunk, asoc)) { | ||
2613 | sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, | ||
2614 | SCTP_NULL()); | ||
2615 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
2616 | } | ||
2617 | |||
2618 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t))) | ||
2619 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
2620 | commands); | ||
2621 | |||
2622 | 	error = sctp_eat_data(asoc, chunk, commands);	| ||
2623 | switch (error) { | ||
2624 | case SCTP_IERROR_NO_ERROR: | ||
2625 | break; | ||
2626 | case SCTP_IERROR_HIGH_TSN: | ||
2627 | case SCTP_IERROR_BAD_STREAM: | ||
2628 | goto discard_noforce; | ||
2629 | case SCTP_IERROR_DUP_TSN: | ||
2630 | case SCTP_IERROR_IGNORE_TSN: | ||
2631 | goto discard_force; | ||
2632 | case SCTP_IERROR_NO_DATA: | ||
2633 | goto consume; | ||
2634 | default: | ||
2635 | BUG(); | ||
2636 | } | ||
2637 | |||
2638 | if (asoc->autoclose) { | ||
2639 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, | ||
2640 | SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); | ||
2641 | } | ||
2642 | |||
2643 | /* If this is the last chunk in a packet, we need to count it | ||
2644 | * toward sack generation. Note that we need to SACK every | ||
2645 | * OTHER packet containing data chunks, EVEN IF WE DISCARD | ||
2646 | * THEM. We elect to NOT generate SACK's if the chunk fails | ||
2647 | * the verification tag test. | ||
2648 | * | ||
2649 | * RFC 2960 6.2 Acknowledgement on Reception of DATA Chunks | ||
2650 | * | ||
2651 | * The SCTP endpoint MUST always acknowledge the reception of | ||
2652 | * each valid DATA chunk. | ||
2653 | * | ||
2654 | * The guidelines on delayed acknowledgement algorithm | ||
2655 | * specified in Section 4.2 of [RFC2581] SHOULD be followed. | ||
2656 | * Specifically, an acknowledgement SHOULD be generated for at | ||
2657 | * least every second packet (not every second DATA chunk) | ||
2658 | * received, and SHOULD be generated within 200 ms of the | ||
2659 | * arrival of any unacknowledged DATA chunk. In some | ||
2660 | * situations it may be beneficial for an SCTP transmitter to | ||
2661 | * be more conservative than the algorithms detailed in this | ||
2662 | * document allow. However, an SCTP transmitter MUST NOT be | ||
2663 | * more aggressive than the following algorithms allow. | ||
2664 | */ | ||
2665 | if (chunk->end_of_packet) { | ||
2666 | sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE()); | ||
2667 | |||
2668 | /* Start the SACK timer. */ | ||
2669 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, | ||
2670 | SCTP_TO(SCTP_EVENT_TIMEOUT_SACK)); | ||
2671 | } | ||
2672 | |||
2673 | return SCTP_DISPOSITION_CONSUME; | ||
2674 | |||
2675 | discard_force: | ||
2676 | /* RFC 2960 6.2 Acknowledgement on Reception of DATA Chunks | ||
2677 | * | ||
2678 | * When a packet arrives with duplicate DATA chunk(s) and with | ||
2679 | * no new DATA chunk(s), the endpoint MUST immediately send a | ||
2680 | * SACK with no delay. If a packet arrives with duplicate | ||
2681 | * DATA chunk(s) bundled with new DATA chunks, the endpoint | ||
2682 | * MAY immediately send a SACK. Normally receipt of duplicate | ||
2683 | * DATA chunks will occur when the original SACK chunk was lost | ||
2684 | * and the peer's RTO has expired. The duplicate TSN number(s) | ||
2685 | * SHOULD be reported in the SACK as duplicate. | ||
2686 | */ | ||
2687 | 	/* In our case, we split the MAY SACK advice up based on whether or not	| ||
2688 | 	 * the last chunk is a duplicate.	| ||
2689 | */ | ||
2690 | if (chunk->end_of_packet) | ||
2691 | sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE()); | ||
2692 | return SCTP_DISPOSITION_DISCARD; | ||
2693 | |||
2694 | discard_noforce: | ||
2695 | if (chunk->end_of_packet) { | ||
2696 | sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE()); | ||
2697 | |||
2698 | /* Start the SACK timer. */ | ||
2699 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, | ||
2700 | SCTP_TO(SCTP_EVENT_TIMEOUT_SACK)); | ||
2701 | } | ||
2702 | return SCTP_DISPOSITION_DISCARD; | ||
2703 | consume: | ||
2704 | return SCTP_DISPOSITION_CONSUME; | ||
2705 | |||
2706 | } | ||
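The SACK policy applied above (a delayed SACK for new data at the end of a packet, a forced SACK when the packet ended on duplicates) can be reduced to a small standalone decision helper. This is an illustrative sketch only; the enum and function names are invented here and do not exist in the tree:

#include <stdbool.h>

enum sack_action { SACK_NONE, SACK_DELAYED, SACK_IMMEDIATE };

/* Decide how to acknowledge once the last chunk of a packet has been seen.
 * New data -> delayed SACK (SACK timer runs); a packet that ended on a
 * duplicate/ignored TSN -> immediate SACK, per RFC 2960 6.2.
 */
static enum sack_action sack_decision(bool end_of_packet, bool duplicate_only)
{
	if (!end_of_packet)
		return SACK_NONE;	/* wait until the packet is fully parsed */
	return duplicate_only ? SACK_IMMEDIATE : SACK_DELAYED;
}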
2707 | |||
2708 | /* | ||
2709 | * sctp_sf_eat_data_fast_4_4 | ||
2710 | * | ||
2711 | * Section: 4 (4) | ||
2712 | * (4) In SHUTDOWN-SENT state the endpoint MUST acknowledge any received | ||
2713 | * DATA chunks without delay. | ||
2714 | * | ||
2715 | * Verification Tag: 8.5 Verification Tag [Normal verification] | ||
2716 | * Inputs | ||
2717 | * (endpoint, asoc, chunk) | ||
2718 | * | ||
2719 | * Outputs | ||
2720 | * (asoc, reply_msg, msg_up, timers, counters) | ||
2721 | * | ||
2722 | * The return value is the disposition of the chunk. | ||
2723 | */ | ||
2724 | sctp_disposition_t sctp_sf_eat_data_fast_4_4(const struct sctp_endpoint *ep, | ||
2725 | const struct sctp_association *asoc, | ||
2726 | const sctp_subtype_t type, | ||
2727 | void *arg, | ||
2728 | sctp_cmd_seq_t *commands) | ||
2729 | { | ||
2730 | struct sctp_chunk *chunk = arg; | ||
2731 | int error; | ||
2732 | |||
2733 | if (!sctp_vtag_verify(chunk, asoc)) { | ||
2734 | sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, | ||
2735 | SCTP_NULL()); | ||
2736 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
2737 | } | ||
2738 | |||
2739 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_data_chunk_t))) | ||
2740 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
2741 | commands); | ||
2742 | |||
2743 | error = sctp_eat_data(asoc, chunk, commands); | ||
2744 | switch (error) { | ||
2745 | case SCTP_IERROR_NO_ERROR: | ||
2746 | case SCTP_IERROR_HIGH_TSN: | ||
2747 | case SCTP_IERROR_DUP_TSN: | ||
2748 | case SCTP_IERROR_IGNORE_TSN: | ||
2749 | case SCTP_IERROR_BAD_STREAM: | ||
2750 | break; | ||
2751 | case SCTP_IERROR_NO_DATA: | ||
2752 | goto consume; | ||
2753 | default: | ||
2754 | BUG(); | ||
2755 | } | ||
2756 | |||
2757 | /* Go ahead and force a SACK, since we are shutting down. */ | ||
2758 | |||
2759 | /* Implementor's Guide. | ||
2760 | * | ||
2761 | * While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately | ||
2762 | * respond to each received packet containing one or more DATA chunk(s) | ||
2763 | * with a SACK, a SHUTDOWN chunk, and restart the T2-shutdown timer. | ||
2764 | */ | ||
2765 | if (chunk->end_of_packet) { | ||
2766 | /* We must delay the chunk creation since the cumulative | ||
2767 | * TSN has not been updated yet. | ||
2768 | */ | ||
2769 | sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SHUTDOWN, SCTP_NULL()); | ||
2770 | sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE()); | ||
2771 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, | ||
2772 | SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); | ||
2773 | } | ||
2774 | |||
2775 | consume: | ||
2776 | return SCTP_DISPOSITION_CONSUME; | ||
2777 | } | ||
2778 | |||
2779 | /* | ||
2780 | * Section: 6.2 Processing a Received SACK | ||
2781 | * D) Any time a SACK arrives, the endpoint performs the following: | ||
2782 | * | ||
2783 | * i) If Cumulative TSN Ack is less than the Cumulative TSN Ack Point, | ||
2784 | * then drop the SACK. Since Cumulative TSN Ack is monotonically | ||
2785 | * increasing, a SACK whose Cumulative TSN Ack is less than the | ||
2786 | * Cumulative TSN Ack Point indicates an out-of-order SACK. | ||
2787 | * | ||
2788 | * ii) Set rwnd equal to the newly received a_rwnd minus the number | ||
2789 | * of bytes still outstanding after processing the Cumulative TSN Ack | ||
2790 | * and the Gap Ack Blocks. | ||
2791 | * | ||
2792 | * iii) If the SACK is missing a TSN that was previously | ||
2793 | * acknowledged via a Gap Ack Block (e.g., the data receiver | ||
2794 | * reneged on the data), then mark the corresponding DATA chunk | ||
2795 | * as available for retransmit: Mark it as missing for fast | ||
2796 | * retransmit as described in Section 7.2.4 and if no retransmit | ||
2797 | * timer is running for the destination address to which the DATA | ||
2798 | * chunk was originally transmitted, then T3-rtx is started for | ||
2799 | * that destination address. | ||
2800 | * | ||
2801 | * Verification Tag: 8.5 Verification Tag [Normal verification] | ||
2802 | * | ||
2803 | * Inputs | ||
2804 | * (endpoint, asoc, chunk) | ||
2805 | * | ||
2806 | * Outputs | ||
2807 | * (asoc, reply_msg, msg_up, timers, counters) | ||
2808 | * | ||
2809 | * The return value is the disposition of the chunk. | ||
2810 | */ | ||
2811 | sctp_disposition_t sctp_sf_eat_sack_6_2(const struct sctp_endpoint *ep, | ||
2812 | const struct sctp_association *asoc, | ||
2813 | const sctp_subtype_t type, | ||
2814 | void *arg, | ||
2815 | sctp_cmd_seq_t *commands) | ||
2816 | { | ||
2817 | struct sctp_chunk *chunk = arg; | ||
2818 | sctp_sackhdr_t *sackh; | ||
2819 | __u32 ctsn; | ||
2820 | |||
2821 | if (!sctp_vtag_verify(chunk, asoc)) | ||
2822 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
2823 | |||
2824 | /* Make sure that the SACK chunk has a valid length. */ | ||
2825 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_sack_chunk_t))) | ||
2826 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
2827 | commands); | ||
2828 | |||
2829 | /* Pull the SACK chunk from the data buffer */ | ||
2830 | sackh = sctp_sm_pull_sack(chunk); | ||
2831 | /* Was this a bogus SACK? */ | ||
2832 | if (!sackh) | ||
2833 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
2834 | chunk->subh.sack_hdr = sackh; | ||
2835 | ctsn = ntohl(sackh->cum_tsn_ack); | ||
2836 | |||
2837 | /* i) If Cumulative TSN Ack is less than the Cumulative TSN | ||
2838 | * Ack Point, then drop the SACK. Since Cumulative TSN | ||
2839 | * Ack is monotonically increasing, a SACK whose | ||
2840 | * Cumulative TSN Ack is less than the Cumulative TSN Ack | ||
2841 | * Point indicates an out-of-order SACK. | ||
2842 | */ | ||
2843 | if (TSN_lt(ctsn, asoc->ctsn_ack_point)) { | ||
2844 | SCTP_DEBUG_PRINTK("ctsn %x\n", ctsn); | ||
2845 | SCTP_DEBUG_PRINTK("ctsn_ack_point %x\n", asoc->ctsn_ack_point); | ||
2846 | return SCTP_DISPOSITION_DISCARD; | ||
2847 | } | ||
2848 | |||
2849 | /* Return this SACK for further processing. */ | ||
2850 | sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, SCTP_SACKH(sackh)); | ||
2851 | |||
2852 | /* Note: We do the rest of the work on the PROCESS_SACK | ||
2853 | * side effect. | ||
2854 | */ | ||
2855 | return SCTP_DISPOSITION_CONSUME; | ||
2856 | } | ||
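The out-of-order check above relies on TSN_lt(), i.e. serial arithmetic over the 32-bit TSN space. A self-contained sketch of that comparison, assuming the usual mod-2^32 interpretation (the helper name here is ours, not the kernel's):

#include <stdint.h>
#include <stdio.h>

/* a < b in serial arithmetic iff (a - b), viewed as signed 32-bit, is negative */
static int tsn_lt(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	/* A stale SACK whose cum_tsn_ack sits behind the ack point is dropped,
	 * even across a wraparound of the TSN space. */
	uint32_t ctsn = 0xfffffff0u, ack_point = 0x00000010u;
	printf("out-of-order SACK? %d\n", tsn_lt(ctsn, ack_point));	/* prints 1 */
	return 0;
}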
2857 | |||
2858 | /* | ||
2859 | * Generate an ABORT in response to a packet. | ||
2860 | * | ||
2861 | * Section: 8.4 Handle "Out of the blue" Packets | ||
2862 | * | ||
2863 | * 8) The receiver should respond to the sender of the OOTB packet | ||
2864 | * with an ABORT. When sending the ABORT, the receiver of the | ||
2865 | * OOTB packet MUST fill in the Verification Tag field of the | ||
2866 | * outbound packet with the value found in the Verification Tag | ||
2867 | * field of the OOTB packet and set the T-bit in the Chunk Flags | ||
2868 | * to indicate that no TCB was found. After sending this ABORT, | ||
2869 | * the receiver of the OOTB packet shall discard the OOTB packet | ||
2870 | * and take no further action. | ||
2871 | * | ||
2872 | * Verification Tag: | ||
2873 | * | ||
2874 | * The return value is the disposition of the chunk. | ||
2875 | */ | ||
2876 | sctp_disposition_t sctp_sf_tabort_8_4_8(const struct sctp_endpoint *ep, | ||
2877 | const struct sctp_association *asoc, | ||
2878 | const sctp_subtype_t type, | ||
2879 | void *arg, | ||
2880 | sctp_cmd_seq_t *commands) | ||
2881 | { | ||
2882 | struct sctp_packet *packet = NULL; | ||
2883 | struct sctp_chunk *chunk = arg; | ||
2884 | struct sctp_chunk *abort; | ||
2885 | |||
2886 | packet = sctp_ootb_pkt_new(asoc, chunk); | ||
2887 | |||
2888 | if (packet) { | ||
2889 | /* Make an ABORT. The T bit will be set if the asoc | ||
2890 | * is NULL. | ||
2891 | */ | ||
2892 | abort = sctp_make_abort(asoc, chunk, 0); | ||
2893 | if (!abort) { | ||
2894 | sctp_ootb_pkt_free(packet); | ||
2895 | return SCTP_DISPOSITION_NOMEM; | ||
2896 | } | ||
2897 | |||
2898 | /* Associate the skb with its owning sock for accounting. */ | ||
2899 | abort->skb->sk = ep->base.sk; | ||
2900 | |||
2901 | sctp_packet_append_chunk(packet, abort); | ||
2902 | |||
2903 | sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, | ||
2904 | SCTP_PACKET(packet)); | ||
2905 | |||
2906 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); | ||
2907 | |||
2908 | return SCTP_DISPOSITION_CONSUME; | ||
2909 | } | ||
2910 | |||
2911 | return SCTP_DISPOSITION_NOMEM; | ||
2912 | } | ||
2913 | |||
2914 | /* | ||
2915 | * Received an ERROR chunk from peer. Generate SCTP_REMOTE_ERROR | ||
2916 | * event as ULP notification for each cause included in the chunk. | ||
2917 | * | ||
2918 | * API 5.3.1.3 - SCTP_REMOTE_ERROR | ||
2919 | * | ||
2920 | * The return value is the disposition of the chunk. | ||
2921 | */ | ||
2922 | sctp_disposition_t sctp_sf_operr_notify(const struct sctp_endpoint *ep, | ||
2923 | const struct sctp_association *asoc, | ||
2924 | const sctp_subtype_t type, | ||
2925 | void *arg, | ||
2926 | sctp_cmd_seq_t *commands) | ||
2927 | { | ||
2928 | struct sctp_chunk *chunk = arg; | ||
2929 | struct sctp_ulpevent *ev; | ||
2930 | |||
2931 | if (!sctp_vtag_verify(chunk, asoc)) | ||
2932 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
2933 | |||
2934 | /* Make sure that the ERROR chunk has a valid length. */ | ||
2935 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_operr_chunk_t))) | ||
2936 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
2937 | commands); | ||
2938 | |||
2939 | while (chunk->chunk_end > chunk->skb->data) { | ||
2940 | ev = sctp_ulpevent_make_remote_error(asoc, chunk, 0, | ||
2941 | GFP_ATOMIC); | ||
2942 | if (!ev) | ||
2943 | goto nomem; | ||
2944 | |||
2945 | if (!sctp_add_cmd(commands, SCTP_CMD_EVENT_ULP, | ||
2946 | SCTP_ULPEVENT(ev))) { | ||
2947 | sctp_ulpevent_free(ev); | ||
2948 | goto nomem; | ||
2949 | } | ||
2950 | |||
2951 | sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_OPERR, | ||
2952 | SCTP_CHUNK(chunk)); | ||
2953 | } | ||
2954 | return SCTP_DISPOSITION_CONSUME; | ||
2955 | |||
2956 | nomem: | ||
2957 | return SCTP_DISPOSITION_NOMEM; | ||
2958 | } | ||
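The loop above walks the error causes packed inside the ERROR chunk. A standalone sketch of that TLV walk, re-declaring the cause header from RFC 2960 3.3.10 for illustration (the helper itself is hypothetical):

#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>		/* ntohs */

struct errhdr {
	uint16_t cause;		/* cause code, network byte order */
	uint16_t length;	/* includes this header, in bytes */
};

static size_t count_error_causes(const uint8_t *p, const uint8_t *end)
{
	size_t n = 0;

	while (p + sizeof(struct errhdr) <= end) {
		const struct errhdr *eh = (const struct errhdr *)p;
		size_t len = ntohs(eh->length);

		if (len < sizeof(struct errhdr))
			break;				/* malformed cause: stop */
		n++;
		p += (len + 3) & ~(size_t)3;		/* causes are 4-byte padded */
	}
	return n;
}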
2959 | |||
2960 | /* | ||
2961 | * Process an inbound SHUTDOWN ACK. | ||
2962 | * | ||
2963 | * From Section 9.2: | ||
2964 | * Upon the receipt of the SHUTDOWN ACK, the SHUTDOWN sender shall | ||
2965 | * stop the T2-shutdown timer, send a SHUTDOWN COMPLETE chunk to its | ||
2966 | * peer, and remove all record of the association. | ||
2967 | * | ||
2968 | * The return value is the disposition. | ||
2969 | */ | ||
2970 | sctp_disposition_t sctp_sf_do_9_2_final(const struct sctp_endpoint *ep, | ||
2971 | const struct sctp_association *asoc, | ||
2972 | const sctp_subtype_t type, | ||
2973 | void *arg, | ||
2974 | sctp_cmd_seq_t *commands) | ||
2975 | { | ||
2976 | struct sctp_chunk *chunk = arg; | ||
2977 | struct sctp_chunk *reply; | ||
2978 | struct sctp_ulpevent *ev; | ||
2979 | |||
2980 | if (!sctp_vtag_verify(chunk, asoc)) | ||
2981 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
2982 | |||
2983 | /* Make sure that the SHUTDOWN_ACK chunk has a valid length. */ | ||
2984 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) | ||
2985 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
2986 | commands); | ||
2987 | |||
2988 | /* 10.2 H) SHUTDOWN COMPLETE notification | ||
2989 | * | ||
2990 | * When SCTP completes the shutdown procedures (section 9.2) this | ||
2991 | * notification is passed to the upper layer. | ||
2992 | */ | ||
2993 | ev = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_SHUTDOWN_COMP, | ||
2994 | 0, 0, 0, GFP_ATOMIC); | ||
2995 | if (!ev) | ||
2996 | goto nomem; | ||
2997 | |||
2998 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, SCTP_ULPEVENT(ev)); | ||
2999 | |||
3000 | /* Upon the receipt of the SHUTDOWN ACK, the SHUTDOWN sender shall | ||
3001 | * stop the T2-shutdown timer, | ||
3002 | */ | ||
3003 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | ||
3004 | SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); | ||
3005 | |||
3006 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | ||
3007 | SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); | ||
3008 | |||
3009 | /* ...send a SHUTDOWN COMPLETE chunk to its peer, */ | ||
3010 | reply = sctp_make_shutdown_complete(asoc, chunk); | ||
3011 | if (!reply) | ||
3012 | goto nomem; | ||
3013 | |||
3014 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
3015 | SCTP_STATE(SCTP_STATE_CLOSED)); | ||
3016 | SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS); | ||
3017 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | ||
3018 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); | ||
3019 | |||
3020 | /* ...and remove all record of the association. */ | ||
3021 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); | ||
3022 | return SCTP_DISPOSITION_DELETE_TCB; | ||
3023 | |||
3024 | nomem: | ||
3025 | return SCTP_DISPOSITION_NOMEM; | ||
3026 | } | ||
3027 | |||
3028 | /* | ||
3029 | * RFC 2960, 8.4 - Handle "Out of the blue" Packets | ||
3030 | * 5) If the packet contains a SHUTDOWN ACK chunk, the receiver should | ||
3031 | * respond to the sender of the OOTB packet with a SHUTDOWN COMPLETE. | ||
3032 | * When sending the SHUTDOWN COMPLETE, the receiver of the OOTB | ||
3033 | * packet must fill in the Verification Tag field of the outbound | ||
3034 | * packet with the Verification Tag received in the SHUTDOWN ACK and | ||
3035 | * set the T-bit in the Chunk Flags to indicate that no TCB was | ||
3036 | * found. Otherwise, | ||
3037 | * | ||
3038 | * 8) The receiver should respond to the sender of the OOTB packet with | ||
3039 | * an ABORT. When sending the ABORT, the receiver of the OOTB packet | ||
3040 | * MUST fill in the Verification Tag field of the outbound packet | ||
3041 | * with the value found in the Verification Tag field of the OOTB | ||
3042 | * packet and set the T-bit in the Chunk Flags to indicate that no | ||
3043 | * TCB was found. After sending this ABORT, the receiver of the OOTB | ||
3044 | * packet shall discard the OOTB packet and take no further action. | ||
3045 | */ | ||
3046 | sctp_disposition_t sctp_sf_ootb(const struct sctp_endpoint *ep, | ||
3047 | const struct sctp_association *asoc, | ||
3048 | const sctp_subtype_t type, | ||
3049 | void *arg, | ||
3050 | sctp_cmd_seq_t *commands) | ||
3051 | { | ||
3052 | struct sctp_chunk *chunk = arg; | ||
3053 | struct sk_buff *skb = chunk->skb; | ||
3054 | sctp_chunkhdr_t *ch; | ||
3055 | __u8 *ch_end; | ||
3056 | int ootb_shut_ack = 0; | ||
3057 | |||
3058 | SCTP_INC_STATS(SCTP_MIB_OUTOFBLUES); | ||
3059 | |||
3060 | ch = (sctp_chunkhdr_t *) chunk->chunk_hdr; | ||
3061 | do { | ||
3062 | /* Break out if chunk length is less than minimal. */ | ||
3063 | if (ntohs(ch->length) < sizeof(sctp_chunkhdr_t)) | ||
3064 | break; | ||
3065 | |||
3066 | ch_end = ((__u8 *)ch) + WORD_ROUND(ntohs(ch->length)); | ||
3067 | |||
3068 | if (SCTP_CID_SHUTDOWN_ACK == ch->type) | ||
3069 | ootb_shut_ack = 1; | ||
3070 | |||
3071 | /* RFC 2960, Section 3.3.7 | ||
3072 | * Moreover, under any circumstances, an endpoint that | ||
3073 | * receives an ABORT MUST NOT respond to that ABORT by | ||
3074 | * sending an ABORT of its own. | ||
3075 | */ | ||
3076 | if (SCTP_CID_ABORT == ch->type) | ||
3077 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
3078 | |||
3079 | ch = (sctp_chunkhdr_t *) ch_end; | ||
3080 | } while (ch_end < skb->tail); | ||
3081 | |||
3082 | if (ootb_shut_ack) | ||
3083 | sctp_sf_shut_8_4_5(ep, asoc, type, arg, commands); | ||
3084 | else | ||
3085 | sctp_sf_tabort_8_4_8(ep, asoc, type, arg, commands); | ||
3086 | |||
3087 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
3088 | } | ||
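Stepping from chunk to chunk above depends on WORD_ROUND(), i.e. rounding each chunk length up to a 4-byte boundary. A minimal sketch of that rounding (re-derived here, not copied from the tree):

#include <stdio.h>
#include <stddef.h>

static size_t word_round(size_t len)
{
	return (len + 3) & ~(size_t)3;	/* round up to a multiple of 4 */
}

int main(void)
{
	/* A 7-byte chunk occupies 8 bytes on the wire, so the next chunk
	 * header begins 8 bytes further on. */
	printf("%zu %zu %zu\n", word_round(4), word_round(7), word_round(16));
	return 0;
}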
3089 | |||
3090 | /* | ||
3091 | * Handle an "Out of the blue" SHUTDOWN ACK. | ||
3092 | * | ||
3093 | * Section: 8.4 5) | ||
3094 | * 5) If the packet contains a SHUTDOWN ACK chunk, the receiver should | ||
3095 | * respond to the sender of the OOTB packet with a SHUTDOWN COMPLETE. | ||
3096 | * When sending the SHUTDOWN COMPLETE, the receiver of the OOTB packet | ||
3097 | * must fill in the Verification Tag field of the outbound packet with | ||
3098 | * the Verification Tag received in the SHUTDOWN ACK and set the | ||
3099 | * T-bit in the Chunk Flags to indicate that no TCB was found. | ||
3100 | * | ||
3101 | * Inputs | ||
3102 | * (endpoint, asoc, type, arg, commands) | ||
3103 | * | ||
3104 | * Outputs | ||
3105 | * (sctp_disposition_t) | ||
3106 | * | ||
3107 | * The return value is the disposition of the chunk. | ||
3108 | */ | ||
3109 | static sctp_disposition_t sctp_sf_shut_8_4_5(const struct sctp_endpoint *ep, | ||
3110 | const struct sctp_association *asoc, | ||
3111 | const sctp_subtype_t type, | ||
3112 | void *arg, | ||
3113 | sctp_cmd_seq_t *commands) | ||
3114 | { | ||
3115 | struct sctp_packet *packet = NULL; | ||
3116 | struct sctp_chunk *chunk = arg; | ||
3117 | struct sctp_chunk *shut; | ||
3118 | |||
3119 | packet = sctp_ootb_pkt_new(asoc, chunk); | ||
3120 | |||
3121 | if (packet) { | ||
3122 | /* Make a SHUTDOWN_COMPLETE. | ||
3123 | * The T bit will be set if the asoc is NULL. | ||
3124 | */ | ||
3125 | shut = sctp_make_shutdown_complete(asoc, chunk); | ||
3126 | if (!shut) { | ||
3127 | sctp_ootb_pkt_free(packet); | ||
3128 | return SCTP_DISPOSITION_NOMEM; | ||
3129 | } | ||
3130 | |||
3131 | /* Associate the skb with its owning sock for accounting. */ | ||
3132 | shut->skb->sk = ep->base.sk; | ||
3133 | |||
3134 | sctp_packet_append_chunk(packet, shut); | ||
3135 | |||
3136 | sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, | ||
3137 | SCTP_PACKET(packet)); | ||
3138 | |||
3139 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); | ||
3140 | |||
3141 | /* If the chunk length is invalid, we don't want to process | ||
3142 | * the rest of the packet. | ||
3143 | */ | ||
3144 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_chunkhdr_t))) | ||
3145 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
3146 | |||
3147 | return SCTP_DISPOSITION_CONSUME; | ||
3148 | } | ||
3149 | |||
3150 | return SCTP_DISPOSITION_NOMEM; | ||
3151 | } | ||
3152 | |||
3153 | /* | ||
3154 | * Handle SHUTDOWN ACK in COOKIE_ECHOED or COOKIE_WAIT state. | ||
3155 | * | ||
3156 | * Verification Tag: 8.5.1 E) Rules for packet carrying a SHUTDOWN ACK | ||
3157 | * If the receiver is in COOKIE-ECHOED or COOKIE-WAIT state the | ||
3158 | * procedures in section 8.4 SHOULD be followed, in other words it | ||
3159 | * should be treated as an Out Of The Blue packet. | ||
3160 | * [This means that we do NOT check the Verification Tag on these | ||
3161 | * chunks. --piggy ] | ||
3162 | * | ||
3163 | */ | ||
3164 | sctp_disposition_t sctp_sf_do_8_5_1_E_sa(const struct sctp_endpoint *ep, | ||
3165 | const struct sctp_association *asoc, | ||
3166 | const sctp_subtype_t type, | ||
3167 | void *arg, | ||
3168 | sctp_cmd_seq_t *commands) | ||
3169 | { | ||
3170 | /* Although we do have an association in this case, it corresponds | ||
3171 | * to a restarted association. So the packet is treated as an OOTB | ||
3172 | * packet and the state function that handles OOTB SHUTDOWN_ACK is | ||
3173 | * called with a NULL association. | ||
3174 | */ | ||
3175 | return sctp_sf_shut_8_4_5(ep, NULL, type, arg, commands); | ||
3176 | } | ||
3177 | |||
3178 | /* ADDIP Section 4.2 Upon reception of an ASCONF Chunk. */ | ||
3179 | sctp_disposition_t sctp_sf_do_asconf(const struct sctp_endpoint *ep, | ||
3180 | const struct sctp_association *asoc, | ||
3181 | const sctp_subtype_t type, void *arg, | ||
3182 | sctp_cmd_seq_t *commands) | ||
3183 | { | ||
3184 | struct sctp_chunk *chunk = arg; | ||
3185 | struct sctp_chunk *asconf_ack = NULL; | ||
3186 | sctp_addiphdr_t *hdr; | ||
3187 | __u32 serial; | ||
3188 | |||
3189 | if (!sctp_vtag_verify(chunk, asoc)) { | ||
3190 | sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, | ||
3191 | SCTP_NULL()); | ||
3192 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
3193 | } | ||
3194 | |||
3195 | /* Make sure that the ASCONF ADDIP chunk has a valid length. */ | ||
3196 | if (!sctp_chunk_length_valid(chunk, sizeof(sctp_addip_chunk_t))) | ||
3197 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
3198 | commands); | ||
3199 | |||
3200 | hdr = (sctp_addiphdr_t *)chunk->skb->data; | ||
3201 | serial = ntohl(hdr->serial); | ||
3202 | |||
3203 | /* ADDIP 4.2 C1) Compare the value of the serial number to the value | ||
3204 | * the endpoint stored in a new association variable | ||
3205 | * 'Peer-Serial-Number'. | ||
3206 | */ | ||
3207 | if (serial == asoc->peer.addip_serial + 1) { | ||
3208 | /* ADDIP 4.2 C2) If the value found in the serial number is | ||
3209 | * equal to the ('Peer-Serial-Number' + 1), the endpoint MUST | ||
3210 | * do V1-V5. | ||
3211 | */ | ||
3212 | asconf_ack = sctp_process_asconf((struct sctp_association *) | ||
3213 | asoc, chunk); | ||
3214 | if (!asconf_ack) | ||
3215 | return SCTP_DISPOSITION_NOMEM; | ||
3216 | } else if (serial == asoc->peer.addip_serial) { | ||
3217 | /* ADDIP 4.2 C3) If the value found in the serial number is | ||
3218 | * equal to the value stored in the 'Peer-Serial-Number' | ||
3219 | * IMPLEMENTATION NOTE: As an optimization a receiver may wish | ||
3220 | * to save the last ASCONF-ACK for some predetermined period of | ||
3221 | * time and instead of re-processing the ASCONF (with the same | ||
3222 | * serial number) it may just re-transmit the ASCONF-ACK. | ||
3223 | */ | ||
3224 | if (asoc->addip_last_asconf_ack) | ||
3225 | asconf_ack = asoc->addip_last_asconf_ack; | ||
3226 | else | ||
3227 | return SCTP_DISPOSITION_DISCARD; | ||
3228 | } else { | ||
3229 | /* ADDIP 4.2 C4) Otherwise, the ASCONF Chunk is discarded since | ||
3230 | * it must be either a stale packet or from an attacker. | ||
3231 | */ | ||
3232 | return SCTP_DISPOSITION_DISCARD; | ||
3233 | } | ||
3234 | |||
3235 | /* ADDIP 4.2 C5) In both cases C2 and C3 the ASCONF-ACK MUST be sent | ||
3236 | * back to the source address contained in the IP header of the ASCONF | ||
3237 | * being responded to. | ||
3238 | */ | ||
3239 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(asconf_ack)); | ||
3240 | |||
3241 | return SCTP_DISPOSITION_CONSUME; | ||
3242 | } | ||
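The C1-C4 serial-number handling above amounts to a three-way decision on the received serial versus the stored 'Peer-Serial-Number'. An illustrative reduction (the enum and helper are invented for this sketch):

#include <stdint.h>

enum asconf_action {
	ASCONF_PROCESS,		/* serial == peer_serial + 1: apply V1-V5 (C2)   */
	ASCONF_RESEND_ACK,	/* serial == peer_serial: resend the cached ACK, */
				/* or discard if no ACK was cached (C3)          */
	ASCONF_DISCARD		/* anything else: stale or from an attacker (C4) */
};

static enum asconf_action asconf_decide(uint32_t serial, uint32_t peer_serial)
{
	if (serial == peer_serial + 1)
		return ASCONF_PROCESS;
	if (serial == peer_serial)
		return ASCONF_RESEND_ACK;
	return ASCONF_DISCARD;
}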
3243 | |||
3244 | /* | ||
3245 | * ADDIP Section 4.3 General rules for address manipulation | ||
3246 | * When building TLV parameters for the ASCONF Chunk that will add or | ||
3247 | * delete IP addresses the D0 to D13 rules should be applied: | ||
3248 | */ | ||
3249 | sctp_disposition_t sctp_sf_do_asconf_ack(const struct sctp_endpoint *ep, | ||
3250 | const struct sctp_association *asoc, | ||
3251 | const sctp_subtype_t type, void *arg, | ||
3252 | sctp_cmd_seq_t *commands) | ||
3253 | { | ||
3254 | struct sctp_chunk *asconf_ack = arg; | ||
3255 | struct sctp_chunk *last_asconf = asoc->addip_last_asconf; | ||
3256 | struct sctp_chunk *abort; | ||
3257 | sctp_addiphdr_t *addip_hdr; | ||
3258 | __u32 sent_serial, rcvd_serial; | ||
3259 | |||
3260 | if (!sctp_vtag_verify(asconf_ack, asoc)) { | ||
3261 | sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, | ||
3262 | SCTP_NULL()); | ||
3263 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
3264 | } | ||
3265 | |||
3266 | /* Make sure that the ADDIP chunk has a valid length. */ | ||
3267 | if (!sctp_chunk_length_valid(asconf_ack, sizeof(sctp_addip_chunk_t))) | ||
3268 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
3269 | commands); | ||
3270 | |||
3271 | addip_hdr = (sctp_addiphdr_t *)asconf_ack->skb->data; | ||
3272 | rcvd_serial = ntohl(addip_hdr->serial); | ||
3273 | |||
3274 | if (last_asconf) { | ||
3275 | addip_hdr = (sctp_addiphdr_t *)last_asconf->subh.addip_hdr; | ||
3276 | sent_serial = ntohl(addip_hdr->serial); | ||
3277 | } else { | ||
3278 | sent_serial = asoc->addip_serial - 1; | ||
3279 | } | ||
3280 | |||
3281 | /* D0) If an endpoint receives an ASCONF-ACK that is greater than or | ||
3282 | * equal to the next serial number to be used but no ASCONF chunk is | ||
3283 | * outstanding the endpoint MUST ABORT the association. Note that a | ||
3284 | * sequence number is greater than if it is no more than 2^^31-1 | ||
3285 | * larger than the current sequence number (using serial arithmetic). | ||
3286 | */ | ||
3287 | if (ADDIP_SERIAL_gte(rcvd_serial, sent_serial + 1) && | ||
3288 | !(asoc->addip_last_asconf)) { | ||
3289 | abort = sctp_make_abort(asoc, asconf_ack, | ||
3290 | sizeof(sctp_errhdr_t)); | ||
3291 | if (abort) { | ||
3292 | sctp_init_cause(abort, SCTP_ERROR_ASCONF_ACK, NULL, 0); | ||
3293 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | ||
3294 | SCTP_CHUNK(abort)); | ||
3295 | } | ||
3296 | /* We are going to ABORT, so we might as well stop | ||
3297 | * processing the rest of the chunks in the packet. | ||
3298 | */ | ||
3299 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | ||
3300 | SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); | ||
3301 | sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL()); | ||
3302 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | ||
3303 | SCTP_U32(SCTP_ERROR_ASCONF_ACK)); | ||
3304 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | ||
3305 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | ||
3306 | return SCTP_DISPOSITION_ABORT; | ||
3307 | } | ||
3308 | |||
3309 | if ((rcvd_serial == sent_serial) && asoc->addip_last_asconf) { | ||
3310 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | ||
3311 | SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); | ||
3312 | |||
3313 | if (!sctp_process_asconf_ack((struct sctp_association *)asoc, | ||
3314 | asconf_ack)) | ||
3315 | return SCTP_DISPOSITION_CONSUME; | ||
3316 | |||
3317 | abort = sctp_make_abort(asoc, asconf_ack, | ||
3318 | sizeof(sctp_errhdr_t)); | ||
3319 | if (abort) { | ||
3320 | sctp_init_cause(abort, SCTP_ERROR_RSRC_LOW, NULL, 0); | ||
3321 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | ||
3322 | SCTP_CHUNK(abort)); | ||
3323 | } | ||
3324 | /* We are going to ABORT, so we might as well stop | ||
3325 | * processing the rest of the chunks in the packet. | ||
3326 | */ | ||
3327 | sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL()); | ||
3328 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | ||
3329 | SCTP_U32(SCTP_ERROR_ASCONF_ACK)); | ||
3330 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | ||
3331 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | ||
3332 | return SCTP_DISPOSITION_ABORT; | ||
3333 | } | ||
3334 | |||
3335 | return SCTP_DISPOSITION_DISCARD; | ||
3336 | } | ||
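Rule D0 above uses ADDIP_SERIAL_gte(), the "greater than or equal" counterpart of the serial comparison sketched earlier: a is considered ahead of b when it is at most 2^31 - 1 larger, modulo 2^32. A standalone sketch under that assumption:

#include <stdint.h>

static int serial_gte(uint32_t a, uint32_t b)
{
	/* equal, or a is ahead of b by no more than 2^31 - 1 */
	return a == b || (int32_t)(b - a) < 0;
}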
3337 | |||
3338 | /* | ||
3339 | * PR-SCTP Section 3.6 Receiver Side Implementation of PR-SCTP | ||
3340 | * | ||
3341 | * When a FORWARD TSN chunk arrives, the data receiver MUST first update | ||
3342 | * its cumulative TSN point to the value carried in the FORWARD TSN | ||
3343 | * chunk, and then MUST further advance its cumulative TSN point locally | ||
3344 | * if possible. | ||
3345 | * After the above processing, the data receiver MUST stop reporting any | ||
3346 | * missing TSNs earlier than or equal to the new cumulative TSN point. | ||
3347 | * | ||
3348 | * Verification Tag: 8.5 Verification Tag [Normal verification] | ||
3349 | * | ||
3350 | * The return value is the disposition of the chunk. | ||
3351 | */ | ||
3352 | sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep, | ||
3353 | const struct sctp_association *asoc, | ||
3354 | const sctp_subtype_t type, | ||
3355 | void *arg, | ||
3356 | sctp_cmd_seq_t *commands) | ||
3357 | { | ||
3358 | struct sctp_chunk *chunk = arg; | ||
3359 | struct sctp_fwdtsn_hdr *fwdtsn_hdr; | ||
3360 | __u16 len; | ||
3361 | __u32 tsn; | ||
3362 | |||
3363 | if (!sctp_vtag_verify(chunk, asoc)) { | ||
3364 | sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, | ||
3365 | SCTP_NULL()); | ||
3366 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
3367 | } | ||
3368 | |||
3369 | /* Make sure that the FORWARD_TSN chunk has valid length. */ | ||
3370 | if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk))) | ||
3371 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
3372 | commands); | ||
3373 | |||
3374 | fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data; | ||
3375 | chunk->subh.fwdtsn_hdr = fwdtsn_hdr; | ||
3376 | len = ntohs(chunk->chunk_hdr->length); | ||
3377 | len -= sizeof(struct sctp_chunkhdr); | ||
3378 | skb_pull(chunk->skb, len); | ||
3379 | |||
3380 | tsn = ntohl(fwdtsn_hdr->new_cum_tsn); | ||
3381 | SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __FUNCTION__, tsn); | ||
3382 | |||
3383 | /* The TSN is too high--silently discard the chunk and count on it | ||
3384 | * getting retransmitted later. | ||
3385 | */ | ||
3386 | if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0) | ||
3387 | goto discard_noforce; | ||
3388 | |||
3389 | sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn)); | ||
3390 | if (len > sizeof(struct sctp_fwdtsn_hdr)) | ||
3391 | sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN, | ||
3392 | SCTP_CHUNK(chunk)); | ||
3393 | |||
3394 | /* Count this as receiving DATA. */ | ||
3395 | if (asoc->autoclose) { | ||
3396 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, | ||
3397 | SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); | ||
3398 | } | ||
3399 | |||
3400 | /* FIXME: For now send a SACK, but DATA processing may | ||
3401 | * send another. | ||
3402 | */ | ||
3403 | sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_NOFORCE()); | ||
3404 | /* Start the SACK timer. */ | ||
3405 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, | ||
3406 | SCTP_TO(SCTP_EVENT_TIMEOUT_SACK)); | ||
3407 | |||
3408 | return SCTP_DISPOSITION_CONSUME; | ||
3409 | |||
3410 | discard_noforce: | ||
3411 | return SCTP_DISPOSITION_DISCARD; | ||
3412 | } | ||
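Whether a FORWARD TSN chunk carries any stream/SSN skip entries is decided above from the chunk length alone (len > sizeof(struct sctp_fwdtsn_hdr)). A sketch of that calculation, with the PR-SCTP layouts re-declared just for illustration:

#include <stdint.h>
#include <stddef.h>

struct fwdtsn_hdr  { uint32_t new_cum_tsn; };		/* new cumulative TSN  */
struct fwdtsn_skip { uint16_t stream; uint16_t ssn; };	/* one skip per stream */

/* body_len is the chunk length minus the common chunk header */
static size_t fwdtsn_skip_count(size_t body_len)
{
	if (body_len <= sizeof(struct fwdtsn_hdr))
		return 0;				/* only the new cum TSN */
	return (body_len - sizeof(struct fwdtsn_hdr)) /
	       sizeof(struct fwdtsn_skip);
}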
3413 | |||
3414 | sctp_disposition_t sctp_sf_eat_fwd_tsn_fast( | ||
3415 | const struct sctp_endpoint *ep, | ||
3416 | const struct sctp_association *asoc, | ||
3417 | const sctp_subtype_t type, | ||
3418 | void *arg, | ||
3419 | sctp_cmd_seq_t *commands) | ||
3420 | { | ||
3421 | struct sctp_chunk *chunk = arg; | ||
3422 | struct sctp_fwdtsn_hdr *fwdtsn_hdr; | ||
3423 | __u16 len; | ||
3424 | __u32 tsn; | ||
3425 | |||
3426 | if (!sctp_vtag_verify(chunk, asoc)) { | ||
3427 | sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG, | ||
3428 | SCTP_NULL()); | ||
3429 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
3430 | } | ||
3431 | |||
3432 | /* Make sure that the FORWARD_TSN chunk has a valid length. */ | ||
3433 | if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_fwdtsn_chunk))) | ||
3434 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
3435 | commands); | ||
3436 | |||
3437 | fwdtsn_hdr = (struct sctp_fwdtsn_hdr *)chunk->skb->data; | ||
3438 | chunk->subh.fwdtsn_hdr = fwdtsn_hdr; | ||
3439 | len = ntohs(chunk->chunk_hdr->length); | ||
3440 | len -= sizeof(struct sctp_chunkhdr); | ||
3441 | skb_pull(chunk->skb, len); | ||
3442 | |||
3443 | tsn = ntohl(fwdtsn_hdr->new_cum_tsn); | ||
3444 | SCTP_DEBUG_PRINTK("%s: TSN 0x%x.\n", __FUNCTION__, tsn); | ||
3445 | |||
3446 | /* The TSN is too high--silently discard the chunk and count on it | ||
3447 | * getting retransmitted later. | ||
3448 | */ | ||
3449 | if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0) | ||
3450 | goto gen_shutdown; | ||
3451 | |||
3452 | sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn)); | ||
3453 | if (len > sizeof(struct sctp_fwdtsn_hdr)) | ||
3454 | sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN, | ||
3455 | SCTP_CHUNK(chunk)); | ||
3456 | |||
3457 | /* Go ahead and force a SACK, since we are shutting down. */ | ||
3458 | gen_shutdown: | ||
3459 | /* Implementor's Guide. | ||
3460 | * | ||
3461 | * While in SHUTDOWN-SENT state, the SHUTDOWN sender MUST immediately | ||
3462 | * respond to each received packet containing one or more DATA chunk(s) | ||
3463 | * with a SACK, a SHUTDOWN chunk, and restart the T2-shutdown timer | ||
3464 | */ | ||
3465 | sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SHUTDOWN, SCTP_NULL()); | ||
3466 | sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE()); | ||
3467 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, | ||
3468 | SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); | ||
3469 | |||
3470 | return SCTP_DISPOSITION_CONSUME; | ||
3471 | } | ||
3472 | |||
3473 | /* | ||
3474 | * Process an unknown chunk. | ||
3475 | * | ||
3476 | * Section: 3.2. Also, 2.1 in the implementor's guide. | ||
3477 | * | ||
3478 | * Chunk Types are encoded such that the highest-order two bits specify | ||
3479 | * the action that must be taken if the processing endpoint does not | ||
3480 | * recognize the Chunk Type. | ||
3481 | * | ||
3482 | * 00 - Stop processing this SCTP packet and discard it, do not process | ||
3483 | * any further chunks within it. | ||
3484 | * | ||
3485 | * 01 - Stop processing this SCTP packet and discard it, do not process | ||
3486 | * any further chunks within it, and report the unrecognized | ||
3487 | * chunk in an 'Unrecognized Chunk Type'. | ||
3488 | * | ||
3489 | * 10 - Skip this chunk and continue processing. | ||
3490 | * | ||
3491 | * 11 - Skip this chunk and continue processing, but report in an ERROR | ||
3492 | * Chunk using the 'Unrecognized Chunk Type' cause of error. | ||
3493 | * | ||
3494 | * The return value is the disposition of the chunk. | ||
3495 | */ | ||
3496 | sctp_disposition_t sctp_sf_unk_chunk(const struct sctp_endpoint *ep, | ||
3497 | const struct sctp_association *asoc, | ||
3498 | const sctp_subtype_t type, | ||
3499 | void *arg, | ||
3500 | sctp_cmd_seq_t *commands) | ||
3501 | { | ||
3502 | struct sctp_chunk *unk_chunk = arg; | ||
3503 | struct sctp_chunk *err_chunk; | ||
3504 | sctp_chunkhdr_t *hdr; | ||
3505 | |||
3506 | SCTP_DEBUG_PRINTK("Processing the unknown chunk id %d.\n", type.chunk); | ||
3507 | |||
3508 | if (!sctp_vtag_verify(unk_chunk, asoc)) | ||
3509 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
3510 | |||
3511 | /* Make sure that the chunk has a valid length. | ||
3512 | * Since we don't know the chunk type, we use a general | ||
3513 | * chunkhdr structure to make a comparison. | ||
3514 | */ | ||
3515 | if (!sctp_chunk_length_valid(unk_chunk, sizeof(sctp_chunkhdr_t))) | ||
3516 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
3517 | commands); | ||
3518 | |||
3519 | switch (type.chunk & SCTP_CID_ACTION_MASK) { | ||
3520 | case SCTP_CID_ACTION_DISCARD: | ||
3521 | /* Discard the packet. */ | ||
3522 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
3523 | break; | ||
3524 | case SCTP_CID_ACTION_DISCARD_ERR: | ||
3525 | /* Discard the packet. */ | ||
3526 | sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
3527 | |||
3528 | /* Generate an ERROR chunk as response. */ | ||
3529 | hdr = unk_chunk->chunk_hdr; | ||
3530 | err_chunk = sctp_make_op_error(asoc, unk_chunk, | ||
3531 | SCTP_ERROR_UNKNOWN_CHUNK, hdr, | ||
3532 | WORD_ROUND(ntohs(hdr->length))); | ||
3533 | if (err_chunk) { | ||
3534 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | ||
3535 | SCTP_CHUNK(err_chunk)); | ||
3536 | } | ||
3537 | return SCTP_DISPOSITION_CONSUME; | ||
3538 | break; | ||
3539 | case SCTP_CID_ACTION_SKIP: | ||
3540 | /* Skip the chunk. */ | ||
3541 | return SCTP_DISPOSITION_DISCARD; | ||
3542 | break; | ||
3543 | case SCTP_CID_ACTION_SKIP_ERR: | ||
3544 | /* Generate an ERROR chunk as response. */ | ||
3545 | hdr = unk_chunk->chunk_hdr; | ||
3546 | err_chunk = sctp_make_op_error(asoc, unk_chunk, | ||
3547 | SCTP_ERROR_UNKNOWN_CHUNK, hdr, | ||
3548 | WORD_ROUND(ntohs(hdr->length))); | ||
3549 | if (err_chunk) { | ||
3550 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | ||
3551 | SCTP_CHUNK(err_chunk)); | ||
3552 | } | ||
3553 | /* Skip the chunk. */ | ||
3554 | return SCTP_DISPOSITION_CONSUME; | ||
3555 | break; | ||
3556 | default: | ||
3557 | break; | ||
3558 | } | ||
3559 | |||
3560 | return SCTP_DISPOSITION_DISCARD; | ||
3561 | } | ||
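The switch above keys on the two high-order bits of the chunk type, as described in the comment block before the function. A minimal standalone illustration of that dispatch; the macro values follow the 0xC0 mask convention but are defined locally here:

#include <stdint.h>
#include <stdio.h>

#define CID_ACTION_MASK		0xC0
#define CID_ACTION_DISCARD	0x00	/* 00: discard packet               */
#define CID_ACTION_DISCARD_ERR	0x40	/* 01: discard packet, report ERROR */
#define CID_ACTION_SKIP		0x80	/* 10: skip chunk                   */
#define CID_ACTION_SKIP_ERR	0xC0	/* 11: skip chunk, report ERROR     */

int main(void)
{
	uint8_t unknown_type = 0xC1;	/* hypothetical unrecognized chunk type */

	switch (unknown_type & CID_ACTION_MASK) {
	case CID_ACTION_DISCARD:	puts("discard packet");			break;
	case CID_ACTION_DISCARD_ERR:	puts("discard packet, send ERROR");	break;
	case CID_ACTION_SKIP:		puts("skip chunk");			break;
	case CID_ACTION_SKIP_ERR:	puts("skip chunk, send ERROR");		break;
	}
	return 0;
}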
3562 | |||
3563 | /* | ||
3564 | * Discard the chunk. | ||
3565 | * | ||
3566 | * Section: 0.2, 5.2.3, 5.2.5, 5.2.6, 6.0, 8.4.6, 8.5.1c, 9.2 | ||
3567 | * [Too numerous to mention...] | ||
3568 | * Verification Tag: No verification needed. | ||
3569 | * Inputs | ||
3570 | * (endpoint, asoc, chunk) | ||
3571 | * | ||
3572 | * Outputs | ||
3573 | * (asoc, reply_msg, msg_up, timers, counters) | ||
3574 | * | ||
3575 | * The return value is the disposition of the chunk. | ||
3576 | */ | ||
3577 | sctp_disposition_t sctp_sf_discard_chunk(const struct sctp_endpoint *ep, | ||
3578 | const struct sctp_association *asoc, | ||
3579 | const sctp_subtype_t type, | ||
3580 | void *arg, | ||
3581 | sctp_cmd_seq_t *commands) | ||
3582 | { | ||
3583 | SCTP_DEBUG_PRINTK("Chunk %d is discarded\n", type.chunk); | ||
3584 | return SCTP_DISPOSITION_DISCARD; | ||
3585 | } | ||
3586 | |||
3587 | /* | ||
3588 | * Discard the whole packet. | ||
3589 | * | ||
3590 | * Section: 8.4 2) | ||
3591 | * | ||
3592 | * 2) If the OOTB packet contains an ABORT chunk, the receiver MUST | ||
3593 | * silently discard the OOTB packet and take no further action. | ||
3594 | * Otherwise, | ||
3595 | * | ||
3596 | * Verification Tag: No verification necessary | ||
3597 | * | ||
3598 | * Inputs | ||
3599 | * (endpoint, asoc, chunk) | ||
3600 | * | ||
3601 | * Outputs | ||
3602 | * (asoc, reply_msg, msg_up, timers, counters) | ||
3603 | * | ||
3604 | * The return value is the disposition of the chunk. | ||
3605 | */ | ||
3606 | sctp_disposition_t sctp_sf_pdiscard(const struct sctp_endpoint *ep, | ||
3607 | const struct sctp_association *asoc, | ||
3608 | const sctp_subtype_t type, | ||
3609 | void *arg, | ||
3610 | sctp_cmd_seq_t *commands) | ||
3611 | { | ||
3612 | sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL()); | ||
3613 | |||
3614 | return SCTP_DISPOSITION_CONSUME; | ||
3615 | } | ||
3616 | |||
3617 | |||
3618 | /* | ||
3619 | * The other end is violating protocol. | ||
3620 | * | ||
3621 | * Section: Not specified | ||
3622 | * Verification Tag: Not specified | ||
3623 | * Inputs | ||
3624 | * (endpoint, asoc, chunk) | ||
3625 | * | ||
3626 | * Outputs | ||
3627 | * (asoc, reply_msg, msg_up, timers, counters) | ||
3628 | * | ||
3629 | * We simply tag the chunk as a violation. The state machine will log | ||
3630 | * the violation and continue. | ||
3631 | */ | ||
3632 | sctp_disposition_t sctp_sf_violation(const struct sctp_endpoint *ep, | ||
3633 | const struct sctp_association *asoc, | ||
3634 | const sctp_subtype_t type, | ||
3635 | void *arg, | ||
3636 | sctp_cmd_seq_t *commands) | ||
3637 | { | ||
3638 | return SCTP_DISPOSITION_VIOLATION; | ||
3639 | } | ||
3640 | |||
3641 | |||
3642 | /* | ||
3643 | * Handle a protocol violation when the chunk length is invalid. | ||
3644 | * "Invalid" length is identified as smaller then the minimal length a | ||
3645 | * given chunk can be. For example, a SACK chunk has invalid length | ||
3646 | * if it's length is set to be smaller then the size of sctp_sack_chunk_t. | ||
3647 | * | ||
3648 | * We inform the other end by sending an ABORT with a Protocol Violation | ||
3649 | * error code. | ||
3650 | * | ||
3651 | * Section: Not specified | ||
3652 | * Verification Tag: Nothing to do | ||
3653 | * Inputs | ||
3654 | * (endpoint, asoc, chunk) | ||
3655 | * | ||
3656 | * Outputs | ||
3657 | * (reply_msg, msg_up, counters) | ||
3658 | * | ||
3659 | * Generate an ABORT chunk and terminate the association. | ||
3660 | */ | ||
3661 | sctp_disposition_t sctp_sf_violation_chunklen(const struct sctp_endpoint *ep, | ||
3662 | const struct sctp_association *asoc, | ||
3663 | const sctp_subtype_t type, | ||
3664 | void *arg, | ||
3665 | sctp_cmd_seq_t *commands) | ||
3666 | { | ||
3667 | struct sctp_chunk *chunk = arg; | ||
3668 | struct sctp_chunk *abort = NULL; | ||
3669 | char err_str[] = "The following chunk had invalid length:"; | ||
3670 | |||
3671 | /* Make the abort chunk. */ | ||
3672 | abort = sctp_make_abort_violation(asoc, chunk, err_str, | ||
3673 | sizeof(err_str)); | ||
3674 | if (!abort) | ||
3675 | goto nomem; | ||
3676 | |||
3677 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); | ||
3678 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); | ||
3679 | |||
3680 | if (asoc->state <= SCTP_STATE_COOKIE_ECHOED) { | ||
3681 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | ||
3682 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); | ||
3683 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, | ||
3684 | SCTP_U32(SCTP_ERROR_PROTO_VIOLATION)); | ||
3685 | } else { | ||
3686 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | ||
3687 | SCTP_U32(SCTP_ERROR_PROTO_VIOLATION)); | ||
3688 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | ||
3689 | } | ||
3690 | |||
3691 | sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET, SCTP_NULL()); | ||
3692 | |||
3693 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | ||
3694 | |||
3695 | return SCTP_DISPOSITION_ABORT; | ||
3696 | |||
3697 | nomem: | ||
3698 | return SCTP_DISPOSITION_NOMEM; | ||
3699 | } | ||
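The "invalid length" condition described above is simply a declared chunk length that cannot cover the smallest valid form of the chunk. A self-contained sketch of that test (the struct and helper are re-declared here, not the kernel's):

#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>		/* ntohs */

struct chunkhdr {
	uint8_t  type;
	uint8_t  flags;
	uint16_t length;	/* network byte order, includes this header */
};

static int chunk_length_ok(const struct chunkhdr *ch, size_t required)
{
	/* e.g. required = size of the full SACK chunk when validating a SACK */
	return ntohs(ch->length) >= required;
}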
3700 | |||
3701 | /*************************************************************************** | ||
3702 | * These are the state functions for handling primitive (Section 10) events. | ||
3703 | ***************************************************************************/ | ||
3704 | /* | ||
3705 | * sctp_sf_do_prm_asoc | ||
3706 | * | ||
3707 | * Section: 10.1 ULP-to-SCTP | ||
3708 | * B) Associate | ||
3709 | * | ||
3710 | * Format: ASSOCIATE(local SCTP instance name, destination transport addr, | ||
3711 | * outbound stream count) | ||
3712 | * -> association id [,destination transport addr list] [,outbound stream | ||
3713 | * count] | ||
3714 | * | ||
3715 | * This primitive allows the upper layer to initiate an association to a | ||
3716 | * specific peer endpoint. | ||
3717 | * | ||
3718 | * The peer endpoint shall be specified by one of the transport addresses | ||
3719 | * which defines the endpoint (see Section 1.4). If the local SCTP | ||
3720 | * instance has not been initialized, the ASSOCIATE is considered an | ||
3721 | * error. | ||
3722 | * [This is not relevant for the kernel implementation since we do all | ||
3723 | * initialization at boot time. If we hadn't initialized, we wouldn't | ||
3724 | * get anywhere near this code.] | ||
3725 | * | ||
3726 | * An association id, which is a local handle to the SCTP association, | ||
3727 | * will be returned on successful establishment of the association. If | ||
3728 | * SCTP is not able to open an SCTP association with the peer endpoint, | ||
3729 | * an error is returned. | ||
3730 | * [In the kernel implementation, the struct sctp_association needs to | ||
3731 | * be created BEFORE causing this primitive to run.] | ||
3732 | * | ||
3733 | * Other association parameters may be returned, including the | ||
3734 | * complete destination transport addresses of the peer as well as the | ||
3735 | * outbound stream count of the local endpoint. One of the transport | ||
3736 | * address from the returned destination addresses will be selected by | ||
3737 | * the local endpoint as default primary path for sending SCTP packets | ||
3738 | * to this peer. The returned "destination transport addr list" can | ||
3739 | * be used by the ULP to change the default primary path or to force | ||
3740 | * sending a packet to a specific transport address. [All of this | ||
3741 | * stuff happens when the INIT ACK arrives. This is a NON-BLOCKING | ||
3742 | * function.] | ||
3743 | * | ||
3744 | * Mandatory attributes: | ||
3745 | * | ||
3746 | * o local SCTP instance name - obtained from the INITIALIZE operation. | ||
3747 | * [This is the argument asoc.] | ||
3748 | * o destination transport addr - specified as one of the transport | ||
3749 | * addresses of the peer endpoint with which the association is to be | ||
3750 | * established. | ||
3751 | * [This is asoc->peer.active_path.] | ||
3752 | * o outbound stream count - the number of outbound streams the ULP | ||
3753 | * would like to open towards this peer endpoint. | ||
3754 | * [BUG: This is not currently implemented.] | ||
3755 | * Optional attributes: | ||
3756 | * | ||
3757 | * None. | ||
3758 | * | ||
3759 | * The return value is a disposition. | ||
3760 | */ | ||
3761 | sctp_disposition_t sctp_sf_do_prm_asoc(const struct sctp_endpoint *ep, | ||
3762 | const struct sctp_association *asoc, | ||
3763 | const sctp_subtype_t type, | ||
3764 | void *arg, | ||
3765 | sctp_cmd_seq_t *commands) | ||
3766 | { | ||
3767 | struct sctp_chunk *repl; | ||
3768 | |||
3769 | /* The comment below says that we enter COOKIE-WAIT AFTER | ||
3770 | * sending the INIT, but that doesn't actually work in our | ||
3771 | * implementation... | ||
3772 | */ | ||
3773 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
3774 | SCTP_STATE(SCTP_STATE_COOKIE_WAIT)); | ||
3775 | |||
3776 | /* RFC 2960 5.1 Normal Establishment of an Association | ||
3777 | * | ||
3778 | * A) "A" first sends an INIT chunk to "Z". In the INIT, "A" | ||
3779 | * must provide its Verification Tag (Tag_A) in the Initiate | ||
3780 | * Tag field. Tag_A SHOULD be a random number in the range of | ||
3781 | * 1 to 4294967295 (see 5.3.1 for Tag value selection). ... | ||
3782 | */ | ||
3783 | |||
3784 | repl = sctp_make_init(asoc, &asoc->base.bind_addr, GFP_ATOMIC, 0); | ||
3785 | if (!repl) | ||
3786 | goto nomem; | ||
3787 | |||
3788 | /* Cast away the const modifier, as we want to just | ||
3789 | * rerun it through as a side effect. | ||
3790 | */ | ||
3791 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, | ||
3792 | SCTP_ASOC((struct sctp_association *) asoc)); | ||
3793 | |||
3794 | /* After sending the INIT, "A" starts the T1-init timer and | ||
3795 | * enters the COOKIE-WAIT state. | ||
3796 | */ | ||
3797 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, | ||
3798 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); | ||
3799 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); | ||
3800 | return SCTP_DISPOSITION_CONSUME; | ||
3801 | |||
3802 | nomem: | ||
3803 | return SCTP_DISPOSITION_NOMEM; | ||
3804 | } | ||
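The RFC text quoted in the function requires Tag_A to be a random value in 1..4294967295. A sketch of picking such a tag; the RNG below is only a stand-in, not what the kernel actually uses for verification tags:

#include <stdint.h>
#include <stdlib.h>

static uint32_t pick_verification_tag(void)
{
	uint32_t tag;

	do {
		/* placeholder entropy source; zero is reserved and must be skipped */
		tag = ((uint32_t)rand() << 16) ^ (uint32_t)rand();
	} while (tag == 0);

	return tag;
}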
3805 | |||
3806 | /* | ||
3807 | * Process the SEND primitive. | ||
3808 | * | ||
3809 | * Section: 10.1 ULP-to-SCTP | ||
3810 | * E) Send | ||
3811 | * | ||
3812 | * Format: SEND(association id, buffer address, byte count [,context] | ||
3813 | * [,stream id] [,life time] [,destination transport address] | ||
3814 | * [,unorder flag] [,no-bundle flag] [,payload protocol-id] ) | ||
3815 | * -> result | ||
3816 | * | ||
3817 | * This is the main method to send user data via SCTP. | ||
3818 | * | ||
3819 | * Mandatory attributes: | ||
3820 | * | ||
3821 | * o association id - local handle to the SCTP association | ||
3822 | * | ||
3823 | * o buffer address - the location where the user message to be | ||
3824 | * transmitted is stored; | ||
3825 | * | ||
3826 | * o byte count - The size of the user data in number of bytes; | ||
3827 | * | ||
3828 | * Optional attributes: | ||
3829 | * | ||
3830 | * o context - an optional 32 bit integer that will be carried in the | ||
3831 | * sending failure notification to the ULP if the transportation of | ||
3832 | * this User Message fails. | ||
3833 | * | ||
3834 | * o stream id - to indicate which stream to send the data on. If not | ||
3835 | * specified, stream 0 will be used. | ||
3836 | * | ||
3837 | * o life time - specifies the life time of the user data. The user data | ||
3838 | * will not be sent by SCTP after the life time expires. This | ||
3839 | * parameter can be used to avoid efforts to transmit stale | ||
3840 | * user messages. SCTP notifies the ULP if the data cannot be | ||
3841 | * initiated to transport (i.e. sent to the destination via SCTP's | ||
3842 | * send primitive) within the life time variable. However, the | ||
3843 | * user data will be transmitted if SCTP has attempted to transmit a | ||
3844 | * chunk before the life time expired. | ||
3845 | * | ||
3846 | * o destination transport address - specified as one of the destination | ||
3847 | * transport addresses of the peer endpoint to which this packet | ||
3848 | * should be sent. Whenever possible, SCTP should use this destination | ||
3849 | * transport address for sending the packets, instead of the current | ||
3850 | * primary path. | ||
3851 | * | ||
3852 | * o unorder flag - this flag, if present, indicates that the user | ||
3853 | * would like the data delivered in an unordered fashion to the peer | ||
3854 | * (i.e., the U flag is set to 1 on all DATA chunks carrying this | ||
3855 | * message). | ||
3856 | * | ||
3857 | * o no-bundle flag - instructs SCTP not to bundle this user data with | ||
3858 | * other outbound DATA chunks. SCTP MAY still bundle even when | ||
3859 | * this flag is present, when faced with network congestion. | ||
3860 | * | ||
3861 | * o payload protocol-id - A 32 bit unsigned integer that is to be | ||
3862 | * passed to the peer indicating the type of payload protocol data | ||
3863 | * being transmitted. This value is passed as opaque data by SCTP. | ||
3864 | * | ||
3865 | * The return value is the disposition. | ||
3866 | */ | ||
3867 | sctp_disposition_t sctp_sf_do_prm_send(const struct sctp_endpoint *ep, | ||
3868 | const struct sctp_association *asoc, | ||
3869 | const sctp_subtype_t type, | ||
3870 | void *arg, | ||
3871 | sctp_cmd_seq_t *commands) | ||
3872 | { | ||
3873 | struct sctp_chunk *chunk = arg; | ||
3874 | |||
3875 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(chunk)); | ||
3876 | return SCTP_DISPOSITION_CONSUME; | ||
3877 | } | ||
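The long list of SEND attributes above maps fairly directly onto per-message metadata. The struct below is an illustrative grouping of those attributes; its field names loosely echo the sctp_sndrcvinfo ancillary data of the socket API, but the type itself is invented for this sketch:

#include <stdint.h>

struct send_attrs {
	uint16_t stream_id;	/* "stream id": defaults to stream 0           */
	uint32_t ppid;		/* "payload protocol-id": opaque to SCTP       */
	uint32_t context;	/* echoed in a send-failure notification       */
	uint32_t lifetime_ms;	/* "life time": 0 means never expire           */
	unsigned unordered:1;	/* "unorder flag": sets the U bit on DATA      */
	unsigned no_bundle:1;	/* "no-bundle flag": advisory under congestion */
};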
3878 | |||
3879 | /* | ||
3880 | * Process the SHUTDOWN primitive. | ||
3881 | * | ||
3882 | * Section: 10.1: | ||
3883 | * C) Shutdown | ||
3884 | * | ||
3885 | * Format: SHUTDOWN(association id) | ||
3886 | * -> result | ||
3887 | * | ||
3888 | * Gracefully closes an association. Any locally queued user data | ||
3889 | * will be delivered to the peer. The association will be terminated only | ||
3890 | * after the peer acknowledges all the SCTP packets sent. A success code | ||
3891 | * will be returned on successful termination of the association. If | ||
3892 | * attempting to terminate the association results in a failure, an error | ||
3893 | * code shall be returned. | ||
3894 | * | ||
3895 | * Mandatory attributes: | ||
3896 | * | ||
3897 | * o association id - local handle to the SCTP association | ||
3898 | * | ||
3899 | * Optional attributes: | ||
3900 | * | ||
3901 | * None. | ||
3902 | * | ||
3903 | * The return value is the disposition. | ||
3904 | */ | ||
3905 | sctp_disposition_t sctp_sf_do_9_2_prm_shutdown( | ||
3906 | const struct sctp_endpoint *ep, | ||
3907 | const struct sctp_association *asoc, | ||
3908 | const sctp_subtype_t type, | ||
3909 | void *arg, | ||
3910 | sctp_cmd_seq_t *commands) | ||
3911 | { | ||
3912 | int disposition; | ||
3913 | |||
3914 | /* From 9.2 Shutdown of an Association | ||
3915 | * Upon receipt of the SHUTDOWN primitive from its upper | ||
3916 | * layer, the endpoint enters SHUTDOWN-PENDING state and | ||
3917 | * remains there until all outstanding data has been | ||
3918 | * acknowledged by its peer. The endpoint accepts no new data | ||
3919 | * from its upper layer, but retransmits data to the far end | ||
3920 | * if necessary to fill gaps. | ||
3921 | */ | ||
3922 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
3923 | SCTP_STATE(SCTP_STATE_SHUTDOWN_PENDING)); | ||
3924 | |||
3925 | /* sctpimpguide-05 Section 2.12.2 | ||
3926 | * The sender of the SHUTDOWN MAY also start an overall guard timer | ||
3927 | * 'T5-shutdown-guard' to bound the overall time for shutdown sequence. | ||
3928 | */ | ||
3929 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, | ||
3930 | SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); | ||
3931 | |||
3932 | disposition = SCTP_DISPOSITION_CONSUME; | ||
3933 | if (sctp_outq_is_empty(&asoc->outqueue)) { | ||
3934 | disposition = sctp_sf_do_9_2_start_shutdown(ep, asoc, type, | ||
3935 | arg, commands); | ||
3936 | } | ||
3937 | return disposition; | ||
3938 | } | ||
3939 | |||
3940 | /* | ||
3941 | * Process the ABORT primitive. | ||
3942 | * | ||
3943 | * Section: 10.1: | ||
3944 | * C) Abort | ||
3945 | * | ||
3946 | * Format: Abort(association id [, cause code]) | ||
3947 | * -> result | ||
3948 | * | ||
3949 | * Ungracefully closes an association. Any locally queued user data | ||
3950 | * will be discarded and an ABORT chunk is sent to the peer. A success code | ||
3951 | * will be returned on successful abortion of the association. If | ||
3952 | * attempting to abort the association results in a failure, an error | ||
3953 | * code shall be returned. | ||
3954 | * | ||
3955 | * Mandatory attributes: | ||
3956 | * | ||
3957 | * o association id - local handle to the SCTP association | ||
3958 | * | ||
3959 | * Optional attributes: | ||
3960 | * | ||
3961 | * o cause code - reason of the abort to be passed to the peer | ||
3962 | * | ||
3963 | * None. | ||
3964 | * | ||
3965 | * The return value is the disposition. | ||
3966 | */ | ||
3967 | sctp_disposition_t sctp_sf_do_9_1_prm_abort( | ||
3968 | const struct sctp_endpoint *ep, | ||
3969 | const struct sctp_association *asoc, | ||
3970 | const sctp_subtype_t type, | ||
3971 | void *arg, | ||
3972 | sctp_cmd_seq_t *commands) | ||
3973 | { | ||
3974 | /* From 9.1 Abort of an Association | ||
3975 | * Upon receipt of the ABORT primitive from its upper | ||
3976 | * layer, the endpoint enters the CLOSED state, discards any | ||
3977 | * locally queued user data without waiting for it to be | ||
3978 | * acknowledged by its peer, and sends an ABORT chunk to the | ||
3979 | * peer. The endpoint accepts no new data from its upper | ||
3980 | * layer and does not retransmit to the far end. | ||
3981 | */ | ||
3982 | struct msghdr *msg = arg; | ||
3983 | struct sctp_chunk *abort; | ||
3984 | sctp_disposition_t retval; | ||
3985 | |||
3986 | retval = SCTP_DISPOSITION_CONSUME; | ||
3987 | |||
3988 | /* Generate ABORT chunk to send the peer. */ | ||
3989 | abort = sctp_make_abort_user(asoc, NULL, msg); | ||
3990 | if (!abort) | ||
3991 | retval = SCTP_DISPOSITION_NOMEM; | ||
3992 | else | ||
3993 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); | ||
3994 | |||
3995 | /* Even if we can't send the ABORT due to low memory, delete the | ||
3996 | * TCB. This is a departure from our typical NOMEM handling. | ||
3997 | */ | ||
3998 | |||
3999 | /* Delete the established association. */ | ||
4000 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | ||
4001 | SCTP_U32(SCTP_ERROR_USER_ABORT)); | ||
4002 | |||
4003 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | ||
4004 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | ||
4005 | |||
4006 | return retval; | ||
4007 | } | ||
4008 | |||
4009 | /* We tried an illegal operation on an association which is closed. */ | ||
4010 | sctp_disposition_t sctp_sf_error_closed(const struct sctp_endpoint *ep, | ||
4011 | const struct sctp_association *asoc, | ||
4012 | const sctp_subtype_t type, | ||
4013 | void *arg, | ||
4014 | sctp_cmd_seq_t *commands) | ||
4015 | { | ||
4016 | sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_ERROR, SCTP_ERROR(-EINVAL)); | ||
4017 | return SCTP_DISPOSITION_CONSUME; | ||
4018 | } | ||
4019 | |||
4020 | /* We tried an illegal operation on an association which is shutting | ||
4021 | * down. | ||
4022 | */ | ||
4023 | sctp_disposition_t sctp_sf_error_shutdown(const struct sctp_endpoint *ep, | ||
4024 | const struct sctp_association *asoc, | ||
4025 | const sctp_subtype_t type, | ||
4026 | void *arg, | ||
4027 | sctp_cmd_seq_t *commands) | ||
4028 | { | ||
4029 | sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_ERROR, | ||
4030 | SCTP_ERROR(-ESHUTDOWN)); | ||
4031 | return SCTP_DISPOSITION_CONSUME; | ||
4032 | } | ||
4033 | |||
4034 | /* | ||
4035 | * sctp_sf_cookie_wait_prm_shutdown | ||
4036 | * | ||
4037 | * Section: 4 Note: 2 | ||
4038 | * Verification Tag: | ||
4039 | * Inputs | ||
4040 | * (endpoint, asoc) | ||
4041 | * | ||
4042 | * The RFC does not explicitly address this issue, but this is the route through the | ||
4043 | * state table when someone issues a shutdown while in COOKIE_WAIT state. | ||
4044 | * | ||
4045 | * Outputs | ||
4046 | * (timers) | ||
4047 | */ | ||
4048 | sctp_disposition_t sctp_sf_cookie_wait_prm_shutdown( | ||
4049 | const struct sctp_endpoint *ep, | ||
4050 | const struct sctp_association *asoc, | ||
4051 | const sctp_subtype_t type, | ||
4052 | void *arg, | ||
4053 | sctp_cmd_seq_t *commands) | ||
4054 | { | ||
4055 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | ||
4056 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); | ||
4057 | |||
4058 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
4059 | SCTP_STATE(SCTP_STATE_CLOSED)); | ||
4060 | |||
4061 | SCTP_INC_STATS(SCTP_MIB_SHUTDOWNS); | ||
4062 | |||
4063 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); | ||
4064 | |||
4065 | return SCTP_DISPOSITION_DELETE_TCB; | ||
4066 | } | ||
4067 | |||
4068 | /* | ||
4069 | * sctp_cookie_echoed_prm_shutdown | ||
4070 | * | ||
4071 | * Section: 4 Note: 2 | ||
4072 | * Verification Tag: | ||
4073 | * Inputs | ||
4074 | * (endpoint, asoc) | ||
4075 | * | ||
4076 | * The RFC does not explicitly address this issue, but this is the route through | ||
4077 | * the state table when someone issues a shutdown while in the COOKIE_ECHOED state. | ||
4078 | * | ||
4079 | * Outputs | ||
4080 | * (timers) | ||
4081 | */ | ||
4082 | sctp_disposition_t sctp_sf_cookie_echoed_prm_shutdown( | ||
4083 | const struct sctp_endpoint *ep, | ||
4084 | const struct sctp_association *asoc, | ||
4085 | const sctp_subtype_t type, | ||
4086 | void *arg, sctp_cmd_seq_t *commands) | ||
4087 | { | ||
4088 | /* There is a single T1 timer, so we should be able to use | ||
4089 | * the common function with the COOKIE-WAIT state. | ||
4090 | */ | ||
4091 | return sctp_sf_cookie_wait_prm_shutdown(ep, asoc, type, arg, commands); | ||
4092 | } | ||
4093 | |||
4094 | /* | ||
4095 | * sctp_sf_cookie_wait_prm_abort | ||
4096 | * | ||
4097 | * Section: 4 Note: 2 | ||
4098 | * Verification Tag: | ||
4099 | * Inputs | ||
4100 | * (endpoint, asoc) | ||
4101 | * | ||
4102 | * The RFC does not explicitly address this issue, but this is the route through | ||
4103 | * the state table when someone issues an abort while in the COOKIE_WAIT state. | ||
4104 | * | ||
4105 | * Outputs | ||
4106 | * (timers) | ||
4107 | */ | ||
4108 | sctp_disposition_t sctp_sf_cookie_wait_prm_abort( | ||
4109 | const struct sctp_endpoint *ep, | ||
4110 | const struct sctp_association *asoc, | ||
4111 | const sctp_subtype_t type, | ||
4112 | void *arg, | ||
4113 | sctp_cmd_seq_t *commands) | ||
4114 | { | ||
4115 | struct msghdr *msg = arg; | ||
4116 | struct sctp_chunk *abort; | ||
4117 | sctp_disposition_t retval; | ||
4118 | |||
4119 | /* Stop T1-init timer */ | ||
4120 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | ||
4121 | SCTP_TO(SCTP_EVENT_TIMEOUT_T1_INIT)); | ||
4122 | retval = SCTP_DISPOSITION_CONSUME; | ||
4123 | |||
4124 | /* Generate ABORT chunk to send to the peer. */ | ||
4125 | abort = sctp_make_abort_user(asoc, NULL, msg); | ||
4126 | if (!abort) | ||
4127 | retval = SCTP_DISPOSITION_NOMEM; | ||
4128 | else | ||
4129 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(abort)); | ||
4130 | |||
4131 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
4132 | SCTP_STATE(SCTP_STATE_CLOSED)); | ||
4133 | |||
4134 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | ||
4135 | |||
4136 | /* Even if we can't send the ABORT due to low memory, delete the | ||
4137 | * TCB. This is a departure from our typical NOMEM handling. | ||
4138 | */ | ||
4139 | |||
4140 | /* Delete the association being established. */ | ||
4141 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, | ||
4142 | SCTP_U32(SCTP_ERROR_USER_ABORT)); | ||
4143 | |||
4144 | return retval; | ||
4145 | } | ||
4146 | |||
4147 | /* | ||
4148 | * sctp_sf_cookie_echoed_prm_abort | ||
4149 | * | ||
4150 | * Section: 4 Note: 3 | ||
4151 | * Verification Tag: | ||
4152 | * Inputs | ||
4153 | * (endpoint, asoc) | ||
4154 | * | ||
4155 | * The RFC does not explicitly address this issue, but this is the route through | ||
4156 | * the state table when someone issues an abort while in the COOKIE_ECHOED state. | ||
4157 | * | ||
4158 | * Outputs | ||
4159 | * (timers) | ||
4160 | */ | ||
4161 | sctp_disposition_t sctp_sf_cookie_echoed_prm_abort( | ||
4162 | const struct sctp_endpoint *ep, | ||
4163 | const struct sctp_association *asoc, | ||
4164 | const sctp_subtype_t type, | ||
4165 | void *arg, | ||
4166 | sctp_cmd_seq_t *commands) | ||
4167 | { | ||
4168 | /* There is a single T1 timer, so we should be able to use | ||
4169 | * the common function with the COOKIE-WAIT state. | ||
4170 | */ | ||
4171 | return sctp_sf_cookie_wait_prm_abort(ep, asoc, type, arg, commands); | ||
4172 | } | ||
4173 | |||
4174 | /* | ||
4175 | * sctp_sf_shutdown_pending_prm_abort | ||
4176 | * | ||
4177 | * Inputs | ||
4178 | * (endpoint, asoc) | ||
4179 | * | ||
4180 | * The RFC does not explicitly address this issue, but this is the route through | ||
4181 | * the state table when someone issues an abort while in the SHUTDOWN-PENDING state. | ||
4182 | * | ||
4183 | * Outputs | ||
4184 | * (timers) | ||
4185 | */ | ||
4186 | sctp_disposition_t sctp_sf_shutdown_pending_prm_abort( | ||
4187 | const struct sctp_endpoint *ep, | ||
4188 | const struct sctp_association *asoc, | ||
4189 | const sctp_subtype_t type, | ||
4190 | void *arg, | ||
4191 | sctp_cmd_seq_t *commands) | ||
4192 | { | ||
4193 | /* Stop the T5-shutdown guard timer. */ | ||
4194 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | ||
4195 | SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); | ||
4196 | |||
4197 | return sctp_sf_do_9_1_prm_abort(ep, asoc, type, arg, commands); | ||
4198 | } | ||
4199 | |||
4200 | /* | ||
4201 | * sctp_sf_shutdown_sent_prm_abort | ||
4202 | * | ||
4203 | * Inputs | ||
4204 | * (endpoint, asoc) | ||
4205 | * | ||
4206 | * The RFC does not explicitly address this issue, but this is the route through | ||
4207 | * the state table when someone issues an abort while in the SHUTDOWN-SENT state. | ||
4208 | * | ||
4209 | * Outputs | ||
4210 | * (timers) | ||
4211 | */ | ||
4212 | sctp_disposition_t sctp_sf_shutdown_sent_prm_abort( | ||
4213 | const struct sctp_endpoint *ep, | ||
4214 | const struct sctp_association *asoc, | ||
4215 | const sctp_subtype_t type, | ||
4216 | void *arg, | ||
4217 | sctp_cmd_seq_t *commands) | ||
4218 | { | ||
4219 | /* Stop the T2-shutdown timer. */ | ||
4220 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | ||
4221 | SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); | ||
4222 | |||
4223 | /* Stop the T5-shutdown guard timer. */ | ||
4224 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | ||
4225 | SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); | ||
4226 | |||
4227 | return sctp_sf_do_9_1_prm_abort(ep, asoc, type, arg, commands); | ||
4228 | } | ||
4229 | |||
4230 | /* | ||
4231 | * sctp_sf_shutdown_ack_sent_prm_abort | ||
4232 | * | ||
4233 | * Inputs | ||
4234 | * (endpoint, asoc) | ||
4235 | * | ||
4236 | * The RFC does not explicitly address this issue, but this is the route through | ||
4237 | * the state table when someone issues an abort while in the SHUTDOWN-ACK-SENT state. | ||
4238 | * | ||
4239 | * Outputs | ||
4240 | * (timers) | ||
4241 | */ | ||
4242 | sctp_disposition_t sctp_sf_shutdown_ack_sent_prm_abort( | ||
4243 | const struct sctp_endpoint *ep, | ||
4244 | const struct sctp_association *asoc, | ||
4245 | const sctp_subtype_t type, | ||
4246 | void *arg, | ||
4247 | sctp_cmd_seq_t *commands) | ||
4248 | { | ||
4249 | /* It is the same T2 timer, so we should be able to use the | ||
4250 | * common function with the SHUTDOWN-SENT state. | ||
4251 | */ | ||
4252 | return sctp_sf_shutdown_sent_prm_abort(ep, asoc, type, arg, commands); | ||
4253 | } | ||
4254 | |||
4255 | /* | ||
4256 | * Process the REQUESTHEARTBEAT primitive | ||
4257 | * | ||
4258 | * 10.1 ULP-to-SCTP | ||
4259 | * J) Request Heartbeat | ||
4260 | * | ||
4261 | * Format: REQUESTHEARTBEAT(association id, destination transport address) | ||
4262 | * | ||
4263 | * -> result | ||
4264 | * | ||
4265 | * Instructs the local endpoint to perform a HeartBeat on the specified | ||
4266 | * destination transport address of the given association. The returned | ||
4267 | * result should indicate whether the transmission of the HEARTBEAT | ||
4268 | * chunk to the destination address is successful. | ||
4269 | * | ||
4270 | * Mandatory attributes: | ||
4271 | * | ||
4272 | * o association id - local handle to the SCTP association | ||
4273 | * | ||
4274 | * o destination transport address - the transport address of the | ||
4275 | * association on which a heartbeat should be issued. | ||
4276 | */ | ||
4277 | sctp_disposition_t sctp_sf_do_prm_requestheartbeat( | ||
4278 | const struct sctp_endpoint *ep, | ||
4279 | const struct sctp_association *asoc, | ||
4280 | const sctp_subtype_t type, | ||
4281 | void *arg, | ||
4282 | sctp_cmd_seq_t *commands) | ||
4283 | { | ||
4284 | return sctp_sf_heartbeat(ep, asoc, type, (struct sctp_transport *)arg, | ||
4285 | commands); | ||
4286 | } | ||
4287 | |||
4288 | /* | ||
4289 | * ADDIP Section 4.1 ASCONF Chunk Procedures | ||
4290 | * When an endpoint has an ASCONF signaled change to be sent to the | ||
4291 | * remote endpoint, it should do A1 to A9. | ||
4292 | */ | ||
4293 | sctp_disposition_t sctp_sf_do_prm_asconf(const struct sctp_endpoint *ep, | ||
4294 | const struct sctp_association *asoc, | ||
4295 | const sctp_subtype_t type, | ||
4296 | void *arg, | ||
4297 | sctp_cmd_seq_t *commands) | ||
4298 | { | ||
4299 | struct sctp_chunk *chunk = arg; | ||
4300 | |||
4301 | sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T4, SCTP_CHUNK(chunk)); | ||
4302 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, | ||
4303 | SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); | ||
4304 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(chunk)); | ||
4305 | return SCTP_DISPOSITION_CONSUME; | ||
4306 | } | ||
4307 | |||
4308 | /* | ||
4309 | * Ignore the primitive event | ||
4310 | * | ||
4311 | * The return value is the disposition of the primitive. | ||
4312 | */ | ||
4313 | sctp_disposition_t sctp_sf_ignore_primitive( | ||
4314 | const struct sctp_endpoint *ep, | ||
4315 | const struct sctp_association *asoc, | ||
4316 | const sctp_subtype_t type, | ||
4317 | void *arg, | ||
4318 | sctp_cmd_seq_t *commands) | ||
4319 | { | ||
4320 | SCTP_DEBUG_PRINTK("Primitive type %d is ignored.\n", type.primitive); | ||
4321 | return SCTP_DISPOSITION_DISCARD; | ||
4322 | } | ||
4323 | |||
4324 | /*************************************************************************** | ||
4325 | * These are the state functions for the OTHER events. | ||
4326 | ***************************************************************************/ | ||
4327 | |||
4328 | /* | ||
4329 | * Start the shutdown negotiation. | ||
4330 | * | ||
4331 | * From Section 9.2: | ||
4332 | * Once all its outstanding data has been acknowledged, the endpoint | ||
4333 | * shall send a SHUTDOWN chunk to its peer including in the Cumulative | ||
4334 | * TSN Ack field the last sequential TSN it has received from the peer. | ||
4335 | * It shall then start the T2-shutdown timer and enter the SHUTDOWN-SENT | ||
4336 | * state. If the timer expires, the endpoint must re-send the SHUTDOWN | ||
4337 | * with the updated last sequential TSN received from its peer. | ||
4338 | * | ||
4339 | * The return value is the disposition. | ||
4340 | */ | ||
4341 | sctp_disposition_t sctp_sf_do_9_2_start_shutdown( | ||
4342 | const struct sctp_endpoint *ep, | ||
4343 | const struct sctp_association *asoc, | ||
4344 | const sctp_subtype_t type, | ||
4345 | void *arg, | ||
4346 | sctp_cmd_seq_t *commands) | ||
4347 | { | ||
4348 | struct sctp_chunk *reply; | ||
4349 | |||
4350 | /* Once all its outstanding data has been acknowledged, the | ||
4351 | * endpoint shall send a SHUTDOWN chunk to its peer including | ||
4352 | * in the Cumulative TSN Ack field the last sequential TSN it | ||
4353 | * has received from the peer. | ||
4354 | */ | ||
4355 | reply = sctp_make_shutdown(asoc, NULL); | ||
4356 | if (!reply) | ||
4357 | goto nomem; | ||
4358 | |||
4359 | /* Set the transport for the SHUTDOWN chunk and the timeout for the | ||
4360 | * T2-shutdown timer. | ||
4361 | */ | ||
4362 | sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply)); | ||
4363 | |||
4364 | /* It shall then start the T2-shutdown timer */ | ||
4365 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, | ||
4366 | SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); | ||
4367 | |||
4368 | if (asoc->autoclose) | ||
4369 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | ||
4370 | SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); | ||
4371 | |||
4372 | /* and enter the SHUTDOWN-SENT state. */ | ||
4373 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
4374 | SCTP_STATE(SCTP_STATE_SHUTDOWN_SENT)); | ||
4375 | |||
4376 | /* sctp-implguide 2.10 Issues with Heartbeating and failover | ||
4377 | * | ||
4378 | * HEARTBEAT ... is discontinued after sending either SHUTDOWN | ||
4379 | * or SHUTDOWN-ACK. | ||
4380 | */ | ||
4381 | sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL()); | ||
4382 | |||
4383 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); | ||
4384 | |||
4385 | return SCTP_DISPOSITION_CONSUME; | ||
4386 | |||
4387 | nomem: | ||
4388 | return SCTP_DISPOSITION_NOMEM; | ||
4389 | } | ||
4390 | |||
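/*
 * Editor's sketch: the Section 9.2 text above says the SHUTDOWN chunk carries
 * the last sequential TSN received from the peer in its Cumulative TSN Ack
 * field.  The standalone userspace snippet below shows the RFC 2960
 * Section 3.3.8 wire layout that sctp_make_shutdown() has to fill in; the
 * struct and helper names are illustrative only and not part of the kernel
 * sources.
 */
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

struct shutdown_chunk_wire {
	uint8_t  type;		/* 7 = SHUTDOWN */
	uint8_t  flags;		/* no flags defined; zero */
	uint16_t length;	/* always 8 for a SHUTDOWN chunk */
	uint32_t cum_tsn_ack;	/* last sequential TSN received from the peer */
};

static void fill_shutdown(struct shutdown_chunk_wire *c, uint32_t ctsn)
{
	memset(c, 0, sizeof(*c));
	c->type = 7;
	c->length = htons(sizeof(*c));
	c->cum_tsn_ack = htonl(ctsn);	/* network byte order on the wire */
}
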
4391 | /* | ||
4392 | * Generate a SHUTDOWN ACK now that everything is SACK'd. | ||
4393 | * | ||
4394 | * From Section 9.2: | ||
4395 | * | ||
4396 | * If it has no more outstanding DATA chunks, the SHUTDOWN receiver | ||
4397 | * shall send a SHUTDOWN ACK and start a T2-shutdown timer of its own, | ||
4398 | * entering the SHUTDOWN-ACK-SENT state. If the timer expires, the | ||
4399 | * endpoint must re-send the SHUTDOWN ACK. | ||
4400 | * | ||
4401 | * The return value is the disposition. | ||
4402 | */ | ||
4403 | sctp_disposition_t sctp_sf_do_9_2_shutdown_ack( | ||
4404 | const struct sctp_endpoint *ep, | ||
4405 | const struct sctp_association *asoc, | ||
4406 | const sctp_subtype_t type, | ||
4407 | void *arg, | ||
4408 | sctp_cmd_seq_t *commands) | ||
4409 | { | ||
4410 | struct sctp_chunk *chunk = (struct sctp_chunk *) arg; | ||
4411 | struct sctp_chunk *reply; | ||
4412 | |||
4413 | /* There are 2 ways of getting here: | ||
4414 | * 1) called in response to a SHUTDOWN chunk | ||
4415 | * 2) called when SCTP_EVENT_NO_PENDING_TSN event is issued. | ||
4416 | * | ||
4417 | * For case (2), the arg parameter is set to NULL. We need | ||
4418 | * to check that we have a chunk before accessing its fields. | ||
4419 | */ | ||
4420 | if (chunk) { | ||
4421 | if (!sctp_vtag_verify(chunk, asoc)) | ||
4422 | return sctp_sf_pdiscard(ep, asoc, type, arg, commands); | ||
4423 | |||
4424 | /* Make sure that the SHUTDOWN chunk has a valid length. */ | ||
4425 | if (!sctp_chunk_length_valid(chunk, sizeof(struct sctp_shutdown_chunk_t))) | ||
4426 | return sctp_sf_violation_chunklen(ep, asoc, type, arg, | ||
4427 | commands); | ||
4428 | } | ||
4429 | |||
4430 | /* If it has no more outstanding DATA chunks, the SHUTDOWN receiver | ||
4431 | * shall send a SHUTDOWN ACK ... | ||
4432 | */ | ||
4433 | reply = sctp_make_shutdown_ack(asoc, chunk); | ||
4434 | if (!reply) | ||
4435 | goto nomem; | ||
4436 | |||
4437 | /* Set the transport for the SHUTDOWN ACK chunk and the timeout for | ||
4438 | * the T2-shutdown timer. | ||
4439 | */ | ||
4440 | sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply)); | ||
4441 | |||
4442 | /* and start/restart a T2-shutdown timer of its own, */ | ||
4443 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, | ||
4444 | SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); | ||
4445 | |||
4446 | if (asoc->autoclose) | ||
4447 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | ||
4448 | SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE)); | ||
4449 | |||
4450 | /* Enter the SHUTDOWN-ACK-SENT state. */ | ||
4451 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
4452 | SCTP_STATE(SCTP_STATE_SHUTDOWN_ACK_SENT)); | ||
4453 | |||
4454 | /* sctp-implguide 2.10 Issues with Heartbeating and failover | ||
4455 | * | ||
4456 | * HEARTBEAT ... is discontinued after sending either SHUTDOWN | ||
4457 | * or SHUTDOWN-ACK. | ||
4458 | */ | ||
4459 | sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_STOP, SCTP_NULL()); | ||
4460 | |||
4461 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); | ||
4462 | |||
4463 | return SCTP_DISPOSITION_CONSUME; | ||
4464 | |||
4465 | nomem: | ||
4466 | return SCTP_DISPOSITION_NOMEM; | ||
4467 | } | ||
4468 | |||
4469 | /* | ||
4470 | * Ignore the event defined as other | ||
4471 | * | ||
4472 | * The return value is the disposition of the event. | ||
4473 | */ | ||
4474 | sctp_disposition_t sctp_sf_ignore_other(const struct sctp_endpoint *ep, | ||
4475 | const struct sctp_association *asoc, | ||
4476 | const sctp_subtype_t type, | ||
4477 | void *arg, | ||
4478 | sctp_cmd_seq_t *commands) | ||
4479 | { | ||
4480 | SCTP_DEBUG_PRINTK("The event other type %d is ignored\n", type.other); | ||
4481 | return SCTP_DISPOSITION_DISCARD; | ||
4482 | } | ||
4483 | |||
4484 | /************************************************************ | ||
4485 | * These are the state functions for handling timeout events. | ||
4486 | ************************************************************/ | ||
4487 | |||
4488 | /* | ||
4489 | * RTX Timeout | ||
4490 | * | ||
4491 | * Section: 6.3.3 Handle T3-rtx Expiration | ||
4492 | * | ||
4493 | * Whenever the retransmission timer T3-rtx expires for a destination | ||
4494 | * address, do the following: | ||
4495 | * [See below] | ||
4496 | * | ||
4497 | * The return value is the disposition of the chunk. | ||
4498 | */ | ||
4499 | sctp_disposition_t sctp_sf_do_6_3_3_rtx(const struct sctp_endpoint *ep, | ||
4500 | const struct sctp_association *asoc, | ||
4501 | const sctp_subtype_t type, | ||
4502 | void *arg, | ||
4503 | sctp_cmd_seq_t *commands) | ||
4504 | { | ||
4505 | struct sctp_transport *transport = arg; | ||
4506 | |||
4507 | if (asoc->overall_error_count >= asoc->max_retrans) { | ||
4508 | /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ | ||
4509 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | ||
4510 | SCTP_U32(SCTP_ERROR_NO_ERROR)); | ||
4511 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | ||
4512 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | ||
4513 | return SCTP_DISPOSITION_DELETE_TCB; | ||
4514 | } | ||
4515 | |||
4516 | /* E1) For the destination address for which the timer | ||
4517 | * expires, adjust its ssthresh with rules defined in Section | ||
4518 | * 7.2.3 and set the cwnd <- MTU. | ||
4519 | */ | ||
4520 | |||
4521 | /* E2) For the destination address for which the timer | ||
4522 | * expires, set RTO <- RTO * 2 ("back off the timer"). The | ||
4523 | * maximum value discussed in rule C7 above (RTO.max) may be | ||
4524 | * used to provide an upper bound to this doubling operation. | ||
4525 | */ | ||
4526 | |||
4527 | /* E3) Determine how many of the earliest (i.e., lowest TSN) | ||
4528 | * outstanding DATA chunks for the address for which the | ||
4529 | * T3-rtx has expired will fit into a single packet, subject | ||
4530 | * to the MTU constraint for the path corresponding to the | ||
4531 | * destination transport address to which the retransmission | ||
4532 | * is being sent (this may be different from the address for | ||
4533 | * which the timer expires [see Section 6.4]). Call this | ||
4534 | * value K. Bundle and retransmit those K DATA chunks in a | ||
4535 | * single packet to the destination endpoint. | ||
4536 | * | ||
4537 | * Note: Any DATA chunks that were sent to the address for | ||
4538 | * which the T3-rtx timer expired but did not fit in one MTU | ||
4539 | * (rule E3 above), should be marked for retransmission and | ||
4540 | * sent as soon as cwnd allows (normally when a SACK arrives). | ||
4541 | */ | ||
4542 | |||
4543 | /* NB: Rules E4 and F1 are implicit in R1. */ | ||
4544 | sctp_add_cmd_sf(commands, SCTP_CMD_RETRAN, SCTP_TRANSPORT(transport)); | ||
4545 | |||
4546 | /* Do some failure management (Section 8.2). */ | ||
4547 | sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, SCTP_TRANSPORT(transport)); | ||
4548 | |||
4549 | return SCTP_DISPOSITION_CONSUME; | ||
4550 | } | ||
4551 | |||
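/*
 * Editor's sketch: rules E1 and E2 quoted above adjust the path's congestion
 * state and back off the timer.  This standalone userspace helper restates
 * the arithmetic from RFC 2960 Sections 6.3.3 and 7.2.3 (ssthresh =
 * max(cwnd/2, 2*MTU), cwnd = 1*MTU, RTO doubled up to RTO.max); the struct
 * and names are illustrative, not the kernel's sctp_transport fields.
 */
#include <stdint.h>

#define RTO_MAX_MS 60000	/* RTO.max default of 60 seconds (RFC 2960, Section 15) */

struct path_state {
	uint32_t cwnd;		/* congestion window, bytes */
	uint32_t ssthresh;	/* slow start threshold, bytes */
	uint32_t pmtu;		/* path MTU, bytes */
	uint32_t rto;		/* retransmission timeout, ms */
};

static void t3_rtx_backoff(struct path_state *p)
{
	uint32_t min_ssthresh = 2 * p->pmtu;

	/* E1: collapse the window back to slow start (Section 7.2.3). */
	p->ssthresh = (p->cwnd / 2 > min_ssthresh) ? p->cwnd / 2 : min_ssthresh;
	p->cwnd = p->pmtu;

	/* E2: "back off the timer", bounded by RTO.max. */
	p->rto *= 2;
	if (p->rto > RTO_MAX_MS)
		p->rto = RTO_MAX_MS;
}
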
4552 | /* | ||
4553 | * Generate delayed SACK on timeout | ||
4554 | * | ||
4555 | * Section: 6.2 Acknowledgement on Reception of DATA Chunks | ||
4556 | * | ||
4557 | * The guidelines on delayed acknowledgement algorithm specified in | ||
4558 | * Section 4.2 of [RFC2581] SHOULD be followed. Specifically, an | ||
4559 | * acknowledgement SHOULD be generated for at least every second packet | ||
4560 | * (not every second DATA chunk) received, and SHOULD be generated | ||
4561 | * within 200 ms of the arrival of any unacknowledged DATA chunk. In | ||
4562 | * some situations it may be beneficial for an SCTP transmitter to be | ||
4563 | * more conservative than the algorithms detailed in this document | ||
4564 | * allow. However, an SCTP transmitter MUST NOT be more aggressive than | ||
4565 | * the following algorithms allow. | ||
4566 | */ | ||
4567 | sctp_disposition_t sctp_sf_do_6_2_sack(const struct sctp_endpoint *ep, | ||
4568 | const struct sctp_association *asoc, | ||
4569 | const sctp_subtype_t type, | ||
4570 | void *arg, | ||
4571 | sctp_cmd_seq_t *commands) | ||
4572 | { | ||
4573 | sctp_add_cmd_sf(commands, SCTP_CMD_GEN_SACK, SCTP_FORCE()); | ||
4574 | return SCTP_DISPOSITION_CONSUME; | ||
4575 | } | ||
4576 | |||
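/*
 * Editor's sketch: the Section 6.2 guidance quoted above (acknowledge at
 * least every second packet, and within 200 ms of any unacknowledged DATA)
 * can be read as a small predicate.  This is only an illustration of the
 * policy; the kernel drives it through the SACK timer and the
 * SCTP_CMD_GEN_SACK side effect, not through a helper like this.
 */
#include <stdbool.h>
#include <stdint.h>

#define SACK_DELAY_MS 200

struct sack_state {
	unsigned int packets_since_sack;	/* packets carrying DATA since last SACK */
	uint64_t oldest_unacked_ms;		/* arrival time of oldest unacked DATA */
};

static bool should_sack_now(const struct sack_state *s, uint64_t now_ms)
{
	if (s->packets_since_sack >= 2)		/* at least every second packet */
		return true;
	if (s->packets_since_sack &&
	    now_ms - s->oldest_unacked_ms >= SACK_DELAY_MS)
		return true;			/* 200 ms delayed-ack bound */
	return false;
}
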
4577 | /* | ||
4578 | * sctp_sf_t1_timer_expire | ||
4579 | * | ||
4580 | * Section: 4 Note: 2 | ||
4581 | * Verification Tag: | ||
4582 | * Inputs | ||
4583 | * (endpoint, asoc) | ||
4584 | * | ||
4585 | * RFC 2960 Section 4 Notes | ||
4586 | * 2) If the T1-init timer expires, the endpoint MUST retransmit INIT | ||
4587 | * and re-start the T1-init timer without changing state. This MUST | ||
4588 | * be repeated up to 'Max.Init.Retransmits' times. After that, the | ||
4589 | * endpoint MUST abort the initialization process and report the | ||
4590 | * error to SCTP user. | ||
4591 | * | ||
4592 | * 3) If the T1-cookie timer expires, the endpoint MUST retransmit | ||
4593 | * COOKIE ECHO and re-start the T1-cookie timer without changing | ||
4594 | * state. This MUST be repeated up to 'Max.Init.Retransmits' times. | ||
4595 | * After that, the endpoint MUST abort the initialization process and | ||
4596 | * report the error to SCTP user. | ||
4597 | * | ||
4598 | * Outputs | ||
4599 | * (timers, events) | ||
4600 | * | ||
4601 | */ | ||
4602 | sctp_disposition_t sctp_sf_t1_timer_expire(const struct sctp_endpoint *ep, | ||
4603 | const struct sctp_association *asoc, | ||
4604 | const sctp_subtype_t type, | ||
4605 | void *arg, | ||
4606 | sctp_cmd_seq_t *commands) | ||
4607 | { | ||
4608 | struct sctp_chunk *repl; | ||
4609 | struct sctp_bind_addr *bp; | ||
4610 | sctp_event_timeout_t timer = (sctp_event_timeout_t) arg; | ||
4611 | int timeout; | ||
4612 | int attempts; | ||
4613 | |||
4614 | timeout = asoc->timeouts[timer]; | ||
4615 | attempts = asoc->counters[SCTP_COUNTER_INIT_ERROR] + 1; | ||
4616 | repl = NULL; | ||
4617 | |||
4618 | SCTP_DEBUG_PRINTK("Timer T1 expired.\n"); | ||
4619 | |||
4620 | if (attempts < asoc->max_init_attempts) { | ||
4621 | switch (timer) { | ||
4622 | case SCTP_EVENT_TIMEOUT_T1_INIT: | ||
4623 | bp = (struct sctp_bind_addr *) &asoc->base.bind_addr; | ||
4624 | repl = sctp_make_init(asoc, bp, GFP_ATOMIC, 0); | ||
4625 | break; | ||
4626 | |||
4627 | case SCTP_EVENT_TIMEOUT_T1_COOKIE: | ||
4628 | repl = sctp_make_cookie_echo(asoc, NULL); | ||
4629 | break; | ||
4630 | |||
4631 | default: | ||
4632 | BUG(); | ||
4633 | break; | ||
4634 | }; | ||
4635 | |||
4636 | if (!repl) | ||
4637 | goto nomem; | ||
4638 | |||
4639 | /* Issue a sideeffect to do the needed accounting. */ | ||
4640 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_RESTART, | ||
4641 | SCTP_TO(timer)); | ||
4642 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(repl)); | ||
4643 | } else { | ||
4644 | sctp_add_cmd_sf(commands, SCTP_CMD_INIT_FAILED, | ||
4645 | SCTP_U32(SCTP_ERROR_NO_ERROR)); | ||
4646 | return SCTP_DISPOSITION_DELETE_TCB; | ||
4647 | } | ||
4648 | |||
4649 | return SCTP_DISPOSITION_CONSUME; | ||
4650 | |||
4651 | nomem: | ||
4652 | return SCTP_DISPOSITION_NOMEM; | ||
4653 | } | ||
4654 | |||
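/*
 * Editor's sketch: the Section 4 notes quoted above cap the number of
 * INIT/COOKIE ECHO retransmissions at 'Max.Init.Retransmits' before the
 * setup attempt is abandoned.  A standalone restatement of that decision,
 * with illustrative names (the kernel keeps the attempt count in
 * asoc->counters and the limit in asoc->max_init_attempts):
 */
#define MAX_INIT_RETRANS 8	/* suggested default, RFC 2960 Section 15 */

enum t1_action { T1_RETRANSMIT, T1_GIVE_UP };

static enum t1_action on_t1_expire(int attempts)
{
	if (attempts < MAX_INIT_RETRANS)
		return T1_RETRANSMIT;	/* re-send INIT/COOKIE ECHO, restart T1 */
	return T1_GIVE_UP;		/* report the failure to the SCTP user */
}
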
4655 | /* RFC2960 9.2 If the timer expires, the endpoint must re-send the SHUTDOWN | ||
4656 | * with the updated last sequential TSN received from its peer. | ||
4657 | * | ||
4658 | * An endpoint should limit the number of retransmissions of the | ||
4659 | * SHUTDOWN chunk to the protocol parameter 'Association.Max.Retrans'. | ||
4660 | * If this threshold is exceeded the endpoint should destroy the TCB and | ||
4661 | * MUST report the peer endpoint unreachable to the upper layer (and | ||
4662 | * thus the association enters the CLOSED state). The reception of any | ||
4663 | * packet from its peer (i.e. as the peer sends all of its queued DATA | ||
4664 | * chunks) should clear the endpoint's retransmission count and restart | ||
4665 | * the T2-Shutdown timer, giving its peer ample opportunity to transmit | ||
4666 | * all of its queued DATA chunks that have not yet been sent. | ||
4667 | */ | ||
4668 | sctp_disposition_t sctp_sf_t2_timer_expire(const struct sctp_endpoint *ep, | ||
4669 | const struct sctp_association *asoc, | ||
4670 | const sctp_subtype_t type, | ||
4671 | void *arg, | ||
4672 | sctp_cmd_seq_t *commands) | ||
4673 | { | ||
4674 | struct sctp_chunk *reply = NULL; | ||
4675 | |||
4676 | SCTP_DEBUG_PRINTK("Timer T2 expired.\n"); | ||
4677 | if (asoc->overall_error_count >= asoc->max_retrans) { | ||
4678 | /* Note: CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */ | ||
4679 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | ||
4680 | SCTP_U32(SCTP_ERROR_NO_ERROR)); | ||
4681 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | ||
4682 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | ||
4683 | return SCTP_DISPOSITION_DELETE_TCB; | ||
4684 | } | ||
4685 | |||
4686 | switch (asoc->state) { | ||
4687 | case SCTP_STATE_SHUTDOWN_SENT: | ||
4688 | reply = sctp_make_shutdown(asoc, NULL); | ||
4689 | break; | ||
4690 | |||
4691 | case SCTP_STATE_SHUTDOWN_ACK_SENT: | ||
4692 | reply = sctp_make_shutdown_ack(asoc, NULL); | ||
4693 | break; | ||
4694 | |||
4695 | default: | ||
4696 | BUG(); | ||
4697 | break; | ||
4698 | }; | ||
4699 | |||
4700 | if (!reply) | ||
4701 | goto nomem; | ||
4702 | |||
4703 | /* Do some failure management (Section 8.2). */ | ||
4704 | sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, | ||
4705 | SCTP_TRANSPORT(asoc->shutdown_last_sent_to)); | ||
4706 | |||
4707 | /* Set the transport for the SHUTDOWN/ACK chunk and the timeout for | ||
4708 | * the T2-shutdown timer. | ||
4709 | */ | ||
4710 | sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T2, SCTP_CHUNK(reply)); | ||
4711 | |||
4712 | /* Restart the T2-shutdown timer. */ | ||
4713 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, | ||
4714 | SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN)); | ||
4715 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); | ||
4716 | return SCTP_DISPOSITION_CONSUME; | ||
4717 | |||
4718 | nomem: | ||
4719 | return SCTP_DISPOSITION_NOMEM; | ||
4720 | } | ||
4721 | |||
4722 | /* | ||
4723 | * ADDIP Section 4.1 ASCONF Chunk Procedures | ||
4724 | * If the T4 RTO timer expires, the endpoint should do B1 to B5. | ||
4725 | */ | ||
4726 | sctp_disposition_t sctp_sf_t4_timer_expire( | ||
4727 | const struct sctp_endpoint *ep, | ||
4728 | const struct sctp_association *asoc, | ||
4729 | const sctp_subtype_t type, | ||
4730 | void *arg, | ||
4731 | sctp_cmd_seq_t *commands) | ||
4732 | { | ||
4733 | struct sctp_chunk *chunk = asoc->addip_last_asconf; | ||
4734 | struct sctp_transport *transport = chunk->transport; | ||
4735 | |||
4736 | /* ADDIP 4.1 B1) Increment the error counters and perform path failure | ||
4737 | * detection on the appropriate destination address as defined in | ||
4738 | * RFC2960 [5] sections 8.1 and 8.2. | ||
4739 | */ | ||
4740 | sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, SCTP_TRANSPORT(transport)); | ||
4741 | |||
4742 | /* Reconfig T4 timer and transport. */ | ||
4743 | sctp_add_cmd_sf(commands, SCTP_CMD_SETUP_T4, SCTP_CHUNK(chunk)); | ||
4744 | |||
4745 | /* ADDIP 4.1 B2) Increment the association error counters and perform | ||
4746 | * endpoint failure detection on the association as defined in | ||
4747 | * RFC2960 [5] sections 8.1 and 8.2. The | ||
4748 | * association error counter is incremented in SCTP_CMD_STRIKE. | ||
4749 | */ | ||
4750 | if (asoc->overall_error_count >= asoc->max_retrans) { | ||
4751 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | ||
4752 | SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); | ||
4753 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | ||
4754 | SCTP_U32(SCTP_ERROR_NO_ERROR)); | ||
4755 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | ||
4756 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | ||
4757 | return SCTP_DISPOSITION_ABORT; | ||
4758 | } | ||
4759 | |||
4760 | /* ADDIP 4.1 B3) Back off the RTO value of the destination address to | ||
4761 | * which the ASCONF chunk was sent, by doubling the RTO timer value. | ||
4762 | * This is done in SCTP_CMD_STRIKE. | ||
4763 | */ | ||
4764 | |||
4765 | /* ADDIP 4.1 B4) Re-transmit the ASCONF Chunk last sent and if possible | ||
4766 | * choose an alternate destination address (please refer to RFC2960 | ||
4767 | * [5] section 6.4.1). An endpoint MUST NOT add new parameters to this | ||
4768 | * chunk; it MUST be the same (including its serial number) as the last | ||
4769 | * ASCONF sent. | ||
4770 | */ | ||
4771 | sctp_chunk_hold(asoc->addip_last_asconf); | ||
4772 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | ||
4773 | SCTP_CHUNK(asoc->addip_last_asconf)); | ||
4774 | |||
4775 | /* ADDIP 4.1 B5) Restart the T-4 RTO timer. Note that if a different | ||
4776 | * destination is selected, then the RTO used will be that of the new | ||
4777 | * destination address. | ||
4778 | */ | ||
4779 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART, | ||
4780 | SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); | ||
4781 | |||
4782 | return SCTP_DISPOSITION_CONSUME; | ||
4783 | } | ||
4784 | |||
4785 | /* sctpimpguide-05 Section 2.12.2 | ||
4786 | * The sender of the SHUTDOWN MAY also start an overall guard timer | ||
4787 | * 'T5-shutdown-guard' to bound the overall time for shutdown sequence. | ||
4788 | * At the expiration of this timer the sender SHOULD abort the association | ||
4789 | * by sending an ABORT chunk. | ||
4790 | */ | ||
4791 | sctp_disposition_t sctp_sf_t5_timer_expire(const struct sctp_endpoint *ep, | ||
4792 | const struct sctp_association *asoc, | ||
4793 | const sctp_subtype_t type, | ||
4794 | void *arg, | ||
4795 | sctp_cmd_seq_t *commands) | ||
4796 | { | ||
4797 | struct sctp_chunk *reply = NULL; | ||
4798 | |||
4799 | SCTP_DEBUG_PRINTK("Timer T5 expired.\n"); | ||
4800 | |||
4801 | reply = sctp_make_abort(asoc, NULL, 0); | ||
4802 | if (!reply) | ||
4803 | goto nomem; | ||
4804 | |||
4805 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(reply)); | ||
4806 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | ||
4807 | SCTP_U32(SCTP_ERROR_NO_ERROR)); | ||
4808 | |||
4809 | return SCTP_DISPOSITION_DELETE_TCB; | ||
4810 | nomem: | ||
4811 | return SCTP_DISPOSITION_NOMEM; | ||
4812 | } | ||
4813 | |||
4814 | /* Handle expiration of AUTOCLOSE timer. When the autoclose timer expires, | ||
4815 | * the association is automatically closed by starting the shutdown process. | ||
4816 | * The work that needs to be done is the same as when SHUTDOWN is initiated by | ||
4817 | * the user, so this routine looks the same as sctp_sf_do_9_2_prm_shutdown(). | ||
4818 | */ | ||
4819 | sctp_disposition_t sctp_sf_autoclose_timer_expire( | ||
4820 | const struct sctp_endpoint *ep, | ||
4821 | const struct sctp_association *asoc, | ||
4822 | const sctp_subtype_t type, | ||
4823 | void *arg, | ||
4824 | sctp_cmd_seq_t *commands) | ||
4825 | { | ||
4826 | int disposition; | ||
4827 | |||
4828 | /* From 9.2 Shutdown of an Association | ||
4829 | * Upon receipt of the SHUTDOWN primitive from its upper | ||
4830 | * layer, the endpoint enters SHUTDOWN-PENDING state and | ||
4831 | * remains there until all outstanding data has been | ||
4832 | * acknowledged by its peer. The endpoint accepts no new data | ||
4833 | * from its upper layer, but retransmits data to the far end | ||
4834 | * if necessary to fill gaps. | ||
4835 | */ | ||
4836 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
4837 | SCTP_STATE(SCTP_STATE_SHUTDOWN_PENDING)); | ||
4838 | |||
4839 | /* sctpimpguide-05 Section 2.12.2 | ||
4840 | * The sender of the SHUTDOWN MAY also start an overall guard timer | ||
4841 | * 'T5-shutdown-guard' to bound the overall time for shutdown sequence. | ||
4842 | */ | ||
4843 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START, | ||
4844 | SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD)); | ||
4845 | disposition = SCTP_DISPOSITION_CONSUME; | ||
4846 | if (sctp_outq_is_empty(&asoc->outqueue)) { | ||
4847 | disposition = sctp_sf_do_9_2_start_shutdown(ep, asoc, type, | ||
4848 | arg, commands); | ||
4849 | } | ||
4850 | return disposition; | ||
4851 | } | ||
4852 | |||
4853 | /***************************************************************************** | ||
4854 | * These are the state functions which could apply to all types of events. | ||
4855 | ****************************************************************************/ | ||
4856 | |||
4857 | /* | ||
4858 | * This table entry is not implemented. | ||
4859 | * | ||
4860 | * Inputs | ||
4861 | * (endpoint, asoc, chunk) | ||
4862 | * | ||
4863 | * The return value is the disposition of the chunk. | ||
4864 | */ | ||
4865 | sctp_disposition_t sctp_sf_not_impl(const struct sctp_endpoint *ep, | ||
4866 | const struct sctp_association *asoc, | ||
4867 | const sctp_subtype_t type, | ||
4868 | void *arg, | ||
4869 | sctp_cmd_seq_t *commands) | ||
4870 | { | ||
4871 | return SCTP_DISPOSITION_NOT_IMPL; | ||
4872 | } | ||
4873 | |||
4874 | /* | ||
4875 | * This table entry represents a bug. | ||
4876 | * | ||
4877 | * Inputs | ||
4878 | * (endpoint, asoc, chunk) | ||
4879 | * | ||
4880 | * The return value is the disposition of the chunk. | ||
4881 | */ | ||
4882 | sctp_disposition_t sctp_sf_bug(const struct sctp_endpoint *ep, | ||
4883 | const struct sctp_association *asoc, | ||
4884 | const sctp_subtype_t type, | ||
4885 | void *arg, | ||
4886 | sctp_cmd_seq_t *commands) | ||
4887 | { | ||
4888 | return SCTP_DISPOSITION_BUG; | ||
4889 | } | ||
4890 | |||
4891 | /* | ||
4892 | * This table entry represents the firing of a timer in the wrong state. | ||
4893 | * Since timer deletion cannot be guaranteed, a timer 'may' end up firing | ||
4894 | * when the association is in the wrong state. This event should | ||
4895 | * be ignored, so as to prevent any rearming of the timer. | ||
4896 | * | ||
4897 | * Inputs | ||
4898 | * (endpoint, asoc, chunk) | ||
4899 | * | ||
4900 | * The return value is the disposition of the chunk. | ||
4901 | */ | ||
4902 | sctp_disposition_t sctp_sf_timer_ignore(const struct sctp_endpoint *ep, | ||
4903 | const struct sctp_association *asoc, | ||
4904 | const sctp_subtype_t type, | ||
4905 | void *arg, | ||
4906 | sctp_cmd_seq_t *commands) | ||
4907 | { | ||
4908 | SCTP_DEBUG_PRINTK("Timer %d ignored.\n", type.chunk); | ||
4909 | return SCTP_DISPOSITION_CONSUME; | ||
4910 | } | ||
4911 | |||
4912 | /******************************************************************** | ||
4913 | * 2nd Level Abstractions | ||
4914 | ********************************************************************/ | ||
4915 | |||
4916 | /* Pull the SACK header out of the chunk, validating its length. */ | ||
4917 | static struct sctp_sackhdr *sctp_sm_pull_sack(struct sctp_chunk *chunk) | ||
4918 | { | ||
4919 | struct sctp_sackhdr *sack; | ||
4920 | unsigned int len; | ||
4921 | __u16 num_blocks; | ||
4922 | __u16 num_dup_tsns; | ||
4923 | |||
4924 | /* Protect ourselves from reading too far into | ||
4925 | * the skb from a bogus sender. | ||
4926 | */ | ||
4927 | sack = (struct sctp_sackhdr *) chunk->skb->data; | ||
4928 | |||
4929 | num_blocks = ntohs(sack->num_gap_ack_blocks); | ||
4930 | num_dup_tsns = ntohs(sack->num_dup_tsns); | ||
4931 | len = sizeof(struct sctp_sackhdr); | ||
4932 | len += (num_blocks + num_dup_tsns) * sizeof(__u32); | ||
4933 | if (len > chunk->skb->len) | ||
4934 | return NULL; | ||
4935 | |||
4936 | skb_pull(chunk->skb, len); | ||
4937 | |||
4938 | return sack; | ||
4939 | } | ||
4940 | |||
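/*
 * Editor's sketch: sctp_sm_pull_sack() above guards against a bogus sender
 * by recomputing the SACK length from its advertised counts (each gap-ack
 * block and each duplicate TSN is 4 bytes) and rejecting anything that runs
 * past the received buffer.  A self-contained userspace version of the same
 * check; the struct and function names are illustrative.
 */
#include <stddef.h>
#include <stdint.h>
#include <arpa/inet.h>

struct sack_wire {
	uint32_t cum_tsn_ack;
	uint32_t a_rwnd;
	uint16_t num_gap_ack_blocks;
	uint16_t num_dup_tsns;
	/* followed by the gap-ack blocks and duplicate TSNs, 4 bytes each */
};

static const struct sack_wire *pull_sack(const void *buf, size_t buflen)
{
	const struct sack_wire *sack = buf;
	size_t len;

	if (buflen < sizeof(*sack))
		return NULL;

	len = sizeof(*sack);
	len += ((size_t)ntohs(sack->num_gap_ack_blocks) +
		ntohs(sack->num_dup_tsns)) * sizeof(uint32_t);

	/* Advertised counts must not claim more data than was received. */
	return len > buflen ? NULL : sack;
}
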
4941 | /* Create an ABORT packet to be sent as a response, with the specified | ||
4942 | * error causes. | ||
4943 | */ | ||
4944 | static struct sctp_packet *sctp_abort_pkt_new(const struct sctp_endpoint *ep, | ||
4945 | const struct sctp_association *asoc, | ||
4946 | struct sctp_chunk *chunk, | ||
4947 | const void *payload, | ||
4948 | size_t paylen) | ||
4949 | { | ||
4950 | struct sctp_packet *packet; | ||
4951 | struct sctp_chunk *abort; | ||
4952 | |||
4953 | packet = sctp_ootb_pkt_new(asoc, chunk); | ||
4954 | |||
4955 | if (packet) { | ||
4956 | /* Make an ABORT. | ||
4957 | * The T bit will be set if the asoc is NULL. | ||
4958 | */ | ||
4959 | abort = sctp_make_abort(asoc, chunk, paylen); | ||
4960 | if (!abort) { | ||
4961 | sctp_ootb_pkt_free(packet); | ||
4962 | return NULL; | ||
4963 | } | ||
4964 | /* Add specified error causes, i.e., payload, to the | ||
4965 | * end of the chunk. | ||
4966 | */ | ||
4967 | sctp_addto_chunk(abort, paylen, payload); | ||
4968 | |||
4969 | /* Associate the skb with its owning sock for accounting. */ | ||
4970 | abort->skb->sk = ep->base.sk; | ||
4971 | |||
4972 | sctp_packet_append_chunk(packet, abort); | ||
4973 | |||
4974 | } | ||
4975 | |||
4976 | return packet; | ||
4977 | } | ||
4978 | |||
4979 | /* Allocate a packet for responding in the OOTB conditions. */ | ||
4980 | static struct sctp_packet *sctp_ootb_pkt_new(const struct sctp_association *asoc, | ||
4981 | const struct sctp_chunk *chunk) | ||
4982 | { | ||
4983 | struct sctp_packet *packet; | ||
4984 | struct sctp_transport *transport; | ||
4985 | __u16 sport; | ||
4986 | __u16 dport; | ||
4987 | __u32 vtag; | ||
4988 | |||
4989 | /* Get the source and destination port from the inbound packet. */ | ||
4990 | sport = ntohs(chunk->sctp_hdr->dest); | ||
4991 | dport = ntohs(chunk->sctp_hdr->source); | ||
4992 | |||
4993 | /* The V-tag is going to be the same as the inbound packet's if no | ||
4994 | * association exists; otherwise, use the peer's vtag. | ||
4995 | */ | ||
4996 | if (asoc) { | ||
4997 | vtag = asoc->peer.i.init_tag; | ||
4998 | } else { | ||
4999 | /* Special case the INIT and stale COOKIE_ECHO as there is no | ||
5000 | * vtag yet. | ||
5001 | */ | ||
5002 | switch(chunk->chunk_hdr->type) { | ||
5003 | case SCTP_CID_INIT: | ||
5004 | { | ||
5005 | sctp_init_chunk_t *init; | ||
5006 | |||
5007 | init = (sctp_init_chunk_t *)chunk->chunk_hdr; | ||
5008 | vtag = ntohl(init->init_hdr.init_tag); | ||
5009 | break; | ||
5010 | } | ||
5011 | default: | ||
5012 | vtag = ntohl(chunk->sctp_hdr->vtag); | ||
5013 | break; | ||
5014 | } | ||
5015 | } | ||
5016 | |||
5017 | /* Make a transport for the bucket, Eliza... */ | ||
5018 | transport = sctp_transport_new(sctp_source(chunk), GFP_ATOMIC); | ||
5019 | if (!transport) | ||
5020 | goto nomem; | ||
5021 | |||
5022 | /* Cache a route for the transport with the chunk's destination as | ||
5023 | * the source address. | ||
5024 | */ | ||
5025 | sctp_transport_route(transport, (union sctp_addr *)&chunk->dest, | ||
5026 | sctp_sk(sctp_get_ctl_sock())); | ||
5027 | |||
5028 | packet = sctp_packet_init(&transport->packet, transport, sport, dport); | ||
5029 | packet = sctp_packet_config(packet, vtag, 0); | ||
5030 | |||
5031 | return packet; | ||
5032 | |||
5033 | nomem: | ||
5034 | return NULL; | ||
5035 | } | ||
5036 | |||
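/*
 * Editor's sketch: the vtag selection in sctp_ootb_pkt_new() above boils
 * down to three cases.  A distilled userspace restatement with illustrative
 * parameter names:
 */
#include <stdint.h>

static uint32_t pick_reply_vtag(int have_asoc, uint32_t peer_vtag,
				int chunk_is_init, uint32_t init_tag,
				uint32_t inbound_vtag)
{
	if (have_asoc)
		return peer_vtag;	/* association exists: use the peer's vtag */
	if (chunk_is_init)
		return init_tag;	/* an OOTB INIT arrives with vtag 0 */
	return inbound_vtag;		/* e.g. a stale COOKIE ECHO: reflect it */
}
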
5037 | /* Free the packet allocated earlier for responding in the OOTB condition. */ | ||
5038 | void sctp_ootb_pkt_free(struct sctp_packet *packet) | ||
5039 | { | ||
5040 | sctp_transport_free(packet->transport); | ||
5041 | } | ||
5042 | |||
5043 | /* Send a stale cookie error when an invalid COOKIE ECHO chunk is found */ | ||
5044 | static void sctp_send_stale_cookie_err(const struct sctp_endpoint *ep, | ||
5045 | const struct sctp_association *asoc, | ||
5046 | const struct sctp_chunk *chunk, | ||
5047 | sctp_cmd_seq_t *commands, | ||
5048 | struct sctp_chunk *err_chunk) | ||
5049 | { | ||
5050 | struct sctp_packet *packet; | ||
5051 | |||
5052 | if (err_chunk) { | ||
5053 | packet = sctp_ootb_pkt_new(asoc, chunk); | ||
5054 | if (packet) { | ||
5055 | struct sctp_signed_cookie *cookie; | ||
5056 | |||
5057 | /* Override the OOTB vtag from the cookie. */ | ||
5058 | cookie = chunk->subh.cookie_hdr; | ||
5059 | packet->vtag = cookie->c.peer_vtag; | ||
5060 | |||
5061 | /* Associate the skb with its owning sock for accounting. */ | ||
5062 | err_chunk->skb->sk = ep->base.sk; | ||
5063 | sctp_packet_append_chunk(packet, err_chunk); | ||
5064 | sctp_add_cmd_sf(commands, SCTP_CMD_SEND_PKT, | ||
5065 | SCTP_PACKET(packet)); | ||
5066 | SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS); | ||
5067 | } else | ||
5068 | sctp_chunk_free (err_chunk); | ||
5069 | } | ||
5070 | } | ||
5071 | |||
5072 | |||
5073 | /* Process a data chunk */ | ||
5074 | static int sctp_eat_data(const struct sctp_association *asoc, | ||
5075 | struct sctp_chunk *chunk, | ||
5076 | sctp_cmd_seq_t *commands) | ||
5077 | { | ||
5078 | sctp_datahdr_t *data_hdr; | ||
5079 | struct sctp_chunk *err; | ||
5080 | size_t datalen; | ||
5081 | sctp_verb_t deliver; | ||
5082 | int tmp; | ||
5083 | __u32 tsn; | ||
5084 | |||
5085 | data_hdr = chunk->subh.data_hdr = (sctp_datahdr_t *)chunk->skb->data; | ||
5086 | skb_pull(chunk->skb, sizeof(sctp_datahdr_t)); | ||
5087 | |||
5088 | tsn = ntohl(data_hdr->tsn); | ||
5089 | SCTP_DEBUG_PRINTK("eat_data: TSN 0x%x.\n", tsn); | ||
5090 | |||
5091 | /* ASSERT: Now skb->data is really the user data. */ | ||
5092 | |||
5093 | /* Process ECN based congestion. | ||
5094 | * | ||
5095 | * Since the chunk structure is reused for all chunks within | ||
5096 | * a packet, we use ecn_ce_done to track if we've already | ||
5097 | * done CE processing for this packet. | ||
5098 | * | ||
5099 | * We need to do ECN processing even if we plan to discard the | ||
5100 | * chunk later. | ||
5101 | */ | ||
5102 | |||
5103 | if (!chunk->ecn_ce_done) { | ||
5104 | struct sctp_af *af; | ||
5105 | chunk->ecn_ce_done = 1; | ||
5106 | |||
5107 | af = sctp_get_af_specific( | ||
5108 | ipver2af(chunk->skb->nh.iph->version)); | ||
5109 | |||
5110 | if (af && af->is_ce(chunk->skb) && asoc->peer.ecn_capable) { | ||
5111 | /* Do real work as sideffect. */ | ||
5112 | sctp_add_cmd_sf(commands, SCTP_CMD_ECN_CE, | ||
5113 | SCTP_U32(tsn)); | ||
5114 | } | ||
5115 | } | ||
5116 | |||
5117 | tmp = sctp_tsnmap_check(&asoc->peer.tsn_map, tsn); | ||
5118 | if (tmp < 0) { | ||
5119 | /* The TSN is too high--silently discard the chunk and | ||
5120 | * count on it getting retransmitted later. | ||
5121 | */ | ||
5122 | return SCTP_IERROR_HIGH_TSN; | ||
5123 | } else if (tmp > 0) { | ||
5124 | /* This is a duplicate. Record it. */ | ||
5125 | sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_DUP, SCTP_U32(tsn)); | ||
5126 | return SCTP_IERROR_DUP_TSN; | ||
5127 | } | ||
5128 | |||
5129 | /* This is a new TSN. */ | ||
5130 | |||
5131 | /* Discard if there is no room in the receive window. | ||
5132 | * Actually, allow a little bit of overflow (up to a MTU). | ||
5133 | * Actually, allow a little bit of overflow (up to an MTU). | ||
5134 | datalen = ntohs(chunk->chunk_hdr->length); | ||
5135 | datalen -= sizeof(sctp_data_chunk_t); | ||
5136 | |||
5137 | deliver = SCTP_CMD_CHUNK_ULP; | ||
5138 | |||
5139 | /* Think about partial delivery. */ | ||
5140 | if ((datalen >= asoc->rwnd) && (!asoc->ulpq.pd_mode)) { | ||
5141 | |||
5142 | /* Even if we don't accept this chunk, there is | ||
5143 | * memory pressure. | ||
5144 | */ | ||
5145 | sctp_add_cmd_sf(commands, SCTP_CMD_PART_DELIVER, SCTP_NULL()); | ||
5146 | } | ||
5147 | |||
5148 | /* Spill over rwnd a little bit. Note: While allowed, this spill over | ||
5149 | * seems a bit troublesome in that frag_point varies based on | ||
5150 | * PMTU. In cases such as loopback, this might be a rather | ||
5151 | * large spill over. | ||
5152 | */ | ||
5153 | if (!asoc->rwnd || asoc->rwnd_over || | ||
5154 | (datalen > asoc->rwnd + asoc->frag_point)) { | ||
5155 | |||
5156 | /* If this is the next TSN, consider reneging to make | ||
5157 | * room. Note: Playing nice with a confused sender. A | ||
5158 | * malicious sender can still eat up all our buffer | ||
5159 | * space and in the future we may want to detect and | ||
5160 | * do more drastic reneging. | ||
5161 | */ | ||
5162 | if (sctp_tsnmap_has_gap(&asoc->peer.tsn_map) && | ||
5163 | (sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1) == tsn) { | ||
5164 | SCTP_DEBUG_PRINTK("Reneging for tsn:%u\n", tsn); | ||
5165 | deliver = SCTP_CMD_RENEGE; | ||
5166 | } else { | ||
5167 | SCTP_DEBUG_PRINTK("Discard tsn: %u len: %Zd, " | ||
5168 | "rwnd: %d\n", tsn, datalen, | ||
5169 | asoc->rwnd); | ||
5170 | return SCTP_IERROR_IGNORE_TSN; | ||
5171 | } | ||
5172 | } | ||
5173 | |||
5174 | /* | ||
5175 | * Section 3.3.10.9 No User Data (9) | ||
5176 | * | ||
5177 | * Cause of error | ||
5178 | * --------------- | ||
5179 | * No User Data: This error cause is returned to the originator of a | ||
5180 | * DATA chunk if a received DATA chunk has no user data. | ||
5181 | */ | ||
5182 | if (unlikely(0 == datalen)) { | ||
5183 | err = sctp_make_abort_no_data(asoc, chunk, tsn); | ||
5184 | if (err) { | ||
5185 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | ||
5186 | SCTP_CHUNK(err)); | ||
5187 | } | ||
5188 | /* We are going to ABORT, so we might as well stop | ||
5189 | * processing the rest of the chunks in the packet. | ||
5190 | */ | ||
5191 | sctp_add_cmd_sf(commands, SCTP_CMD_DISCARD_PACKET,SCTP_NULL()); | ||
5192 | sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED, | ||
5193 | SCTP_U32(SCTP_ERROR_NO_DATA)); | ||
5194 | SCTP_INC_STATS(SCTP_MIB_ABORTEDS); | ||
5195 | SCTP_DEC_STATS(SCTP_MIB_CURRESTAB); | ||
5196 | return SCTP_IERROR_NO_DATA; | ||
5197 | } | ||
5198 | |||
5199 | /* If definitely accepting the DATA chunk, record its TSN; otherwise | ||
5200 | * wait for renege processing. | ||
5201 | */ | ||
5202 | if (SCTP_CMD_CHUNK_ULP == deliver) | ||
5203 | sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_TSN, SCTP_U32(tsn)); | ||
5204 | |||
5205 | /* Note: Some chunks may get overcounted (if we drop them later) or | ||
5206 | * double-counted (if we renege and the chunk arrives again). | ||
5207 | */ | ||
5208 | if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) | ||
5209 | SCTP_INC_STATS(SCTP_MIB_INUNORDERCHUNKS); | ||
5210 | else | ||
5211 | SCTP_INC_STATS(SCTP_MIB_INORDERCHUNKS); | ||
5212 | |||
5213 | /* RFC 2960 6.5 Stream Identifier and Stream Sequence Number | ||
5214 | * | ||
5215 | * If an endpoint receives a DATA chunk with an invalid stream | ||
5216 | * identifier, it shall acknowledge the reception of the DATA chunk | ||
5217 | * following the normal procedure, immediately send an ERROR chunk | ||
5218 | * with cause set to "Invalid Stream Identifier" (See Section 3.3.10) | ||
5219 | * and discard the DATA chunk. | ||
5220 | */ | ||
5221 | if (ntohs(data_hdr->stream) >= asoc->c.sinit_max_instreams) { | ||
5222 | err = sctp_make_op_error(asoc, chunk, SCTP_ERROR_INV_STRM, | ||
5223 | &data_hdr->stream, | ||
5224 | sizeof(data_hdr->stream)); | ||
5225 | if (err) | ||
5226 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | ||
5227 | SCTP_CHUNK(err)); | ||
5228 | return SCTP_IERROR_BAD_STREAM; | ||
5229 | } | ||
5230 | |||
5231 | /* Send the data up to the user. Note: Schedule the | ||
5232 | * SCTP_CMD_CHUNK_ULP cmd before the SCTP_CMD_GEN_SACK, as the SACK | ||
5233 | * chunk needs the updated rwnd. | ||
5234 | */ | ||
5235 | sctp_add_cmd_sf(commands, deliver, SCTP_CHUNK(chunk)); | ||
5236 | |||
5237 | return SCTP_IERROR_NO_ERROR; | ||
5238 | } | ||
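
/*
 * Editor's sketch: the receive-window test in sctp_eat_data() above drops a
 * chunk once rwnd is exhausted or already overdrawn, while tolerating a
 * spill-over of up to frag_point beyond the advertised window.  Restated as
 * a standalone predicate with illustrative names:
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

static bool data_fits_rwnd(size_t datalen, uint32_t rwnd,
			   uint32_t rwnd_over, uint32_t frag_point)
{
	if (!rwnd || rwnd_over)
		return false;		/* window closed or already overdrawn */
	/* Allow the modest spill-over discussed in the comment above. */
	return datalen <= (size_t)rwnd + frag_point;
}
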
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c new file mode 100644 index 000000000000..8967846f69e8 --- /dev/null +++ b/net/sctp/sm_statetable.c | |||
@@ -0,0 +1,1004 @@ | |||
1 | /* SCTP kernel reference Implementation | ||
2 | * (C) Copyright IBM Corp. 2001, 2004 | ||
3 | * Copyright (c) 1999-2000 Cisco, Inc. | ||
4 | * Copyright (c) 1999-2001 Motorola, Inc. | ||
5 | * Copyright (c) 2001 Intel Corp. | ||
6 | * Copyright (c) 2001 Nokia, Inc. | ||
7 | * | ||
8 | * This file is part of the SCTP kernel reference Implementation | ||
9 | * | ||
10 | * These are the state tables for the SCTP state machine. | ||
11 | * | ||
12 | * The SCTP reference implementation is free software; | ||
13 | * you can redistribute it and/or modify it under the terms of | ||
14 | * the GNU General Public License as published by | ||
15 | * the Free Software Foundation; either version 2, or (at your option) | ||
16 | * any later version. | ||
17 | * | ||
18 | * The SCTP reference implementation is distributed in the hope that it | ||
19 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
20 | * ************************ | ||
21 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
22 | * See the GNU General Public License for more details. | ||
23 | * | ||
24 | * You should have received a copy of the GNU General Public License | ||
25 | * along with GNU CC; see the file COPYING. If not, write to | ||
26 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
27 | * Boston, MA 02111-1307, USA. | ||
28 | * | ||
29 | * Please send any bug reports or fixes you make to the | ||
30 | * email address(es): | ||
31 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
32 | * | ||
33 | * Or submit a bug report through the following website: | ||
34 | * http://www.sf.net/projects/lksctp | ||
35 | * | ||
36 | * Written or modified by: | ||
37 | * La Monte H.P. Yarroll <piggy@acm.org> | ||
38 | * Karl Knutson <karl@athena.chicago.il.us> | ||
39 | * Jon Grimm <jgrimm@us.ibm.com> | ||
40 | * Hui Huang <hui.huang@nokia.com> | ||
41 | * Daisy Chang <daisyc@us.ibm.com> | ||
42 | * Ardelle Fan <ardelle.fan@intel.com> | ||
43 | * Sridhar Samudrala <sri@us.ibm.com> | ||
44 | * | ||
45 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
46 | * be incorporated into the next SCTP release. | ||
47 | */ | ||
48 | |||
49 | #include <linux/skbuff.h> | ||
50 | #include <net/sctp/sctp.h> | ||
51 | #include <net/sctp/sm.h> | ||
52 | |||
53 | static const sctp_sm_table_entry_t | ||
54 | primitive_event_table[SCTP_NUM_PRIMITIVE_TYPES][SCTP_STATE_NUM_STATES]; | ||
55 | static const sctp_sm_table_entry_t | ||
56 | other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_STATE_NUM_STATES]; | ||
57 | static const sctp_sm_table_entry_t | ||
58 | timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES]; | ||
59 | |||
60 | static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid, | ||
61 | sctp_state_t state); | ||
62 | |||
63 | |||
64 | static const sctp_sm_table_entry_t bug = { | ||
65 | .fn = sctp_sf_bug, | ||
66 | .name = "sctp_sf_bug" | ||
67 | }; | ||
68 | |||
69 | #define DO_LOOKUP(_max, _type, _table) \ | ||
70 | if ((event_subtype._type > (_max))) { \ | ||
71 | printk(KERN_WARNING \ | ||
72 | "sctp table %p possible attack:" \ | ||
73 | " event %d exceeds max %d\n", \ | ||
74 | _table, event_subtype._type, _max); \ | ||
75 | return &bug; \ | ||
76 | } \ | ||
77 | return &_table[event_subtype._type][(int)state]; | ||
78 | |||
79 | const sctp_sm_table_entry_t *sctp_sm_lookup_event(sctp_event_t event_type, | ||
80 | sctp_state_t state, | ||
81 | sctp_subtype_t event_subtype) | ||
82 | { | ||
83 | switch (event_type) { | ||
84 | case SCTP_EVENT_T_CHUNK: | ||
85 | return sctp_chunk_event_lookup(event_subtype.chunk, state); | ||
86 | break; | ||
87 | case SCTP_EVENT_T_TIMEOUT: | ||
88 | DO_LOOKUP(SCTP_EVENT_TIMEOUT_MAX, timeout, | ||
89 | timeout_event_table); | ||
90 | break; | ||
91 | |||
92 | case SCTP_EVENT_T_OTHER: | ||
93 | DO_LOOKUP(SCTP_EVENT_OTHER_MAX, other, other_event_table); | ||
94 | break; | ||
95 | |||
96 | case SCTP_EVENT_T_PRIMITIVE: | ||
97 | DO_LOOKUP(SCTP_EVENT_PRIMITIVE_MAX, primitive, | ||
98 | primitive_event_table); | ||
99 | break; | ||
100 | |||
101 | default: | ||
102 | /* Yikes! We got an illegal event type. */ | ||
103 | return &bug; | ||
104 | }; | ||
105 | } | ||
106 | |||
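/*
 * Editor's sketch: the DO_LOOKUP pattern above indexes a two-dimensional
 * table by event subtype and association state, falling back to a "bug"
 * entry rather than reading out of bounds.  A miniature self-contained
 * version; the types, sizes and table contents are illustrative stand-ins
 * for the sctp_sm_table_entry_t tables.
 */
#include <stdio.h>

#define NUM_SUBTYPES 3
#define NUM_STATES   4

typedef struct { const char *name; } entry_t;

static const entry_t bug_entry = { "bug" };
static const entry_t table[NUM_SUBTYPES][NUM_STATES] = {
	{ { "t0/s0" }, { "t0/s1" }, { "t0/s2" }, { "t0/s3" } },
	/* remaining rows stay zero-initialized for brevity */
};

static const entry_t *lookup(int subtype, int state)
{
	if (subtype < 0 || subtype >= NUM_SUBTYPES ||
	    state < 0 || state >= NUM_STATES) {
		fprintf(stderr, "possible attack: subtype %d state %d\n",
			subtype, state);
		return &bug_entry;	/* never index past the table */
	}
	return &table[subtype][state];
}
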
107 | #define TYPE_SCTP_DATA { \ | ||
108 | /* SCTP_STATE_EMPTY */ \ | ||
109 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | ||
110 | /* SCTP_STATE_CLOSED */ \ | ||
111 | {.fn = sctp_sf_tabort_8_4_8, .name = "sctp_sf_tabort_8_4_8"}, \ | ||
112 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
113 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
114 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
115 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
116 | /* SCTP_STATE_ESTABLISHED */ \ | ||
117 | {.fn = sctp_sf_eat_data_6_2, .name = "sctp_sf_eat_data_6_2"}, \ | ||
118 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
119 | {.fn = sctp_sf_eat_data_6_2, .name = "sctp_sf_eat_data_6_2"}, \ | ||
120 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
121 | {.fn = sctp_sf_eat_data_fast_4_4, .name = "sctp_sf_eat_data_fast_4_4"}, \ | ||
122 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
123 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
124 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
125 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
126 | } /* TYPE_SCTP_DATA */ | ||
127 | |||
128 | #define TYPE_SCTP_INIT { \ | ||
129 | /* SCTP_STATE_EMPTY */ \ | ||
130 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
131 | /* SCTP_STATE_CLOSED */ \ | ||
132 | {.fn = sctp_sf_do_5_1B_init, .name = "sctp_sf_do_5_1B_init"}, \ | ||
133 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
134 | {.fn = sctp_sf_do_5_2_1_siminit, .name = "sctp_sf_do_5_2_1_siminit"}, \ | ||
135 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
136 | {.fn = sctp_sf_do_5_2_1_siminit, .name = "sctp_sf_do_5_2_1_siminit"}, \ | ||
137 | /* SCTP_STATE_ESTABLISHED */ \ | ||
138 | {.fn = sctp_sf_do_5_2_2_dupinit, .name = "sctp_sf_do_5_2_2_dupinit"}, \ | ||
139 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
140 | {.fn = sctp_sf_do_5_2_2_dupinit, .name = "sctp_sf_do_5_2_2_dupinit"}, \ | ||
141 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
142 | {.fn = sctp_sf_do_5_2_2_dupinit, .name = "sctp_sf_do_5_2_2_dupinit"}, \ | ||
143 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
144 | {.fn = sctp_sf_do_5_2_2_dupinit, .name = "sctp_sf_do_5_2_2_dupinit"}, \ | ||
145 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
146 | {.fn = sctp_sf_do_9_2_reshutack, .name = "sctp_sf_do_9_2_reshutack"}, \ | ||
147 | } /* TYPE_SCTP_INIT */ | ||
148 | |||
149 | #define TYPE_SCTP_INIT_ACK { \ | ||
150 | /* SCTP_STATE_EMPTY */ \ | ||
151 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | ||
152 | /* SCTP_STATE_CLOSED */ \ | ||
153 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
154 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
155 | {.fn = sctp_sf_do_5_1C_ack, .name = "sctp_sf_do_5_1C_ack"}, \ | ||
156 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
157 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
158 | /* SCTP_STATE_ESTABLISHED */ \ | ||
159 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
160 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
161 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
162 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
163 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
164 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
165 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
166 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
167 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
168 | } /* TYPE_SCTP_INIT_ACK */ | ||
169 | |||
170 | #define TYPE_SCTP_SACK { \ | ||
171 | /* SCTP_STATE_EMPTY */ \ | ||
172 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | ||
173 | /* SCTP_STATE_CLOSED */ \ | ||
174 | {.fn = sctp_sf_tabort_8_4_8, .name = "sctp_sf_tabort_8_4_8"}, \ | ||
175 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
176 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
177 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
178 | {.fn = sctp_sf_eat_sack_6_2, .name = "sctp_sf_eat_sack_6_2"}, \ | ||
179 | /* SCTP_STATE_ESTABLISHED */ \ | ||
180 | {.fn = sctp_sf_eat_sack_6_2, .name = "sctp_sf_eat_sack_6_2"}, \ | ||
181 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
182 | {.fn = sctp_sf_eat_sack_6_2, .name = "sctp_sf_eat_sack_6_2"}, \ | ||
183 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
184 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
185 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
186 | {.fn = sctp_sf_eat_sack_6_2, .name = "sctp_sf_eat_sack_6_2"}, \ | ||
187 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
188 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
189 | } /* TYPE_SCTP_SACK */ | ||
190 | |||
191 | #define TYPE_SCTP_HEARTBEAT { \ | ||
192 | /* SCTP_STATE_EMPTY */ \ | ||
193 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | ||
194 | /* SCTP_STATE_CLOSED */ \ | ||
195 | {.fn = sctp_sf_tabort_8_4_8, .name = "sctp_sf_tabort_8_4_8"}, \ | ||
196 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
197 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
198 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
199 | {.fn = sctp_sf_beat_8_3, .name = "sctp_sf_beat_8_3"}, \ | ||
200 | /* SCTP_STATE_ESTABLISHED */ \ | ||
201 | {.fn = sctp_sf_beat_8_3, .name = "sctp_sf_beat_8_3"}, \ | ||
202 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
203 | {.fn = sctp_sf_beat_8_3, .name = "sctp_sf_beat_8_3"}, \ | ||
204 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
205 | {.fn = sctp_sf_beat_8_3, .name = "sctp_sf_beat_8_3"}, \ | ||
206 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
207 | {.fn = sctp_sf_beat_8_3, .name = "sctp_sf_beat_8_3"}, \ | ||
208 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
209 | /* This should not happen, but we are nice. */ \ | ||
210 | {.fn = sctp_sf_beat_8_3, .name = "sctp_sf_beat_8_3"}, \ | ||
211 | } /* TYPE_SCTP_HEARTBEAT */ | ||
212 | |||
213 | #define TYPE_SCTP_HEARTBEAT_ACK { \ | ||
214 | /* SCTP_STATE_EMPTY */ \ | ||
215 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | ||
216 | /* SCTP_STATE_CLOSED */ \ | ||
217 | {.fn = sctp_sf_tabort_8_4_8, .name = "sctp_sf_tabort_8_4_8"}, \ | ||
218 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
219 | {.fn = sctp_sf_violation, .name = "sctp_sf_violation"}, \ | ||
220 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
221 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
222 | /* SCTP_STATE_ESTABLISHED */ \ | ||
223 | {.fn = sctp_sf_backbeat_8_3, .name = "sctp_sf_backbeat_8_3"}, \ | ||
224 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
225 | {.fn = sctp_sf_backbeat_8_3, .name = "sctp_sf_backbeat_8_3"}, \ | ||
226 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
227 | {.fn = sctp_sf_backbeat_8_3, .name = "sctp_sf_backbeat_8_3"}, \ | ||
228 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
229 | {.fn = sctp_sf_backbeat_8_3, .name = "sctp_sf_backbeat_8_3"}, \ | ||
230 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
231 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
232 | } /* TYPE_SCTP_HEARTBEAT_ACK */ | ||
233 | |||
234 | #define TYPE_SCTP_ABORT { \ | ||
235 | /* SCTP_STATE_EMPTY */ \ | ||
236 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | ||
237 | /* SCTP_STATE_CLOSED */ \ | ||
238 | {.fn = sctp_sf_pdiscard, .name = "sctp_sf_pdiscard"}, \ | ||
239 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
240 | {.fn = sctp_sf_cookie_wait_abort, .name = "sctp_sf_cookie_wait_abort"}, \ | ||
241 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
242 | {.fn = sctp_sf_cookie_echoed_abort, \ | ||
243 | .name = "sctp_sf_cookie_echoed_abort"}, \ | ||
244 | /* SCTP_STATE_ESTABLISHED */ \ | ||
245 | {.fn = sctp_sf_do_9_1_abort, .name = "sctp_sf_do_9_1_abort"}, \ | ||
246 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
247 | {.fn = sctp_sf_shutdown_pending_abort, \ | ||
248 | .name = "sctp_sf_shutdown_pending_abort"}, \ | ||
249 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
250 | {.fn = sctp_sf_shutdown_sent_abort, \ | ||
251 | .name = "sctp_sf_shutdown_sent_abort"}, \ | ||
252 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
253 | {.fn = sctp_sf_do_9_1_abort, .name = "sctp_sf_do_9_1_abort"}, \ | ||
254 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
255 | {.fn = sctp_sf_shutdown_ack_sent_abort, \ | ||
256 | .name = "sctp_sf_shutdown_ack_sent_abort"}, \ | ||
257 | } /* TYPE_SCTP_ABORT */ | ||
258 | |||
259 | #define TYPE_SCTP_SHUTDOWN { \ | ||
260 | /* SCTP_STATE_EMPTY */ \ | ||
261 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | ||
262 | /* SCTP_STATE_CLOSED */ \ | ||
263 | {.fn = sctp_sf_tabort_8_4_8, .name = "sctp_sf_tabort_8_4_8"}, \ | ||
264 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
265 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
266 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
267 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
268 | /* SCTP_STATE_ESTABLISHED */ \ | ||
269 | {.fn = sctp_sf_do_9_2_shutdown, .name = "sctp_sf_do_9_2_shutdown"}, \ | ||
270 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
271 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
272 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
273 | {.fn = sctp_sf_do_9_2_shutdown_ack, \ | ||
274 | .name = "sctp_sf_do_9_2_shutdown_ack"}, \ | ||
275 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
276 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
277 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
278 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
279 | } /* TYPE_SCTP_SHUTDOWN */ | ||
280 | |||
281 | #define TYPE_SCTP_SHUTDOWN_ACK { \ | ||
282 | /* SCTP_STATE_EMPTY */ \ | ||
283 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | ||
284 | /* SCTP_STATE_CLOSED */ \ | ||
285 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | ||
286 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
287 | {.fn = sctp_sf_do_8_5_1_E_sa, .name = "sctp_sf_do_8_5_1_E_sa"}, \ | ||
288 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
289 | {.fn = sctp_sf_do_8_5_1_E_sa, .name = "sctp_sf_do_8_5_1_E_sa"}, \ | ||
290 | /* SCTP_STATE_ESTABLISHED */ \ | ||
291 | {.fn = sctp_sf_violation, .name = "sctp_sf_violation"}, \ | ||
292 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
293 | {.fn = sctp_sf_violation, .name = "sctp_sf_violation"}, \ | ||
294 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
295 | {.fn = sctp_sf_do_9_2_final, .name = "sctp_sf_do_9_2_final"}, \ | ||
296 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
297 | {.fn = sctp_sf_violation, .name = "sctp_sf_violation"}, \ | ||
298 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
299 | {.fn = sctp_sf_do_9_2_final, .name = "sctp_sf_do_9_2_final"}, \ | ||
300 | } /* TYPE_SCTP_SHUTDOWN_ACK */ | ||
301 | |||
302 | #define TYPE_SCTP_ERROR { \ | ||
303 | /* SCTP_STATE_EMPTY */ \ | ||
304 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | ||
305 | /* SCTP_STATE_CLOSED */ \ | ||
306 | {.fn = sctp_sf_tabort_8_4_8, .name = "sctp_sf_tabort_8_4_8"}, \ | ||
307 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
308 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
309 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
310 | {.fn = sctp_sf_cookie_echoed_err, .name = "sctp_sf_cookie_echoed_err"}, \ | ||
311 | /* SCTP_STATE_ESTABLISHED */ \ | ||
312 | {.fn = sctp_sf_operr_notify, .name = "sctp_sf_operr_notify"}, \ | ||
313 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
314 | {.fn = sctp_sf_operr_notify, .name = "sctp_sf_operr_notify"}, \ | ||
315 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
316 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
317 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
318 | {.fn = sctp_sf_operr_notify, .name = "sctp_sf_operr_notify"}, \ | ||
319 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
320 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
321 | } /* TYPE_SCTP_ERROR */ | ||
322 | |||
323 | #define TYPE_SCTP_COOKIE_ECHO { \ | ||
324 | /* SCTP_STATE_EMPTY */ \ | ||
325 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
326 | /* SCTP_STATE_CLOSED */ \ | ||
327 | {.fn = sctp_sf_do_5_1D_ce, .name = "sctp_sf_do_5_1D_ce"}, \ | ||
328 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
329 | {.fn = sctp_sf_do_5_2_4_dupcook, .name = "sctp_sf_do_5_2_4_dupcook"}, \ | ||
330 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
331 | {.fn = sctp_sf_do_5_2_4_dupcook, .name = "sctp_sf_do_5_2_4_dupcook"}, \ | ||
332 | /* SCTP_STATE_ESTABLISHED */ \ | ||
333 | {.fn = sctp_sf_do_5_2_4_dupcook, .name = "sctp_sf_do_5_2_4_dupcook"}, \ | ||
334 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
335 | {.fn = sctp_sf_do_5_2_4_dupcook, .name = "sctp_sf_do_5_2_4_dupcook"}, \ | ||
336 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
337 | {.fn = sctp_sf_do_5_2_4_dupcook, .name = "sctp_sf_do_5_2_4_dupcook"}, \ | ||
338 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
339 | {.fn = sctp_sf_do_5_2_4_dupcook, .name = "sctp_sf_do_5_2_4_dupcook"}, \ | ||
340 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
341 | {.fn = sctp_sf_do_5_2_4_dupcook, .name = "sctp_sf_do_5_2_4_dupcook"}, \ | ||
342 | } /* TYPE_SCTP_COOKIE_ECHO */ | ||
343 | |||
344 | #define TYPE_SCTP_COOKIE_ACK { \ | ||
345 | /* SCTP_STATE_EMPTY */ \ | ||
346 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | ||
347 | /* SCTP_STATE_CLOSED */ \ | ||
348 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
349 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
350 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
351 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
352 | {.fn = sctp_sf_do_5_1E_ca, .name = "sctp_sf_do_5_1E_ca"}, \ | ||
353 | /* SCTP_STATE_ESTABLISHED */ \ | ||
354 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
355 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
356 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
357 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
358 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
359 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
360 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
361 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
362 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
363 | } /* TYPE_SCTP_COOKIE_ACK */ | ||
364 | |||
365 | #define TYPE_SCTP_ECN_ECNE { \ | ||
366 | /* SCTP_STATE_EMPTY */ \ | ||
367 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | ||
368 | /* SCTP_STATE_CLOSED */ \ | ||
369 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
370 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
371 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
372 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
373 | {.fn = sctp_sf_do_ecne, .name = "sctp_sf_do_ecne"}, \ | ||
374 | /* SCTP_STATE_ESTABLISHED */ \ | ||
375 | {.fn = sctp_sf_do_ecne, .name = "sctp_sf_do_ecne"}, \ | ||
376 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
377 | {.fn = sctp_sf_do_ecne, .name = "sctp_sf_do_ecne"}, \ | ||
378 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
379 | {.fn = sctp_sf_do_ecne, .name = "sctp_sf_do_ecne"}, \ | ||
380 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
381 | {.fn = sctp_sf_do_ecne, .name = "sctp_sf_do_ecne"}, \ | ||
382 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
383 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
384 | } /* TYPE_SCTP_ECN_ECNE */ | ||
385 | |||
386 | #define TYPE_SCTP_ECN_CWR { \ | ||
387 | /* SCTP_STATE_EMPTY */ \ | ||
388 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | ||
389 | /* SCTP_STATE_CLOSED */ \ | ||
390 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
391 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
392 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
393 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
394 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
395 | /* SCTP_STATE_ESTABLISHED */ \ | ||
396 | {.fn = sctp_sf_do_ecn_cwr, .name = "sctp_sf_do_ecn_cwr"}, \ | ||
397 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
398 | {.fn = sctp_sf_do_ecn_cwr, .name = "sctp_sf_do_ecn_cwr"}, \ | ||
399 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
400 | {.fn = sctp_sf_do_ecn_cwr, .name = "sctp_sf_do_ecn_cwr"}, \ | ||
401 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
402 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
403 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
404 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
405 | } /* TYPE_SCTP_ECN_CWR */ | ||
406 | |||
407 | #define TYPE_SCTP_SHUTDOWN_COMPLETE { \ | ||
408 | /* SCTP_STATE_EMPTY */ \ | ||
409 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | ||
410 | /* SCTP_STATE_CLOSED */ \ | ||
411 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
412 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
413 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
414 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
415 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
416 | /* SCTP_STATE_ESTABLISHED */ \ | ||
417 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
418 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
419 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
420 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
421 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
422 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
423 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
424 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
425 | {.fn = sctp_sf_do_4_C, .name = "sctp_sf_do_4_C"}, \ | ||
426 | } /* TYPE_SCTP_SHUTDOWN_COMPLETE */ | ||
427 | |||
428 | /* The primary index for this table is the chunk type. | ||
429 | * The secondary index for this table is the state. | ||
430 | * | ||
431 | * For base protocol (RFC 2960). | ||
432 | */ | ||
433 | static const sctp_sm_table_entry_t chunk_event_table[SCTP_NUM_BASE_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = { | ||
434 | TYPE_SCTP_DATA, | ||
435 | TYPE_SCTP_INIT, | ||
436 | TYPE_SCTP_INIT_ACK, | ||
437 | TYPE_SCTP_SACK, | ||
438 | TYPE_SCTP_HEARTBEAT, | ||
439 | TYPE_SCTP_HEARTBEAT_ACK, | ||
440 | TYPE_SCTP_ABORT, | ||
441 | TYPE_SCTP_SHUTDOWN, | ||
442 | TYPE_SCTP_SHUTDOWN_ACK, | ||
443 | TYPE_SCTP_ERROR, | ||
444 | TYPE_SCTP_COOKIE_ECHO, | ||
445 | TYPE_SCTP_COOKIE_ACK, | ||
446 | TYPE_SCTP_ECN_ECNE, | ||
447 | TYPE_SCTP_ECN_CWR, | ||
448 | TYPE_SCTP_SHUTDOWN_COMPLETE, | ||
449 | }; /* state_fn_t chunk_event_table[][] */ | ||
450 | |||
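An illustrative sketch of how this table is consumed: the handler for a chunk is a plain two-dimensional index, chunk type first, state second. The helper below is not part of sm_statetable.c; it only assumes the sctp_sm_table_entry_t layout and the SCTP_CID_*/SCTP_STATE_* enums already used above.

/* Illustrative only -- not part of the original file. */
static const sctp_sm_table_entry_t *
example_base_lookup(sctp_cid_t cid, sctp_state_t state)
{
	/* Primary index: chunk type.  Secondary index: state. */
	return &chunk_event_table[cid][state];
}

/* e.g. example_base_lookup(SCTP_CID_SACK, SCTP_STATE_ESTABLISHED)
 * returns the entry whose .fn is sctp_sf_eat_sack_6_2 and whose
 * .name is "sctp_sf_eat_sack_6_2", matching the TYPE_SCTP_SACK row.
 */
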
451 | #define TYPE_SCTP_ASCONF { \ | ||
452 | /* SCTP_STATE_EMPTY */ \ | ||
453 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | ||
454 | /* SCTP_STATE_CLOSED */ \ | ||
455 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
456 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
457 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
458 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
459 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
460 | /* SCTP_STATE_ESTABLISHED */ \ | ||
461 | {.fn = sctp_sf_do_asconf, .name = "sctp_sf_do_asconf"}, \ | ||
462 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
463 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
464 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
465 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
466 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
467 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
468 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
469 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
470 | } /* TYPE_SCTP_ASCONF */ | ||
471 | |||
472 | #define TYPE_SCTP_ASCONF_ACK { \ | ||
473 | /* SCTP_STATE_EMPTY */ \ | ||
474 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | ||
475 | /* SCTP_STATE_CLOSED */ \ | ||
476 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
477 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
478 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
479 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
480 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
481 | /* SCTP_STATE_ESTABLISHED */ \ | ||
482 | {.fn = sctp_sf_do_asconf_ack, .name = "sctp_sf_do_asconf_ack"}, \ | ||
483 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
484 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
485 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
486 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
487 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
488 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
489 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
490 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
491 | } /* TYPE_SCTP_ASCONF_ACK */ | ||
492 | |||
493 | /* The primary index for this table is the chunk type. | ||
494 | * The secondary index for this table is the state. | ||
495 | */ | ||
496 | static const sctp_sm_table_entry_t addip_chunk_event_table[SCTP_NUM_ADDIP_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = { | ||
497 | TYPE_SCTP_ASCONF, | ||
498 | TYPE_SCTP_ASCONF_ACK, | ||
499 | }; /* state_fn_t addip_chunk_event_table[][] */ | ||
500 | |||
501 | #define TYPE_SCTP_FWD_TSN { \ | ||
502 | /* SCTP_STATE_EMPTY */ \ | ||
503 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, \ | ||
504 | /* SCTP_STATE_CLOSED */ \ | ||
505 | {.fn = sctp_sf_tabort_8_4_8, .name = "sctp_sf_tabort_8_4_8"}, \ | ||
506 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
507 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
508 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
509 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
510 | /* SCTP_STATE_ESTABLISHED */ \ | ||
511 | {.fn = sctp_sf_eat_fwd_tsn, .name = "sctp_sf_eat_fwd_tsn"}, \ | ||
512 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
513 | {.fn = sctp_sf_eat_fwd_tsn, .name = "sctp_sf_eat_fwd_tsn"}, \ | ||
514 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
515 | {.fn = sctp_sf_eat_fwd_tsn_fast, .name = "sctp_sf_eat_fwd_tsn_fast"}, \ | ||
516 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
517 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
518 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
519 | {.fn = sctp_sf_discard_chunk, .name = "sctp_sf_discard_chunk"}, \ | ||
520 | } /* TYPE_SCTP_FWD_TSN */ | ||
521 | |||
522 | /* The primary index for this table is the chunk type. | ||
523 | * The secondary index for this table is the state. | ||
524 | */ | ||
525 | static const sctp_sm_table_entry_t prsctp_chunk_event_table[SCTP_NUM_PRSCTP_CHUNK_TYPES][SCTP_STATE_NUM_STATES] = { | ||
526 | TYPE_SCTP_FWD_TSN, | ||
527 | }; /* state_fn_t prsctp_chunk_event_table[][] */ | ||
528 | |||
529 | static const sctp_sm_table_entry_t | ||
530 | chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = { | ||
531 | /* SCTP_STATE_EMPTY */ | ||
532 | {.fn = sctp_sf_ootb, .name = "sctp_sf_ootb"}, | ||
533 | /* SCTP_STATE_CLOSED */ | ||
534 | {.fn = sctp_sf_tabort_8_4_8, .name = "sctp_sf_tabort_8_4_8"}, | ||
535 | /* SCTP_STATE_COOKIE_WAIT */ | ||
536 | {.fn = sctp_sf_unk_chunk, .name = "sctp_sf_unk_chunk"}, | ||
537 | /* SCTP_STATE_COOKIE_ECHOED */ | ||
538 | {.fn = sctp_sf_unk_chunk, .name = "sctp_sf_unk_chunk"}, | ||
539 | /* SCTP_STATE_ESTABLISHED */ | ||
540 | {.fn = sctp_sf_unk_chunk, .name = "sctp_sf_unk_chunk"}, | ||
541 | /* SCTP_STATE_SHUTDOWN_PENDING */ | ||
542 | {.fn = sctp_sf_unk_chunk, .name = "sctp_sf_unk_chunk"}, | ||
543 | /* SCTP_STATE_SHUTDOWN_SENT */ | ||
544 | {.fn = sctp_sf_unk_chunk, .name = "sctp_sf_unk_chunk"}, | ||
545 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ | ||
546 | {.fn = sctp_sf_unk_chunk, .name = "sctp_sf_unk_chunk"}, | ||
547 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ | ||
548 | {.fn = sctp_sf_unk_chunk, .name = "sctp_sf_unk_chunk"}, | ||
549 | }; /* chunk unknown */ | ||
550 | |||
551 | |||
552 | #define TYPE_SCTP_PRIMITIVE_ASSOCIATE { \ | ||
553 | /* SCTP_STATE_EMPTY */ \ | ||
554 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
555 | /* SCTP_STATE_CLOSED */ \ | ||
556 | {.fn = sctp_sf_do_prm_asoc, .name = "sctp_sf_do_prm_asoc"}, \ | ||
557 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
558 | {.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \ | ||
559 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
560 | {.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \ | ||
561 | /* SCTP_STATE_ESTABLISHED */ \ | ||
562 | {.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \ | ||
563 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
564 | {.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \ | ||
565 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
566 | {.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \ | ||
567 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
568 | {.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \ | ||
569 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
570 | {.fn = sctp_sf_not_impl, .name = "sctp_sf_not_impl"}, \ | ||
571 | } /* TYPE_SCTP_PRIMITIVE_ASSOCIATE */ | ||
572 | |||
573 | #define TYPE_SCTP_PRIMITIVE_SHUTDOWN { \ | ||
574 | /* SCTP_STATE_EMPTY */ \ | ||
575 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
576 | /* SCTP_STATE_CLOSED */ \ | ||
577 | {.fn = sctp_sf_error_closed, .name = "sctp_sf_error_closed"}, \ | ||
578 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
579 | {.fn = sctp_sf_cookie_wait_prm_shutdown, \ | ||
580 | .name = "sctp_sf_cookie_wait_prm_shutdown"}, \ | ||
581 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
582 | {.fn = sctp_sf_cookie_echoed_prm_shutdown, \ | ||
583 | .name = "sctp_sf_cookie_echoed_prm_shutdown"},\ | ||
584 | /* SCTP_STATE_ESTABLISHED */ \ | ||
585 | {.fn = sctp_sf_do_9_2_prm_shutdown, \ | ||
586 | .name = "sctp_sf_do_9_2_prm_shutdown"}, \ | ||
587 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
588 | {.fn = sctp_sf_ignore_primitive, .name = "sctp_sf_ignore_primitive"}, \ | ||
589 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
590 | {.fn = sctp_sf_ignore_primitive, .name = "sctp_sf_ignore_primitive"}, \ | ||
591 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
592 | {.fn = sctp_sf_ignore_primitive, .name = "sctp_sf_ignore_primitive"}, \ | ||
593 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
594 | {.fn = sctp_sf_ignore_primitive, .name = "sctp_sf_ignore_primitive"}, \ | ||
595 | } /* TYPE_SCTP_PRIMITIVE_SHUTDOWN */ | ||
596 | |||
597 | #define TYPE_SCTP_PRIMITIVE_ABORT { \ | ||
598 | /* SCTP_STATE_EMPTY */ \ | ||
599 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
600 | /* SCTP_STATE_CLOSED */ \ | ||
601 | {.fn = sctp_sf_error_closed, .name = "sctp_sf_error_closed"}, \ | ||
602 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
603 | {.fn = sctp_sf_cookie_wait_prm_abort, \ | ||
604 | .name = "sctp_sf_cookie_wait_prm_abort"}, \ | ||
605 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
606 | {.fn = sctp_sf_cookie_echoed_prm_abort, \ | ||
607 | .name = "sctp_sf_cookie_echoed_prm_abort"}, \ | ||
608 | /* SCTP_STATE_ESTABLISHED */ \ | ||
609 | {.fn = sctp_sf_do_9_1_prm_abort, \ | ||
610 | .name = "sctp_sf_do_9_1_prm_abort"}, \ | ||
611 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
612 | {.fn = sctp_sf_shutdown_pending_prm_abort, \ | ||
613 | .name = "sctp_sf_shutdown_pending_prm_abort"}, \ | ||
614 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
615 | {.fn = sctp_sf_shutdown_sent_prm_abort, \ | ||
616 | .name = "sctp_sf_shutdown_sent_prm_abort"}, \ | ||
617 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
618 | {.fn = sctp_sf_do_9_1_prm_abort, \ | ||
619 | .name = "sctp_sf_do_9_1_prm_abort"}, \ | ||
620 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
621 | {.fn = sctp_sf_shutdown_ack_sent_prm_abort, \ | ||
622 | .name = "sctp_sf_shutdown_ack_sent_prm_abort"}, \ | ||
623 | } /* TYPE_SCTP_PRIMITIVE_ABORT */ | ||
624 | |||
625 | #define TYPE_SCTP_PRIMITIVE_SEND { \ | ||
626 | /* SCTP_STATE_EMPTY */ \ | ||
627 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
628 | /* SCTP_STATE_CLOSED */ \ | ||
629 | {.fn = sctp_sf_error_closed, .name = "sctp_sf_error_closed"}, \ | ||
630 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
631 | {.fn = sctp_sf_do_prm_send, .name = "sctp_sf_do_prm_send"}, \ | ||
632 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
633 | {.fn = sctp_sf_do_prm_send, .name = "sctp_sf_do_prm_send"}, \ | ||
634 | /* SCTP_STATE_ESTABLISHED */ \ | ||
635 | {.fn = sctp_sf_do_prm_send, .name = "sctp_sf_do_prm_send"}, \ | ||
636 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
637 | {.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \ | ||
638 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
639 | {.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \ | ||
640 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
641 | {.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \ | ||
642 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
643 | {.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \ | ||
644 | } /* TYPE_SCTP_PRIMITIVE_SEND */ | ||
645 | |||
646 | #define TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT { \ | ||
647 | /* SCTP_STATE_EMPTY */ \ | ||
648 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
649 | /* SCTP_STATE_CLOSED */ \ | ||
650 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
651 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
652 | {.fn = sctp_sf_do_prm_requestheartbeat, \ | ||
653 | .name = "sctp_sf_do_prm_requestheartbeat"}, \ | ||
654 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
655 | {.fn = sctp_sf_do_prm_requestheartbeat, \ | ||
656 | .name = "sctp_sf_do_prm_requestheartbeat"}, \ | ||
657 | /* SCTP_STATE_ESTABLISHED */ \ | ||
658 | {.fn = sctp_sf_do_prm_requestheartbeat, \ | ||
659 | .name = "sctp_sf_do_prm_requestheartbeat"}, \ | ||
660 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
661 | {.fn = sctp_sf_do_prm_requestheartbeat, \ | ||
662 | .name = "sctp_sf_do_prm_requestheartbeat"}, \ | ||
663 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
664 | {.fn = sctp_sf_do_prm_requestheartbeat, \ | ||
665 | .name = "sctp_sf_do_prm_requestheartbeat"}, \ | ||
666 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
667 | {.fn = sctp_sf_do_prm_requestheartbeat, \ | ||
668 | .name = "sctp_sf_do_prm_requestheartbeat"}, \ | ||
669 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
670 | {.fn = sctp_sf_do_prm_requestheartbeat, \ | ||
671 | .name = "sctp_sf_do_prm_requestheartbeat"}, \ | ||
672 | } /* TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT */ | ||
673 | |||
674 | #define TYPE_SCTP_PRIMITIVE_ASCONF { \ | ||
675 | /* SCTP_STATE_EMPTY */ \ | ||
676 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
677 | /* SCTP_STATE_CLOSED */ \ | ||
678 | {.fn = sctp_sf_error_closed, .name = "sctp_sf_error_closed"}, \ | ||
679 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
680 | {.fn = sctp_sf_error_closed, .name = "sctp_sf_error_closed"}, \ | ||
681 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
682 | {.fn = sctp_sf_error_closed, .name = "sctp_sf_error_closed"}, \ | ||
683 | /* SCTP_STATE_ESTABLISHED */ \ | ||
684 | {.fn = sctp_sf_do_prm_asconf, .name = "sctp_sf_do_prm_asconf"}, \ | ||
685 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
686 | {.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \ | ||
687 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
688 | {.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \ | ||
689 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
690 | {.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \ | ||
691 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
692 | {.fn = sctp_sf_error_shutdown, .name = "sctp_sf_error_shutdown"}, \ | ||
693 | } /* TYPE_SCTP_PRIMITIVE_ASCONF */ | ||
694 | |||
695 | /* The primary index for this table is the primitive type. | ||
696 | * The secondary index for this table is the state. | ||
697 | */ | ||
698 | static const sctp_sm_table_entry_t primitive_event_table[SCTP_NUM_PRIMITIVE_TYPES][SCTP_STATE_NUM_STATES] = { | ||
699 | TYPE_SCTP_PRIMITIVE_ASSOCIATE, | ||
700 | TYPE_SCTP_PRIMITIVE_SHUTDOWN, | ||
701 | TYPE_SCTP_PRIMITIVE_ABORT, | ||
702 | TYPE_SCTP_PRIMITIVE_SEND, | ||
703 | TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT, | ||
704 | TYPE_SCTP_PRIMITIVE_ASCONF, | ||
705 | }; | ||
706 | |||
707 | #define TYPE_SCTP_OTHER_NO_PENDING_TSN { \ | ||
708 | /* SCTP_STATE_EMPTY */ \ | ||
709 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
710 | /* SCTP_STATE_CLOSED */ \ | ||
711 | {.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \ | ||
712 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
713 | {.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \ | ||
714 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
715 | {.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \ | ||
716 | /* SCTP_STATE_ESTABLISHED */ \ | ||
717 | {.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \ | ||
718 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
719 | {.fn = sctp_sf_do_9_2_start_shutdown, \ | ||
720 | .name = "sctp_do_9_2_start_shutdown"}, \ | ||
721 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
722 | {.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \ | ||
723 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
724 | {.fn = sctp_sf_do_9_2_shutdown_ack, \ | ||
725 | .name = "sctp_sf_do_9_2_shutdown_ack"}, \ | ||
726 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
727 | {.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \ | ||
728 | } | ||
729 | |||
730 | #define TYPE_SCTP_OTHER_ICMP_PROTO_UNREACH { \ | ||
731 | /* SCTP_STATE_EMPTY */ \ | ||
732 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
733 | /* SCTP_STATE_CLOSED */ \ | ||
734 | {.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \ | ||
735 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
736 | {.fn = sctp_sf_cookie_wait_icmp_abort, \ | ||
737 | .name = "sctp_sf_cookie_wait_icmp_abort"}, \ | ||
738 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
739 | {.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \ | ||
740 | /* SCTP_STATE_ESTABLISHED */ \ | ||
741 | {.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \ | ||
742 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
743 | {.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \ | ||
744 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
745 | {.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \ | ||
746 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
747 | {.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \ | ||
748 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
749 | {.fn = sctp_sf_ignore_other, .name = "sctp_sf_ignore_other"}, \ | ||
750 | } | ||
751 | |||
752 | static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_STATE_NUM_STATES] = { | ||
753 | TYPE_SCTP_OTHER_NO_PENDING_TSN, | ||
754 | TYPE_SCTP_OTHER_ICMP_PROTO_UNREACH, | ||
755 | }; | ||
756 | |||
757 | #define TYPE_SCTP_EVENT_TIMEOUT_NONE { \ | ||
758 | /* SCTP_STATE_EMPTY */ \ | ||
759 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
760 | /* SCTP_STATE_CLOSED */ \ | ||
761 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
762 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
763 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
764 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
765 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
766 | /* SCTP_STATE_ESTABLISHED */ \ | ||
767 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
768 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
769 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
770 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
771 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
772 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
773 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
774 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
775 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
776 | } | ||
777 | |||
778 | #define TYPE_SCTP_EVENT_TIMEOUT_T1_COOKIE { \ | ||
779 | /* SCTP_STATE_EMPTY */ \ | ||
780 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
781 | /* SCTP_STATE_CLOSED */ \ | ||
782 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
783 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
784 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
785 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
786 | {.fn = sctp_sf_t1_timer_expire, .name = "sctp_sf_t1_timer_expire"}, \ | ||
787 | /* SCTP_STATE_ESTABLISHED */ \ | ||
788 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
789 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
790 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
791 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
792 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
793 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
794 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
795 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
796 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
797 | } | ||
798 | |||
799 | #define TYPE_SCTP_EVENT_TIMEOUT_T1_INIT { \ | ||
800 | /* SCTP_STATE_EMPTY */ \ | ||
801 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
802 | /* SCTP_STATE_CLOSED */ \ | ||
803 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
804 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
805 | {.fn = sctp_sf_t1_timer_expire, .name = "sctp_sf_t1_timer_expire"}, \ | ||
806 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
807 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
808 | /* SCTP_STATE_ESTABLISHED */ \ | ||
809 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
810 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
811 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
812 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
813 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
814 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
815 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
816 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
817 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
818 | } | ||
819 | |||
820 | #define TYPE_SCTP_EVENT_TIMEOUT_T2_SHUTDOWN { \ | ||
821 | /* SCTP_STATE_EMPTY */ \ | ||
822 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
823 | /* SCTP_STATE_CLOSED */ \ | ||
824 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
825 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
826 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
827 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
828 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
829 | /* SCTP_STATE_ESTABLISHED */ \ | ||
830 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
831 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
832 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
833 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
834 | {.fn = sctp_sf_t2_timer_expire, .name = "sctp_sf_t2_timer_expire"}, \ | ||
835 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
836 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
837 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
838 | {.fn = sctp_sf_t2_timer_expire, .name = "sctp_sf_t2_timer_expire"}, \ | ||
839 | } | ||
840 | |||
841 | #define TYPE_SCTP_EVENT_TIMEOUT_T3_RTX { \ | ||
842 | /* SCTP_STATE_EMPTY */ \ | ||
843 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
844 | /* SCTP_STATE_CLOSED */ \ | ||
845 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
846 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
847 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
848 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
849 | {.fn = sctp_sf_do_6_3_3_rtx, .name = "sctp_sf_do_6_3_3_rtx"}, \ | ||
850 | /* SCTP_STATE_ESTABLISHED */ \ | ||
851 | {.fn = sctp_sf_do_6_3_3_rtx, .name = "sctp_sf_do_6_3_3_rtx"}, \ | ||
852 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
853 | {.fn = sctp_sf_do_6_3_3_rtx, .name = "sctp_sf_do_6_3_3_rtx"}, \ | ||
854 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
855 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
856 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
857 | {.fn = sctp_sf_do_6_3_3_rtx, .name = "sctp_sf_do_6_3_3_rtx"}, \ | ||
858 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
859 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
860 | } | ||
861 | |||
862 | #define TYPE_SCTP_EVENT_TIMEOUT_T4_RTO { \ | ||
863 | /* SCTP_STATE_EMPTY */ \ | ||
864 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
865 | /* SCTP_STATE_CLOSED */ \ | ||
866 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
867 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
868 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
869 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
870 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
871 | /* SCTP_STATE_ESTABLISHED */ \ | ||
872 | {.fn = sctp_sf_t4_timer_expire, .name = "sctp_sf_t4_timer_expire"}, \ | ||
873 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
874 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
875 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
876 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
877 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
878 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
879 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
880 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
881 | } | ||
882 | |||
883 | #define TYPE_SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD { \ | ||
884 | /* SCTP_STATE_EMPTY */ \ | ||
885 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
886 | /* SCTP_STATE_CLOSED */ \ | ||
887 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
888 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
889 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
890 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
891 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
892 | /* SCTP_STATE_ESTABLISHED */ \ | ||
893 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
894 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
895 | {.fn = sctp_sf_t5_timer_expire, .name = "sctp_sf_t5_timer_expire"}, \ | ||
896 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
897 | {.fn = sctp_sf_t5_timer_expire, .name = "sctp_sf_t5_timer_expire"}, \ | ||
898 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
899 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
900 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
901 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
902 | } | ||
903 | |||
904 | #define TYPE_SCTP_EVENT_TIMEOUT_HEARTBEAT { \ | ||
905 | /* SCTP_STATE_EMPTY */ \ | ||
906 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
907 | /* SCTP_STATE_CLOSED */ \ | ||
908 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
909 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
910 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
911 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
912 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
913 | /* SCTP_STATE_ESTABLISHED */ \ | ||
914 | {.fn = sctp_sf_sendbeat_8_3, .name = "sctp_sf_sendbeat_8_3"}, \ | ||
915 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
916 | {.fn = sctp_sf_sendbeat_8_3, .name = "sctp_sf_sendbeat_8_3"}, \ | ||
917 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
918 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
919 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
920 | {.fn = sctp_sf_sendbeat_8_3, .name = "sctp_sf_sendbeat_8_3"}, \ | ||
921 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
922 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
923 | } | ||
924 | |||
925 | #define TYPE_SCTP_EVENT_TIMEOUT_SACK { \ | ||
926 | /* SCTP_STATE_EMPTY */ \ | ||
927 | {.fn = sctp_sf_bug, .name = "sctp_sf_bug"}, \ | ||
928 | /* SCTP_STATE_CLOSED */ \ | ||
929 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
930 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
931 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
932 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
933 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
934 | /* SCTP_STATE_ESTABLISHED */ \ | ||
935 | {.fn = sctp_sf_do_6_2_sack, .name = "sctp_sf_do_6_2_sack"}, \ | ||
936 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
937 | {.fn = sctp_sf_do_6_2_sack, .name = "sctp_sf_do_6_2_sack"}, \ | ||
938 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
939 | {.fn = sctp_sf_do_6_2_sack, .name = "sctp_sf_do_6_2_sack"}, \ | ||
940 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
941 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
942 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
943 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
944 | } | ||
945 | |||
946 | #define TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE { \ | ||
947 | /* SCTP_STATE_EMPTY */ \ | ||
948 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
949 | /* SCTP_STATE_CLOSED */ \ | ||
950 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
951 | /* SCTP_STATE_COOKIE_WAIT */ \ | ||
952 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
953 | /* SCTP_STATE_COOKIE_ECHOED */ \ | ||
954 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
955 | /* SCTP_STATE_ESTABLISHED */ \ | ||
956 | {.fn = sctp_sf_autoclose_timer_expire, \ | ||
957 | .name = "sctp_sf_autoclose_timer_expire"}, \ | ||
958 | /* SCTP_STATE_SHUTDOWN_PENDING */ \ | ||
959 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
960 | /* SCTP_STATE_SHUTDOWN_SENT */ \ | ||
961 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
962 | /* SCTP_STATE_SHUTDOWN_RECEIVED */ \ | ||
963 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
964 | /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \ | ||
965 | {.fn = sctp_sf_timer_ignore, .name = "sctp_sf_timer_ignore"}, \ | ||
966 | } | ||
967 | |||
968 | static const sctp_sm_table_entry_t timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES] = { | ||
969 | TYPE_SCTP_EVENT_TIMEOUT_NONE, | ||
970 | TYPE_SCTP_EVENT_TIMEOUT_T1_COOKIE, | ||
971 | TYPE_SCTP_EVENT_TIMEOUT_T1_INIT, | ||
972 | TYPE_SCTP_EVENT_TIMEOUT_T2_SHUTDOWN, | ||
973 | TYPE_SCTP_EVENT_TIMEOUT_T3_RTX, | ||
974 | TYPE_SCTP_EVENT_TIMEOUT_T4_RTO, | ||
975 | TYPE_SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD, | ||
976 | TYPE_SCTP_EVENT_TIMEOUT_HEARTBEAT, | ||
977 | TYPE_SCTP_EVENT_TIMEOUT_SACK, | ||
978 | TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE, | ||
979 | }; | ||
980 | |||
981 | static const sctp_sm_table_entry_t *sctp_chunk_event_lookup(sctp_cid_t cid, | ||
982 | sctp_state_t state) | ||
983 | { | ||
984 | if (state > SCTP_STATE_MAX) | ||
985 | return &bug; | ||
986 | |||
987 | if (cid >= 0 && cid <= SCTP_CID_BASE_MAX) | ||
988 | return &chunk_event_table[cid][state]; | ||
989 | |||
990 | if (sctp_prsctp_enable) { | ||
991 | if (cid == SCTP_CID_FWD_TSN) | ||
992 | return &prsctp_chunk_event_table[0][state]; | ||
993 | } | ||
994 | |||
995 | if (sctp_addip_enable) { | ||
996 | if (cid == SCTP_CID_ASCONF) | ||
997 | return &addip_chunk_event_table[0][state]; | ||
998 | |||
999 | if (cid == SCTP_CID_ASCONF_ACK) | ||
1000 | return &addip_chunk_event_table[1][state]; | ||
1001 | } | ||
1002 | |||
1003 | return &chunk_event_table_unknown[state]; | ||
1004 | } | ||
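For context, a hedged sketch of how this lookup might be exercised: the real dispatch goes through the state-machine core with the full argument set (endpoint, association, event argument, command list), but the table resolution itself is just the call below. The debug-print helper is hypothetical and not part of the original file.

/* Illustrative only -- not part of the original file. */
static void example_dump_handler(sctp_cid_t cid, sctp_state_t state)
{
	const sctp_sm_table_entry_t *entry;

	entry = sctp_chunk_event_lookup(cid, state);
	/* entry->fn is the state function to run; entry->name is the
	 * label that debug output can print before invoking it.
	 */
	printk(KERN_DEBUG "sctp: chunk %d in state %d -> %s\n",
	       cid, state, entry->name);
}
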
diff --git a/net/sctp/socket.c b/net/sctp/socket.c new file mode 100644 index 000000000000..e8c210182571 --- /dev/null +++ b/net/sctp/socket.c | |||
@@ -0,0 +1,4797 @@ | |||
1 | /* SCTP kernel reference Implementation | ||
2 | * (C) Copyright IBM Corp. 2001, 2004 | ||
3 | * Copyright (c) 1999-2000 Cisco, Inc. | ||
4 | * Copyright (c) 1999-2001 Motorola, Inc. | ||
5 | * Copyright (c) 2001-2003 Intel Corp. | ||
6 | * Copyright (c) 2001-2002 Nokia, Inc. | ||
7 | * Copyright (c) 2001 La Monte H.P. Yarroll | ||
8 | * | ||
9 | * This file is part of the SCTP kernel reference Implementation | ||
10 | * | ||
11 | * These functions interface with the sockets layer to implement the | ||
12 | * SCTP Extensions for the Sockets API. | ||
13 | * | ||
14 | * Note that the descriptions from the specification are USER level | ||
15 | * functions--this file contains the functions which populate the struct proto | ||
16 | * for SCTP which is the BOTTOM of the sockets interface. | ||
17 | * | ||
18 | * The SCTP reference implementation is free software; | ||
19 | * you can redistribute it and/or modify it under the terms of | ||
20 | * the GNU General Public License as published by | ||
21 | * the Free Software Foundation; either version 2, or (at your option) | ||
22 | * any later version. | ||
23 | * | ||
24 | * The SCTP reference implementation is distributed in the hope that it | ||
25 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
26 | * ************************ | ||
27 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
28 | * See the GNU General Public License for more details. | ||
29 | * | ||
30 | * You should have received a copy of the GNU General Public License | ||
31 | * along with GNU CC; see the file COPYING. If not, write to | ||
32 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
33 | * Boston, MA 02111-1307, USA. | ||
34 | * | ||
35 | * Please send any bug reports or fixes you make to the | ||
36 | * email address(es): | ||
37 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
38 | * | ||
39 | * Or submit a bug report through the following website: | ||
40 | * http://www.sf.net/projects/lksctp | ||
41 | * | ||
42 | * Written or modified by: | ||
43 | * La Monte H.P. Yarroll <piggy@acm.org> | ||
44 | * Narasimha Budihal <narsi@refcode.org> | ||
45 | * Karl Knutson <karl@athena.chicago.il.us> | ||
46 | * Jon Grimm <jgrimm@us.ibm.com> | ||
47 | * Xingang Guo <xingang.guo@intel.com> | ||
48 | * Daisy Chang <daisyc@us.ibm.com> | ||
49 | * Sridhar Samudrala <samudrala@us.ibm.com> | ||
50 | * Inaky Perez-Gonzalez <inaky.gonzalez@intel.com> | ||
51 | * Ardelle Fan <ardelle.fan@intel.com> | ||
52 | * Ryan Layer <rmlayer@us.ibm.com> | ||
53 | * Anup Pemmaiah <pemmaiah@cc.usu.edu> | ||
54 | * Kevin Gao <kevin.gao@intel.com> | ||
55 | * | ||
56 | * Any bugs reported to us we will try to fix... any fixes shared will | ||
57 | * be incorporated into the next SCTP release. | ||
58 | */ | ||
59 | |||
60 | #include <linux/config.h> | ||
61 | #include <linux/types.h> | ||
62 | #include <linux/kernel.h> | ||
63 | #include <linux/wait.h> | ||
64 | #include <linux/time.h> | ||
65 | #include <linux/ip.h> | ||
66 | #include <linux/fcntl.h> | ||
67 | #include <linux/poll.h> | ||
68 | #include <linux/init.h> | ||
69 | #include <linux/crypto.h> | ||
70 | |||
71 | #include <net/ip.h> | ||
72 | #include <net/icmp.h> | ||
73 | #include <net/route.h> | ||
74 | #include <net/ipv6.h> | ||
75 | #include <net/inet_common.h> | ||
76 | |||
77 | #include <linux/socket.h> /* for sa_family_t */ | ||
78 | #include <net/sock.h> | ||
79 | #include <net/sctp/sctp.h> | ||
80 | #include <net/sctp/sm.h> | ||
81 | |||
82 | /* WARNING: Please do not remove the SCTP_STATIC attribute to | ||
83 | * any of the functions below as they are used to export functions | ||
84 | * used by a project regression testsuite. | ||
85 | */ | ||
86 | |||
87 | /* Forward declarations for internal helper functions. */ | ||
88 | static int sctp_writeable(struct sock *sk); | ||
89 | static void sctp_wfree(struct sk_buff *skb); | ||
90 | static int sctp_wait_for_sndbuf(struct sctp_association *, long *timeo_p, | ||
91 | size_t msg_len); | ||
92 | static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p); | ||
93 | static int sctp_wait_for_connect(struct sctp_association *, long *timeo_p); | ||
94 | static int sctp_wait_for_accept(struct sock *sk, long timeo); | ||
95 | static void sctp_wait_for_close(struct sock *sk, long timeo); | ||
96 | static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, | ||
97 | union sctp_addr *addr, int len); | ||
98 | static int sctp_bindx_add(struct sock *, struct sockaddr *, int); | ||
99 | static int sctp_bindx_rem(struct sock *, struct sockaddr *, int); | ||
100 | static int sctp_send_asconf_add_ip(struct sock *, struct sockaddr *, int); | ||
101 | static int sctp_send_asconf_del_ip(struct sock *, struct sockaddr *, int); | ||
102 | static int sctp_send_asconf(struct sctp_association *asoc, | ||
103 | struct sctp_chunk *chunk); | ||
104 | static int sctp_do_bind(struct sock *, union sctp_addr *, int); | ||
105 | static int sctp_autobind(struct sock *sk); | ||
106 | static void sctp_sock_migrate(struct sock *, struct sock *, | ||
107 | struct sctp_association *, sctp_socket_type_t); | ||
108 | static char *sctp_hmac_alg = SCTP_COOKIE_HMAC_ALG; | ||
109 | |||
110 | extern kmem_cache_t *sctp_bucket_cachep; | ||
111 | |||
112 | /* Get the sndbuf space available at the time on the association. */ | ||
113 | static inline int sctp_wspace(struct sctp_association *asoc) | ||
114 | { | ||
115 | struct sock *sk = asoc->base.sk; | ||
116 | int amt = 0; | ||
117 | |||
118 | amt = sk->sk_sndbuf - asoc->sndbuf_used; | ||
119 | if (amt < 0) | ||
120 | amt = 0; | ||
121 | return amt; | ||
122 | } | ||
123 | |||
124 | /* Increment the used sndbuf space count of the corresponding association by | ||
125 | * the size of the outgoing data chunk. | ||
126 | * Also, set the skb destructor for sndbuf accounting later. | ||
127 | * | ||
128 | * Since it is always 1-1 between chunk and skb, and also a new skb is always | ||
129 | * allocated for chunk bundling in sctp_packet_transmit(), we can use the | ||
130 | * destructor in the data chunk skb for the purpose of the sndbuf space | ||
131 | * tracking. | ||
132 | */ | ||
133 | static inline void sctp_set_owner_w(struct sctp_chunk *chunk) | ||
134 | { | ||
135 | struct sctp_association *asoc = chunk->asoc; | ||
136 | struct sock *sk = asoc->base.sk; | ||
137 | |||
138 | /* The sndbuf space is tracked per association. */ | ||
139 | sctp_association_hold(asoc); | ||
140 | |||
141 | chunk->skb->destructor = sctp_wfree; | ||
142 | /* Save the chunk pointer in skb for sctp_wfree to use later. */ | ||
143 | *((struct sctp_chunk **)(chunk->skb->cb)) = chunk; | ||
144 | |||
145 | asoc->sndbuf_used += SCTP_DATA_SNDSIZE(chunk); | ||
146 | sk->sk_wmem_queued += SCTP_DATA_SNDSIZE(chunk); | ||
147 | } | ||
148 | |||
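The charge taken here is released when the data chunk's skb is freed. The real release path is sctp_wfree() (declared earlier and defined later in this file); the sketch below is only an illustration of the bookkeeping such a destructor has to undo, assuming SCTP_DATA_SNDSIZE() reports the same size that was charged. It is not the actual implementation.

/* Illustrative sketch only -- NOT the real sctp_wfree(). */
static void example_wfree(struct sk_buff *skb)
{
	struct sctp_chunk *chunk = *((struct sctp_chunk **)(skb->cb));
	struct sctp_association *asoc = chunk->asoc;
	struct sock *sk = asoc->base.sk;

	/* Give back exactly what sctp_set_owner_w() charged. */
	asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk);
	sk->sk_wmem_queued -= SCTP_DATA_SNDSIZE(chunk);

	/* Drop the association reference taken at charge time. */
	sctp_association_put(asoc);
}
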
149 | /* Verify that this is a valid address. */ | ||
150 | static inline int sctp_verify_addr(struct sock *sk, union sctp_addr *addr, | ||
151 | int len) | ||
152 | { | ||
153 | struct sctp_af *af; | ||
154 | |||
155 | /* Verify basic sockaddr. */ | ||
156 | af = sctp_sockaddr_af(sctp_sk(sk), addr, len); | ||
157 | if (!af) | ||
158 | return -EINVAL; | ||
159 | |||
160 | /* Is this a valid SCTP address? */ | ||
161 | if (!af->addr_valid(addr, sctp_sk(sk))) | ||
162 | return -EINVAL; | ||
163 | |||
164 | if (!sctp_sk(sk)->pf->send_verify(sctp_sk(sk), (addr))) | ||
165 | return -EINVAL; | ||
166 | |||
167 | return 0; | ||
168 | } | ||
169 | |||
170 | /* Look up the association by its id. If this is not a UDP-style | ||
171 | * socket, the ID field is always ignored. | ||
172 | */ | ||
173 | struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id) | ||
174 | { | ||
175 | struct sctp_association *asoc = NULL; | ||
176 | |||
177 | /* If this is not a UDP-style socket, assoc id should be ignored. */ | ||
178 | if (!sctp_style(sk, UDP)) { | ||
179 | /* Return NULL if the socket state is not ESTABLISHED. It | ||
180 | * could be a TCP-style listening socket or a socket which | ||
181 | * hasn't yet called connect() to establish an association. | ||
182 | */ | ||
183 | if (!sctp_sstate(sk, ESTABLISHED)) | ||
184 | return NULL; | ||
185 | |||
186 | /* Get the first and the only association from the list. */ | ||
187 | if (!list_empty(&sctp_sk(sk)->ep->asocs)) | ||
188 | asoc = list_entry(sctp_sk(sk)->ep->asocs.next, | ||
189 | struct sctp_association, asocs); | ||
190 | return asoc; | ||
191 | } | ||
192 | |||
193 | /* Otherwise this is a UDP-style socket. */ | ||
194 | if (!id || (id == (sctp_assoc_t)-1)) | ||
195 | return NULL; | ||
196 | |||
197 | spin_lock_bh(&sctp_assocs_id_lock); | ||
198 | asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id); | ||
199 | spin_unlock_bh(&sctp_assocs_id_lock); | ||
200 | |||
201 | if (!asoc || (asoc->base.sk != sk) || asoc->base.dead) | ||
202 | return NULL; | ||
203 | |||
204 | return asoc; | ||
205 | } | ||
206 | |||
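A usage illustration (not from the original file): this is the typical pattern by which a per-association socket-option handler resolves the id handed in from user space. The option structure name below is hypothetical.

/* Illustrative only; 'example_opt' is a hypothetical structure. */
struct example_opt {
	sctp_assoc_t	assoc_id;
	int		value;
};

static int example_resolve_assoc(struct sock *sk, struct example_opt *opt)
{
	struct sctp_association *asoc;

	asoc = sctp_id2assoc(sk, opt->assoc_id);
	if (!asoc)
		return -EINVAL;	/* unknown id, wrong socket, or dead assoc */

	/* ... operate on asoc ... */
	return 0;
}
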
207 | /* Look up the transport from an address and an assoc id. If both address and | ||
208 | * id are specified, the association matching the address and the association | ||
209 | * matching the id must be the same. | ||
210 | */ | ||
211 | static struct sctp_transport *sctp_addr_id2transport(struct sock *sk, | ||
212 | struct sockaddr_storage *addr, | ||
213 | sctp_assoc_t id) | ||
214 | { | ||
215 | struct sctp_association *addr_asoc = NULL, *id_asoc = NULL; | ||
216 | struct sctp_transport *transport; | ||
217 | union sctp_addr *laddr = (union sctp_addr *)addr; | ||
218 | |||
219 | laddr->v4.sin_port = ntohs(laddr->v4.sin_port); | ||
220 | addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep, | ||
221 | (union sctp_addr *)addr, | ||
222 | &transport); | ||
223 | laddr->v4.sin_port = htons(laddr->v4.sin_port); | ||
224 | |||
225 | if (!addr_asoc) | ||
226 | return NULL; | ||
227 | |||
228 | id_asoc = sctp_id2assoc(sk, id); | ||
229 | if (id_asoc && (id_asoc != addr_asoc)) | ||
230 | return NULL; | ||
231 | |||
232 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), | ||
233 | (union sctp_addr *)addr); | ||
234 | |||
235 | return transport; | ||
236 | } | ||
237 | |||
238 | /* API 3.1.2 bind() - UDP Style Syntax | ||
239 | * The syntax of bind() is, | ||
240 | * | ||
241 | * ret = bind(int sd, struct sockaddr *addr, int addrlen); | ||
242 | * | ||
243 | * sd - the socket descriptor returned by socket(). | ||
244 | * addr - the address structure (struct sockaddr_in or struct | ||
245 | * sockaddr_in6 [RFC 2553]), | ||
246 | * addr_len - the size of the address structure. | ||
247 | */ | ||
248 | SCTP_STATIC int sctp_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) | ||
249 | { | ||
250 | int retval = 0; | ||
251 | |||
252 | sctp_lock_sock(sk); | ||
253 | |||
254 | SCTP_DEBUG_PRINTK("sctp_bind(sk: %p, uaddr: %p, addr_len: %d)\n", | ||
255 | sk, uaddr, addr_len); | ||
256 | |||
257 | /* Disallow binding twice. */ | ||
258 | if (!sctp_sk(sk)->ep->base.bind_addr.port) | ||
259 | retval = sctp_do_bind(sk, (union sctp_addr *)uaddr, | ||
260 | addr_len); | ||
261 | else | ||
262 | retval = -EINVAL; | ||
263 | |||
264 | sctp_release_sock(sk); | ||
265 | |||
266 | return retval; | ||
267 | } | ||
268 | |||
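From the application side this is the ordinary bind(2) call described in the comment above. The user-space sketch below is an example only, assuming an SCTP-capable kernel, the usual socket headers, and a one-to-many SOCK_SEQPACKET socket.

/* User-space illustration of the bind() syntax documented above. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

int example_bind_sctp(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port   = htons(5000),
		.sin_addr   = { .s_addr = htonl(INADDR_ANY) },
	};
	int sd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);

	if (sd < 0)
		return -1;
	return bind(sd, (struct sockaddr *)&addr, sizeof(addr));
}
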
269 | static long sctp_get_port_local(struct sock *, union sctp_addr *); | ||
270 | |||
271 | /* Verify this is a valid sockaddr. */ | ||
272 | static struct sctp_af *sctp_sockaddr_af(struct sctp_sock *opt, | ||
273 | union sctp_addr *addr, int len) | ||
274 | { | ||
275 | struct sctp_af *af; | ||
276 | |||
277 | /* Check minimum size. */ | ||
278 | if (len < sizeof (struct sockaddr)) | ||
279 | return NULL; | ||
280 | |||
281 | /* Does this PF support this AF? */ | ||
282 | if (!opt->pf->af_supported(addr->sa.sa_family, opt)) | ||
283 | return NULL; | ||
284 | |||
285 | /* If we get this far, af is valid. */ | ||
286 | af = sctp_get_af_specific(addr->sa.sa_family); | ||
287 | |||
288 | if (len < af->sockaddr_len) | ||
289 | return NULL; | ||
290 | |||
291 | return af; | ||
292 | } | ||
293 | |||
294 | /* Bind a local address either to an endpoint or to an association. */ | ||
295 | SCTP_STATIC int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len) | ||
296 | { | ||
297 | struct sctp_sock *sp = sctp_sk(sk); | ||
298 | struct sctp_endpoint *ep = sp->ep; | ||
299 | struct sctp_bind_addr *bp = &ep->base.bind_addr; | ||
300 | struct sctp_af *af; | ||
301 | unsigned short snum; | ||
302 | int ret = 0; | ||
303 | |||
304 | SCTP_DEBUG_PRINTK("sctp_do_bind(sk: %p, newaddr: %p, len: %d)\n", | ||
305 | sk, addr, len); | ||
306 | |||
307 | /* Common sockaddr verification. */ | ||
308 | af = sctp_sockaddr_af(sp, addr, len); | ||
309 | if (!af) | ||
310 | return -EINVAL; | ||
311 | |||
312 | /* PF specific bind() address verification. */ | ||
313 | if (!sp->pf->bind_verify(sp, addr)) | ||
314 | return -EADDRNOTAVAIL; | ||
315 | |||
316 | snum = ntohs(addr->v4.sin_port); | ||
317 | |||
318 | SCTP_DEBUG_PRINTK("sctp_do_bind: port: %d, new port: %d\n", | ||
319 | bp->port, snum); | ||
320 | |||
321 | /* We must either be unbound, or bind to the same port. */ | ||
322 | if (bp->port && (snum != bp->port)) { | ||
323 | SCTP_DEBUG_PRINTK("sctp_do_bind:" | ||
324 | " New port %d does not match existing port " | ||
325 | "%d.\n", snum, bp->port); | ||
326 | return -EINVAL; | ||
327 | } | ||
328 | |||
329 | if (snum && snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE)) | ||
330 | return -EACCES; | ||
331 | |||
332 | /* Make sure we are allowed to bind here. | ||
333 | * The function sctp_get_port_local() does duplicate address | ||
334 | * detection. | ||
335 | */ | ||
336 | if ((ret = sctp_get_port_local(sk, addr))) { | ||
337 | if (ret == (long) sk) { | ||
338 | /* This endpoint has a conflicting address. */ | ||
339 | return -EINVAL; | ||
340 | } else { | ||
341 | return -EADDRINUSE; | ||
342 | } | ||
343 | } | ||
344 | |||
345 | /* Refresh ephemeral port. */ | ||
346 | if (!bp->port) | ||
347 | bp->port = inet_sk(sk)->num; | ||
348 | |||
349 | /* Add the address to the bind address list. */ | ||
350 | sctp_local_bh_disable(); | ||
351 | sctp_write_lock(&ep->base.addr_lock); | ||
352 | |||
353 | /* Use GFP_ATOMIC since BHs are disabled. */ | ||
354 | addr->v4.sin_port = ntohs(addr->v4.sin_port); | ||
355 | ret = sctp_add_bind_addr(bp, addr, GFP_ATOMIC); | ||
356 | addr->v4.sin_port = htons(addr->v4.sin_port); | ||
357 | sctp_write_unlock(&ep->base.addr_lock); | ||
358 | sctp_local_bh_enable(); | ||
359 | |||
360 | /* Copy back into socket for getsockname() use. */ | ||
361 | if (!ret) { | ||
362 | inet_sk(sk)->sport = htons(inet_sk(sk)->num); | ||
363 | af->to_sk_saddr(addr, sk); | ||
364 | } | ||
365 | |||
366 | return ret; | ||
367 | } | ||
368 | |||
369 | /* ADDIP Section 4.1.1 Congestion Control of ASCONF Chunks | ||
370 | * | ||
371 | * R1) One and only one ASCONF Chunk MAY be in transit and unacknowledged | ||
372 | * at any one time. If a sender, after sending an ASCONF chunk, decides | ||
373 | * it needs to transfer another ASCONF Chunk, it MUST wait until the | ||
374 | * ASCONF-ACK Chunk returns from the previous ASCONF Chunk before sending a | ||
375 | * subsequent ASCONF. Note this restriction binds each side, so at any | ||
376 | * time two ASCONF may be in-transit on any given association (one sent | ||
377 | * from each endpoint). | ||
378 | */ | ||
379 | static int sctp_send_asconf(struct sctp_association *asoc, | ||
380 | struct sctp_chunk *chunk) | ||
381 | { | ||
382 | int retval = 0; | ||
383 | |||
384 | /* If there is an outstanding ASCONF chunk, queue it for later | ||
385 | * transmission. | ||
386 | */ | ||
387 | if (asoc->addip_last_asconf) { | ||
388 | __skb_queue_tail(&asoc->addip_chunks, (struct sk_buff *)chunk); | ||
389 | goto out; | ||
390 | } | ||
391 | |||
392 | /* Hold the chunk until an ASCONF_ACK is received. */ | ||
393 | sctp_chunk_hold(chunk); | ||
394 | retval = sctp_primitive_ASCONF(asoc, chunk); | ||
395 | if (retval) | ||
396 | sctp_chunk_free(chunk); | ||
397 | else | ||
398 | asoc->addip_last_asconf = chunk; | ||
399 | |||
400 | out: | ||
401 | return retval; | ||
402 | } | ||
403 | |||
404 | /* Add a list of addresses as bind addresses to local endpoint or | ||
405 | * association. | ||
406 | * | ||
407 | * Basically run through each address specified in the addrs/addrcnt | ||
408 | * array/length pair, determine if it is IPv6 or IPv4 and call | ||
409 | * sctp_do_bind() on it. | ||
410 | * | ||
411 | * If any of them fails, then the operation will be reversed and the | ||
412 | * ones that were added will be removed. | ||
413 | * | ||
414 | * Only sctp_setsockopt_bindx() is supposed to call this function. | ||
415 | */ | ||
416 | int sctp_bindx_add(struct sock *sk, struct sockaddr *addrs, int addrcnt) | ||
417 | { | ||
418 | int cnt; | ||
419 | int retval = 0; | ||
420 | void *addr_buf; | ||
421 | struct sockaddr *sa_addr; | ||
422 | struct sctp_af *af; | ||
423 | |||
424 | SCTP_DEBUG_PRINTK("sctp_bindx_add (sk: %p, addrs: %p, addrcnt: %d)\n", | ||
425 | sk, addrs, addrcnt); | ||
426 | |||
427 | addr_buf = addrs; | ||
428 | for (cnt = 0; cnt < addrcnt; cnt++) { | ||
429 | /* The list may contain either IPv4 or IPv6 address; | ||
430 | * determine the address length for walking thru the list. | ||
431 | */ | ||
432 | sa_addr = (struct sockaddr *)addr_buf; | ||
433 | af = sctp_get_af_specific(sa_addr->sa_family); | ||
434 | if (!af) { | ||
435 | retval = -EINVAL; | ||
436 | goto err_bindx_add; | ||
437 | } | ||
438 | |||
439 | retval = sctp_do_bind(sk, (union sctp_addr *)sa_addr, | ||
440 | af->sockaddr_len); | ||
441 | |||
442 | addr_buf += af->sockaddr_len; | ||
443 | |||
444 | err_bindx_add: | ||
445 | if (retval < 0) { | ||
446 | /* Failed. Clean up the ones that have been added. */ | ||
447 | if (cnt > 0) | ||
448 | sctp_bindx_rem(sk, addrs, cnt); | ||
449 | return retval; | ||
450 | } | ||
451 | } | ||
452 | |||
453 | return retval; | ||
454 | } | ||
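For reference, the packed address array this walk expects is built back to back in userspace and handed down through the lksctp sctp_bindx() wrapper described in the API 8.1 comment further below. A minimal sketch, assuming the lksctp-tools <netinet/sctp.h> header; the helper name, port, and 192.0.2.x addresses are illustrative only:

#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/sctp.h>	/* sctp_bindx(), SCTP_BINDX_ADD_ADDR */

/* Pack two IPv4 addresses back to back and add them to socket sd.
 * Every address in the array must carry the same port.
 */
static int add_two_addrs(int sd)
{
	struct sockaddr_in addrs[2];

	memset(addrs, 0, sizeof(addrs));
	addrs[0].sin_family = AF_INET;
	addrs[0].sin_port = htons(5000);	/* illustrative port */
	inet_pton(AF_INET, "192.0.2.1", &addrs[0].sin_addr);

	addrs[1] = addrs[0];
	inet_pton(AF_INET, "192.0.2.2", &addrs[1].sin_addr);

	return sctp_bindx(sd, (struct sockaddr *)addrs, 2,
			  SCTP_BINDX_ADD_ADDR);
}

With same-sized IPv4 entries a plain array is already "packed"; mixing IPv4 and IPv6 entries requires copying each sockaddr into a byte buffer at its own length, exactly as the kernel walk above assumes.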
455 | |||
456 | /* Send an ASCONF chunk with Add IP address parameters to all the peers of the | ||
457 | * associations that are part of the endpoint indicating that a list of local | ||
458 | * addresses are added to the endpoint. | ||
459 | * | ||
460 | * If any of the addresses is already in the bind address list of the | ||
461 | * association, we do not send the chunk for that association. But it will not | ||
462 | * affect other associations. | ||
463 | * | ||
464 | * Only sctp_setsockopt_bindx() is supposed to call this function. | ||
465 | */ | ||
466 | static int sctp_send_asconf_add_ip(struct sock *sk, | ||
467 | struct sockaddr *addrs, | ||
468 | int addrcnt) | ||
469 | { | ||
470 | struct sctp_sock *sp; | ||
471 | struct sctp_endpoint *ep; | ||
472 | struct sctp_association *asoc; | ||
473 | struct sctp_bind_addr *bp; | ||
474 | struct sctp_chunk *chunk; | ||
475 | struct sctp_sockaddr_entry *laddr; | ||
476 | union sctp_addr *addr; | ||
477 | void *addr_buf; | ||
478 | struct sctp_af *af; | ||
479 | struct list_head *pos; | ||
480 | struct list_head *p; | ||
481 | int i; | ||
482 | int retval = 0; | ||
483 | |||
484 | if (!sctp_addip_enable) | ||
485 | return retval; | ||
486 | |||
487 | sp = sctp_sk(sk); | ||
488 | ep = sp->ep; | ||
489 | |||
490 | SCTP_DEBUG_PRINTK("%s: (sk: %p, addrs: %p, addrcnt: %d)\n", | ||
491 | __FUNCTION__, sk, addrs, addrcnt); | ||
492 | |||
493 | list_for_each(pos, &ep->asocs) { | ||
494 | asoc = list_entry(pos, struct sctp_association, asocs); | ||
495 | |||
496 | if (!asoc->peer.asconf_capable) | ||
497 | continue; | ||
498 | |||
499 | if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP) | ||
500 | continue; | ||
501 | |||
502 | if (!sctp_state(asoc, ESTABLISHED)) | ||
503 | continue; | ||
504 | |||
505 | /* Check if any address in the packed array of addresses is | ||
506 | * in the bind address list of the association. If so, | ||
507 | * do not send the asconf chunk to its peer, but continue with | ||
508 | * other associations. | ||
509 | */ | ||
510 | addr_buf = addrs; | ||
511 | for (i = 0; i < addrcnt; i++) { | ||
512 | addr = (union sctp_addr *)addr_buf; | ||
513 | af = sctp_get_af_specific(addr->v4.sin_family); | ||
514 | if (!af) { | ||
515 | retval = -EINVAL; | ||
516 | goto out; | ||
517 | } | ||
518 | |||
519 | if (sctp_assoc_lookup_laddr(asoc, addr)) | ||
520 | break; | ||
521 | |||
522 | addr_buf += af->sockaddr_len; | ||
523 | } | ||
524 | if (i < addrcnt) | ||
525 | continue; | ||
526 | |||
527 | /* Use the first address in bind addr list of association as | ||
528 | * Address Parameter of ASCONF CHUNK. | ||
529 | */ | ||
530 | sctp_read_lock(&asoc->base.addr_lock); | ||
531 | bp = &asoc->base.bind_addr; | ||
532 | p = bp->address_list.next; | ||
533 | laddr = list_entry(p, struct sctp_sockaddr_entry, list); | ||
534 | sctp_read_unlock(&asoc->base.addr_lock); | ||
535 | |||
536 | chunk = sctp_make_asconf_update_ip(asoc, &laddr->a, addrs, | ||
537 | addrcnt, SCTP_PARAM_ADD_IP); | ||
538 | if (!chunk) { | ||
539 | retval = -ENOMEM; | ||
540 | goto out; | ||
541 | } | ||
542 | |||
543 | retval = sctp_send_asconf(asoc, chunk); | ||
544 | |||
545 | /* FIXME: After sending the add address ASCONF chunk, we | ||
546 | * cannot append the address to the association's binding | ||
547 | * address list, because the new address may be used as the | ||
548 | * source of a message sent to the peer before the ASCONF | ||
549 | * chunk is received by the peer. So we should wait until | ||
550 | * ASCONF_ACK is received. | ||
551 | */ | ||
552 | } | ||
553 | |||
554 | out: | ||
555 | return retval; | ||
556 | } | ||
557 | |||
558 | /* Remove a list of addresses from bind addresses list. Do not remove the | ||
559 | * last address. | ||
560 | * | ||
561 | * Basically run through each address specified in the addrs/addrcnt | ||
562 | * array/length pair, determine if it is IPv6 or IPv4 and call | ||
563 | * sctp_del_bind() on it. | ||
564 | * | ||
565 | * If any of them fails, then the operation will be reversed and the | ||
566 | * ones that were removed will be added back. | ||
567 | * | ||
568 | * At least one address has to be left; if only one address is | ||
569 | * available, the operation will return -EBUSY. | ||
570 | * | ||
571 | * Only sctp_setsockopt_bindx() is supposed to call this function. | ||
572 | */ | ||
573 | int sctp_bindx_rem(struct sock *sk, struct sockaddr *addrs, int addrcnt) | ||
574 | { | ||
575 | struct sctp_sock *sp = sctp_sk(sk); | ||
576 | struct sctp_endpoint *ep = sp->ep; | ||
577 | int cnt; | ||
578 | struct sctp_bind_addr *bp = &ep->base.bind_addr; | ||
579 | int retval = 0; | ||
580 | union sctp_addr saveaddr; | ||
581 | void *addr_buf; | ||
582 | struct sockaddr *sa_addr; | ||
583 | struct sctp_af *af; | ||
584 | |||
585 | SCTP_DEBUG_PRINTK("sctp_bindx_rem (sk: %p, addrs: %p, addrcnt: %d)\n", | ||
586 | sk, addrs, addrcnt); | ||
587 | |||
588 | addr_buf = addrs; | ||
589 | for (cnt = 0; cnt < addrcnt; cnt++) { | ||
590 | /* If the bind address list is empty or if there is only one | ||
591 | * bind address, there is nothing more to be removed (we need | ||
592 | * at least one address here). | ||
593 | */ | ||
594 | if (list_empty(&bp->address_list) || | ||
595 | (sctp_list_single_entry(&bp->address_list))) { | ||
596 | retval = -EBUSY; | ||
597 | goto err_bindx_rem; | ||
598 | } | ||
599 | |||
600 | /* The list may contain either IPv4 or IPv6 address; | ||
601 | * determine the address length to copy the address to | ||
602 | * saveaddr. | ||
603 | */ | ||
604 | sa_addr = (struct sockaddr *)addr_buf; | ||
605 | af = sctp_get_af_specific(sa_addr->sa_family); | ||
606 | if (!af) { | ||
607 | retval = -EINVAL; | ||
608 | goto err_bindx_rem; | ||
609 | } | ||
610 | memcpy(&saveaddr, sa_addr, af->sockaddr_len); | ||
611 | saveaddr.v4.sin_port = ntohs(saveaddr.v4.sin_port); | ||
612 | if (saveaddr.v4.sin_port != bp->port) { | ||
613 | retval = -EINVAL; | ||
614 | goto err_bindx_rem; | ||
615 | } | ||
616 | |||
617 | /* FIXME - There is probably a need to check if sk->sk_saddr and | ||
618 | * sk->sk_rcv_addr are currently set to one of the addresses to | ||
619 | * be removed. This is something which needs to be looked into | ||
620 | * when we are fixing the outstanding issues with multi-homing | ||
621 | * socket routing and failover schemes. Refer to comments in | ||
622 | * sctp_do_bind(). -daisy | ||
623 | */ | ||
624 | sctp_local_bh_disable(); | ||
625 | sctp_write_lock(&ep->base.addr_lock); | ||
626 | |||
627 | retval = sctp_del_bind_addr(bp, &saveaddr); | ||
628 | |||
629 | sctp_write_unlock(&ep->base.addr_lock); | ||
630 | sctp_local_bh_enable(); | ||
631 | |||
632 | addr_buf += af->sockaddr_len; | ||
633 | err_bindx_rem: | ||
634 | if (retval < 0) { | ||
635 | /* Failed. Add back the ones that have been removed. */ | ||
636 | if (cnt > 0) | ||
637 | sctp_bindx_add(sk, addrs, cnt); | ||
638 | return retval; | ||
639 | } | ||
640 | } | ||
641 | |||
642 | return retval; | ||
643 | } | ||
644 | |||
645 | /* Send an ASCONF chunk with Delete IP address parameters to all the peers of | ||
646 | * the associations that are part of the endpoint indicating that a list of | ||
647 | * local addresses are removed from the endpoint. | ||
648 | * | ||
649 | * If any of the addresses is already in the bind address list of the | ||
650 | * association, we do not send the chunk for that association. But it will not | ||
651 | * affect other associations. | ||
652 | * | ||
653 | * Only sctp_setsockopt_bindx() is supposed to call this function. | ||
654 | */ | ||
655 | static int sctp_send_asconf_del_ip(struct sock *sk, | ||
656 | struct sockaddr *addrs, | ||
657 | int addrcnt) | ||
658 | { | ||
659 | struct sctp_sock *sp; | ||
660 | struct sctp_endpoint *ep; | ||
661 | struct sctp_association *asoc; | ||
662 | struct sctp_bind_addr *bp; | ||
663 | struct sctp_chunk *chunk; | ||
664 | union sctp_addr *laddr; | ||
665 | void *addr_buf; | ||
666 | struct sctp_af *af; | ||
667 | struct list_head *pos; | ||
668 | int i; | ||
669 | int retval = 0; | ||
670 | |||
671 | if (!sctp_addip_enable) | ||
672 | return retval; | ||
673 | |||
674 | sp = sctp_sk(sk); | ||
675 | ep = sp->ep; | ||
676 | |||
677 | SCTP_DEBUG_PRINTK("%s: (sk: %p, addrs: %p, addrcnt: %d)\n", | ||
678 | __FUNCTION__, sk, addrs, addrcnt); | ||
679 | |||
680 | list_for_each(pos, &ep->asocs) { | ||
681 | asoc = list_entry(pos, struct sctp_association, asocs); | ||
682 | |||
683 | if (!asoc->peer.asconf_capable) | ||
684 | continue; | ||
685 | |||
686 | if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP) | ||
687 | continue; | ||
688 | |||
689 | if (!sctp_state(asoc, ESTABLISHED)) | ||
690 | continue; | ||
691 | |||
692 | /* Check if any address in the packed array of addresses is | ||
693 | * not present in the bind address list of the association. | ||
694 | * If so, do not send the asconf chunk to its peer, but | ||
695 | * continue with other associations. | ||
696 | */ | ||
697 | addr_buf = addrs; | ||
698 | for (i = 0; i < addrcnt; i++) { | ||
699 | laddr = (union sctp_addr *)addr_buf; | ||
700 | af = sctp_get_af_specific(laddr->v4.sin_family); | ||
701 | if (!af) { | ||
702 | retval = -EINVAL; | ||
703 | goto out; | ||
704 | } | ||
705 | |||
706 | if (!sctp_assoc_lookup_laddr(asoc, laddr)) | ||
707 | break; | ||
708 | |||
709 | addr_buf += af->sockaddr_len; | ||
710 | } | ||
711 | if (i < addrcnt) | ||
712 | continue; | ||
713 | |||
714 | /* Find one address in the association's bind address list | ||
715 | * that is not in the packed array of addresses. This is to | ||
716 | * make sure that we do not delete all the addresses in the | ||
717 | * association. | ||
718 | */ | ||
719 | sctp_read_lock(&asoc->base.addr_lock); | ||
720 | bp = &asoc->base.bind_addr; | ||
721 | laddr = sctp_find_unmatch_addr(bp, (union sctp_addr *)addrs, | ||
722 | addrcnt, sp); | ||
723 | sctp_read_unlock(&asoc->base.addr_lock); | ||
724 | if (!laddr) | ||
725 | continue; | ||
726 | |||
727 | chunk = sctp_make_asconf_update_ip(asoc, laddr, addrs, addrcnt, | ||
728 | SCTP_PARAM_DEL_IP); | ||
729 | if (!chunk) { | ||
730 | retval = -ENOMEM; | ||
731 | goto out; | ||
732 | } | ||
733 | |||
734 | retval = sctp_send_asconf(asoc, chunk); | ||
735 | |||
736 | /* FIXME: After sending the delete address ASCONF chunk, we | ||
737 | * cannot remove the addresses from the association's bind | ||
738 | * address list yet, because packets may still be sent to | ||
739 | * the addresses being deleted, so we should wait until the | ||
740 | * ASCONF_ACK packet is received. | ||
741 | */ | ||
742 | } | ||
743 | out: | ||
744 | return retval; | ||
745 | } | ||
746 | |||
747 | /* Helper for tunneling sctp_bindx() requests through sctp_setsockopt() | ||
748 | * | ||
749 | * API 8.1 | ||
750 | * int sctp_bindx(int sd, struct sockaddr *addrs, int addrcnt, | ||
751 | * int flags); | ||
752 | * | ||
753 | * If sd is an IPv4 socket, the addresses passed must be IPv4 addresses. | ||
754 | * If the sd is an IPv6 socket, the addresses passed can either be IPv4 | ||
755 | * or IPv6 addresses. | ||
756 | * | ||
757 | * A single address may be specified as INADDR_ANY or IN6ADDR_ANY, see | ||
758 | * Section 3.1.2 for this usage. | ||
759 | * | ||
760 | * addrs is a pointer to an array of one or more socket addresses. Each | ||
761 | * address is contained in its appropriate structure (i.e. struct | ||
762 | * sockaddr_in or struct sockaddr_in6); the family of the address type | ||
763 | * must be used to distinguish the address length (note that this | ||
764 | * representation is termed a "packed array" of addresses). The caller | ||
765 | * specifies the number of addresses in the array with addrcnt. | ||
766 | * | ||
767 | * On success, sctp_bindx() returns 0. On failure, sctp_bindx() returns | ||
768 | * -1, and sets errno to the appropriate error code. | ||
769 | * | ||
770 | * For SCTP, the port given in each socket address must be the same, or | ||
771 | * sctp_bindx() will fail, setting errno to EINVAL. | ||
772 | * | ||
773 | * The flags parameter is formed from the bitwise OR of zero or more of | ||
774 | * the following currently defined flags: | ||
775 | * | ||
776 | * SCTP_BINDX_ADD_ADDR | ||
777 | * | ||
778 | * SCTP_BINDX_REM_ADDR | ||
779 | * | ||
780 | * SCTP_BINDX_ADD_ADDR directs SCTP to add the given addresses to the | ||
781 | * association, and SCTP_BINDX_REM_ADDR directs SCTP to remove the given | ||
782 | * addresses from the association. The two flags are mutually exclusive; | ||
783 | * if both are given, sctp_bindx() will fail with EINVAL. A caller may | ||
784 | * not remove all addresses from an association; sctp_bindx() will | ||
785 | * reject such an attempt with EINVAL. | ||
786 | * | ||
787 | * An application can use sctp_bindx(SCTP_BINDX_ADD_ADDR) to associate | ||
788 | * additional addresses with an endpoint after calling bind(). Or use | ||
789 | * sctp_bindx(SCTP_BINDX_REM_ADDR) to remove some addresses a listening | ||
790 | * socket is associated with so that no new association accepted will be | ||
791 | * associated with those addresses. If the endpoint supports dynamic | ||
792 | * address reconfiguration, an SCTP_BINDX_REM_ADDR or SCTP_BINDX_ADD_ADDR | ||
793 | * may cause the endpoint to send the appropriate message to the peer to | ||
794 | * change the peer's address lists. | ||
795 | * | ||
796 | * Adding and removing addresses from a connected association is | ||
797 | * optional functionality. Implementations that do not support this | ||
798 | * functionality should return EOPNOTSUPP. | ||
799 | * | ||
800 | * Basically does nothing but copy the addresses from user to kernel | ||
801 | * land and invoke either sctp_bindx_add() or sctp_bindx_rem() on the sk. | ||
802 | * This is used for tunneling the sctp_bindx() request through sctp_setsockopt() from userspace. | ||
803 | * | ||
804 | * As an optimization, we don't use copy_from_user() directly: we first | ||
805 | * do the cheap sanity checks (buffer size, and access_ok() on the user | ||
806 | * pointer); only if those succeed do we allocate the memory (an | ||
807 | * expensive operation) needed to copy the data to kernel space. Then we | ||
808 | * do the copying without re-checking the user space area | ||
809 | * (__copy_from_user()). | ||
810 | * | ||
811 | * On exit there is no need to do sockfd_put(), sys_setsockopt() does | ||
812 | * it. | ||
813 | * | ||
814 | * sk The sk of the socket | ||
815 | * addrs The pointer to the addresses in user land | ||
816 | * addrssize Size of the addrs buffer | ||
817 | * op Operation to perform (add or remove, see the flags of | ||
818 | * sctp_bindx) | ||
819 | * | ||
820 | * Returns 0 if ok, <0 errno code on error. | ||
821 | */ | ||
822 | SCTP_STATIC int sctp_setsockopt_bindx(struct sock* sk, | ||
823 | struct sockaddr __user *addrs, | ||
824 | int addrs_size, int op) | ||
825 | { | ||
826 | struct sockaddr *kaddrs; | ||
827 | int err; | ||
828 | int addrcnt = 0; | ||
829 | int walk_size = 0; | ||
830 | struct sockaddr *sa_addr; | ||
831 | void *addr_buf; | ||
832 | struct sctp_af *af; | ||
833 | |||
834 | SCTP_DEBUG_PRINTK("sctp_setsocktopt_bindx: sk %p addrs %p" | ||
835 | " addrs_size %d opt %d\n", sk, addrs, addrs_size, op); | ||
836 | |||
837 | if (unlikely(addrs_size <= 0)) | ||
838 | return -EINVAL; | ||
839 | |||
840 | /* Check the user passed a healthy pointer. */ | ||
841 | if (unlikely(!access_ok(VERIFY_READ, addrs, addrs_size))) | ||
842 | return -EFAULT; | ||
843 | |||
844 | /* Alloc space for the address array in kernel memory. */ | ||
845 | kaddrs = (struct sockaddr *)kmalloc(addrs_size, GFP_KERNEL); | ||
846 | if (unlikely(!kaddrs)) | ||
847 | return -ENOMEM; | ||
848 | |||
849 | if (__copy_from_user(kaddrs, addrs, addrs_size)) { | ||
850 | kfree(kaddrs); | ||
851 | return -EFAULT; | ||
852 | } | ||
853 | |||
854 | /* Walk through the addrs buffer and count the number of addresses. */ | ||
855 | addr_buf = kaddrs; | ||
856 | while (walk_size < addrs_size) { | ||
857 | sa_addr = (struct sockaddr *)addr_buf; | ||
858 | af = sctp_get_af_specific(sa_addr->sa_family); | ||
859 | |||
860 | /* If the address family is not supported or if this address | ||
861 | * causes the address buffer to overflow return EINVAL. | ||
862 | */ | ||
863 | if (!af || (walk_size + af->sockaddr_len) > addrs_size) { | ||
864 | kfree(kaddrs); | ||
865 | return -EINVAL; | ||
866 | } | ||
867 | addrcnt++; | ||
868 | addr_buf += af->sockaddr_len; | ||
869 | walk_size += af->sockaddr_len; | ||
870 | } | ||
871 | |||
872 | /* Do the work. */ | ||
873 | switch (op) { | ||
874 | case SCTP_BINDX_ADD_ADDR: | ||
875 | err = sctp_bindx_add(sk, kaddrs, addrcnt); | ||
876 | if (err) | ||
877 | goto out; | ||
878 | err = sctp_send_asconf_add_ip(sk, kaddrs, addrcnt); | ||
879 | break; | ||
880 | |||
881 | case SCTP_BINDX_REM_ADDR: | ||
882 | err = sctp_bindx_rem(sk, kaddrs, addrcnt); | ||
883 | if (err) | ||
884 | goto out; | ||
885 | err = sctp_send_asconf_del_ip(sk, kaddrs, addrcnt); | ||
886 | break; | ||
887 | |||
888 | default: | ||
889 | err = -EINVAL; | ||
890 | break; | ||
891 | } | ||
892 | |||
893 | out: | ||
894 | kfree(kaddrs); | ||
895 | |||
896 | return err; | ||
897 | } | ||
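The userspace sctp_bindx() wrapper tunnels into exactly this setsockopt() handler. A hedged sketch of the removal side (helper name illustrative); refusing to drop the last remaining address surfaces to the caller as EBUSY:

#include <errno.h>
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/sctp.h>

/* Ask the kernel to stop using one local address; the address must
 * carry the same port the socket is already bound to.
 */
static int drop_one_addr(int sd, struct sockaddr *sa)
{
	if (sctp_bindx(sd, sa, 1, SCTP_BINDX_REM_ADDR) < 0) {
		if (errno == EBUSY)
			fprintf(stderr, "cannot remove the last address\n");
		return -1;
	}
	return 0;
}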
898 | |||
899 | /* API 3.1.4 close() - UDP Style Syntax | ||
900 | * Applications use close() to perform graceful shutdown (as described in | ||
901 | * Section 10.1 of [SCTP]) on ALL the associations currently represented | ||
902 | * by a UDP-style socket. | ||
903 | * | ||
904 | * The syntax is | ||
905 | * | ||
906 | * ret = close(int sd); | ||
907 | * | ||
908 | * sd - the socket descriptor of the associations to be closed. | ||
909 | * | ||
910 | * To gracefully shutdown a specific association represented by the | ||
911 | * UDP-style socket, an application should use the sendmsg() call, | ||
912 | * passing no user data, but including the appropriate flag in the | ||
913 | * ancillary data (see Section xxxx). | ||
914 | * | ||
915 | * If sd in the close() call is a branched-off socket representing only | ||
916 | * one association, the shutdown is performed on that association only. | ||
917 | * | ||
918 | * 4.1.6 close() - TCP Style Syntax | ||
919 | * | ||
920 | * Applications use close() to gracefully close down an association. | ||
921 | * | ||
922 | * The syntax is: | ||
923 | * | ||
924 | * int close(int sd); | ||
925 | * | ||
926 | * sd - the socket descriptor of the association to be closed. | ||
927 | * | ||
928 | * After an application calls close() on a socket descriptor, no further | ||
929 | * socket operations will succeed on that descriptor. | ||
930 | * | ||
931 | * API 7.1.4 SO_LINGER | ||
932 | * | ||
933 | * An application using the TCP-style socket can use this option to | ||
934 | * perform the SCTP ABORT primitive. The linger option structure is: | ||
935 | * | ||
936 | * struct linger { | ||
937 | * int l_onoff; // option on/off | ||
938 | * int l_linger; // linger time | ||
939 | * }; | ||
940 | * | ||
941 | * To enable the option, set l_onoff to 1. If the l_linger value is set | ||
942 | * to 0, calling close() is the same as the ABORT primitive. If the | ||
943 | * value is set to a negative value, the setsockopt() call will return | ||
944 | * an error. If the value is set to a positive value linger_time, the | ||
945 | * close() can be blocked for at most linger_time ms. If the graceful | ||
946 | * shutdown phase does not finish during this period, close() will | ||
947 | * return but the graceful shutdown phase continues in the system. | ||
948 | */ | ||
949 | SCTP_STATIC void sctp_close(struct sock *sk, long timeout) | ||
950 | { | ||
951 | struct sctp_endpoint *ep; | ||
952 | struct sctp_association *asoc; | ||
953 | struct list_head *pos, *temp; | ||
954 | |||
955 | SCTP_DEBUG_PRINTK("sctp_close(sk: 0x%p, timeout:%ld)\n", sk, timeout); | ||
956 | |||
957 | sctp_lock_sock(sk); | ||
958 | sk->sk_shutdown = SHUTDOWN_MASK; | ||
959 | |||
960 | ep = sctp_sk(sk)->ep; | ||
961 | |||
962 | /* Walk all associations on a socket, not on an endpoint. */ | ||
963 | list_for_each_safe(pos, temp, &ep->asocs) { | ||
964 | asoc = list_entry(pos, struct sctp_association, asocs); | ||
965 | |||
966 | if (sctp_style(sk, TCP)) { | ||
967 | /* A closed association can still be in the list if | ||
968 | * it belongs to a TCP-style listening socket that is | ||
969 | * not yet accepted. If so, free it. If not, send an | ||
970 | * ABORT or SHUTDOWN based on the linger options. | ||
971 | */ | ||
972 | if (sctp_state(asoc, CLOSED)) { | ||
973 | sctp_unhash_established(asoc); | ||
974 | sctp_association_free(asoc); | ||
975 | |||
976 | } else if (sock_flag(sk, SOCK_LINGER) && | ||
977 | !sk->sk_lingertime) | ||
978 | sctp_primitive_ABORT(asoc, NULL); | ||
979 | else | ||
980 | sctp_primitive_SHUTDOWN(asoc, NULL); | ||
981 | } else | ||
982 | sctp_primitive_SHUTDOWN(asoc, NULL); | ||
983 | } | ||
984 | |||
985 | /* Clean up any skbs sitting on the receive queue. */ | ||
986 | sctp_queue_purge_ulpevents(&sk->sk_receive_queue); | ||
987 | sctp_queue_purge_ulpevents(&sctp_sk(sk)->pd_lobby); | ||
988 | |||
989 | /* On a TCP-style socket, block for at most linger_time if set. */ | ||
990 | if (sctp_style(sk, TCP) && timeout) | ||
991 | sctp_wait_for_close(sk, timeout); | ||
992 | |||
993 | /* This will run the backlog queue. */ | ||
994 | sctp_release_sock(sk); | ||
995 | |||
996 | /* Supposedly, no process has access to the socket, but | ||
997 | * the net layers still may. | ||
998 | */ | ||
999 | sctp_local_bh_disable(); | ||
1000 | sctp_bh_lock_sock(sk); | ||
1001 | |||
1002 | /* Hold the sock, since sk_common_release() will put sock_put() | ||
1003 | * and we have just a little more cleanup. | ||
1004 | */ | ||
1005 | sock_hold(sk); | ||
1006 | sk_common_release(sk); | ||
1007 | |||
1008 | sctp_bh_unlock_sock(sk); | ||
1009 | sctp_local_bh_enable(); | ||
1010 | |||
1011 | sock_put(sk); | ||
1012 | |||
1013 | SCTP_DBG_OBJCNT_DEC(sock); | ||
1014 | } | ||
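The SO_LINGER behaviour documented above can be exercised from userspace as in this minimal sketch (TCP-style socket assumed; helper name illustrative): with l_onoff set and l_linger zero, close() behaves like the ABORT primitive instead of a graceful SHUTDOWN.

#include <unistd.h>
#include <sys/socket.h>

/* Abort the association on close() instead of shutting it down
 * gracefully (TCP-style SCTP socket assumed).
 */
static int abort_on_close(int sd)
{
	struct linger l = { .l_onoff = 1, .l_linger = 0 };

	if (setsockopt(sd, SOL_SOCKET, SO_LINGER, &l, sizeof(l)) < 0)
		return -1;
	return close(sd);	/* maps to the ABORT primitive */
}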
1015 | |||
1016 | /* Handle EPIPE error. */ | ||
1017 | static int sctp_error(struct sock *sk, int flags, int err) | ||
1018 | { | ||
1019 | if (err == -EPIPE) | ||
1020 | err = sock_error(sk) ? : -EPIPE; | ||
1021 | if (err == -EPIPE && !(flags & MSG_NOSIGNAL)) | ||
1022 | send_sig(SIGPIPE, current, 0); | ||
1023 | return err; | ||
1024 | } | ||
1025 | |||
1026 | /* API 3.1.3 sendmsg() - UDP Style Syntax | ||
1027 | * | ||
1028 | * An application uses sendmsg() and recvmsg() calls to transmit data to | ||
1029 | * and receive data from its peer. | ||
1030 | * | ||
1031 | * ssize_t sendmsg(int socket, const struct msghdr *message, | ||
1032 | * int flags); | ||
1033 | * | ||
1034 | * socket - the socket descriptor of the endpoint. | ||
1035 | * message - pointer to the msghdr structure which contains a single | ||
1036 | * user message and possibly some ancillary data. | ||
1037 | * | ||
1038 | * See Section 5 for complete description of the data | ||
1039 | * structures. | ||
1040 | * | ||
1041 | * flags - flags sent or received with the user message, see Section | ||
1042 | * 5 for complete description of the flags. | ||
1043 | * | ||
1044 | * Note: This function could use a rewrite especially when explicit | ||
1045 | * connect support comes in. | ||
1046 | */ | ||
1047 | /* BUG: We do not implement the equivalent of sk_stream_wait_memory(). */ | ||
1048 | |||
1049 | SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *, sctp_cmsgs_t *); | ||
1050 | |||
1051 | SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, | ||
1052 | struct msghdr *msg, size_t msg_len) | ||
1053 | { | ||
1054 | struct sctp_sock *sp; | ||
1055 | struct sctp_endpoint *ep; | ||
1056 | struct sctp_association *new_asoc = NULL, *asoc = NULL; | ||
1057 | struct sctp_transport *transport, *chunk_tp; | ||
1058 | struct sctp_chunk *chunk; | ||
1059 | union sctp_addr to; | ||
1060 | struct sockaddr *msg_name = NULL; | ||
1061 | struct sctp_sndrcvinfo default_sinfo = { 0 }; | ||
1062 | struct sctp_sndrcvinfo *sinfo; | ||
1063 | struct sctp_initmsg *sinit; | ||
1064 | sctp_assoc_t associd = 0; | ||
1065 | sctp_cmsgs_t cmsgs = { NULL }; | ||
1066 | int err; | ||
1067 | sctp_scope_t scope; | ||
1068 | long timeo; | ||
1069 | __u16 sinfo_flags = 0; | ||
1070 | struct sctp_datamsg *datamsg; | ||
1071 | struct list_head *pos; | ||
1072 | int msg_flags = msg->msg_flags; | ||
1073 | |||
1074 | SCTP_DEBUG_PRINTK("sctp_sendmsg(sk: %p, msg: %p, msg_len: %zu)\n", | ||
1075 | sk, msg, msg_len); | ||
1076 | |||
1077 | err = 0; | ||
1078 | sp = sctp_sk(sk); | ||
1079 | ep = sp->ep; | ||
1080 | |||
1081 | SCTP_DEBUG_PRINTK("Using endpoint: %s.\n", ep->debug_name); | ||
1082 | |||
1083 | /* We cannot send a message over a TCP-style listening socket. */ | ||
1084 | if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) { | ||
1085 | err = -EPIPE; | ||
1086 | goto out_nounlock; | ||
1087 | } | ||
1088 | |||
1089 | /* Parse out the SCTP CMSGs. */ | ||
1090 | err = sctp_msghdr_parse(msg, &cmsgs); | ||
1091 | |||
1092 | if (err) { | ||
1093 | SCTP_DEBUG_PRINTK("msghdr parse err = %x\n", err); | ||
1094 | goto out_nounlock; | ||
1095 | } | ||
1096 | |||
1097 | /* Fetch the destination address for this packet. This | ||
1098 | * address only selects the association--it is not necessarily | ||
1099 | * the address we will send to. | ||
1100 | * For a peeled-off socket, msg_name is ignored. | ||
1101 | */ | ||
1102 | if (!sctp_style(sk, UDP_HIGH_BANDWIDTH) && msg->msg_name) { | ||
1103 | int msg_namelen = msg->msg_namelen; | ||
1104 | |||
1105 | err = sctp_verify_addr(sk, (union sctp_addr *)msg->msg_name, | ||
1106 | msg_namelen); | ||
1107 | if (err) | ||
1108 | return err; | ||
1109 | |||
1110 | if (msg_namelen > sizeof(to)) | ||
1111 | msg_namelen = sizeof(to); | ||
1112 | memcpy(&to, msg->msg_name, msg_namelen); | ||
1113 | SCTP_DEBUG_PRINTK("Just memcpy'd. msg_name is " | ||
1114 | "0x%x:%u.\n", | ||
1115 | to.v4.sin_addr.s_addr, to.v4.sin_port); | ||
1116 | |||
1117 | to.v4.sin_port = ntohs(to.v4.sin_port); | ||
1118 | msg_name = msg->msg_name; | ||
1119 | } | ||
1120 | |||
1121 | sinfo = cmsgs.info; | ||
1122 | sinit = cmsgs.init; | ||
1123 | |||
1124 | /* Did the user specify SNDRCVINFO? */ | ||
1125 | if (sinfo) { | ||
1126 | sinfo_flags = sinfo->sinfo_flags; | ||
1127 | associd = sinfo->sinfo_assoc_id; | ||
1128 | } | ||
1129 | |||
1130 | SCTP_DEBUG_PRINTK("msg_len: %zu, sinfo_flags: 0x%x\n", | ||
1131 | msg_len, sinfo_flags); | ||
1132 | |||
1133 | /* MSG_EOF or MSG_ABORT cannot be set on a TCP-style socket. */ | ||
1134 | if (sctp_style(sk, TCP) && (sinfo_flags & (MSG_EOF | MSG_ABORT))) { | ||
1135 | err = -EINVAL; | ||
1136 | goto out_nounlock; | ||
1137 | } | ||
1138 | |||
1139 | /* If MSG_EOF is set, no data can be sent. Disallow sending zero | ||
1140 | * length messages when MSG_EOF|MSG_ABORT is not set. | ||
1141 | * If MSG_ABORT is set, the message length could be non zero with | ||
1142 | * the msg_iov set to the user abort reason. | ||
1143 | */ | ||
1144 | if (((sinfo_flags & MSG_EOF) && (msg_len > 0)) || | ||
1145 | (!(sinfo_flags & (MSG_EOF|MSG_ABORT)) && (msg_len == 0))) { | ||
1146 | err = -EINVAL; | ||
1147 | goto out_nounlock; | ||
1148 | } | ||
1149 | |||
1150 | /* If MSG_ADDR_OVER is set, there must be an address | ||
1151 | * specified in msg_name. | ||
1152 | */ | ||
1153 | if ((sinfo_flags & MSG_ADDR_OVER) && (!msg->msg_name)) { | ||
1154 | err = -EINVAL; | ||
1155 | goto out_nounlock; | ||
1156 | } | ||
1157 | |||
1158 | transport = NULL; | ||
1159 | |||
1160 | SCTP_DEBUG_PRINTK("About to look up association.\n"); | ||
1161 | |||
1162 | sctp_lock_sock(sk); | ||
1163 | |||
1164 | /* If a msg_name has been specified, assume this is to be used. */ | ||
1165 | if (msg_name) { | ||
1166 | /* Look for a matching association on the endpoint. */ | ||
1167 | asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport); | ||
1168 | if (!asoc) { | ||
1169 | /* If we could not find a matching association on the | ||
1170 | * endpoint, make sure that it is not a TCP-style | ||
1171 | * socket that already has an association or there is | ||
1172 | * no peeled-off association on another socket. | ||
1173 | */ | ||
1174 | if ((sctp_style(sk, TCP) && | ||
1175 | sctp_sstate(sk, ESTABLISHED)) || | ||
1176 | sctp_endpoint_is_peeled_off(ep, &to)) { | ||
1177 | err = -EADDRNOTAVAIL; | ||
1178 | goto out_unlock; | ||
1179 | } | ||
1180 | } | ||
1181 | } else { | ||
1182 | asoc = sctp_id2assoc(sk, associd); | ||
1183 | if (!asoc) { | ||
1184 | err = -EPIPE; | ||
1185 | goto out_unlock; | ||
1186 | } | ||
1187 | } | ||
1188 | |||
1189 | if (asoc) { | ||
1190 | SCTP_DEBUG_PRINTK("Just looked up association: %p.\n", asoc); | ||
1191 | |||
1192 | /* We cannot send a message on a TCP-style SCTP_SS_ESTABLISHED | ||
1193 | * socket that has an association in CLOSED state. This can | ||
1194 | * happen when an accepted socket has an association that is | ||
1195 | * already CLOSED. | ||
1196 | */ | ||
1197 | if (sctp_state(asoc, CLOSED) && sctp_style(sk, TCP)) { | ||
1198 | err = -EPIPE; | ||
1199 | goto out_unlock; | ||
1200 | } | ||
1201 | |||
1202 | if (sinfo_flags & MSG_EOF) { | ||
1203 | SCTP_DEBUG_PRINTK("Shutting down association: %p\n", | ||
1204 | asoc); | ||
1205 | sctp_primitive_SHUTDOWN(asoc, NULL); | ||
1206 | err = 0; | ||
1207 | goto out_unlock; | ||
1208 | } | ||
1209 | if (sinfo_flags & MSG_ABORT) { | ||
1210 | SCTP_DEBUG_PRINTK("Aborting association: %p\n", asoc); | ||
1211 | sctp_primitive_ABORT(asoc, msg); | ||
1212 | err = 0; | ||
1213 | goto out_unlock; | ||
1214 | } | ||
1215 | } | ||
1216 | |||
1217 | /* Do we need to create the association? */ | ||
1218 | if (!asoc) { | ||
1219 | SCTP_DEBUG_PRINTK("There is no association yet.\n"); | ||
1220 | |||
1221 | if (sinfo_flags & (MSG_EOF | MSG_ABORT)) { | ||
1222 | err = -EINVAL; | ||
1223 | goto out_unlock; | ||
1224 | } | ||
1225 | |||
1226 | /* Check for invalid stream against the stream counts, | ||
1227 | * either the default or the user specified stream counts. | ||
1228 | */ | ||
1229 | if (sinfo) { | ||
1230 | if (!sinit || !sinit->sinit_num_ostreams) { | ||
1231 | /* Check against the defaults. */ | ||
1232 | if (sinfo->sinfo_stream >= | ||
1233 | sp->initmsg.sinit_num_ostreams) { | ||
1234 | err = -EINVAL; | ||
1235 | goto out_unlock; | ||
1236 | } | ||
1237 | } else { | ||
1238 | /* Check against the requested. */ | ||
1239 | if (sinfo->sinfo_stream >= | ||
1240 | sinit->sinit_num_ostreams) { | ||
1241 | err = -EINVAL; | ||
1242 | goto out_unlock; | ||
1243 | } | ||
1244 | } | ||
1245 | } | ||
1246 | |||
1247 | /* | ||
1248 | * API 3.1.2 bind() - UDP Style Syntax | ||
1249 | * If a bind() or sctp_bindx() is not called prior to a | ||
1250 | * sendmsg() call that initiates a new association, the | ||
1251 | * system picks an ephemeral port and will choose an address | ||
1252 | * set equivalent to binding with a wildcard address. | ||
1253 | */ | ||
1254 | if (!ep->base.bind_addr.port) { | ||
1255 | if (sctp_autobind(sk)) { | ||
1256 | err = -EAGAIN; | ||
1257 | goto out_unlock; | ||
1258 | } | ||
1259 | } | ||
1260 | |||
1261 | scope = sctp_scope(&to); | ||
1262 | new_asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); | ||
1263 | if (!new_asoc) { | ||
1264 | err = -ENOMEM; | ||
1265 | goto out_unlock; | ||
1266 | } | ||
1267 | asoc = new_asoc; | ||
1268 | |||
1269 | /* If the SCTP_INIT ancillary data is specified, set all | ||
1270 | * the association init values accordingly. | ||
1271 | */ | ||
1272 | if (sinit) { | ||
1273 | if (sinit->sinit_num_ostreams) { | ||
1274 | asoc->c.sinit_num_ostreams = | ||
1275 | sinit->sinit_num_ostreams; | ||
1276 | } | ||
1277 | if (sinit->sinit_max_instreams) { | ||
1278 | asoc->c.sinit_max_instreams = | ||
1279 | sinit->sinit_max_instreams; | ||
1280 | } | ||
1281 | if (sinit->sinit_max_attempts) { | ||
1282 | asoc->max_init_attempts | ||
1283 | = sinit->sinit_max_attempts; | ||
1284 | } | ||
1285 | if (sinit->sinit_max_init_timeo) { | ||
1286 | asoc->max_init_timeo = | ||
1287 | msecs_to_jiffies(sinit->sinit_max_init_timeo); | ||
1288 | } | ||
1289 | } | ||
1290 | |||
1291 | /* Prime the peer's transport structures. */ | ||
1292 | transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL); | ||
1293 | if (!transport) { | ||
1294 | err = -ENOMEM; | ||
1295 | goto out_free; | ||
1296 | } | ||
1297 | err = sctp_assoc_set_bind_addr_from_ep(asoc, GFP_KERNEL); | ||
1298 | if (err < 0) { | ||
1299 | err = -ENOMEM; | ||
1300 | goto out_free; | ||
1301 | } | ||
1302 | } | ||
1303 | |||
1304 | /* ASSERT: we have a valid association at this point. */ | ||
1305 | SCTP_DEBUG_PRINTK("We have a valid association.\n"); | ||
1306 | |||
1307 | if (!sinfo) { | ||
1308 | /* If the user didn't specify SNDRCVINFO, make up one with | ||
1309 | * some defaults. | ||
1310 | */ | ||
1311 | default_sinfo.sinfo_stream = asoc->default_stream; | ||
1312 | default_sinfo.sinfo_flags = asoc->default_flags; | ||
1313 | default_sinfo.sinfo_ppid = asoc->default_ppid; | ||
1314 | default_sinfo.sinfo_context = asoc->default_context; | ||
1315 | default_sinfo.sinfo_timetolive = asoc->default_timetolive; | ||
1316 | default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc); | ||
1317 | sinfo = &default_sinfo; | ||
1318 | } | ||
1319 | |||
1320 | /* API 7.1.7, the sndbuf size per association bounds the | ||
1321 | * maximum size of data that can be sent in a single send call. | ||
1322 | */ | ||
1323 | if (msg_len > sk->sk_sndbuf) { | ||
1324 | err = -EMSGSIZE; | ||
1325 | goto out_free; | ||
1326 | } | ||
1327 | |||
1328 | /* If fragmentation is disabled and the message length exceeds the | ||
1329 | * association fragmentation point, return EMSGSIZE. The I-D | ||
1330 | * does not specify what this error is, but this looks like | ||
1331 | * a great fit. | ||
1332 | */ | ||
1333 | if (sctp_sk(sk)->disable_fragments && (msg_len > asoc->frag_point)) { | ||
1334 | err = -EMSGSIZE; | ||
1335 | goto out_free; | ||
1336 | } | ||
1337 | |||
1338 | if (sinfo) { | ||
1339 | /* Check for invalid stream. */ | ||
1340 | if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) { | ||
1341 | err = -EINVAL; | ||
1342 | goto out_free; | ||
1343 | } | ||
1344 | } | ||
1345 | |||
1346 | timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT); | ||
1347 | if (!sctp_wspace(asoc)) { | ||
1348 | err = sctp_wait_for_sndbuf(asoc, &timeo, msg_len); | ||
1349 | if (err) | ||
1350 | goto out_free; | ||
1351 | } | ||
1352 | |||
1353 | /* If an address is passed with the sendto/sendmsg call, it is used | ||
1354 | * to override the primary destination address in the TCP model, or | ||
1355 | * when MSG_ADDR_OVER flag is set in the UDP model. | ||
1356 | */ | ||
1357 | if ((sctp_style(sk, TCP) && msg_name) || | ||
1358 | (sinfo_flags & MSG_ADDR_OVER)) { | ||
1359 | chunk_tp = sctp_assoc_lookup_paddr(asoc, &to); | ||
1360 | if (!chunk_tp) { | ||
1361 | err = -EINVAL; | ||
1362 | goto out_free; | ||
1363 | } | ||
1364 | } else | ||
1365 | chunk_tp = NULL; | ||
1366 | |||
1367 | /* Auto-connect, if we aren't connected already. */ | ||
1368 | if (sctp_state(asoc, CLOSED)) { | ||
1369 | err = sctp_primitive_ASSOCIATE(asoc, NULL); | ||
1370 | if (err < 0) | ||
1371 | goto out_free; | ||
1372 | SCTP_DEBUG_PRINTK("We associated primitively.\n"); | ||
1373 | } | ||
1374 | |||
1375 | /* Break the message into multiple chunks of maximum size. */ | ||
1376 | datamsg = sctp_datamsg_from_user(asoc, sinfo, msg, msg_len); | ||
1377 | if (!datamsg) { | ||
1378 | err = -ENOMEM; | ||
1379 | goto out_free; | ||
1380 | } | ||
1381 | |||
1382 | /* Now send the (possibly) fragmented message. */ | ||
1383 | list_for_each(pos, &datamsg->chunks) { | ||
1384 | chunk = list_entry(pos, struct sctp_chunk, frag_list); | ||
1385 | sctp_datamsg_track(chunk); | ||
1386 | |||
1387 | /* Do accounting for the write space. */ | ||
1388 | sctp_set_owner_w(chunk); | ||
1389 | |||
1390 | chunk->transport = chunk_tp; | ||
1391 | |||
1392 | /* Send it to the lower layers. Note: all chunks | ||
1393 | * must either fail or succeed. The lower layer | ||
1394 | * works that way today. Keep it that way or this | ||
1395 | * breaks. | ||
1396 | */ | ||
1397 | err = sctp_primitive_SEND(asoc, chunk); | ||
1398 | /* Did the lower layer accept the chunk? */ | ||
1399 | if (err) | ||
1400 | sctp_chunk_free(chunk); | ||
1401 | SCTP_DEBUG_PRINTK("We sent primitively.\n"); | ||
1402 | } | ||
1403 | |||
1404 | sctp_datamsg_free(datamsg); | ||
1405 | if (err) | ||
1406 | goto out_free; | ||
1407 | else | ||
1408 | err = msg_len; | ||
1409 | |||
1410 | /* If we are already past ASSOCIATE, the lower | ||
1411 | * layers are responsible for association cleanup. | ||
1412 | */ | ||
1413 | goto out_unlock; | ||
1414 | |||
1415 | out_free: | ||
1416 | if (new_asoc) | ||
1417 | sctp_association_free(asoc); | ||
1418 | out_unlock: | ||
1419 | sctp_release_sock(sk); | ||
1420 | |||
1421 | out_nounlock: | ||
1422 | return sctp_error(sk, msg_flags, err); | ||
1423 | |||
1424 | #if 0 | ||
1425 | do_sock_err: | ||
1426 | if (msg_len) | ||
1427 | err = msg_len; | ||
1428 | else | ||
1429 | err = sock_error(sk); | ||
1430 | goto out; | ||
1431 | |||
1432 | do_interrupted: | ||
1433 | if (msg_len) | ||
1434 | err = msg_len; | ||
1435 | goto out; | ||
1436 | #endif /* 0 */ | ||
1437 | } | ||
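From the user side, the ancillary data parsed by sctp_msghdr_parse() above is supplied as an SCTP_SNDRCV cmsg. A minimal sketch, assuming the lksctp-tools headers; the helper name and the choice of stream are illustrative:

#include <stdint.h>
#include <string.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/sctp.h>

/* Send one message on a chosen stream of a UDP-style socket. */
static ssize_t send_on_stream(int sd, const struct sockaddr *to,
			      socklen_t tolen, const void *data, size_t len,
			      uint16_t stream)
{
	struct msghdr msg;
	struct iovec iov;
	struct cmsghdr *cmsg;
	struct sctp_sndrcvinfo *sinfo;
	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];

	iov.iov_base = (void *)data;
	iov.iov_len = len;

	memset(&msg, 0, sizeof(msg));
	memset(cbuf, 0, sizeof(cbuf));
	msg.msg_name = (void *)to;
	msg.msg_namelen = tolen;
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = IPPROTO_SCTP;
	cmsg->cmsg_type = SCTP_SNDRCV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));

	sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
	sinfo->sinfo_stream = stream;	/* other sinfo fields stay zero */

	return sendmsg(sd, &msg, 0);
}

Setting sinfo_flags to MSG_EOF (with no data) or MSG_ABORT in the same structure triggers the SHUTDOWN and ABORT branches handled above.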
1438 | |||
1439 | /* This is an extended version of skb_pull() that removes the data from the | ||
1440 | * start of a skb even when data is spread across the list of skb's in the | ||
1441 | * frag_list. len specifies the total amount of data that needs to be removed. | ||
1442 | * When 'len' bytes can be removed from the skb, it returns 0. | ||
1443 | * If 'len' exceeds the total skb length, it returns the number of bytes | ||
1444 | * that could not be removed. | ||
1445 | */ | ||
1446 | static int sctp_skb_pull(struct sk_buff *skb, int len) | ||
1447 | { | ||
1448 | struct sk_buff *list; | ||
1449 | int skb_len = skb_headlen(skb); | ||
1450 | int rlen; | ||
1451 | |||
1452 | if (len <= skb_len) { | ||
1453 | __skb_pull(skb, len); | ||
1454 | return 0; | ||
1455 | } | ||
1456 | len -= skb_len; | ||
1457 | __skb_pull(skb, skb_len); | ||
1458 | |||
1459 | for (list = skb_shinfo(skb)->frag_list; list; list = list->next) { | ||
1460 | rlen = sctp_skb_pull(list, len); | ||
1461 | skb->len -= (len-rlen); | ||
1462 | skb->data_len -= (len-rlen); | ||
1463 | |||
1464 | if (!rlen) | ||
1465 | return 0; | ||
1466 | |||
1467 | len = rlen; | ||
1468 | } | ||
1469 | |||
1470 | return len; | ||
1471 | } | ||
1472 | |||
1473 | /* API 3.1.3 recvmsg() - UDP Style Syntax | ||
1474 | * | ||
1475 | * ssize_t recvmsg(int socket, struct msghdr *message, | ||
1476 | * int flags); | ||
1477 | * | ||
1478 | * socket - the socket descriptor of the endpoint. | ||
1479 | * message - pointer to the msghdr structure which contains a single | ||
1480 | * user message and possibly some ancillary data. | ||
1481 | * | ||
1482 | * See Section 5 for complete description of the data | ||
1483 | * structures. | ||
1484 | * | ||
1485 | * flags - flags sent or received with the user message, see Section | ||
1486 | * 5 for complete description of the flags. | ||
1487 | */ | ||
1488 | static struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *); | ||
1489 | |||
1490 | SCTP_STATIC int sctp_recvmsg(struct kiocb *iocb, struct sock *sk, | ||
1491 | struct msghdr *msg, size_t len, int noblock, | ||
1492 | int flags, int *addr_len) | ||
1493 | { | ||
1494 | struct sctp_ulpevent *event = NULL; | ||
1495 | struct sctp_sock *sp = sctp_sk(sk); | ||
1496 | struct sk_buff *skb; | ||
1497 | int copied; | ||
1498 | int err = 0; | ||
1499 | int skb_len; | ||
1500 | |||
1501 | SCTP_DEBUG_PRINTK("sctp_recvmsg(%s: %p, %s: %p, %s: %zd, %s: %d, %s: " | ||
1502 | "0x%x, %s: %p)\n", "sk", sk, "msghdr", msg, | ||
1503 | "len", len, "knoblauch", noblock, | ||
1504 | "flags", flags, "addr_len", addr_len); | ||
1505 | |||
1506 | sctp_lock_sock(sk); | ||
1507 | |||
1508 | if (sctp_style(sk, TCP) && !sctp_sstate(sk, ESTABLISHED)) { | ||
1509 | err = -ENOTCONN; | ||
1510 | goto out; | ||
1511 | } | ||
1512 | |||
1513 | skb = sctp_skb_recv_datagram(sk, flags, noblock, &err); | ||
1514 | if (!skb) | ||
1515 | goto out; | ||
1516 | |||
1517 | /* Get the total length of the skb including any skb's in the | ||
1518 | * frag_list. | ||
1519 | */ | ||
1520 | skb_len = skb->len; | ||
1521 | |||
1522 | copied = skb_len; | ||
1523 | if (copied > len) | ||
1524 | copied = len; | ||
1525 | |||
1526 | err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); | ||
1527 | |||
1528 | event = sctp_skb2event(skb); | ||
1529 | |||
1530 | if (err) | ||
1531 | goto out_free; | ||
1532 | |||
1533 | sock_recv_timestamp(msg, sk, skb); | ||
1534 | if (sctp_ulpevent_is_notification(event)) { | ||
1535 | msg->msg_flags |= MSG_NOTIFICATION; | ||
1536 | sp->pf->event_msgname(event, msg->msg_name, addr_len); | ||
1537 | } else { | ||
1538 | sp->pf->skb_msgname(skb, msg->msg_name, addr_len); | ||
1539 | } | ||
1540 | |||
1541 | /* Check if we allow SCTP_SNDRCVINFO. */ | ||
1542 | if (sp->subscribe.sctp_data_io_event) | ||
1543 | sctp_ulpevent_read_sndrcvinfo(event, msg); | ||
1544 | #if 0 | ||
1545 | /* FIXME: we should be calling IP/IPv6 layers. */ | ||
1546 | if (sk->sk_protinfo.af_inet.cmsg_flags) | ||
1547 | ip_cmsg_recv(msg, skb); | ||
1548 | #endif | ||
1549 | |||
1550 | err = copied; | ||
1551 | |||
1552 | /* If skb's length exceeds the user's buffer, update the skb and | ||
1553 | * push it back to the receive_queue so that the next call to | ||
1554 | * recvmsg() will return the remaining data. Don't set MSG_EOR. | ||
1555 | */ | ||
1556 | if (skb_len > copied) { | ||
1557 | msg->msg_flags &= ~MSG_EOR; | ||
1558 | if (flags & MSG_PEEK) | ||
1559 | goto out_free; | ||
1560 | sctp_skb_pull(skb, copied); | ||
1561 | skb_queue_head(&sk->sk_receive_queue, skb); | ||
1562 | |||
1563 | /* When only partial message is copied to the user, increase | ||
1564 | * rwnd by that amount. If all the data in the skb is read, | ||
1565 | * rwnd is updated when the event is freed. | ||
1566 | */ | ||
1567 | sctp_assoc_rwnd_increase(event->asoc, copied); | ||
1568 | goto out; | ||
1569 | } else if ((event->msg_flags & MSG_NOTIFICATION) || | ||
1570 | (event->msg_flags & MSG_EOR)) | ||
1571 | msg->msg_flags |= MSG_EOR; | ||
1572 | else | ||
1573 | msg->msg_flags &= ~MSG_EOR; | ||
1574 | |||
1575 | out_free: | ||
1576 | if (flags & MSG_PEEK) { | ||
1577 | /* Release the skb reference acquired after peeking the skb in | ||
1578 | * sctp_skb_recv_datagram(). | ||
1579 | */ | ||
1580 | kfree_skb(skb); | ||
1581 | } else { | ||
1582 | /* Free the event which includes releasing the reference to | ||
1583 | * the owner of the skb, freeing the skb and updating the | ||
1584 | * rwnd. | ||
1585 | */ | ||
1586 | sctp_ulpevent_free(event); | ||
1587 | } | ||
1588 | out: | ||
1589 | sctp_release_sock(sk); | ||
1590 | return err; | ||
1591 | } | ||
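On the receive side the flags set above are what userspace keys off: MSG_NOTIFICATION marks event notifications, and a missing MSG_EOR means only part of a large message was delivered. A hedged sketch (buffer size and handling illustrative):

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/sctp.h>

static ssize_t read_one(int sd)
{
	char buf[2048];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	struct msghdr msg;
	ssize_t n;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;

	n = recvmsg(sd, &msg, 0);
	if (n < 0)
		return n;

	if (msg.msg_flags & MSG_NOTIFICATION)
		printf("SCTP event notification, %zd bytes\n", n);
	else if (!(msg.msg_flags & MSG_EOR))
		printf("partial user message, call recvmsg() again\n");
	else
		printf("complete user message, %zd bytes\n", n);

	return n;
}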
1592 | |||
1593 | /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) | ||
1594 | * | ||
1595 | * This option is an on/off flag. If enabled, no SCTP message | ||
1596 | * fragmentation will be performed. Instead, if a message being sent | ||
1597 | * exceeds the current PMTU size, the message will NOT be sent and | ||
1598 | * an error will be indicated to the user instead. | ||
1599 | */ | ||
1600 | static int sctp_setsockopt_disable_fragments(struct sock *sk, | ||
1601 | char __user *optval, int optlen) | ||
1602 | { | ||
1603 | int val; | ||
1604 | |||
1605 | if (optlen < sizeof(int)) | ||
1606 | return -EINVAL; | ||
1607 | |||
1608 | if (get_user(val, (int __user *)optval)) | ||
1609 | return -EFAULT; | ||
1610 | |||
1611 | sctp_sk(sk)->disable_fragments = (val == 0) ? 0 : 1; | ||
1612 | |||
1613 | return 0; | ||
1614 | } | ||
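A userspace sketch of turning the flag on (the option value is a plain int; helper name illustrative). SCTP_NODELAY further below takes the same on/off int form:

#include <sys/socket.h>
#include <netinet/sctp.h>

static int disable_fragments(int sd)
{
	int on = 1;

	/* Fail sends larger than the PMTU instead of fragmenting them. */
	return setsockopt(sd, IPPROTO_SCTP, SCTP_DISABLE_FRAGMENTS,
			  &on, sizeof(on));
}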
1615 | |||
1616 | static int sctp_setsockopt_events(struct sock *sk, char __user *optval, | ||
1617 | int optlen) | ||
1618 | { | ||
1619 | if (optlen != sizeof(struct sctp_event_subscribe)) | ||
1620 | return -EINVAL; | ||
1621 | if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen)) | ||
1622 | return -EFAULT; | ||
1623 | return 0; | ||
1624 | } | ||
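Userspace subscribes by handing in a struct sctp_event_subscribe with one on/off byte per event class. A sketch that enables only per-message SNDRCV info and association change notifications (all other fields stay zero; helper name illustrative):

#include <string.h>
#include <sys/socket.h>
#include <netinet/sctp.h>

static int subscribe_events(int sd)
{
	struct sctp_event_subscribe ev;

	memset(&ev, 0, sizeof(ev));
	ev.sctp_data_io_event = 1;	/* deliver SCTP_SNDRCV ancillary data */
	ev.sctp_association_event = 1;	/* deliver SCTP_ASSOC_CHANGE events */

	return setsockopt(sd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev));
}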
1625 | |||
1626 | /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) | ||
1627 | * | ||
1628 | * This socket option is applicable to the UDP-style socket only. When | ||
1629 | * set it will cause associations that are idle for more than the | ||
1630 | * specified number of seconds to automatically close. An association | ||
1631 | * being idle is defined as an association that has NOT sent or received | ||
1632 | * user data. The special value of '0' indicates that no automatic | ||
1633 | * close of any associations should be performed. The option expects an | ||
1634 | * integer defining the number of seconds of idle time before an | ||
1635 | * association is closed. | ||
1636 | */ | ||
1637 | static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval, | ||
1638 | int optlen) | ||
1639 | { | ||
1640 | struct sctp_sock *sp = sctp_sk(sk); | ||
1641 | |||
1642 | /* Applicable to UDP-style socket only */ | ||
1643 | if (sctp_style(sk, TCP)) | ||
1644 | return -EOPNOTSUPP; | ||
1645 | if (optlen != sizeof(int)) | ||
1646 | return -EINVAL; | ||
1647 | if (copy_from_user(&sp->autoclose, optval, optlen)) | ||
1648 | return -EFAULT; | ||
1649 | |||
1650 | sp->ep->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ; | ||
1651 | return 0; | ||
1652 | } | ||
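For example, a UDP-style socket can be told to tear down associations that have been idle for two minutes; the value is in seconds and 0 disables autoclose (helper name illustrative):

#include <sys/socket.h>
#include <netinet/sctp.h>

static int autoclose_after(int sd, int seconds)
{
	return setsockopt(sd, IPPROTO_SCTP, SCTP_AUTOCLOSE,
			  &seconds, sizeof(seconds));
}

/* autoclose_after(sd, 120); autoclose_after(sd, 0) turns it back off. */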
1653 | |||
1654 | /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) | ||
1655 | * | ||
1656 | * Applications can enable or disable heartbeats for any peer address of | ||
1657 | * an association, modify an address's heartbeat interval, force a | ||
1658 | * heartbeat to be sent immediately, and adjust the address's maximum | ||
1659 | * number of retransmissions sent before an address is considered | ||
1660 | * unreachable. The following structure is used to access and modify an | ||
1661 | * address's parameters: | ||
1662 | * | ||
1663 | * struct sctp_paddrparams { | ||
1664 | * sctp_assoc_t spp_assoc_id; | ||
1665 | * struct sockaddr_storage spp_address; | ||
1666 | * uint32_t spp_hbinterval; | ||
1667 | * uint16_t spp_pathmaxrxt; | ||
1668 | * }; | ||
1669 | * | ||
1670 | * spp_assoc_id - (UDP style socket) This is filled in by the application, | ||
1671 | * and identifies the association for this query. | ||
1672 | * spp_address - This specifies which address is of interest. | ||
1673 | * spp_hbinterval - This contains the value of the heartbeat interval, | ||
1674 | * in milliseconds. A value of 0, when modifying the | ||
1675 | * parameter, specifies that the heartbeat on this | ||
1676 | * address should be disabled. A value of UINT32_MAX | ||
1677 | * (4294967295), when modifying the parameter, | ||
1678 | * specifies that a heartbeat should be sent | ||
1679 | * immediately to the peer address, and the current | ||
1680 | * interval should remain unchanged. | ||
1681 | * spp_pathmaxrxt - This contains the maximum number of | ||
1682 | * retransmissions before this address shall be | ||
1683 | * considered unreachable. | ||
1684 | */ | ||
1685 | static int sctp_setsockopt_peer_addr_params(struct sock *sk, | ||
1686 | char __user *optval, int optlen) | ||
1687 | { | ||
1688 | struct sctp_paddrparams params; | ||
1689 | struct sctp_transport *trans; | ||
1690 | int error; | ||
1691 | |||
1692 | if (optlen != sizeof(struct sctp_paddrparams)) | ||
1693 | return -EINVAL; | ||
1694 | if (copy_from_user(¶ms, optval, optlen)) | ||
1695 | return -EFAULT; | ||
1696 | |||
1697 | /* | ||
1698 | * API 7. Socket Options (setting the default value for the endpoint) | ||
1699 | * All options that support specific settings on an association by | ||
1700 | * filling in either an association id variable or a sockaddr_storage | ||
1701 | * SHOULD also support setting of the same value for the entire endpoint | ||
1702 | * (i.e. future associations). To accomplish this the following logic is | ||
1703 | * used when setting one of these options: | ||
1704 | * | ||
1705 | * c) If neither the sockaddr_storage or association identification is | ||
1706 | * set i.e. the sockaddr_storage is set to all 0's (INADDR_ANY) and | ||
1707 | * the association identification is 0, the settings are a default | ||
1708 | * and to be applied to the endpoint (all future associations). | ||
1709 | */ | ||
1710 | |||
1711 | /* update default value for endpoint (all future associations) */ | ||
1712 | if (!params.spp_assoc_id && | ||
1713 | sctp_is_any((union sctp_addr *)&params.spp_address)) { | ||
1714 | /* Manual heartbeat on an endpoint is invalid. */ | ||
1715 | if (0xffffffff == params.spp_hbinterval) | ||
1716 | return -EINVAL; | ||
1717 | else if (params.spp_hbinterval) | ||
1718 | sctp_sk(sk)->paddrparam.spp_hbinterval = | ||
1719 | params.spp_hbinterval; | ||
1720 | if (params.spp_pathmaxrxt) | ||
1721 | sctp_sk(sk)->paddrparam.spp_pathmaxrxt = | ||
1722 | params.spp_pathmaxrxt; | ||
1723 | return 0; | ||
1724 | } | ||
1725 | |||
1726 | trans = sctp_addr_id2transport(sk, ¶ms.spp_address, | ||
1727 | params.spp_assoc_id); | ||
1728 | if (!trans) | ||
1729 | return -EINVAL; | ||
1730 | |||
1731 | /* Applications can enable or disable heartbeats for any peer address | ||
1732 | * of an association, modify an address's heartbeat interval, force a | ||
1733 | * heartbeat to be sent immediately, and adjust the address's maximum | ||
1734 | * number of retransmissions sent before an address is considered | ||
1735 | * unreachable. | ||
1736 | * | ||
1737 | * The value of the heartbeat interval, in milliseconds. A value of | ||
1738 | * UINT32_MAX (4294967295), when modifying the parameter, specifies | ||
1739 | * that a heartbeat should be sent immediately to the peer address, | ||
1740 | * and the current interval should remain unchanged. | ||
1741 | */ | ||
1742 | if (0xffffffff == params.spp_hbinterval) { | ||
1743 | error = sctp_primitive_REQUESTHEARTBEAT(trans->asoc, trans); | ||
1744 | if (error) | ||
1745 | return error; | ||
1746 | } else { | ||
1747 | /* The value of the heartbeat interval, in milliseconds. A value of 0, | ||
1748 | * when modifying the parameter, specifies that the heartbeat on this | ||
1749 | * address should be disabled. | ||
1750 | */ | ||
1751 | if (params.spp_hbinterval) { | ||
1752 | trans->hb_allowed = 1; | ||
1753 | trans->hb_interval = | ||
1754 | msecs_to_jiffies(params.spp_hbinterval); | ||
1755 | } else | ||
1756 | trans->hb_allowed = 0; | ||
1757 | } | ||
1758 | |||
1759 | /* spp_pathmaxrxt contains the maximum number of retransmissions | ||
1760 | * before this address shall be considered unreachable. | ||
1761 | */ | ||
1762 | if (params.spp_pathmaxrxt) | ||
1763 | trans->max_retrans = params.spp_pathmaxrxt; | ||
1764 | |||
1765 | return 0; | ||
1766 | } | ||
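A sketch of the on-demand heartbeat path above: an spp_hbinterval of 0xffffffff asks for one immediate heartbeat to a peer address without changing the interval (helper name illustrative; the address and association id come from the caller):

#include <string.h>
#include <sys/socket.h>
#include <netinet/sctp.h>

static int heartbeat_now(int sd, sctp_assoc_t id,
			 const struct sockaddr_storage *peer)
{
	struct sctp_paddrparams p;

	memset(&p, 0, sizeof(p));
	p.spp_assoc_id = id;
	p.spp_address = *peer;
	p.spp_hbinterval = 0xffffffff;	/* send one heartbeat right away */
	/* spp_pathmaxrxt left 0: keep the current retransmission limit */

	return setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
			  &p, sizeof(p));
}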
1767 | |||
1768 | /* 7.1.3 Initialization Parameters (SCTP_INITMSG) | ||
1769 | * | ||
1770 | * Applications can specify protocol parameters for the default association | ||
1771 | * initialization. The option name argument to setsockopt() and getsockopt() | ||
1772 | * is SCTP_INITMSG. | ||
1773 | * | ||
1774 | * Setting initialization parameters is effective only on an unconnected | ||
1775 | * socket (for UDP-style sockets only future associations are affected | ||
1776 | * by the change). With TCP-style sockets, this option is inherited by | ||
1777 | * sockets derived from a listener socket. | ||
1778 | */ | ||
1779 | static int sctp_setsockopt_initmsg(struct sock *sk, char __user *optval, int optlen) | ||
1780 | { | ||
1781 | struct sctp_initmsg sinit; | ||
1782 | struct sctp_sock *sp = sctp_sk(sk); | ||
1783 | |||
1784 | if (optlen != sizeof(struct sctp_initmsg)) | ||
1785 | return -EINVAL; | ||
1786 | if (copy_from_user(&sinit, optval, optlen)) | ||
1787 | return -EFAULT; | ||
1788 | |||
1789 | if (sinit.sinit_num_ostreams) | ||
1790 | sp->initmsg.sinit_num_ostreams = sinit.sinit_num_ostreams; | ||
1791 | if (sinit.sinit_max_instreams) | ||
1792 | sp->initmsg.sinit_max_instreams = sinit.sinit_max_instreams; | ||
1793 | if (sinit.sinit_max_attempts) | ||
1794 | sp->initmsg.sinit_max_attempts = sinit.sinit_max_attempts; | ||
1795 | if (sinit.sinit_max_init_timeo) | ||
1796 | sp->initmsg.sinit_max_init_timeo = sinit.sinit_max_init_timeo; | ||
1797 | |||
1798 | return 0; | ||
1799 | } | ||
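A sketch of setting these defaults on an unconnected socket (values illustrative; fields left at zero keep the current defaults):

#include <string.h>
#include <sys/socket.h>
#include <netinet/sctp.h>

static int request_streams(int sd)
{
	struct sctp_initmsg im;

	memset(&im, 0, sizeof(im));
	im.sinit_num_ostreams = 10;	/* ask for 10 outbound streams */
	im.sinit_max_instreams = 10;	/* accept at most 10 inbound streams */

	return setsockopt(sd, IPPROTO_SCTP, SCTP_INITMSG, &im, sizeof(im));
}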
1800 | |||
1801 | /* | ||
1802 | * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) | ||
1803 | * | ||
1804 | * Applications that wish to use the sendto() system call may wish to | ||
1805 | * specify a default set of parameters that would normally be supplied | ||
1806 | * through the inclusion of ancillary data. This socket option allows | ||
1807 | * such an application to set the default sctp_sndrcvinfo structure. | ||
1808 | * The application that wishes to use this socket option simply passes | ||
1809 | * to this call the sctp_sndrcvinfo structure defined in Section | ||
1810 | * 5.2.2. The input parameters accepted by this call include | ||
1811 | * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, and | ||
1812 | * sinfo_timetolive. The user must provide the sinfo_assoc_id field | ||
1813 | * to this call if the caller is using the UDP model. | ||
1814 | */ | ||
1815 | static int sctp_setsockopt_default_send_param(struct sock *sk, | ||
1816 | char __user *optval, int optlen) | ||
1817 | { | ||
1818 | struct sctp_sndrcvinfo info; | ||
1819 | struct sctp_association *asoc; | ||
1820 | struct sctp_sock *sp = sctp_sk(sk); | ||
1821 | |||
1822 | if (optlen != sizeof(struct sctp_sndrcvinfo)) | ||
1823 | return -EINVAL; | ||
1824 | if (copy_from_user(&info, optval, optlen)) | ||
1825 | return -EFAULT; | ||
1826 | |||
1827 | asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); | ||
1828 | if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) | ||
1829 | return -EINVAL; | ||
1830 | |||
1831 | if (asoc) { | ||
1832 | asoc->default_stream = info.sinfo_stream; | ||
1833 | asoc->default_flags = info.sinfo_flags; | ||
1834 | asoc->default_ppid = info.sinfo_ppid; | ||
1835 | asoc->default_context = info.sinfo_context; | ||
1836 | asoc->default_timetolive = info.sinfo_timetolive; | ||
1837 | } else { | ||
1838 | sp->default_stream = info.sinfo_stream; | ||
1839 | sp->default_flags = info.sinfo_flags; | ||
1840 | sp->default_ppid = info.sinfo_ppid; | ||
1841 | sp->default_context = info.sinfo_context; | ||
1842 | sp->default_timetolive = info.sinfo_timetolive; | ||
1843 | } | ||
1844 | |||
1845 | return 0; | ||
1846 | } | ||
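A hedged user-space sketch of the same option, assuming <netinet/sctp.h> and an illustrative helper name: passing an assoc_id of 0 on a one-to-many socket updates the socket-wide defaults, otherwise the chosen association is updated, matching the branch above.

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Default all subsequent send()/sendto() calls to 'stream' and 'ppid'. */
static int set_default_send(int fd, sctp_assoc_t assoc_id,
			    uint16_t stream, uint32_t ppid)
{
	struct sctp_sndrcvinfo info;

	memset(&info, 0, sizeof(info));
	info.sinfo_stream   = stream;
	info.sinfo_ppid     = ppid;
	info.sinfo_assoc_id = assoc_id;

	return setsockopt(fd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM,
			  &info, sizeof(info));
}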
1847 | |||
1848 | /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) | ||
1849 | * | ||
1850 | * Requests that the local SCTP stack use the enclosed peer address as | ||
1851 | * the association primary. The enclosed address must be one of the | ||
1852 | * association peer's addresses. | ||
1853 | */ | ||
1854 | static int sctp_setsockopt_primary_addr(struct sock *sk, char __user *optval, | ||
1855 | int optlen) | ||
1856 | { | ||
1857 | struct sctp_prim prim; | ||
1858 | struct sctp_transport *trans; | ||
1859 | |||
1860 | if (optlen != sizeof(struct sctp_prim)) | ||
1861 | return -EINVAL; | ||
1862 | |||
1863 | if (copy_from_user(&prim, optval, sizeof(struct sctp_prim))) | ||
1864 | return -EFAULT; | ||
1865 | |||
1866 | trans = sctp_addr_id2transport(sk, &prim.ssp_addr, prim.ssp_assoc_id); | ||
1867 | if (!trans) | ||
1868 | return -EINVAL; | ||
1869 | |||
1870 | sctp_assoc_set_primary(trans->asoc, trans); | ||
1871 | |||
1872 | return 0; | ||
1873 | } | ||
1874 | |||
1875 | /* | ||
1876 | * 7.1.5 SCTP_NODELAY | ||
1877 | * | ||
1878 | * Turn on/off any Nagle-like algorithm. This means that packets are | ||
1879 | * generally sent as soon as possible and no unnecessary delays are | ||
1880 | * introduced, at the cost of more packets in the network. Expects an | ||
1881 | * integer boolean flag. | ||
1882 | */ | ||
1883 | static int sctp_setsockopt_nodelay(struct sock *sk, char __user *optval, | ||
1884 | int optlen) | ||
1885 | { | ||
1886 | int val; | ||
1887 | |||
1888 | if (optlen < sizeof(int)) | ||
1889 | return -EINVAL; | ||
1890 | if (get_user(val, (int __user *)optval)) | ||
1891 | return -EFAULT; | ||
1892 | |||
1893 | sctp_sk(sk)->nodelay = (val == 0) ? 0 : 1; | ||
1894 | return 0; | ||
1895 | } | ||
1896 | |||
1897 | /* | ||
1898 | * | ||
1899 | * 7.1.1 SCTP_RTOINFO | ||
1900 | * | ||
1901 | * The protocol parameters used to initialize and bound retransmission | ||
1902 | * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access | ||
1903 | * and modify these parameters. | ||
1904 | * All parameters are time values, in milliseconds. A value of 0, when | ||
1905 | * modifying the parameters, indicates that the current value should not | ||
1906 | * be changed. | ||
1907 | * | ||
1908 | */ | ||
1909 | static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, int optlen) { | ||
1910 | struct sctp_rtoinfo rtoinfo; | ||
1911 | struct sctp_association *asoc; | ||
1912 | |||
1913 | if (optlen != sizeof (struct sctp_rtoinfo)) | ||
1914 | return -EINVAL; | ||
1915 | |||
1916 | if (copy_from_user(&rtoinfo, optval, optlen)) | ||
1917 | return -EFAULT; | ||
1918 | |||
1919 | asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); | ||
1920 | |||
1921 | /* Set the values to the specific association */ | ||
1922 | if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) | ||
1923 | return -EINVAL; | ||
1924 | |||
1925 | if (asoc) { | ||
1926 | if (rtoinfo.srto_initial != 0) | ||
1927 | asoc->rto_initial = | ||
1928 | msecs_to_jiffies(rtoinfo.srto_initial); | ||
1929 | if (rtoinfo.srto_max != 0) | ||
1930 | asoc->rto_max = msecs_to_jiffies(rtoinfo.srto_max); | ||
1931 | if (rtoinfo.srto_min != 0) | ||
1932 | asoc->rto_min = msecs_to_jiffies(rtoinfo.srto_min); | ||
1933 | } else { | ||
1934 | /* If there is no association or the association-id = 0 | ||
1935 | * set the values to the endpoint. | ||
1936 | */ | ||
1937 | struct sctp_sock *sp = sctp_sk(sk); | ||
1938 | |||
1939 | if (rtoinfo.srto_initial != 0) | ||
1940 | sp->rtoinfo.srto_initial = rtoinfo.srto_initial; | ||
1941 | if (rtoinfo.srto_max != 0) | ||
1942 | sp->rtoinfo.srto_max = rtoinfo.srto_max; | ||
1943 | if (rtoinfo.srto_min != 0) | ||
1944 | sp->rtoinfo.srto_min = rtoinfo.srto_min; | ||
1945 | } | ||
1946 | |||
1947 | return 0; | ||
1948 | } | ||
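From user space the knob looks like this; a sketch assuming <netinet/sctp.h>, with millisecond values and 0 meaning "keep the current value", exactly as the checks above implement. The helper name and numbers are illustrative.

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Bound the retransmission timeout for one association (or the
 * endpoint defaults when assoc_id is 0 on a one-to-many socket).
 */
static int set_rto_bounds(int fd, sctp_assoc_t assoc_id)
{
	struct sctp_rtoinfo rto;

	memset(&rto, 0, sizeof(rto));
	rto.srto_assoc_id = assoc_id;
	rto.srto_initial  = 500;	/* ms */
	rto.srto_min      = 100;	/* ms */
	rto.srto_max      = 2000;	/* ms */

	return setsockopt(fd, IPPROTO_SCTP, SCTP_RTOINFO,
			  &rto, sizeof(rto));
}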
1949 | |||
1950 | /* | ||
1951 | * | ||
1952 | * 7.1.2 SCTP_ASSOCINFO | ||
1953 | * | ||
1954 | * This option is used to tune the maximum retransmission attempts | ||
1955 | * of the association. | ||
1956 | * Returns an error if the new association retransmission value is | ||
1957 | * greater than the sum of the retransmission value of the peer. | ||
1958 | * See [SCTP] for more information. | ||
1959 | * | ||
1960 | */ | ||
1961 | static int sctp_setsockopt_associnfo(struct sock *sk, char __user *optval, int optlen) | ||
1962 | { | ||
1963 | |||
1964 | struct sctp_assocparams assocparams; | ||
1965 | struct sctp_association *asoc; | ||
1966 | |||
1967 | if (optlen != sizeof(struct sctp_assocparams)) | ||
1968 | return -EINVAL; | ||
1969 | if (copy_from_user(&assocparams, optval, optlen)) | ||
1970 | return -EFAULT; | ||
1971 | |||
1972 | asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); | ||
1973 | |||
1974 | if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) | ||
1975 | return -EINVAL; | ||
1976 | |||
1977 | /* Set the values to the specific association */ | ||
1978 | if (asoc) { | ||
1979 | if (assocparams.sasoc_asocmaxrxt != 0) | ||
1980 | asoc->max_retrans = assocparams.sasoc_asocmaxrxt; | ||
1981 | if (assocparams.sasoc_cookie_life != 0) { | ||
1982 | asoc->cookie_life.tv_sec = | ||
1983 | assocparams.sasoc_cookie_life / 1000; | ||
1984 | asoc->cookie_life.tv_usec = | ||
1985 | (assocparams.sasoc_cookie_life % 1000) | ||
1986 | * 1000; | ||
1987 | } | ||
1988 | } else { | ||
1989 | /* Set the values to the endpoint */ | ||
1990 | struct sctp_sock *sp = sctp_sk(sk); | ||
1991 | |||
1992 | if (assocparams.sasoc_asocmaxrxt != 0) | ||
1993 | sp->assocparams.sasoc_asocmaxrxt = | ||
1994 | assocparams.sasoc_asocmaxrxt; | ||
1995 | if (assocparams.sasoc_cookie_life != 0) | ||
1996 | sp->assocparams.sasoc_cookie_life = | ||
1997 | assocparams.sasoc_cookie_life; | ||
1998 | } | ||
1999 | return 0; | ||
2000 | } | ||
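The matching user-space call, sketched under the same assumptions (<netinet/sctp.h>, illustrative values); note that sasoc_cookie_life is supplied in milliseconds and converted to a timeval by the kernel code above.

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Limit association retransmissions and shorten the cookie lifetime. */
static int set_assoc_params(int fd, sctp_assoc_t assoc_id)
{
	struct sctp_assocparams ap;

	memset(&ap, 0, sizeof(ap));
	ap.sasoc_assoc_id    = assoc_id;
	ap.sasoc_asocmaxrxt  = 5;	/* give up after 5 retransmissions */
	ap.sasoc_cookie_life = 30000;	/* 30 s, in ms */

	return setsockopt(fd, IPPROTO_SCTP, SCTP_ASSOCINFO,
			  &ap, sizeof(ap));
}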
2001 | |||
2002 | /* | ||
2003 | * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) | ||
2004 | * | ||
2005 | * This socket option is a boolean flag which turns on or off mapped V4 | ||
2006 | * addresses. If this option is turned on and the socket is type | ||
2007 | * PF_INET6, then IPv4 addresses will be mapped to V6 representation. | ||
2008 | * If this option is turned off, then no mapping will be done of V4 | ||
2009 | * addresses and a user will receive both PF_INET6 and PF_INET type | ||
2010 | * addresses on the socket. | ||
2011 | */ | ||
2012 | static int sctp_setsockopt_mappedv4(struct sock *sk, char __user *optval, int optlen) | ||
2013 | { | ||
2014 | int val; | ||
2015 | struct sctp_sock *sp = sctp_sk(sk); | ||
2016 | |||
2017 | if (optlen < sizeof(int)) | ||
2018 | return -EINVAL; | ||
2019 | if (get_user(val, (int __user *)optval)) | ||
2020 | return -EFAULT; | ||
2021 | if (val) | ||
2022 | sp->v4mapped = 1; | ||
2023 | else | ||
2024 | sp->v4mapped = 0; | ||
2025 | |||
2026 | return 0; | ||
2027 | } | ||
2028 | |||
2029 | /* | ||
2030 | * 7.1.17 Set the maximum fragmentation size (SCTP_MAXSEG) | ||
2031 | * | ||
2032 | * This socket option specifies the maximum size to put in any outgoing | ||
2033 | * SCTP chunk. If a message is larger than this size it will be | ||
2034 | * fragmented by SCTP into the specified size. Note that the underlying | ||
2035 | * SCTP implementation may fragment into smaller sized chunks when the | ||
2036 | * PMTU of the underlying association is smaller than the value set by | ||
2037 | * the user. | ||
2038 | */ | ||
2039 | static int sctp_setsockopt_maxseg(struct sock *sk, char __user *optval, int optlen) | ||
2040 | { | ||
2041 | struct sctp_association *asoc; | ||
2042 | struct list_head *pos; | ||
2043 | struct sctp_sock *sp = sctp_sk(sk); | ||
2044 | int val; | ||
2045 | |||
2046 | if (optlen < sizeof(int)) | ||
2047 | return -EINVAL; | ||
2048 | if (get_user(val, (int __user *)optval)) | ||
2049 | return -EFAULT; | ||
2050 | if ((val < 8) || (val > SCTP_MAX_CHUNK_LEN)) | ||
2051 | return -EINVAL; | ||
2052 | sp->user_frag = val; | ||
2053 | |||
2054 | if (val) { | ||
2055 | /* Update the frag_point of the existing associations. */ | ||
2056 | list_for_each(pos, &(sp->ep->asocs)) { | ||
2057 | asoc = list_entry(pos, struct sctp_association, asocs); | ||
2058 | asoc->frag_point = sctp_frag_point(sp, asoc->pmtu); | ||
2059 | } | ||
2060 | } | ||
2061 | |||
2062 | return 0; | ||
2063 | } | ||
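A minimal user-space sketch (same header assumptions, illustrative value); the value is only an upper bound, since the code above re-derives frag_point from the path MTU of each existing association.

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Fragment outgoing messages at roughly 1400 bytes; the stack may use
 * a smaller size if the association PMTU requires it.
 */
static int cap_chunk_size(int fd)
{
	int val = 1400;

	return setsockopt(fd, IPPROTO_SCTP, SCTP_MAXSEG, &val, sizeof(val));
}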
2064 | |||
2065 | |||
2066 | /* | ||
2067 | * 7.1.9 Set Peer Primary Address (SCTP_SET_PEER_PRIMARY_ADDR) | ||
2068 | * | ||
2069 | * Requests that the peer mark the enclosed address as the association | ||
2070 | * primary. The enclosed address must be one of the association's | ||
2071 | * locally bound addresses. The following structure is used to make a | ||
2072 | * set primary request: | ||
2073 | */ | ||
2074 | static int sctp_setsockopt_peer_primary_addr(struct sock *sk, char __user *optval, | ||
2075 | int optlen) | ||
2076 | { | ||
2077 | struct sctp_sock *sp; | ||
2078 | struct sctp_endpoint *ep; | ||
2079 | struct sctp_association *asoc = NULL; | ||
2080 | struct sctp_setpeerprim prim; | ||
2081 | struct sctp_chunk *chunk; | ||
2082 | int err; | ||
2083 | |||
2084 | sp = sctp_sk(sk); | ||
2085 | ep = sp->ep; | ||
2086 | |||
2087 | if (!sctp_addip_enable) | ||
2088 | return -EPERM; | ||
2089 | |||
2090 | if (optlen != sizeof(struct sctp_setpeerprim)) | ||
2091 | return -EINVAL; | ||
2092 | |||
2093 | if (copy_from_user(&prim, optval, optlen)) | ||
2094 | return -EFAULT; | ||
2095 | |||
2096 | asoc = sctp_id2assoc(sk, prim.sspp_assoc_id); | ||
2097 | if (!asoc) | ||
2098 | return -EINVAL; | ||
2099 | |||
2100 | if (!asoc->peer.asconf_capable) | ||
2101 | return -EPERM; | ||
2102 | |||
2103 | if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY) | ||
2104 | return -EPERM; | ||
2105 | |||
2106 | if (!sctp_state(asoc, ESTABLISHED)) | ||
2107 | return -ENOTCONN; | ||
2108 | |||
2109 | if (!sctp_assoc_lookup_laddr(asoc, (union sctp_addr *)&prim.sspp_addr)) | ||
2110 | return -EADDRNOTAVAIL; | ||
2111 | |||
2112 | /* Create an ASCONF chunk with SET_PRIMARY parameter */ | ||
2113 | chunk = sctp_make_asconf_set_prim(asoc, | ||
2114 | (union sctp_addr *)&prim.sspp_addr); | ||
2115 | if (!chunk) | ||
2116 | return -ENOMEM; | ||
2117 | |||
2118 | err = sctp_send_asconf(asoc, chunk); | ||
2119 | |||
2120 | SCTP_DEBUG_PRINTK("We set peer primary addr primitively.\n"); | ||
2121 | |||
2122 | return err; | ||
2123 | } | ||
2124 | |||
2125 | static int sctp_setsockopt_adaption_layer(struct sock *sk, char __user *optval, | ||
2126 | int optlen) | ||
2127 | { | ||
2128 | __u32 val; | ||
2129 | |||
2130 | if (optlen < sizeof(__u32)) | ||
2131 | return -EINVAL; | ||
2132 | if (copy_from_user(&val, optval, sizeof(__u32))) | ||
2133 | return -EFAULT; | ||
2134 | |||
2135 | sctp_sk(sk)->adaption_ind = val; | ||
2136 | |||
2137 | return 0; | ||
2138 | } | ||
2139 | |||
2140 | /* API 6.2 setsockopt(), getsockopt() | ||
2141 | * | ||
2142 | * Applications use setsockopt() and getsockopt() to set or retrieve | ||
2143 | * socket options. Socket options are used to change the default | ||
2144 | * behavior of sockets calls. They are described in Section 7. | ||
2145 | * | ||
2146 | * The syntax is: | ||
2147 | * | ||
2148 | * ret = getsockopt(int sd, int level, int optname, void __user *optval, | ||
2149 | * int __user *optlen); | ||
2150 | * ret = setsockopt(int sd, int level, int optname, const void __user *optval, | ||
2151 | * int optlen); | ||
2152 | * | ||
2153 | * sd - the socket descriptor. | ||
2154 | * level - set to IPPROTO_SCTP for all SCTP options. | ||
2155 | * optname - the option name. | ||
2156 | * optval - the buffer to store the value of the option. | ||
2157 | * optlen - the size of the buffer. | ||
2158 | */ | ||
2159 | SCTP_STATIC int sctp_setsockopt(struct sock *sk, int level, int optname, | ||
2160 | char __user *optval, int optlen) | ||
2161 | { | ||
2162 | int retval = 0; | ||
2163 | |||
2164 | SCTP_DEBUG_PRINTK("sctp_setsockopt(sk: %p... optname: %d)\n", | ||
2165 | sk, optname); | ||
2166 | |||
2167 | /* I can hardly begin to describe how wrong this is. This is | ||
2168 | * so broken as to be worse than useless. The API draft | ||
2169 | * REALLY is NOT helpful here... I am not convinced that the | ||
2170 | * semantics of setsockopt() with a level OTHER THAN SOL_SCTP | ||
2171 | * are at all well-founded. | ||
2172 | */ | ||
2173 | if (level != SOL_SCTP) { | ||
2174 | struct sctp_af *af = sctp_sk(sk)->pf->af; | ||
2175 | retval = af->setsockopt(sk, level, optname, optval, optlen); | ||
2176 | goto out_nounlock; | ||
2177 | } | ||
2178 | |||
2179 | sctp_lock_sock(sk); | ||
2180 | |||
2181 | switch (optname) { | ||
2182 | case SCTP_SOCKOPT_BINDX_ADD: | ||
2183 | /* 'optlen' is the size of the addresses buffer. */ | ||
2184 | retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, | ||
2185 | optlen, SCTP_BINDX_ADD_ADDR); | ||
2186 | break; | ||
2187 | |||
2188 | case SCTP_SOCKOPT_BINDX_REM: | ||
2189 | /* 'optlen' is the size of the addresses buffer. */ | ||
2190 | retval = sctp_setsockopt_bindx(sk, (struct sockaddr __user *)optval, | ||
2191 | optlen, SCTP_BINDX_REM_ADDR); | ||
2192 | break; | ||
2193 | |||
2194 | case SCTP_DISABLE_FRAGMENTS: | ||
2195 | retval = sctp_setsockopt_disable_fragments(sk, optval, optlen); | ||
2196 | break; | ||
2197 | |||
2198 | case SCTP_EVENTS: | ||
2199 | retval = sctp_setsockopt_events(sk, optval, optlen); | ||
2200 | break; | ||
2201 | |||
2202 | case SCTP_AUTOCLOSE: | ||
2203 | retval = sctp_setsockopt_autoclose(sk, optval, optlen); | ||
2204 | break; | ||
2205 | |||
2206 | case SCTP_PEER_ADDR_PARAMS: | ||
2207 | retval = sctp_setsockopt_peer_addr_params(sk, optval, optlen); | ||
2208 | break; | ||
2209 | |||
2210 | case SCTP_INITMSG: | ||
2211 | retval = sctp_setsockopt_initmsg(sk, optval, optlen); | ||
2212 | break; | ||
2213 | case SCTP_DEFAULT_SEND_PARAM: | ||
2214 | retval = sctp_setsockopt_default_send_param(sk, optval, | ||
2215 | optlen); | ||
2216 | break; | ||
2217 | case SCTP_PRIMARY_ADDR: | ||
2218 | retval = sctp_setsockopt_primary_addr(sk, optval, optlen); | ||
2219 | break; | ||
2220 | case SCTP_SET_PEER_PRIMARY_ADDR: | ||
2221 | retval = sctp_setsockopt_peer_primary_addr(sk, optval, optlen); | ||
2222 | break; | ||
2223 | case SCTP_NODELAY: | ||
2224 | retval = sctp_setsockopt_nodelay(sk, optval, optlen); | ||
2225 | break; | ||
2226 | case SCTP_RTOINFO: | ||
2227 | retval = sctp_setsockopt_rtoinfo(sk, optval, optlen); | ||
2228 | break; | ||
2229 | case SCTP_ASSOCINFO: | ||
2230 | retval = sctp_setsockopt_associnfo(sk, optval, optlen); | ||
2231 | break; | ||
2232 | case SCTP_I_WANT_MAPPED_V4_ADDR: | ||
2233 | retval = sctp_setsockopt_mappedv4(sk, optval, optlen); | ||
2234 | break; | ||
2235 | case SCTP_MAXSEG: | ||
2236 | retval = sctp_setsockopt_maxseg(sk, optval, optlen); | ||
2237 | break; | ||
2238 | case SCTP_ADAPTION_LAYER: | ||
2239 | retval = sctp_setsockopt_adaption_layer(sk, optval, optlen); | ||
2240 | break; | ||
2241 | |||
2242 | default: | ||
2243 | retval = -ENOPROTOOPT; | ||
2244 | break; | ||
2245 | } | ||
2246 | |||
2247 | sctp_release_sock(sk); | ||
2248 | |||
2249 | out_nounlock: | ||
2250 | return retval; | ||
2251 | } | ||
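Every branch in the switch above is reached the same way from user space: setsockopt() with level IPPROTO_SCTP (SOL_SCTP) and one of the SCTP_* option names. A small sketch, with the helper name and error handling purely illustrative:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Enable a boolean SCTP-level option, e.g. sctp_opt_on(fd, SCTP_NODELAY). */
static int sctp_opt_on(int fd, int optname)
{
	int one = 1;

	if (setsockopt(fd, IPPROTO_SCTP, optname, &one, sizeof(one)) < 0) {
		perror("setsockopt(IPPROTO_SCTP)");
		return -1;
	}
	return 0;
}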
2252 | |||
2253 | /* API 3.1.6 connect() - UDP Style Syntax | ||
2254 | * | ||
2255 | * An application may use the connect() call in the UDP model to initiate an | ||
2256 | * association without sending data. | ||
2257 | * | ||
2258 | * The syntax is: | ||
2259 | * | ||
2260 | * ret = connect(int sd, const struct sockaddr *nam, socklen_t len); | ||
2261 | * | ||
2262 | * sd: the socket descriptor to have a new association added to. | ||
2263 | * | ||
2264 | * nam: the address structure (either struct sockaddr_in or struct | ||
2265 | * sockaddr_in6 defined in RFC2553 [7]). | ||
2266 | * | ||
2267 | * len: the size of the address. | ||
2268 | */ | ||
2269 | SCTP_STATIC int sctp_connect(struct sock *sk, struct sockaddr *uaddr, | ||
2270 | int addr_len) | ||
2271 | { | ||
2272 | struct sctp_sock *sp; | ||
2273 | struct sctp_endpoint *ep; | ||
2274 | struct sctp_association *asoc; | ||
2275 | struct sctp_transport *transport; | ||
2276 | union sctp_addr to; | ||
2277 | struct sctp_af *af; | ||
2278 | sctp_scope_t scope; | ||
2279 | long timeo; | ||
2280 | int err = 0; | ||
2281 | |||
2282 | sctp_lock_sock(sk); | ||
2283 | |||
2284 | SCTP_DEBUG_PRINTK("%s - sk: %p, sockaddr: %p, addr_len: %d)\n", | ||
2285 | __FUNCTION__, sk, uaddr, addr_len); | ||
2286 | |||
2287 | sp = sctp_sk(sk); | ||
2288 | ep = sp->ep; | ||
2289 | |||
2290 | /* connect() cannot be done on a socket that is already in ESTABLISHED | ||
2291 | * state - UDP-style peeled off socket or a TCP-style socket that | ||
2292 | * is already connected. | ||
2293 | * It cannot be done even on a TCP-style listening socket. | ||
2294 | */ | ||
2295 | if (sctp_sstate(sk, ESTABLISHED) || | ||
2296 | (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING))) { | ||
2297 | err = -EISCONN; | ||
2298 | goto out_unlock; | ||
2299 | } | ||
2300 | |||
2301 | err = sctp_verify_addr(sk, (union sctp_addr *)uaddr, addr_len); | ||
2302 | if (err) | ||
2303 | goto out_unlock; | ||
2304 | |||
2305 | if (addr_len > sizeof(to)) | ||
2306 | addr_len = sizeof(to); | ||
2307 | memcpy(&to, uaddr, addr_len); | ||
2308 | to.v4.sin_port = ntohs(to.v4.sin_port); | ||
2309 | |||
2310 | asoc = sctp_endpoint_lookup_assoc(ep, &to, &transport); | ||
2311 | if (asoc) { | ||
2312 | if (asoc->state >= SCTP_STATE_ESTABLISHED) | ||
2313 | err = -EISCONN; | ||
2314 | else | ||
2315 | err = -EALREADY; | ||
2316 | goto out_unlock; | ||
2317 | } | ||
2318 | |||
2319 | /* If we could not find a matching association on the endpoint, | ||
2320 | * make sure that there is no peeled-off association matching the | ||
2321 | * peer address even on another socket. | ||
2322 | */ | ||
2323 | if (sctp_endpoint_is_peeled_off(ep, &to)) { | ||
2324 | err = -EADDRNOTAVAIL; | ||
2325 | goto out_unlock; | ||
2326 | } | ||
2327 | |||
2328 | /* If a bind() or sctp_bindx() is not called prior to a connect() | ||
2329 | * call, the system picks an ephemeral port and will choose an address | ||
2330 | * set equivalent to binding with a wildcard address. | ||
2331 | */ | ||
2332 | if (!ep->base.bind_addr.port) { | ||
2333 | if (sctp_autobind(sk)) { | ||
2334 | err = -EAGAIN; | ||
2335 | goto out_unlock; | ||
2336 | } | ||
2337 | } | ||
2338 | |||
2339 | scope = sctp_scope(&to); | ||
2340 | asoc = sctp_association_new(ep, sk, scope, GFP_KERNEL); | ||
2341 | if (!asoc) { | ||
2342 | err = -ENOMEM; | ||
2343 | goto out_unlock; | ||
2344 | } | ||
2345 | |||
2346 | /* Prime the peer's transport structures. */ | ||
2347 | transport = sctp_assoc_add_peer(asoc, &to, GFP_KERNEL); | ||
2348 | if (!transport) { | ||
2349 | sctp_association_free(asoc); | ||
2350 | goto out_unlock; | ||
2351 | } | ||
2352 | err = sctp_assoc_set_bind_addr_from_ep(asoc, GFP_KERNEL); | ||
2353 | if (err < 0) { | ||
2354 | sctp_association_free(asoc); | ||
2355 | goto out_unlock; | ||
2356 | } | ||
2357 | |||
2358 | err = sctp_primitive_ASSOCIATE(asoc, NULL); | ||
2359 | if (err < 0) { | ||
2360 | sctp_association_free(asoc); | ||
2361 | goto out_unlock; | ||
2362 | } | ||
2363 | |||
2364 | /* Initialize sk's dport and daddr for getpeername() */ | ||
2365 | inet_sk(sk)->dport = htons(asoc->peer.port); | ||
2366 | af = sctp_get_af_specific(to.sa.sa_family); | ||
2367 | af->to_sk_daddr(&to, sk); | ||
2368 | |||
2369 | timeo = sock_sndtimeo(sk, sk->sk_socket->file->f_flags & O_NONBLOCK); | ||
2370 | err = sctp_wait_for_connect(asoc, &timeo); | ||
2371 | |||
2372 | out_unlock: | ||
2373 | sctp_release_sock(sk); | ||
2374 | |||
2375 | return err; | ||
2376 | } | ||
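The caller side of this path, sketched for IPv4 with an illustrative helper name: a one-to-many socket plus an ordinary connect() sets up the association without sending data, and EISCONN/EALREADY surface exactly as returned above.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>
#include <arpa/inet.h>

/* Initiate an association from a UDP-style socket without sending data. */
static int sctp_connect_v4(const char *ip, unsigned short port)
{
	struct sockaddr_in peer;
	int fd;

	fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);
	if (fd < 0)
		return -1;

	memset(&peer, 0, sizeof(peer));
	peer.sin_family = AF_INET;
	peer.sin_port   = htons(port);
	inet_pton(AF_INET, ip, &peer.sin_addr);

	if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}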
2377 | |||
2378 | /* FIXME: Write comments. */ | ||
2379 | SCTP_STATIC int sctp_disconnect(struct sock *sk, int flags) | ||
2380 | { | ||
2381 | return -EOPNOTSUPP; /* STUB */ | ||
2382 | } | ||
2383 | |||
2384 | /* 4.1.4 accept() - TCP Style Syntax | ||
2385 | * | ||
2386 | * Applications use accept() call to remove an established SCTP | ||
2387 | * association from the accept queue of the endpoint. A new socket | ||
2388 | * descriptor will be returned from accept() to represent the newly | ||
2389 | * formed association. | ||
2390 | */ | ||
2391 | SCTP_STATIC struct sock *sctp_accept(struct sock *sk, int flags, int *err) | ||
2392 | { | ||
2393 | struct sctp_sock *sp; | ||
2394 | struct sctp_endpoint *ep; | ||
2395 | struct sock *newsk = NULL; | ||
2396 | struct sctp_association *asoc; | ||
2397 | long timeo; | ||
2398 | int error = 0; | ||
2399 | |||
2400 | sctp_lock_sock(sk); | ||
2401 | |||
2402 | sp = sctp_sk(sk); | ||
2403 | ep = sp->ep; | ||
2404 | |||
2405 | if (!sctp_style(sk, TCP)) { | ||
2406 | error = -EOPNOTSUPP; | ||
2407 | goto out; | ||
2408 | } | ||
2409 | |||
2410 | if (!sctp_sstate(sk, LISTENING)) { | ||
2411 | error = -EINVAL; | ||
2412 | goto out; | ||
2413 | } | ||
2414 | |||
2415 | timeo = sock_rcvtimeo(sk, sk->sk_socket->file->f_flags & O_NONBLOCK); | ||
2416 | |||
2417 | error = sctp_wait_for_accept(sk, timeo); | ||
2418 | if (error) | ||
2419 | goto out; | ||
2420 | |||
2421 | /* We treat the list of associations on the endpoint as the accept | ||
2422 | * queue and pick the first association on the list. | ||
2423 | */ | ||
2424 | asoc = list_entry(ep->asocs.next, struct sctp_association, asocs); | ||
2425 | |||
2426 | newsk = sp->pf->create_accept_sk(sk, asoc); | ||
2427 | if (!newsk) { | ||
2428 | error = -ENOMEM; | ||
2429 | goto out; | ||
2430 | } | ||
2431 | |||
2432 | /* Populate the fields of the newsk from the oldsk and migrate the | ||
2433 | * asoc to the newsk. | ||
2434 | */ | ||
2435 | sctp_sock_migrate(sk, newsk, asoc, SCTP_SOCKET_TCP); | ||
2436 | |||
2437 | out: | ||
2438 | sctp_release_sock(sk); | ||
2439 | *err = error; | ||
2440 | return newsk; | ||
2441 | } | ||
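For completeness, the user-space counterpart: a TCP-style (SOCK_STREAM) listener where each accept() hands back a socket representing one established association. A sketch with illustrative names and no error cleanup:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Accept one association on a TCP-style SCTP listener bound to 'port'. */
static int sctp_accept_one(unsigned short port)
{
	struct sockaddr_in addr;
	int fd;

	fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SCTP);
	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family      = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port        = htons(port);

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, 5) < 0)
		return -1;

	return accept(fd, NULL, NULL);	/* new fd == one association */
}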
2442 | |||
2443 | /* The SCTP ioctl handler. */ | ||
2444 | SCTP_STATIC int sctp_ioctl(struct sock *sk, int cmd, unsigned long arg) | ||
2445 | { | ||
2446 | return -ENOIOCTLCMD; | ||
2447 | } | ||
2448 | |||
2449 | /* This is the function which gets called during socket creation to | ||
2450 | * initialize the SCTP-specific portion of the sock. | ||
2451 | * The sock structure should already be zero-filled memory. | ||
2452 | */ | ||
2453 | SCTP_STATIC int sctp_init_sock(struct sock *sk) | ||
2454 | { | ||
2455 | struct sctp_endpoint *ep; | ||
2456 | struct sctp_sock *sp; | ||
2457 | |||
2458 | SCTP_DEBUG_PRINTK("sctp_init_sock(sk: %p)\n", sk); | ||
2459 | |||
2460 | sp = sctp_sk(sk); | ||
2461 | |||
2462 | /* Initialize the SCTP per socket area. */ | ||
2463 | switch (sk->sk_type) { | ||
2464 | case SOCK_SEQPACKET: | ||
2465 | sp->type = SCTP_SOCKET_UDP; | ||
2466 | break; | ||
2467 | case SOCK_STREAM: | ||
2468 | sp->type = SCTP_SOCKET_TCP; | ||
2469 | break; | ||
2470 | default: | ||
2471 | return -ESOCKTNOSUPPORT; | ||
2472 | } | ||
2473 | |||
2474 | /* Initialize default send parameters. These parameters can be | ||
2475 | * modified with the SCTP_DEFAULT_SEND_PARAM socket option. | ||
2476 | */ | ||
2477 | sp->default_stream = 0; | ||
2478 | sp->default_ppid = 0; | ||
2479 | sp->default_flags = 0; | ||
2480 | sp->default_context = 0; | ||
2481 | sp->default_timetolive = 0; | ||
2482 | |||
2483 | /* Initialize default setup parameters. These parameters | ||
2484 | * can be modified with the SCTP_INITMSG socket option or | ||
2485 | * overridden by the SCTP_INIT CMSG. | ||
2486 | */ | ||
2487 | sp->initmsg.sinit_num_ostreams = sctp_max_outstreams; | ||
2488 | sp->initmsg.sinit_max_instreams = sctp_max_instreams; | ||
2489 | sp->initmsg.sinit_max_attempts = sctp_max_retrans_init; | ||
2490 | sp->initmsg.sinit_max_init_timeo = jiffies_to_msecs(sctp_rto_max); | ||
2491 | |||
2492 | /* Initialize default RTO related parameters. These parameters can | ||
2493 | * be modified with the SCTP_RTOINFO socket option. | ||
2494 | */ | ||
2495 | sp->rtoinfo.srto_initial = jiffies_to_msecs(sctp_rto_initial); | ||
2496 | sp->rtoinfo.srto_max = jiffies_to_msecs(sctp_rto_max); | ||
2497 | sp->rtoinfo.srto_min = jiffies_to_msecs(sctp_rto_min); | ||
2498 | |||
2499 | /* Initialize default association related parameters. These parameters | ||
2500 | * can be modified with the SCTP_ASSOCINFO socket option. | ||
2501 | */ | ||
2502 | sp->assocparams.sasoc_asocmaxrxt = sctp_max_retrans_association; | ||
2503 | sp->assocparams.sasoc_number_peer_destinations = 0; | ||
2504 | sp->assocparams.sasoc_peer_rwnd = 0; | ||
2505 | sp->assocparams.sasoc_local_rwnd = 0; | ||
2506 | sp->assocparams.sasoc_cookie_life = | ||
2507 | jiffies_to_msecs(sctp_valid_cookie_life); | ||
2508 | |||
2509 | /* Initialize default event subscriptions. By default, all the | ||
2510 | * options are off. | ||
2511 | */ | ||
2512 | memset(&sp->subscribe, 0, sizeof(struct sctp_event_subscribe)); | ||
2513 | |||
2514 | /* Default Peer Address Parameters. These defaults can | ||
2515 | * be modified via SCTP_PEER_ADDR_PARAMS | ||
2516 | */ | ||
2517 | sp->paddrparam.spp_hbinterval = jiffies_to_msecs(sctp_hb_interval); | ||
2518 | sp->paddrparam.spp_pathmaxrxt = sctp_max_retrans_path; | ||
2519 | |||
2520 | /* If enabled no SCTP message fragmentation will be performed. | ||
2521 | * Configure through SCTP_DISABLE_FRAGMENTS socket option. | ||
2522 | */ | ||
2523 | sp->disable_fragments = 0; | ||
2524 | |||
2525 | /* Turn on/off any Nagle-like algorithm. */ | ||
2526 | sp->nodelay = 1; | ||
2527 | |||
2528 | /* Enable by default. */ | ||
2529 | sp->v4mapped = 1; | ||
2530 | |||
2531 | /* Auto-close idle associations after the configured | ||
2532 | * number of seconds. A value of 0 disables this | ||
2533 | * feature. Configure through the SCTP_AUTOCLOSE socket option, | ||
2534 | * for UDP-style sockets only. | ||
2535 | */ | ||
2536 | sp->autoclose = 0; | ||
2537 | |||
2538 | /* User specified fragmentation limit. */ | ||
2539 | sp->user_frag = 0; | ||
2540 | |||
2541 | sp->adaption_ind = 0; | ||
2542 | |||
2543 | sp->pf = sctp_get_pf_specific(sk->sk_family); | ||
2544 | |||
2545 | /* Control variables for partial data delivery. */ | ||
2546 | sp->pd_mode = 0; | ||
2547 | skb_queue_head_init(&sp->pd_lobby); | ||
2548 | |||
2549 | /* Create a per socket endpoint structure. Even if we | ||
2550 | * change the data structure relationships, this may still | ||
2551 | * be useful for storing pre-connect address information. | ||
2552 | */ | ||
2553 | ep = sctp_endpoint_new(sk, GFP_KERNEL); | ||
2554 | if (!ep) | ||
2555 | return -ENOMEM; | ||
2556 | |||
2557 | sp->ep = ep; | ||
2558 | sp->hmac = NULL; | ||
2559 | |||
2560 | SCTP_DBG_OBJCNT_INC(sock); | ||
2561 | return 0; | ||
2562 | } | ||
2563 | |||
2564 | /* Cleanup any SCTP per socket resources. */ | ||
2565 | SCTP_STATIC int sctp_destroy_sock(struct sock *sk) | ||
2566 | { | ||
2567 | struct sctp_endpoint *ep; | ||
2568 | |||
2569 | SCTP_DEBUG_PRINTK("sctp_destroy_sock(sk: %p)\n", sk); | ||
2570 | |||
2571 | /* Release our hold on the endpoint. */ | ||
2572 | ep = sctp_sk(sk)->ep; | ||
2573 | sctp_endpoint_free(ep); | ||
2574 | |||
2575 | return 0; | ||
2576 | } | ||
2577 | |||
2578 | /* API 4.1.7 shutdown() - TCP Style Syntax | ||
2579 | * int shutdown(int socket, int how); | ||
2580 | * | ||
2581 | * sd - the socket descriptor of the association to be closed. | ||
2582 | * how - Specifies the type of shutdown. The values are | ||
2583 | * as follows: | ||
2584 | * SHUT_RD | ||
2585 | * Disables further receive operations. No SCTP | ||
2586 | * protocol action is taken. | ||
2587 | * SHUT_WR | ||
2588 | * Disables further send operations, and initiates | ||
2589 | * the SCTP shutdown sequence. | ||
2590 | * SHUT_RDWR | ||
2591 | * Disables further send and receive operations | ||
2592 | * and initiates the SCTP shutdown sequence. | ||
2593 | */ | ||
2594 | SCTP_STATIC void sctp_shutdown(struct sock *sk, int how) | ||
2595 | { | ||
2596 | struct sctp_endpoint *ep; | ||
2597 | struct sctp_association *asoc; | ||
2598 | |||
2599 | if (!sctp_style(sk, TCP)) | ||
2600 | return; | ||
2601 | |||
2602 | if (how & SEND_SHUTDOWN) { | ||
2603 | ep = sctp_sk(sk)->ep; | ||
2604 | if (!list_empty(&ep->asocs)) { | ||
2605 | asoc = list_entry(ep->asocs.next, | ||
2606 | struct sctp_association, asocs); | ||
2607 | sctp_primitive_SHUTDOWN(asoc, NULL); | ||
2608 | } | ||
2609 | } | ||
2610 | } | ||
2611 | |||
2612 | /* 7.2.1 Association Status (SCTP_STATUS) | ||
2613 | * | ||
2614 | * Applications can retrieve current status information about an | ||
2615 | * association, including association state, peer receiver window size, | ||
2616 | * number of unacked data chunks, and number of data chunks pending | ||
2617 | * receipt. This information is read-only. | ||
2618 | */ | ||
2619 | static int sctp_getsockopt_sctp_status(struct sock *sk, int len, | ||
2620 | char __user *optval, | ||
2621 | int __user *optlen) | ||
2622 | { | ||
2623 | struct sctp_status status; | ||
2624 | struct sctp_association *asoc = NULL; | ||
2625 | struct sctp_transport *transport; | ||
2626 | sctp_assoc_t associd; | ||
2627 | int retval = 0; | ||
2628 | |||
2629 | if (len != sizeof(status)) { | ||
2630 | retval = -EINVAL; | ||
2631 | goto out; | ||
2632 | } | ||
2633 | |||
2634 | if (copy_from_user(&status, optval, sizeof(status))) { | ||
2635 | retval = -EFAULT; | ||
2636 | goto out; | ||
2637 | } | ||
2638 | |||
2639 | associd = status.sstat_assoc_id; | ||
2640 | asoc = sctp_id2assoc(sk, associd); | ||
2641 | if (!asoc) { | ||
2642 | retval = -EINVAL; | ||
2643 | goto out; | ||
2644 | } | ||
2645 | |||
2646 | transport = asoc->peer.primary_path; | ||
2647 | |||
2648 | status.sstat_assoc_id = sctp_assoc2id(asoc); | ||
2649 | status.sstat_state = asoc->state; | ||
2650 | status.sstat_rwnd = asoc->peer.rwnd; | ||
2651 | status.sstat_unackdata = asoc->unack_data; | ||
2652 | |||
2653 | status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map); | ||
2654 | status.sstat_instrms = asoc->c.sinit_max_instreams; | ||
2655 | status.sstat_outstrms = asoc->c.sinit_num_ostreams; | ||
2656 | status.sstat_fragmentation_point = asoc->frag_point; | ||
2657 | status.sstat_primary.spinfo_assoc_id = sctp_assoc2id(transport->asoc); | ||
2658 | memcpy(&status.sstat_primary.spinfo_address, | ||
2659 | &(transport->ipaddr), sizeof(union sctp_addr)); | ||
2660 | /* Map ipv4 address into v4-mapped-on-v6 address. */ | ||
2661 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), | ||
2662 | (union sctp_addr *)&status.sstat_primary.spinfo_address); | ||
2663 | status.sstat_primary.spinfo_state = transport->active; | ||
2664 | status.sstat_primary.spinfo_cwnd = transport->cwnd; | ||
2665 | status.sstat_primary.spinfo_srtt = transport->srtt; | ||
2666 | status.sstat_primary.spinfo_rto = jiffies_to_msecs(transport->rto); | ||
2667 | status.sstat_primary.spinfo_mtu = transport->pmtu; | ||
2668 | |||
2669 | if (put_user(len, optlen)) { | ||
2670 | retval = -EFAULT; | ||
2671 | goto out; | ||
2672 | } | ||
2673 | |||
2674 | SCTP_DEBUG_PRINTK("sctp_getsockopt_sctp_status(%d): %d %d %d\n", | ||
2675 | len, status.sstat_state, status.sstat_rwnd, | ||
2676 | status.sstat_assoc_id); | ||
2677 | |||
2678 | if (copy_to_user(optval, &status, len)) { | ||
2679 | retval = -EFAULT; | ||
2680 | goto out; | ||
2681 | } | ||
2682 | |||
2683 | out: | ||
2684 | return retval; | ||
2685 | } | ||
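Read back from user space: a sketch assuming <netinet/sctp.h> with a struct sctp_status layout matching this kernel; the association id is passed in through optval, which is why the handler above does a copy_from_user() first.

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Dump a few status fields for one association. */
static int show_status(int fd, sctp_assoc_t assoc_id)
{
	struct sctp_status st;
	socklen_t len = sizeof(st);

	memset(&st, 0, sizeof(st));
	st.sstat_assoc_id = assoc_id;

	if (getsockopt(fd, IPPROTO_SCTP, SCTP_STATUS, &st, &len) < 0)
		return -1;

	printf("state %d rwnd %u unacked %u primary cwnd %u\n",
	       st.sstat_state, (unsigned)st.sstat_rwnd,
	       (unsigned)st.sstat_unackdata,
	       (unsigned)st.sstat_primary.spinfo_cwnd);
	return 0;
}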
2686 | |||
2687 | |||
2688 | /* 7.2.2 Peer Address Information (SCTP_GET_PEER_ADDR_INFO) | ||
2689 | * | ||
2690 | * Applications can retrieve information about a specific peer address | ||
2691 | * of an association, including its reachability state, congestion | ||
2692 | * window, and retransmission timer values. This information is | ||
2693 | * read-only. | ||
2694 | */ | ||
2695 | static int sctp_getsockopt_peer_addr_info(struct sock *sk, int len, | ||
2696 | char __user *optval, | ||
2697 | int __user *optlen) | ||
2698 | { | ||
2699 | struct sctp_paddrinfo pinfo; | ||
2700 | struct sctp_transport *transport; | ||
2701 | int retval = 0; | ||
2702 | |||
2703 | if (len != sizeof(pinfo)) { | ||
2704 | retval = -EINVAL; | ||
2705 | goto out; | ||
2706 | } | ||
2707 | |||
2708 | if (copy_from_user(&pinfo, optval, sizeof(pinfo))) { | ||
2709 | retval = -EFAULT; | ||
2710 | goto out; | ||
2711 | } | ||
2712 | |||
2713 | transport = sctp_addr_id2transport(sk, &pinfo.spinfo_address, | ||
2714 | pinfo.spinfo_assoc_id); | ||
2715 | if (!transport) | ||
2716 | return -EINVAL; | ||
2717 | |||
2718 | pinfo.spinfo_assoc_id = sctp_assoc2id(transport->asoc); | ||
2719 | pinfo.spinfo_state = transport->active; | ||
2720 | pinfo.spinfo_cwnd = transport->cwnd; | ||
2721 | pinfo.spinfo_srtt = transport->srtt; | ||
2722 | pinfo.spinfo_rto = jiffies_to_msecs(transport->rto); | ||
2723 | pinfo.spinfo_mtu = transport->pmtu; | ||
2724 | |||
2725 | if (put_user(len, optlen)) { | ||
2726 | retval = -EFAULT; | ||
2727 | goto out; | ||
2728 | } | ||
2729 | |||
2730 | if (copy_to_user(optval, &pinfo, len)) { | ||
2731 | retval = -EFAULT; | ||
2732 | goto out; | ||
2733 | } | ||
2734 | |||
2735 | out: | ||
2736 | return retval; | ||
2737 | } | ||
2738 | |||
2739 | /* 7.1.12 Enable/Disable message fragmentation (SCTP_DISABLE_FRAGMENTS) | ||
2740 | * | ||
2741 | * This option is an on/off flag. If enabled no SCTP message | ||
2742 | * fragmentation will be performed. Instead if a message being sent | ||
2743 | * exceeds the current PMTU size, the message will NOT be sent and | ||
2744 | * instead an error will be indicated to the user. | ||
2745 | */ | ||
2746 | static int sctp_getsockopt_disable_fragments(struct sock *sk, int len, | ||
2747 | char __user *optval, int __user *optlen) | ||
2748 | { | ||
2749 | int val; | ||
2750 | |||
2751 | if (len < sizeof(int)) | ||
2752 | return -EINVAL; | ||
2753 | |||
2754 | len = sizeof(int); | ||
2755 | val = (sctp_sk(sk)->disable_fragments == 1); | ||
2756 | if (put_user(len, optlen)) | ||
2757 | return -EFAULT; | ||
2758 | if (copy_to_user(optval, &val, len)) | ||
2759 | return -EFAULT; | ||
2760 | return 0; | ||
2761 | } | ||
2762 | |||
2763 | /* 7.1.15 Set notification and ancillary events (SCTP_EVENTS) | ||
2764 | * | ||
2765 | * This socket option is used to specify various notifications and | ||
2766 | * ancillary data the user wishes to receive. | ||
2767 | */ | ||
2768 | static int sctp_getsockopt_events(struct sock *sk, int len, char __user *optval, | ||
2769 | int __user *optlen) | ||
2770 | { | ||
2771 | if (len != sizeof(struct sctp_event_subscribe)) | ||
2772 | return -EINVAL; | ||
2773 | if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) | ||
2774 | return -EFAULT; | ||
2775 | return 0; | ||
2776 | } | ||
2777 | |||
2778 | /* 7.1.8 Automatic Close of associations (SCTP_AUTOCLOSE) | ||
2779 | * | ||
2780 | * This socket option is applicable to the UDP-style socket only. When | ||
2781 | * set it will cause associations that are idle for more than the | ||
2782 | * specified number of seconds to automatically close. An association | ||
2783 | * being idle is defined as an association that has NOT sent or received | ||
2784 | * user data. The special value of '0' indicates that no automatic | ||
2785 | * close of any associations should be performed. The option expects an | ||
2786 | * integer defining the number of seconds of idle time before an | ||
2787 | * association is closed. | ||
2788 | */ | ||
2789 | static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optval, int __user *optlen) | ||
2790 | { | ||
2791 | /* Applicable to UDP-style socket only */ | ||
2792 | if (sctp_style(sk, TCP)) | ||
2793 | return -EOPNOTSUPP; | ||
2794 | if (len != sizeof(int)) | ||
2795 | return -EINVAL; | ||
2796 | if (copy_to_user(optval, &sctp_sk(sk)->autoclose, len)) | ||
2797 | return -EFAULT; | ||
2798 | return 0; | ||
2799 | } | ||
2800 | |||
2801 | /* Helper routine to branch off an association to a new socket. */ | ||
2802 | SCTP_STATIC int sctp_do_peeloff(struct sctp_association *asoc, | ||
2803 | struct socket **sockp) | ||
2804 | { | ||
2805 | struct sock *sk = asoc->base.sk; | ||
2806 | struct socket *sock; | ||
2807 | int err = 0; | ||
2808 | |||
2809 | /* An association cannot be branched off from an already peeled-off | ||
2810 | * socket, nor is this supported for tcp style sockets. | ||
2811 | */ | ||
2812 | if (!sctp_style(sk, UDP)) | ||
2813 | return -EINVAL; | ||
2814 | |||
2815 | /* Create a new socket. */ | ||
2816 | err = sock_create(sk->sk_family, SOCK_SEQPACKET, IPPROTO_SCTP, &sock); | ||
2817 | if (err < 0) | ||
2818 | return err; | ||
2819 | |||
2820 | /* Populate the fields of the newsk from the oldsk and migrate the | ||
2821 | * asoc to the newsk. | ||
2822 | */ | ||
2823 | sctp_sock_migrate(sk, sock->sk, asoc, SCTP_SOCKET_UDP_HIGH_BANDWIDTH); | ||
2824 | *sockp = sock; | ||
2825 | |||
2826 | return err; | ||
2827 | } | ||
2828 | |||
2829 | static int sctp_getsockopt_peeloff(struct sock *sk, int len, char __user *optval, int __user *optlen) | ||
2830 | { | ||
2831 | sctp_peeloff_arg_t peeloff; | ||
2832 | struct socket *newsock; | ||
2833 | int retval = 0; | ||
2834 | struct sctp_association *asoc; | ||
2835 | |||
2836 | if (len != sizeof(sctp_peeloff_arg_t)) | ||
2837 | return -EINVAL; | ||
2838 | if (copy_from_user(&peeloff, optval, len)) | ||
2839 | return -EFAULT; | ||
2840 | |||
2841 | asoc = sctp_id2assoc(sk, peeloff.associd); | ||
2842 | if (!asoc) { | ||
2843 | retval = -EINVAL; | ||
2844 | goto out; | ||
2845 | } | ||
2846 | |||
2847 | SCTP_DEBUG_PRINTK("%s: sk: %p asoc: %p\n", __FUNCTION__, sk, asoc); | ||
2848 | |||
2849 | retval = sctp_do_peeloff(asoc, &newsock); | ||
2850 | if (retval < 0) | ||
2851 | goto out; | ||
2852 | |||
2853 | /* Map the socket to an unused fd that can be returned to the user. */ | ||
2854 | retval = sock_map_fd(newsock); | ||
2855 | if (retval < 0) { | ||
2856 | sock_release(newsock); | ||
2857 | goto out; | ||
2858 | } | ||
2859 | |||
2860 | SCTP_DEBUG_PRINTK("%s: sk: %p asoc: %p newsk: %p sd: %d\n", | ||
2861 | __FUNCTION__, sk, asoc, newsock->sk, retval); | ||
2862 | |||
2863 | /* Return the fd mapped to the new socket. */ | ||
2864 | peeloff.sd = retval; | ||
2865 | if (copy_to_user(optval, &peeloff, len)) | ||
2866 | retval = -EFAULT; | ||
2867 | |||
2868 | out: | ||
2869 | return retval; | ||
2870 | } | ||
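User space normally reaches this getsockopt through the lksctp-tools wrapper rather than building the sctp_peeloff_arg_t by hand; a sketch, assuming libsctp (link with -lsctp) is available and the helper name is illustrative:

#include <netinet/sctp.h>

/* Branch one association off a one-to-many socket into its own fd.
 * sctp_peeloff() wraps the SCTP_SOCKOPT_PEELOFF getsockopt above and
 * returns the new descriptor, or -1 on error.
 */
static int branch_off(int fd, sctp_assoc_t assoc_id)
{
	return sctp_peeloff(fd, assoc_id);
}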
2871 | |||
2872 | /* 7.1.13 Peer Address Parameters (SCTP_PEER_ADDR_PARAMS) | ||
2873 | * | ||
2874 | * Applications can enable or disable heartbeats for any peer address of | ||
2875 | * an association, modify an address's heartbeat interval, force a | ||
2876 | * heartbeat to be sent immediately, and adjust the address's maximum | ||
2877 | * number of retransmissions sent before an address is considered | ||
2878 | * unreachable. The following structure is used to access and modify an | ||
2879 | * address's parameters: | ||
2880 | * | ||
2881 | * struct sctp_paddrparams { | ||
2882 | * sctp_assoc_t spp_assoc_id; | ||
2883 | * struct sockaddr_storage spp_address; | ||
2884 | * uint32_t spp_hbinterval; | ||
2885 | * uint16_t spp_pathmaxrxt; | ||
2886 | * }; | ||
2887 | * | ||
2888 | * spp_assoc_id - (UDP style socket) This is filled in by the application, | ||
2889 | * and identifies the association for this query. | ||
2890 | * spp_address - This specifies which address is of interest. | ||
2891 | * spp_hbinterval - This contains the value of the heartbeat interval, | ||
2892 | * in milliseconds. A value of 0, when modifying the | ||
2893 | * parameter, specifies that the heartbeat on this | ||
2894 | * address should be disabled. A value of UINT32_MAX | ||
2895 | * (4294967295), when modifying the parameter, | ||
2896 | * specifies that a heartbeat should be sent | ||
2897 | * immediately to the peer address, and the current | ||
2898 | * interval should remain unchanged. | ||
2899 | * spp_pathmaxrxt - This contains the maximum number of | ||
2900 | * retransmissions before this address shall be | ||
2901 | * considered unreachable. | ||
2902 | */ | ||
2903 | static int sctp_getsockopt_peer_addr_params(struct sock *sk, int len, | ||
2904 | char __user *optval, int __user *optlen) | ||
2905 | { | ||
2906 | struct sctp_paddrparams params; | ||
2907 | struct sctp_transport *trans; | ||
2908 | |||
2909 | if (len != sizeof(struct sctp_paddrparams)) | ||
2910 | return -EINVAL; | ||
2911 | if (copy_from_user(¶ms, optval, len)) | ||
2912 | return -EFAULT; | ||
2913 | |||
2914 | /* If no association id is specified retrieve the default value | ||
2915 | * for the endpoint that will be used for all future associations | ||
2916 | */ | ||
2917 | if (!params.spp_assoc_id && | ||
2918 | sctp_is_any(( union sctp_addr *)¶ms.spp_address)) { | ||
2919 | params.spp_hbinterval = sctp_sk(sk)->paddrparam.spp_hbinterval; | ||
2920 | params.spp_pathmaxrxt = sctp_sk(sk)->paddrparam.spp_pathmaxrxt; | ||
2921 | |||
2922 | goto done; | ||
2923 | } | ||
2924 | |||
2925 | trans = sctp_addr_id2transport(sk, ¶ms.spp_address, | ||
2926 | params.spp_assoc_id); | ||
2927 | if (!trans) | ||
2928 | return -EINVAL; | ||
2929 | |||
2930 | /* The value of the heartbeat interval, in milliseconds. A value of 0, | ||
2931 | * when modifying the parameter, specifies that the heartbeat on this | ||
2932 | * address should be disabled. | ||
2933 | */ | ||
2934 | if (!trans->hb_allowed) | ||
2935 | params.spp_hbinterval = 0; | ||
2936 | else | ||
2937 | params.spp_hbinterval = jiffies_to_msecs(trans->hb_interval); | ||
2938 | |||
2939 | /* spp_pathmaxrxt contains the maximum number of retransmissions | ||
2940 | * before this address shall be considered unreachable. | ||
2941 | */ | ||
2942 | params.spp_pathmaxrxt = trans->max_retrans; | ||
2943 | |||
2944 | done: | ||
2945 | if (copy_to_user(optval, ¶ms, len)) | ||
2946 | return -EFAULT; | ||
2947 | |||
2948 | if (put_user(len, optlen)) | ||
2949 | return -EFAULT; | ||
2950 | |||
2951 | return 0; | ||
2952 | } | ||
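A user-space sketch of the query, assuming a struct sctp_paddrparams whose layout matches this kernel (later headers grew extra fields, which this handler would reject via the length check); the peer address and association id travel in through optval just as the code above expects. The helper name is illustrative.

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

/* Fetch heartbeat interval (ms, 0 == disabled) and path max
 * retransmissions for one peer address of an association.
 */
static int get_path_params(int fd, sctp_assoc_t assoc_id,
			   const struct sockaddr *peer, socklen_t peerlen)
{
	struct sctp_paddrparams p;
	socklen_t len = sizeof(p);

	memset(&p, 0, sizeof(p));
	p.spp_assoc_id = assoc_id;
	memcpy(&p.spp_address, peer, peerlen);

	if (getsockopt(fd, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS, &p, &len) < 0)
		return -1;

	return (int)p.spp_pathmaxrxt;	/* caller also reads p.spp_hbinterval */
}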
2953 | |||
2954 | /* 7.1.3 Initialization Parameters (SCTP_INITMSG) | ||
2955 | * | ||
2956 | * Applications can specify protocol parameters for the default association | ||
2957 | * initialization. The option name argument to setsockopt() and getsockopt() | ||
2958 | * is SCTP_INITMSG. | ||
2959 | * | ||
2960 | * Setting initialization parameters is effective only on an unconnected | ||
2961 | * socket (for UDP-style sockets only future associations are affected | ||
2962 | * by the change). With TCP-style sockets, this option is inherited by | ||
2963 | * sockets derived from a listener socket. | ||
2964 | */ | ||
2965 | static int sctp_getsockopt_initmsg(struct sock *sk, int len, char __user *optval, int __user *optlen) | ||
2966 | { | ||
2967 | if (len != sizeof(struct sctp_initmsg)) | ||
2968 | return -EINVAL; | ||
2969 | if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len)) | ||
2970 | return -EFAULT; | ||
2971 | return 0; | ||
2972 | } | ||
2973 | |||
2974 | static int sctp_getsockopt_peer_addrs_num(struct sock *sk, int len, | ||
2975 | char __user *optval, int __user *optlen) | ||
2976 | { | ||
2977 | sctp_assoc_t id; | ||
2978 | struct sctp_association *asoc; | ||
2979 | struct list_head *pos; | ||
2980 | int cnt = 0; | ||
2981 | |||
2982 | if (len != sizeof(sctp_assoc_t)) | ||
2983 | return -EINVAL; | ||
2984 | |||
2985 | if (copy_from_user(&id, optval, sizeof(sctp_assoc_t))) | ||
2986 | return -EFAULT; | ||
2987 | |||
2988 | /* For UDP-style sockets, id specifies the association to query. */ | ||
2989 | asoc = sctp_id2assoc(sk, id); | ||
2990 | if (!asoc) | ||
2991 | return -EINVAL; | ||
2992 | |||
2993 | list_for_each(pos, &asoc->peer.transport_addr_list) { | ||
2994 | cnt ++; | ||
2995 | } | ||
2996 | |||
2997 | return cnt; | ||
2998 | } | ||
2999 | |||
3000 | static int sctp_getsockopt_peer_addrs(struct sock *sk, int len, | ||
3001 | char __user *optval, int __user *optlen) | ||
3002 | { | ||
3003 | struct sctp_association *asoc; | ||
3004 | struct list_head *pos; | ||
3005 | int cnt = 0; | ||
3006 | struct sctp_getaddrs getaddrs; | ||
3007 | struct sctp_transport *from; | ||
3008 | void __user *to; | ||
3009 | union sctp_addr temp; | ||
3010 | struct sctp_sock *sp = sctp_sk(sk); | ||
3011 | int addrlen; | ||
3012 | |||
3013 | if (len != sizeof(struct sctp_getaddrs)) | ||
3014 | return -EINVAL; | ||
3015 | |||
3016 | if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) | ||
3017 | return -EFAULT; | ||
3018 | |||
3019 | if (getaddrs.addr_num <= 0) return -EINVAL; | ||
3020 | |||
3021 | /* For UDP-style sockets, id specifies the association to query. */ | ||
3022 | asoc = sctp_id2assoc(sk, getaddrs.assoc_id); | ||
3023 | if (!asoc) | ||
3024 | return -EINVAL; | ||
3025 | |||
3026 | to = (void __user *)getaddrs.addrs; | ||
3027 | list_for_each(pos, &asoc->peer.transport_addr_list) { | ||
3028 | from = list_entry(pos, struct sctp_transport, transports); | ||
3029 | memcpy(&temp, &from->ipaddr, sizeof(temp)); | ||
3030 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); | ||
3031 | addrlen = sctp_get_af_specific(sk->sk_family)->sockaddr_len; | ||
3032 | temp.v4.sin_port = htons(temp.v4.sin_port); | ||
3033 | if (copy_to_user(to, &temp, addrlen)) | ||
3034 | return -EFAULT; | ||
3035 | to += addrlen ; | ||
3036 | cnt ++; | ||
3037 | if (cnt >= getaddrs.addr_num) break; | ||
3038 | } | ||
3039 | getaddrs.addr_num = cnt; | ||
3040 | if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs))) | ||
3041 | return -EFAULT; | ||
3042 | |||
3043 | return 0; | ||
3044 | } | ||
3045 | |||
3046 | static int sctp_getsockopt_local_addrs_num(struct sock *sk, int len, | ||
3047 | char __user *optval, | ||
3048 | int __user *optlen) | ||
3049 | { | ||
3050 | sctp_assoc_t id; | ||
3051 | struct sctp_bind_addr *bp; | ||
3052 | struct sctp_association *asoc; | ||
3053 | struct list_head *pos; | ||
3054 | struct sctp_sockaddr_entry *addr; | ||
3055 | rwlock_t *addr_lock; | ||
3056 | unsigned long flags; | ||
3057 | int cnt = 0; | ||
3058 | |||
3059 | if (len != sizeof(sctp_assoc_t)) | ||
3060 | return -EINVAL; | ||
3061 | |||
3062 | if (copy_from_user(&id, optval, sizeof(sctp_assoc_t))) | ||
3063 | return -EFAULT; | ||
3064 | |||
3065 | /* | ||
3066 | * For UDP-style sockets, id specifies the association to query. | ||
3067 | * If the id field is set to the value '0' then the locally bound | ||
3068 | * addresses are returned without regard to any particular | ||
3069 | * association. | ||
3070 | */ | ||
3071 | if (0 == id) { | ||
3072 | bp = &sctp_sk(sk)->ep->base.bind_addr; | ||
3073 | addr_lock = &sctp_sk(sk)->ep->base.addr_lock; | ||
3074 | } else { | ||
3075 | asoc = sctp_id2assoc(sk, id); | ||
3076 | if (!asoc) | ||
3077 | return -EINVAL; | ||
3078 | bp = &asoc->base.bind_addr; | ||
3079 | addr_lock = &asoc->base.addr_lock; | ||
3080 | } | ||
3081 | |||
3082 | sctp_read_lock(addr_lock); | ||
3083 | |||
3084 | /* If the endpoint is bound to 0.0.0.0 or ::0, count the valid | ||
3085 | * addresses from the global local address list. | ||
3086 | */ | ||
3087 | if (sctp_list_single_entry(&bp->address_list)) { | ||
3088 | addr = list_entry(bp->address_list.next, | ||
3089 | struct sctp_sockaddr_entry, list); | ||
3090 | if (sctp_is_any(&addr->a)) { | ||
3091 | sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags); | ||
3092 | list_for_each(pos, &sctp_local_addr_list) { | ||
3093 | addr = list_entry(pos, | ||
3094 | struct sctp_sockaddr_entry, | ||
3095 | list); | ||
3096 | if ((PF_INET == sk->sk_family) && | ||
3097 | (AF_INET6 == addr->a.sa.sa_family)) | ||
3098 | continue; | ||
3099 | cnt++; | ||
3100 | } | ||
3101 | sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, | ||
3102 | flags); | ||
3103 | } else { | ||
3104 | cnt = 1; | ||
3105 | } | ||
3106 | goto done; | ||
3107 | } | ||
3108 | |||
3109 | list_for_each(pos, &bp->address_list) { | ||
3110 | cnt ++; | ||
3111 | } | ||
3112 | |||
3113 | done: | ||
3114 | sctp_read_unlock(addr_lock); | ||
3115 | return cnt; | ||
3116 | } | ||
3117 | |||
3118 | /* Helper function that copies local addresses to user and returns the number | ||
3119 | * of addresses copied. | ||
3120 | */ | ||
3121 | static int sctp_copy_laddrs_to_user(struct sock *sk, __u16 port, int max_addrs, | ||
3122 | void __user *to) | ||
3123 | { | ||
3124 | struct list_head *pos; | ||
3125 | struct sctp_sockaddr_entry *addr; | ||
3126 | unsigned long flags; | ||
3127 | union sctp_addr temp; | ||
3128 | int cnt = 0; | ||
3129 | int addrlen; | ||
3130 | |||
3131 | sctp_spin_lock_irqsave(&sctp_local_addr_lock, flags); | ||
3132 | list_for_each(pos, &sctp_local_addr_list) { | ||
3133 | addr = list_entry(pos, struct sctp_sockaddr_entry, list); | ||
3134 | if ((PF_INET == sk->sk_family) && | ||
3135 | (AF_INET6 == addr->a.sa.sa_family)) | ||
3136 | continue; | ||
3137 | memcpy(&temp, &addr->a, sizeof(temp)); | ||
3138 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk), | ||
3139 | &temp); | ||
3140 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; | ||
3141 | temp.v4.sin_port = htons(port); | ||
3142 | if (copy_to_user(to, &temp, addrlen)) { | ||
3143 | sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, | ||
3144 | flags); | ||
3145 | return -EFAULT; | ||
3146 | } | ||
3147 | to += addrlen; | ||
3148 | cnt ++; | ||
3149 | if (cnt >= max_addrs) break; | ||
3150 | } | ||
3151 | sctp_spin_unlock_irqrestore(&sctp_local_addr_lock, flags); | ||
3152 | |||
3153 | return cnt; | ||
3154 | } | ||
3155 | |||
3156 | static int sctp_getsockopt_local_addrs(struct sock *sk, int len, | ||
3157 | char __user *optval, int __user *optlen) | ||
3158 | { | ||
3159 | struct sctp_bind_addr *bp; | ||
3160 | struct sctp_association *asoc; | ||
3161 | struct list_head *pos; | ||
3162 | int cnt = 0; | ||
3163 | struct sctp_getaddrs getaddrs; | ||
3164 | struct sctp_sockaddr_entry *addr; | ||
3165 | void __user *to; | ||
3166 | union sctp_addr temp; | ||
3167 | struct sctp_sock *sp = sctp_sk(sk); | ||
3168 | int addrlen; | ||
3169 | rwlock_t *addr_lock; | ||
3170 | int err = 0; | ||
3171 | |||
3172 | if (len != sizeof(struct sctp_getaddrs)) | ||
3173 | return -EINVAL; | ||
3174 | |||
3175 | if (copy_from_user(&getaddrs, optval, sizeof(struct sctp_getaddrs))) | ||
3176 | return -EFAULT; | ||
3177 | |||
3178 | if (getaddrs.addr_num <= 0) return -EINVAL; | ||
3179 | /* | ||
3180 | * For UDP-style sockets, id specifies the association to query. | ||
3181 | * If the id field is set to the value '0' then the locally bound | ||
3182 | * addresses are returned without regard to any particular | ||
3183 | * association. | ||
3184 | */ | ||
3185 | if (0 == getaddrs.assoc_id) { | ||
3186 | bp = &sctp_sk(sk)->ep->base.bind_addr; | ||
3187 | addr_lock = &sctp_sk(sk)->ep->base.addr_lock; | ||
3188 | } else { | ||
3189 | asoc = sctp_id2assoc(sk, getaddrs.assoc_id); | ||
3190 | if (!asoc) | ||
3191 | return -EINVAL; | ||
3192 | bp = &asoc->base.bind_addr; | ||
3193 | addr_lock = &asoc->base.addr_lock; | ||
3194 | } | ||
3195 | |||
3196 | to = getaddrs.addrs; | ||
3197 | |||
3198 | sctp_read_lock(addr_lock); | ||
3199 | |||
3200 | /* If the endpoint is bound to 0.0.0.0 or ::0, get the valid | ||
3201 | * addresses from the global local address list. | ||
3202 | */ | ||
3203 | if (sctp_list_single_entry(&bp->address_list)) { | ||
3204 | addr = list_entry(bp->address_list.next, | ||
3205 | struct sctp_sockaddr_entry, list); | ||
3206 | if (sctp_is_any(&addr->a)) { | ||
3207 | cnt = sctp_copy_laddrs_to_user(sk, bp->port, | ||
3208 | getaddrs.addr_num, to); | ||
3209 | if (cnt < 0) { | ||
3210 | err = cnt; | ||
3211 | goto unlock; | ||
3212 | } | ||
3213 | goto copy_getaddrs; | ||
3214 | } | ||
3215 | } | ||
3216 | |||
3217 | list_for_each(pos, &bp->address_list) { | ||
3218 | addr = list_entry(pos, struct sctp_sockaddr_entry, list); | ||
3219 | memcpy(&temp, &addr->a, sizeof(temp)); | ||
3220 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp); | ||
3221 | addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; | ||
3222 | temp.v4.sin_port = htons(temp.v4.sin_port); | ||
3223 | if (copy_to_user(to, &temp, addrlen)) { | ||
3224 | err = -EFAULT; | ||
3225 | goto unlock; | ||
3226 | } | ||
3227 | to += addrlen; | ||
3228 | cnt ++; | ||
3229 | if (cnt >= getaddrs.addr_num) break; | ||
3230 | } | ||
3231 | |||
3232 | copy_getaddrs: | ||
3233 | getaddrs.addr_num = cnt; | ||
3234 | if (copy_to_user(optval, &getaddrs, sizeof(struct sctp_getaddrs))) | ||
3235 | err = -EFAULT; | ||
3236 | |||
3237 | unlock: | ||
3238 | sctp_read_unlock(addr_lock); | ||
3239 | return err; | ||
3240 | } | ||
3241 | |||
3242 | /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR) | ||
3243 | * | ||
3244 | * Requests that the local SCTP stack use the enclosed peer address as | ||
3245 | * the association primary. The enclosed address must be one of the | ||
3246 | * association peer's addresses. | ||
3247 | */ | ||
3248 | static int sctp_getsockopt_primary_addr(struct sock *sk, int len, | ||
3249 | char __user *optval, int __user *optlen) | ||
3250 | { | ||
3251 | struct sctp_prim prim; | ||
3252 | struct sctp_association *asoc; | ||
3253 | struct sctp_sock *sp = sctp_sk(sk); | ||
3254 | |||
3255 | if (len != sizeof(struct sctp_prim)) | ||
3256 | return -EINVAL; | ||
3257 | |||
3258 | if (copy_from_user(&prim, optval, sizeof(struct sctp_prim))) | ||
3259 | return -EFAULT; | ||
3260 | |||
3261 | asoc = sctp_id2assoc(sk, prim.ssp_assoc_id); | ||
3262 | if (!asoc) | ||
3263 | return -EINVAL; | ||
3264 | |||
3265 | if (!asoc->peer.primary_path) | ||
3266 | return -ENOTCONN; | ||
3267 | |||
3268 | asoc->peer.primary_path->ipaddr.v4.sin_port = | ||
3269 | htons(asoc->peer.primary_path->ipaddr.v4.sin_port); | ||
3270 | memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr, | ||
3271 | sizeof(union sctp_addr)); | ||
3272 | asoc->peer.primary_path->ipaddr.v4.sin_port = | ||
3273 | ntohs(asoc->peer.primary_path->ipaddr.v4.sin_port); | ||
3274 | |||
3275 | sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, | ||
3276 | (union sctp_addr *)&prim.ssp_addr); | ||
3277 | |||
3278 | if (copy_to_user(optval, &prim, sizeof(struct sctp_prim))) | ||
3279 | return -EFAULT; | ||
3280 | |||
3281 | return 0; | ||
3282 | } | ||
3283 | |||
3284 | /* | ||
3285 | * 7.1.11 Set Adaption Layer Indicator (SCTP_ADAPTION_LAYER) | ||
3286 | * | ||
3287 | * Requests that the local endpoint set the specified Adaption Layer | ||
3288 | * Indication parameter for all future INIT and INIT-ACK exchanges. | ||
3289 | */ | ||
3290 | static int sctp_getsockopt_adaption_layer(struct sock *sk, int len, | ||
3291 | char __user *optval, int __user *optlen) | ||
3292 | { | ||
3293 | __u32 val; | ||
3294 | |||
3295 | if (len < sizeof(__u32)) | ||
3296 | return -EINVAL; | ||
3297 | |||
3298 | len = sizeof(__u32); | ||
3299 | val = sctp_sk(sk)->adaption_ind; | ||
3300 | if (put_user(len, optlen)) | ||
3301 | return -EFAULT; | ||
3302 | if (copy_to_user(optval, &val, len)) | ||
3303 | return -EFAULT; | ||
3304 | return 0; | ||
3305 | } | ||
3306 | |||
3307 | /* | ||
3308 | * | ||
3309 | * 7.1.14 Set default send parameters (SCTP_DEFAULT_SEND_PARAM) | ||
3310 | * | ||
3311 | * Applications that wish to use the sendto() system call may wish to | ||
3312 | * specify a default set of parameters that would normally be supplied | ||
3313 | * through the inclusion of ancillary data. This socket option allows | ||
3314 | * such an application to set the default sctp_sndrcvinfo structure. | ||
3315 | * | ||
3316 | * | ||
3317 | * The application that wishes to use this socket option simply passes | ||
3318 | * in to this call the sctp_sndrcvinfo structure defined in Section | ||
3319 | * 5.2.2. The input parameters accepted by this call include | ||
3320 | * sinfo_stream, sinfo_flags, sinfo_ppid, sinfo_context, | ||
3321 | * sinfo_timetolive. The user must provide the sinfo_assoc_id field in | ||
3322 | * to this call if the caller is using the UDP model. | ||
3323 | * | ||
3324 | * For getsockopt, it gets the default sctp_sndrcvinfo structure. | ||
3325 | */ | ||
3326 | static int sctp_getsockopt_default_send_param(struct sock *sk, | ||
3327 | int len, char __user *optval, | ||
3328 | int __user *optlen) | ||
3329 | { | ||
3330 | struct sctp_sndrcvinfo info; | ||
3331 | struct sctp_association *asoc; | ||
3332 | struct sctp_sock *sp = sctp_sk(sk); | ||
3333 | |||
3334 | if (len != sizeof(struct sctp_sndrcvinfo)) | ||
3335 | return -EINVAL; | ||
3336 | if (copy_from_user(&info, optval, sizeof(struct sctp_sndrcvinfo))) | ||
3337 | return -EFAULT; | ||
3338 | |||
3339 | asoc = sctp_id2assoc(sk, info.sinfo_assoc_id); | ||
3340 | if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP)) | ||
3341 | return -EINVAL; | ||
3342 | |||
3343 | if (asoc) { | ||
3344 | info.sinfo_stream = asoc->default_stream; | ||
3345 | info.sinfo_flags = asoc->default_flags; | ||
3346 | info.sinfo_ppid = asoc->default_ppid; | ||
3347 | info.sinfo_context = asoc->default_context; | ||
3348 | info.sinfo_timetolive = asoc->default_timetolive; | ||
3349 | } else { | ||
3350 | info.sinfo_stream = sp->default_stream; | ||
3351 | info.sinfo_flags = sp->default_flags; | ||
3352 | info.sinfo_ppid = sp->default_ppid; | ||
3353 | info.sinfo_context = sp->default_context; | ||
3354 | info.sinfo_timetolive = sp->default_timetolive; | ||
3355 | } | ||
3356 | |||
3357 | if (copy_to_user(optval, &info, sizeof(struct sctp_sndrcvinfo))) | ||
3358 | return -EFAULT; | ||
3359 | |||
3360 | return 0; | ||
3361 | } | ||
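For illustration only, here is a minimal user-space sketch (not part of this patch) of querying these defaults through getsockopt(). It assumes the lksctp-tools <netinet/sctp.h> header, an already created SCTP descriptor fd, and the hypothetical helper name get_default_send_param; error handling is omitted.

/* Hypothetical user-space helper, not part of this patch. */
#include <string.h>
#include <sys/socket.h>
#include <netinet/sctp.h>	/* struct sctp_sndrcvinfo, SCTP_DEFAULT_SEND_PARAM */

static int get_default_send_param(int fd, struct sctp_sndrcvinfo *info)
{
	socklen_t len = sizeof(*info);	/* the kernel insists on the exact size */

	memset(info, 0, sizeof(*info));
	info->sinfo_assoc_id = 0;	/* 0: endpoint defaults rather than one assoc */
	return getsockopt(fd, IPPROTO_SCTP, SCTP_DEFAULT_SEND_PARAM, info, &len);
}

On a one-to-many socket a non-zero sinfo_assoc_id would select a specific association instead of the endpoint-wide defaults.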
3362 | |||
3363 | /* | ||
3364 | * | ||
3365 | * 7.1.5 SCTP_NODELAY | ||
3366 | * | ||
3367 | * Turn on/off any Nagle-like algorithm. This means that packets are | ||
3368 | * generally sent as soon as possible and no unnecessary delays are | ||
3369 | * introduced, at the cost of more packets in the network. Expects an | ||
3370 | * integer boolean flag. | ||
3371 | */ | ||
3372 | |||
3373 | static int sctp_getsockopt_nodelay(struct sock *sk, int len, | ||
3374 | char __user *optval, int __user *optlen) | ||
3375 | { | ||
3376 | int val; | ||
3377 | |||
3378 | if (len < sizeof(int)) | ||
3379 | return -EINVAL; | ||
3380 | |||
3381 | len = sizeof(int); | ||
3382 | val = (sctp_sk(sk)->nodelay == 1); | ||
3383 | if (put_user(len, optlen)) | ||
3384 | return -EFAULT; | ||
3385 | if (copy_to_user(optval, &val, len)) | ||
3386 | return -EFAULT; | ||
3387 | return 0; | ||
3388 | } | ||
3389 | |||
3390 | /* | ||
3391 | * | ||
3392 | * 7.1.1 SCTP_RTOINFO | ||
3393 | * | ||
3394 | * The protocol parameters used to initialize and bound retransmission | ||
3395 | * timeout (RTO) are tunable. sctp_rtoinfo structure is used to access | ||
3396 | * and modify these parameters. | ||
3397 | * All parameters are time values, in milliseconds. A value of 0, when | ||
3398 | * modifying the parameters, indicates that the current value should not | ||
3399 | * be changed. | ||
3400 | * | ||
3401 | */ | ||
3402 | static int sctp_getsockopt_rtoinfo(struct sock *sk, int len, | ||
3403 | char __user *optval, | ||
3404 | int __user *optlen) { | ||
3405 | struct sctp_rtoinfo rtoinfo; | ||
3406 | struct sctp_association *asoc; | ||
3407 | |||
3408 | if (len != sizeof (struct sctp_rtoinfo)) | ||
3409 | return -EINVAL; | ||
3410 | |||
3411 | if (copy_from_user(&rtoinfo, optval, sizeof (struct sctp_rtoinfo))) | ||
3412 | return -EFAULT; | ||
3413 | |||
3414 | asoc = sctp_id2assoc(sk, rtoinfo.srto_assoc_id); | ||
3415 | |||
3416 | if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP)) | ||
3417 | return -EINVAL; | ||
3418 | |||
3419 | /* Values corresponding to the specific association. */ | ||
3420 | if (asoc) { | ||
3421 | rtoinfo.srto_initial = jiffies_to_msecs(asoc->rto_initial); | ||
3422 | rtoinfo.srto_max = jiffies_to_msecs(asoc->rto_max); | ||
3423 | rtoinfo.srto_min = jiffies_to_msecs(asoc->rto_min); | ||
3424 | } else { | ||
3425 | /* Values corresponding to the endpoint. */ | ||
3426 | struct sctp_sock *sp = sctp_sk(sk); | ||
3427 | |||
3428 | rtoinfo.srto_initial = sp->rtoinfo.srto_initial; | ||
3429 | rtoinfo.srto_max = sp->rtoinfo.srto_max; | ||
3430 | rtoinfo.srto_min = sp->rtoinfo.srto_min; | ||
3431 | } | ||
3432 | |||
3433 | if (put_user(len, optlen)) | ||
3434 | return -EFAULT; | ||
3435 | |||
3436 | if (copy_to_user(optval, &rtoinfo, len)) | ||
3437 | return -EFAULT; | ||
3438 | |||
3439 | return 0; | ||
3440 | } | ||
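A hedged user-space sketch of reading these RTO bounds, assuming the lksctp-tools header and an illustrative helper name (not part of this patch):

/* Hypothetical user-space helper, not part of this patch. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/sctp.h>	/* struct sctp_rtoinfo, SCTP_RTOINFO */

static void print_rtoinfo(int fd, sctp_assoc_t assoc_id)
{
	struct sctp_rtoinfo rto;
	socklen_t len = sizeof(rto);

	memset(&rto, 0, sizeof(rto));
	rto.srto_assoc_id = assoc_id;	/* 0 selects the endpoint defaults */
	if (getsockopt(fd, IPPROTO_SCTP, SCTP_RTOINFO, &rto, &len) == 0)
		printf("RTO initial=%u min=%u max=%u (ms)\n",
		       rto.srto_initial, rto.srto_min, rto.srto_max);
}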
3441 | |||
3442 | /* | ||
3443 | * | ||
3444 | * 7.1.2 SCTP_ASSOCINFO | ||
3445 | * | ||
3446 | * This option is used to tune the maximum retransmission attempts | ||
3447 | * of the association. | ||
3448 | * Returns an error if the new association retransmission value is | ||
3449 | * greater than the sum of the retransmission value of the peer. | ||
3450 | * See [SCTP] for more information. | ||
3451 | * | ||
3452 | */ | ||
3453 | static int sctp_getsockopt_associnfo(struct sock *sk, int len, | ||
3454 | char __user *optval, | ||
3455 | int __user *optlen) | ||
3456 | { | ||
3457 | |||
3458 | struct sctp_assocparams assocparams; | ||
3459 | struct sctp_association *asoc; | ||
3460 | struct list_head *pos; | ||
3461 | int cnt = 0; | ||
3462 | |||
3463 | if (len != sizeof (struct sctp_assocparams)) | ||
3464 | return -EINVAL; | ||
3465 | |||
3466 | if (copy_from_user(&assocparams, optval, | ||
3467 | sizeof (struct sctp_assocparams))) | ||
3468 | return -EFAULT; | ||
3469 | |||
3470 | asoc = sctp_id2assoc(sk, assocparams.sasoc_assoc_id); | ||
3471 | |||
3472 | if (!asoc && assocparams.sasoc_assoc_id && sctp_style(sk, UDP)) | ||
3473 | return -EINVAL; | ||
3474 | |||
3475 | /* Values corresponding to the specific association. */ | ||
3476 | if (asoc) { | ||
3477 | assocparams.sasoc_asocmaxrxt = asoc->max_retrans; | ||
3478 | assocparams.sasoc_peer_rwnd = asoc->peer.rwnd; | ||
3479 | assocparams.sasoc_local_rwnd = asoc->a_rwnd; | ||
3480 | assocparams.sasoc_cookie_life = (asoc->cookie_life.tv_sec | ||
3481 | * 1000) + | ||
3482 | (asoc->cookie_life.tv_usec | ||
3483 | / 1000); | ||
3484 | |||
3485 | list_for_each(pos, &asoc->peer.transport_addr_list) { | ||
3486 | cnt++; | ||
3487 | } | ||
3488 | |||
3489 | assocparams.sasoc_number_peer_destinations = cnt; | ||
3490 | } else { | ||
3491 | /* Values corresponding to the endpoint */ | ||
3492 | struct sctp_sock *sp = sctp_sk(sk); | ||
3493 | |||
3494 | assocparams.sasoc_asocmaxrxt = sp->assocparams.sasoc_asocmaxrxt; | ||
3495 | assocparams.sasoc_peer_rwnd = sp->assocparams.sasoc_peer_rwnd; | ||
3496 | assocparams.sasoc_local_rwnd = sp->assocparams.sasoc_local_rwnd; | ||
3497 | assocparams.sasoc_cookie_life = | ||
3498 | sp->assocparams.sasoc_cookie_life; | ||
3499 | assocparams.sasoc_number_peer_destinations = | ||
3500 | sp->assocparams. | ||
3501 | sasoc_number_peer_destinations; | ||
3502 | } | ||
3503 | |||
3504 | if (put_user(len, optlen)) | ||
3505 | return -EFAULT; | ||
3506 | |||
3507 | if (copy_to_user(optval, &assocparams, len)) | ||
3508 | return -EFAULT; | ||
3509 | |||
3510 | return 0; | ||
3511 | } | ||
3512 | |||
3513 | /* | ||
3514 | * 7.1.16 Set/clear IPv4 mapped addresses (SCTP_I_WANT_MAPPED_V4_ADDR) | ||
3515 | * | ||
3516 | * This socket option is a boolean flag which turns on or off mapped V4 | ||
3517 | * addresses. If this option is turned on and the socket is type | ||
3518 | * PF_INET6, then IPv4 addresses will be mapped to V6 representation. | ||
3519 | * If this option is turned off, then no mapping will be done of V4 | ||
3520 | * addresses and a user will receive both PF_INET6 and PF_INET type | ||
3521 | * addresses on the socket. | ||
3522 | */ | ||
3523 | static int sctp_getsockopt_mappedv4(struct sock *sk, int len, | ||
3524 | char __user *optval, int __user *optlen) | ||
3525 | { | ||
3526 | int val; | ||
3527 | struct sctp_sock *sp = sctp_sk(sk); | ||
3528 | |||
3529 | if (len < sizeof(int)) | ||
3530 | return -EINVAL; | ||
3531 | |||
3532 | len = sizeof(int); | ||
3533 | val = sp->v4mapped; | ||
3534 | if (put_user(len, optlen)) | ||
3535 | return -EFAULT; | ||
3536 | if (copy_to_user(optval, &val, len)) | ||
3537 | return -EFAULT; | ||
3538 | |||
3539 | return 0; | ||
3540 | } | ||
3541 | |||
3542 | /* | ||
3543 | * 7.1.17 Set the maximum fragmentation size (SCTP_MAXSEG) | ||
3544 | * | ||
3545 | * This socket option specifies the maximum size to put in any outgoing | ||
3546 | * SCTP chunk. If a message is larger than this size it will be | ||
3547 | * fragmented by SCTP into the specified size. Note that the underlying | ||
3548 | * SCTP implementation may fragment into smaller sized chunks when the | ||
3549 | * PMTU of the underlying association is smaller than the value set by | ||
3550 | * the user. | ||
3551 | */ | ||
3552 | static int sctp_getsockopt_maxseg(struct sock *sk, int len, | ||
3553 | char __user *optval, int __user *optlen) | ||
3554 | { | ||
3555 | int val; | ||
3556 | |||
3557 | if (len < sizeof(int)) | ||
3558 | return -EINVAL; | ||
3559 | |||
3560 | len = sizeof(int); | ||
3561 | |||
3562 | val = sctp_sk(sk)->user_frag; | ||
3563 | if (put_user(len, optlen)) | ||
3564 | return -EFAULT; | ||
3565 | if (copy_to_user(optval, &val, len)) | ||
3566 | return -EFAULT; | ||
3567 | |||
3568 | return 0; | ||
3569 | } | ||
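SCTP_NODELAY, SCTP_I_WANT_MAPPED_V4_ADDR and SCTP_MAXSEG above all carry a plain int, so one user-space pattern covers them. A minimal sketch with a hypothetical helper name, not part of this patch:

#include <sys/socket.h>
#include <netinet/sctp.h>

/* Returns the option value (e.g. 1 when the Nagle delay is disabled), or -1 on error. */
static int get_sctp_int_opt(int fd, int optname)
{
	int val = 0;
	socklen_t len = sizeof(val);

	if (getsockopt(fd, IPPROTO_SCTP, optname, &val, &len) < 0)
		return -1;
	return val;
}

For example, get_sctp_int_opt(fd, SCTP_NODELAY) or get_sctp_int_opt(fd, SCTP_MAXSEG).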
3570 | |||
3571 | SCTP_STATIC int sctp_getsockopt(struct sock *sk, int level, int optname, | ||
3572 | char __user *optval, int __user *optlen) | ||
3573 | { | ||
3574 | int retval = 0; | ||
3575 | int len; | ||
3576 | |||
3577 | SCTP_DEBUG_PRINTK("sctp_getsockopt(sk: %p, ...)\n", sk); | ||
3578 | |||
3579 | /* I can hardly begin to describe how wrong this is. This is | ||
3580 | * so broken as to be worse than useless. The API draft | ||
3581 | * REALLY is NOT helpful here... I am not convinced that the | ||
3582 | * semantics of getsockopt() with a level OTHER THAN SOL_SCTP | ||
3583 | * are at all well-founded. | ||
3584 | */ | ||
3585 | if (level != SOL_SCTP) { | ||
3586 | struct sctp_af *af = sctp_sk(sk)->pf->af; | ||
3587 | |||
3588 | retval = af->getsockopt(sk, level, optname, optval, optlen); | ||
3589 | return retval; | ||
3590 | } | ||
3591 | |||
3592 | if (get_user(len, optlen)) | ||
3593 | return -EFAULT; | ||
3594 | |||
3595 | sctp_lock_sock(sk); | ||
3596 | |||
3597 | switch (optname) { | ||
3598 | case SCTP_STATUS: | ||
3599 | retval = sctp_getsockopt_sctp_status(sk, len, optval, optlen); | ||
3600 | break; | ||
3601 | case SCTP_DISABLE_FRAGMENTS: | ||
3602 | retval = sctp_getsockopt_disable_fragments(sk, len, optval, | ||
3603 | optlen); | ||
3604 | break; | ||
3605 | case SCTP_EVENTS: | ||
3606 | retval = sctp_getsockopt_events(sk, len, optval, optlen); | ||
3607 | break; | ||
3608 | case SCTP_AUTOCLOSE: | ||
3609 | retval = sctp_getsockopt_autoclose(sk, len, optval, optlen); | ||
3610 | break; | ||
3611 | case SCTP_SOCKOPT_PEELOFF: | ||
3612 | retval = sctp_getsockopt_peeloff(sk, len, optval, optlen); | ||
3613 | break; | ||
3614 | case SCTP_PEER_ADDR_PARAMS: | ||
3615 | retval = sctp_getsockopt_peer_addr_params(sk, len, optval, | ||
3616 | optlen); | ||
3617 | break; | ||
3618 | case SCTP_INITMSG: | ||
3619 | retval = sctp_getsockopt_initmsg(sk, len, optval, optlen); | ||
3620 | break; | ||
3621 | case SCTP_GET_PEER_ADDRS_NUM: | ||
3622 | retval = sctp_getsockopt_peer_addrs_num(sk, len, optval, | ||
3623 | optlen); | ||
3624 | break; | ||
3625 | case SCTP_GET_LOCAL_ADDRS_NUM: | ||
3626 | retval = sctp_getsockopt_local_addrs_num(sk, len, optval, | ||
3627 | optlen); | ||
3628 | break; | ||
3629 | case SCTP_GET_PEER_ADDRS: | ||
3630 | retval = sctp_getsockopt_peer_addrs(sk, len, optval, | ||
3631 | optlen); | ||
3632 | break; | ||
3633 | case SCTP_GET_LOCAL_ADDRS: | ||
3634 | retval = sctp_getsockopt_local_addrs(sk, len, optval, | ||
3635 | optlen); | ||
3636 | break; | ||
3637 | case SCTP_DEFAULT_SEND_PARAM: | ||
3638 | retval = sctp_getsockopt_default_send_param(sk, len, | ||
3639 | optval, optlen); | ||
3640 | break; | ||
3641 | case SCTP_PRIMARY_ADDR: | ||
3642 | retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen); | ||
3643 | break; | ||
3644 | case SCTP_NODELAY: | ||
3645 | retval = sctp_getsockopt_nodelay(sk, len, optval, optlen); | ||
3646 | break; | ||
3647 | case SCTP_RTOINFO: | ||
3648 | retval = sctp_getsockopt_rtoinfo(sk, len, optval, optlen); | ||
3649 | break; | ||
3650 | case SCTP_ASSOCINFO: | ||
3651 | retval = sctp_getsockopt_associnfo(sk, len, optval, optlen); | ||
3652 | break; | ||
3653 | case SCTP_I_WANT_MAPPED_V4_ADDR: | ||
3654 | retval = sctp_getsockopt_mappedv4(sk, len, optval, optlen); | ||
3655 | break; | ||
3656 | case SCTP_MAXSEG: | ||
3657 | retval = sctp_getsockopt_maxseg(sk, len, optval, optlen); | ||
3658 | break; | ||
3659 | case SCTP_GET_PEER_ADDR_INFO: | ||
3660 | retval = sctp_getsockopt_peer_addr_info(sk, len, optval, | ||
3661 | optlen); | ||
3662 | break; | ||
3663 | case SCTP_ADAPTION_LAYER: | ||
3664 | retval = sctp_getsockopt_adaption_layer(sk, len, optval, | ||
3665 | optlen); | ||
3666 | break; | ||
3667 | default: | ||
3668 | retval = -ENOPROTOOPT; | ||
3669 | break; | ||
3670 | } | ||
3671 | |||
3672 | sctp_release_sock(sk); | ||
3673 | return retval; | ||
3674 | } | ||
3675 | |||
3676 | static void sctp_hash(struct sock *sk) | ||
3677 | { | ||
3678 | /* STUB */ | ||
3679 | } | ||
3680 | |||
3681 | static void sctp_unhash(struct sock *sk) | ||
3682 | { | ||
3683 | /* STUB */ | ||
3684 | } | ||
3685 | |||
3686 | /* Check if port is acceptable. Possibly find first available port. | ||
3687 | * | ||
3688 | * The port hash table is contained in the 'global' SCTP protocol | ||
3689 | * storage returned by struct sctp_protocol *sctp_get_protocol(). | ||
3690 | * The hash table is an array of 4096 lists (sctp_bind_hashbucket). | ||
3691 | * Each bucket holds the ports whose number hashes to that bucket's | ||
3692 | * index, so, as you would expect from a hash function, all the | ||
3693 | * ports in a given list hash out to the same list number. Each | ||
3694 | * entry in a list carries a link to the socket (struct sock) that | ||
3695 | * uses the port, the port number itself and a fastreuse flag | ||
3696 | * (FIXME: NPI ipg). | ||
3697 | */ | ||
3698 | static struct sctp_bind_bucket *sctp_bucket_create( | ||
3699 | struct sctp_bind_hashbucket *head, unsigned short snum); | ||
3700 | |||
3701 | static long sctp_get_port_local(struct sock *sk, union sctp_addr *addr) | ||
3702 | { | ||
3703 | struct sctp_bind_hashbucket *head; /* hash list */ | ||
3704 | struct sctp_bind_bucket *pp; /* hash list port iterator */ | ||
3705 | unsigned short snum; | ||
3706 | int ret; | ||
3707 | |||
3708 | /* NOTE: Remember to put this back to net order. */ | ||
3709 | addr->v4.sin_port = ntohs(addr->v4.sin_port); | ||
3710 | snum = addr->v4.sin_port; | ||
3711 | |||
3712 | SCTP_DEBUG_PRINTK("sctp_get_port() begins, snum=%d\n", snum); | ||
3713 | sctp_local_bh_disable(); | ||
3714 | |||
3715 | if (snum == 0) { | ||
3716 | /* Search for an available port. | ||
3717 | * | ||
3718 | * 'sctp_port_rover' was the last port assigned, so | ||
3719 | * we start to search from 'sctp_port_rover + | ||
3720 | * 1'. What we do is first check if port 'rover' is | ||
3721 | * already in the hash table; if not, we use that; if | ||
3722 | * it is, we try next. | ||
3723 | */ | ||
3724 | int low = sysctl_local_port_range[0]; | ||
3725 | int high = sysctl_local_port_range[1]; | ||
3726 | int remaining = (high - low) + 1; | ||
3727 | int rover; | ||
3728 | int index; | ||
3729 | |||
3730 | sctp_spin_lock(&sctp_port_alloc_lock); | ||
3731 | rover = sctp_port_rover; | ||
3732 | do { | ||
3733 | rover++; | ||
3734 | if ((rover < low) || (rover > high)) | ||
3735 | rover = low; | ||
3736 | index = sctp_phashfn(rover); | ||
3737 | head = &sctp_port_hashtable[index]; | ||
3738 | sctp_spin_lock(&head->lock); | ||
3739 | for (pp = head->chain; pp; pp = pp->next) | ||
3740 | if (pp->port == rover) | ||
3741 | goto next; | ||
3742 | break; | ||
3743 | next: | ||
3744 | sctp_spin_unlock(&head->lock); | ||
3745 | } while (--remaining > 0); | ||
3746 | sctp_port_rover = rover; | ||
3747 | sctp_spin_unlock(&sctp_port_alloc_lock); | ||
3748 | |||
3749 | /* Exhausted local port range during search? */ | ||
3750 | ret = 1; | ||
3751 | if (remaining <= 0) | ||
3752 | goto fail; | ||
3753 | |||
3754 | /* OK, here is the one we will use. HEAD (the port | ||
3755 | * hash table list entry) is non-NULL and we hold its | ||
3756 | * lock. | ||
3757 | */ | ||
3758 | snum = rover; | ||
3759 | } else { | ||
3760 | /* We are given a specific port number; we verify | ||
3761 | * that it is not being used. If it is used, we will | ||
3762 | * exhaust the search in the hash list corresponding | ||
3763 | * to the port number (snum) - we detect that with the | ||
3764 | * port iterator, pp being NULL. | ||
3765 | */ | ||
3766 | head = &sctp_port_hashtable[sctp_phashfn(snum)]; | ||
3767 | sctp_spin_lock(&head->lock); | ||
3768 | for (pp = head->chain; pp; pp = pp->next) { | ||
3769 | if (pp->port == snum) | ||
3770 | goto pp_found; | ||
3771 | } | ||
3772 | } | ||
3773 | pp = NULL; | ||
3774 | goto pp_not_found; | ||
3775 | pp_found: | ||
3776 | if (!hlist_empty(&pp->owner)) { | ||
3777 | /* We had a port hash table hit - there is an | ||
3778 | * available port (pp != NULL) and it is being | ||
3779 | * used by another socket (pp->owner not empty); that other | ||
3780 | * socket is going to be sk2. | ||
3781 | */ | ||
3782 | int reuse = sk->sk_reuse; | ||
3783 | struct sock *sk2; | ||
3784 | struct hlist_node *node; | ||
3785 | |||
3786 | SCTP_DEBUG_PRINTK("sctp_get_port() found a possible match\n"); | ||
3787 | if (pp->fastreuse && sk->sk_reuse) | ||
3788 | goto success; | ||
3789 | |||
3790 | /* Run through the list of sockets bound to the port | ||
3791 | * (pp->port) [via the bind node links in each | ||
3792 | * struct sock *sk2, anchored at pp->owner]. On each one, | ||
3793 | * we get the endpoint they describe and run through | ||
3794 | * the endpoint's list of IP (v4 or v6) addresses, | ||
3795 | * comparing each of the addresses with the address of | ||
3796 | * the socket sk. If we find a match, then that means | ||
3797 | * that this port/socket (sk) combination are already | ||
3798 | * in an endpoint. | ||
3799 | */ | ||
3800 | sk_for_each_bound(sk2, node, &pp->owner) { | ||
3801 | struct sctp_endpoint *ep2; | ||
3802 | ep2 = sctp_sk(sk2)->ep; | ||
3803 | |||
3804 | if (reuse && sk2->sk_reuse) | ||
3805 | continue; | ||
3806 | |||
3807 | if (sctp_bind_addr_match(&ep2->base.bind_addr, addr, | ||
3808 | sctp_sk(sk))) { | ||
3809 | ret = (long)sk2; | ||
3810 | goto fail_unlock; | ||
3811 | } | ||
3812 | } | ||
3813 | SCTP_DEBUG_PRINTK("sctp_get_port(): Found a match\n"); | ||
3814 | } | ||
3815 | pp_not_found: | ||
3816 | /* If there was a hash table miss, create a new port. */ | ||
3817 | ret = 1; | ||
3818 | if (!pp && !(pp = sctp_bucket_create(head, snum))) | ||
3819 | goto fail_unlock; | ||
3820 | |||
3821 | /* In either case (hit or miss), make sure fastreuse is 1 only | ||
3822 | * if sk->sk_reuse is too (that is, if the caller requested | ||
3823 | * SO_REUSEADDR on this socket -sk-). | ||
3824 | */ | ||
3825 | if (hlist_empty(&pp->owner)) | ||
3826 | pp->fastreuse = sk->sk_reuse ? 1 : 0; | ||
3827 | else if (pp->fastreuse && !sk->sk_reuse) | ||
3828 | pp->fastreuse = 0; | ||
3829 | |||
3830 | /* We are set, so fill up all the data in the hash table | ||
3831 | * entry, tie the socket list information with the rest of the | ||
3832 | * sockets FIXME: Blurry, NPI (ipg). | ||
3833 | */ | ||
3834 | success: | ||
3835 | inet_sk(sk)->num = snum; | ||
3836 | if (!sctp_sk(sk)->bind_hash) { | ||
3837 | sk_add_bind_node(sk, &pp->owner); | ||
3838 | sctp_sk(sk)->bind_hash = pp; | ||
3839 | } | ||
3840 | ret = 0; | ||
3841 | |||
3842 | fail_unlock: | ||
3843 | sctp_spin_unlock(&head->lock); | ||
3844 | |||
3845 | fail: | ||
3846 | sctp_local_bh_enable(); | ||
3847 | addr->v4.sin_port = htons(addr->v4.sin_port); | ||
3848 | return ret; | ||
3849 | } | ||
3850 | |||
3851 | /* Assign a 'snum' port to the socket. If snum == 0, an ephemeral | ||
3852 | * port is requested. | ||
3853 | */ | ||
3854 | static int sctp_get_port(struct sock *sk, unsigned short snum) | ||
3855 | { | ||
3856 | long ret; | ||
3857 | union sctp_addr addr; | ||
3858 | struct sctp_af *af = sctp_sk(sk)->pf->af; | ||
3859 | |||
3860 | /* Set up a dummy address struct from the sk. */ | ||
3861 | af->from_sk(&addr, sk); | ||
3862 | addr.v4.sin_port = htons(snum); | ||
3863 | |||
3864 | /* Note: sk->sk_num gets filled in if ephemeral port request. */ | ||
3865 | ret = sctp_get_port_local(sk, &addr); | ||
3866 | |||
3867 | return (ret ? 1 : 0); | ||
3868 | } | ||
3869 | |||
3870 | /* | ||
3871 | * 3.1.3 listen() - UDP Style Syntax | ||
3872 | * | ||
3873 | * By default, new associations are not accepted for UDP style sockets. | ||
3874 | * An application uses listen() to mark a socket as being able to | ||
3875 | * accept new associations. | ||
3876 | */ | ||
3877 | SCTP_STATIC int sctp_seqpacket_listen(struct sock *sk, int backlog) | ||
3878 | { | ||
3879 | struct sctp_sock *sp = sctp_sk(sk); | ||
3880 | struct sctp_endpoint *ep = sp->ep; | ||
3881 | |||
3882 | /* Only UDP style sockets that are not peeled off are allowed to | ||
3883 | * listen(). | ||
3884 | */ | ||
3885 | if (!sctp_style(sk, UDP)) | ||
3886 | return -EINVAL; | ||
3887 | |||
3888 | /* If backlog is zero, disable listening. */ | ||
3889 | if (!backlog) { | ||
3890 | if (sctp_sstate(sk, CLOSED)) | ||
3891 | return 0; | ||
3892 | |||
3893 | sctp_unhash_endpoint(ep); | ||
3894 | sk->sk_state = SCTP_SS_CLOSED; | ||
3895 | } | ||
3896 | |||
3897 | /* Return if we are already listening. */ | ||
3898 | if (sctp_sstate(sk, LISTENING)) | ||
3899 | return 0; | ||
3900 | |||
3901 | /* | ||
3902 | * If a bind() or sctp_bindx() is not called prior to a listen() | ||
3903 | * call that allows new associations to be accepted, the system | ||
3904 | * picks an ephemeral port and will choose an address set equivalent | ||
3905 | * to binding with a wildcard address. | ||
3906 | * | ||
3907 | * This is not currently spelled out in the SCTP sockets | ||
3908 | * extensions draft, but follows the practice as seen in TCP | ||
3909 | * sockets. | ||
3910 | */ | ||
3911 | if (!ep->base.bind_addr.port) { | ||
3912 | if (sctp_autobind(sk)) | ||
3913 | return -EAGAIN; | ||
3914 | } | ||
3915 | sk->sk_state = SCTP_SS_LISTENING; | ||
3916 | sctp_hash_endpoint(ep); | ||
3917 | return 0; | ||
3918 | } | ||
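A minimal one-to-many (UDP-style) server sketch that exercises this path: socket(), bind(), then listen() with a non-zero backlog to start accepting associations. This is an illustrative user-space fragment, not part of this patch; it assumes IPv4 and keeps error handling minimal.

/* Hypothetical user-space sketch, not part of this patch. */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <netinet/sctp.h>

static int sctp_seqpacket_server(unsigned short port)
{
	struct sockaddr_in addr;
	int fd = socket(AF_INET, SOCK_SEQPACKET, IPPROTO_SCTP);

	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(port);
	addr.sin_addr.s_addr = htonl(INADDR_ANY);

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, 1) < 0) {	/* any non-zero backlog enables new assocs */
		close(fd);
		return -1;
	}
	return fd;	/* recvmsg() will now deliver data from any association */
}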
3919 | |||
3920 | /* | ||
3921 | * 4.1.3 listen() - TCP Style Syntax | ||
3922 | * | ||
3923 | * Applications use listen() to ready the SCTP endpoint for accepting | ||
3924 | * inbound associations. | ||
3925 | */ | ||
3926 | SCTP_STATIC int sctp_stream_listen(struct sock *sk, int backlog) | ||
3927 | { | ||
3928 | struct sctp_sock *sp = sctp_sk(sk); | ||
3929 | struct sctp_endpoint *ep = sp->ep; | ||
3930 | |||
3931 | /* If backlog is zero, disable listening. */ | ||
3932 | if (!backlog) { | ||
3933 | if (sctp_sstate(sk, CLOSED)) | ||
3934 | return 0; | ||
3935 | |||
3936 | sctp_unhash_endpoint(ep); | ||
3937 | sk->sk_state = SCTP_SS_CLOSED; | ||
3938 | } | ||
3939 | |||
3940 | if (sctp_sstate(sk, LISTENING)) | ||
3941 | return 0; | ||
3942 | |||
3943 | /* | ||
3944 | * If a bind() or sctp_bindx() is not called prior to a listen() | ||
3945 | * call that allows new associations to be accepted, the system | ||
3946 | * picks an ephemeral port and will choose an address set equivalent | ||
3947 | * to binding with a wildcard address. | ||
3948 | * | ||
3949 | * This is not currently spelled out in the SCTP sockets | ||
3950 | * extensions draft, but follows the practice as seen in TCP | ||
3951 | * sockets. | ||
3952 | */ | ||
3953 | if (!ep->base.bind_addr.port) { | ||
3954 | if (sctp_autobind(sk)) | ||
3955 | return -EAGAIN; | ||
3956 | } | ||
3957 | sk->sk_state = SCTP_SS_LISTENING; | ||
3958 | sk->sk_max_ack_backlog = backlog; | ||
3959 | sctp_hash_endpoint(ep); | ||
3960 | return 0; | ||
3961 | } | ||
3962 | |||
3963 | /* | ||
3964 | * Move a socket to LISTENING state. | ||
3965 | */ | ||
3966 | int sctp_inet_listen(struct socket *sock, int backlog) | ||
3967 | { | ||
3968 | struct sock *sk = sock->sk; | ||
3969 | struct crypto_tfm *tfm = NULL; | ||
3970 | int err = -EINVAL; | ||
3971 | |||
3972 | if (unlikely(backlog < 0)) | ||
3973 | goto out; | ||
3974 | |||
3975 | sctp_lock_sock(sk); | ||
3976 | |||
3977 | if (sock->state != SS_UNCONNECTED) | ||
3978 | goto out; | ||
3979 | |||
3980 | /* Allocate HMAC for generating cookie. */ | ||
3981 | if (sctp_hmac_alg) { | ||
3982 | tfm = sctp_crypto_alloc_tfm(sctp_hmac_alg, 0); | ||
3983 | if (!tfm) { | ||
3984 | err = -ENOSYS; | ||
3985 | goto out; | ||
3986 | } | ||
3987 | } | ||
3988 | |||
3989 | switch (sock->type) { | ||
3990 | case SOCK_SEQPACKET: | ||
3991 | err = sctp_seqpacket_listen(sk, backlog); | ||
3992 | break; | ||
3993 | case SOCK_STREAM: | ||
3994 | err = sctp_stream_listen(sk, backlog); | ||
3995 | break; | ||
3996 | default: | ||
3997 | break; | ||
3998 | } | ||
3999 | if (err) | ||
4000 | goto cleanup; | ||
4001 | |||
4002 | /* Store away the transform reference. */ | ||
4003 | sctp_sk(sk)->hmac = tfm; | ||
4004 | out: | ||
4005 | sctp_release_sock(sk); | ||
4006 | return err; | ||
4007 | cleanup: | ||
4008 | if (tfm) | ||
4009 | sctp_crypto_free_tfm(tfm); | ||
4010 | goto out; | ||
4011 | } | ||
4012 | |||
4013 | /* | ||
4014 | * This function was modeled on the current datagram_poll() and | ||
4015 | * tcp_poll(). Note that, based on those implementations, we don't | ||
4016 | * lock the socket in this function, even though it seems that, | ||
4017 | * ideally, locking or some other mechanism could be used to ensure | ||
4018 | * the integrity of the counters (sndbuf and wmem_queued) used | ||
4019 | * here. We assume that we don't need locks either until proven | ||
4020 | * otherwise. | ||
4021 | * | ||
4022 | * Another thing to note is that we include the Async I/O support | ||
4023 | * here, again, by modeling the current TCP/UDP code. We don't have | ||
4024 | * a good way to test with it yet. | ||
4025 | */ | ||
4026 | unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait) | ||
4027 | { | ||
4028 | struct sock *sk = sock->sk; | ||
4029 | struct sctp_sock *sp = sctp_sk(sk); | ||
4030 | unsigned int mask; | ||
4031 | |||
4032 | poll_wait(file, sk->sk_sleep, wait); | ||
4033 | |||
4034 | /* A TCP-style listening socket becomes readable when the accept queue | ||
4035 | * is not empty. | ||
4036 | */ | ||
4037 | if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING)) | ||
4038 | return (!list_empty(&sp->ep->asocs)) ? | ||
4039 | (POLLIN | POLLRDNORM) : 0; | ||
4040 | |||
4041 | mask = 0; | ||
4042 | |||
4043 | /* Are there any exceptional events? */ | ||
4044 | if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) | ||
4045 | mask |= POLLERR; | ||
4046 | if (sk->sk_shutdown == SHUTDOWN_MASK) | ||
4047 | mask |= POLLHUP; | ||
4048 | |||
4049 | /* Is it readable? Reconsider this code with TCP-style support. */ | ||
4050 | if (!skb_queue_empty(&sk->sk_receive_queue) || | ||
4051 | (sk->sk_shutdown & RCV_SHUTDOWN)) | ||
4052 | mask |= POLLIN | POLLRDNORM; | ||
4053 | |||
4054 | /* The association is either gone or not ready. */ | ||
4055 | if (!sctp_style(sk, UDP) && sctp_sstate(sk, CLOSED)) | ||
4056 | return mask; | ||
4057 | |||
4058 | /* Is it writable? */ | ||
4059 | if (sctp_writeable(sk)) { | ||
4060 | mask |= POLLOUT | POLLWRNORM; | ||
4061 | } else { | ||
4062 | set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); | ||
4063 | /* | ||
4064 | * Since the socket is not locked, the buffer | ||
4065 | * might be made available after the writeable check and | ||
4066 | * before the bit is set. This could cause a lost I/O | ||
4067 | * signal. tcp_poll() has a race breaker for this race | ||
4068 | * condition. Based on their implementation, we put | ||
4069 | * in the following code to cover it as well. | ||
4070 | */ | ||
4071 | if (sctp_writeable(sk)) | ||
4072 | mask |= POLLOUT | POLLWRNORM; | ||
4073 | } | ||
4074 | return mask; | ||
4075 | } | ||
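From user space, the mask computed above surfaces through the ordinary poll()/select() interface. A minimal sketch with a hypothetical helper name, not part of this patch:

#include <poll.h>

/* Wait up to timeout_ms for the SCTP socket to become readable or writable. */
static int wait_for_sctp_io(int fd, int timeout_ms)
{
	struct pollfd pfd = {
		.fd = fd,
		.events = POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM,
	};

	return poll(&pfd, 1, timeout_ms);	/* > 0: check pfd.revents */
}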
4076 | |||
4077 | /******************************************************************** | ||
4078 | * 2nd Level Abstractions | ||
4079 | ********************************************************************/ | ||
4080 | |||
4081 | static struct sctp_bind_bucket *sctp_bucket_create( | ||
4082 | struct sctp_bind_hashbucket *head, unsigned short snum) | ||
4083 | { | ||
4084 | struct sctp_bind_bucket *pp; | ||
4085 | |||
4086 | pp = kmem_cache_alloc(sctp_bucket_cachep, SLAB_ATOMIC); | ||
4087 | SCTP_DBG_OBJCNT_INC(bind_bucket); | ||
4088 | if (pp) { | ||
4089 | pp->port = snum; | ||
4090 | pp->fastreuse = 0; | ||
4091 | INIT_HLIST_HEAD(&pp->owner); | ||
4092 | if ((pp->next = head->chain) != NULL) | ||
4093 | pp->next->pprev = &pp->next; | ||
4094 | head->chain = pp; | ||
4095 | pp->pprev = &head->chain; | ||
4096 | } | ||
4097 | return pp; | ||
4098 | } | ||
4099 | |||
4100 | /* Caller must hold hashbucket lock for this tb with local BH disabled */ | ||
4101 | static void sctp_bucket_destroy(struct sctp_bind_bucket *pp) | ||
4102 | { | ||
4103 | if (hlist_empty(&pp->owner)) { | ||
4104 | if (pp->next) | ||
4105 | pp->next->pprev = pp->pprev; | ||
4106 | *(pp->pprev) = pp->next; | ||
4107 | kmem_cache_free(sctp_bucket_cachep, pp); | ||
4108 | SCTP_DBG_OBJCNT_DEC(bind_bucket); | ||
4109 | } | ||
4110 | } | ||
4111 | |||
4112 | /* Release this socket's reference to a local port. */ | ||
4113 | static inline void __sctp_put_port(struct sock *sk) | ||
4114 | { | ||
4115 | struct sctp_bind_hashbucket *head = | ||
4116 | &sctp_port_hashtable[sctp_phashfn(inet_sk(sk)->num)]; | ||
4117 | struct sctp_bind_bucket *pp; | ||
4118 | |||
4119 | sctp_spin_lock(&head->lock); | ||
4120 | pp = sctp_sk(sk)->bind_hash; | ||
4121 | __sk_del_bind_node(sk); | ||
4122 | sctp_sk(sk)->bind_hash = NULL; | ||
4123 | inet_sk(sk)->num = 0; | ||
4124 | sctp_bucket_destroy(pp); | ||
4125 | sctp_spin_unlock(&head->lock); | ||
4126 | } | ||
4127 | |||
4128 | void sctp_put_port(struct sock *sk) | ||
4129 | { | ||
4130 | sctp_local_bh_disable(); | ||
4131 | __sctp_put_port(sk); | ||
4132 | sctp_local_bh_enable(); | ||
4133 | } | ||
4134 | |||
4135 | /* | ||
4136 | * The system picks an ephemeral port and chooses an address set equivalent | ||
4137 | * to binding with a wildcard address. | ||
4138 | * One of those addresses will be the primary address for the association. | ||
4139 | * This automatically enables the multihoming capability of SCTP. | ||
4140 | */ | ||
4141 | static int sctp_autobind(struct sock *sk) | ||
4142 | { | ||
4143 | union sctp_addr autoaddr; | ||
4144 | struct sctp_af *af; | ||
4145 | unsigned short port; | ||
4146 | |||
4147 | /* Initialize a local sockaddr structure to INADDR_ANY. */ | ||
4148 | af = sctp_sk(sk)->pf->af; | ||
4149 | |||
4150 | port = htons(inet_sk(sk)->num); | ||
4151 | af->inaddr_any(&autoaddr, port); | ||
4152 | |||
4153 | return sctp_do_bind(sk, &autoaddr, af->sockaddr_len); | ||
4154 | } | ||
4155 | |||
4156 | /* Parse out IPPROTO_SCTP CMSG headers. Perform only minimal validation. | ||
4157 | * | ||
4158 | * From RFC 2292 | ||
4159 | * 4.2 The cmsghdr Structure * | ||
4160 | * | ||
4161 | * When ancillary data is sent or received, any number of ancillary data | ||
4162 | * objects can be specified by the msg_control and msg_controllen members of | ||
4163 | * the msghdr structure, because each object is preceded by | ||
4164 | * a cmsghdr structure defining the object's length (the cmsg_len member). | ||
4165 | * Historically Berkeley-derived implementations have passed only one object | ||
4166 | * at a time, but this API allows multiple objects to be | ||
4167 | * passed in a single call to sendmsg() or recvmsg(). The following example | ||
4168 | * shows two ancillary data objects in a control buffer. | ||
4169 | * | ||
4170 | * |<--------------------------- msg_controllen -------------------------->| | ||
4171 | * | | | ||
4172 | * | ||
4173 | * |<----- ancillary data object ----->|<----- ancillary data object ----->| | ||
4174 | * | ||
4175 | * |<---------- CMSG_SPACE() --------->|<---------- CMSG_SPACE() --------->| | ||
4176 | * | | | | ||
4177 | * | ||
4178 | * |<---------- cmsg_len ---------->| |<--------- cmsg_len ----------->| | | ||
4179 | * | ||
4180 | * |<--------- CMSG_LEN() --------->| |<-------- CMSG_LEN() ---------->| | | ||
4181 | * | | | | | | ||
4182 | * | ||
4183 | * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ | ||
4184 | * |cmsg_|cmsg_|cmsg_|XX| |XX|cmsg_|cmsg_|cmsg_|XX| |XX| | ||
4185 | * | ||
4186 | * |len |level|type |XX|cmsg_data[]|XX|len |level|type |XX|cmsg_data[]|XX| | ||
4187 | * | ||
4188 | * +-----+-----+-----+--+-----------+--+-----+-----+-----+--+-----------+--+ | ||
4189 | * ^ | ||
4190 | * | | ||
4191 | * | ||
4192 | * msg_control | ||
4193 | * points here | ||
4194 | */ | ||
4195 | SCTP_STATIC int sctp_msghdr_parse(const struct msghdr *msg, | ||
4196 | sctp_cmsgs_t *cmsgs) | ||
4197 | { | ||
4198 | struct cmsghdr *cmsg; | ||
4199 | |||
4200 | for (cmsg = CMSG_FIRSTHDR(msg); | ||
4201 | cmsg != NULL; | ||
4202 | cmsg = CMSG_NXTHDR((struct msghdr*)msg, cmsg)) { | ||
4203 | if (!CMSG_OK(msg, cmsg)) | ||
4204 | return -EINVAL; | ||
4205 | |||
4206 | /* Should we parse this header or ignore? */ | ||
4207 | if (cmsg->cmsg_level != IPPROTO_SCTP) | ||
4208 | continue; | ||
4209 | |||
4210 | /* Strictly check lengths following example in SCM code. */ | ||
4211 | switch (cmsg->cmsg_type) { | ||
4212 | case SCTP_INIT: | ||
4213 | /* SCTP Socket API Extension | ||
4214 | * 5.2.1 SCTP Initiation Structure (SCTP_INIT) | ||
4215 | * | ||
4216 | * This cmsghdr structure provides information for | ||
4217 | * initializing new SCTP associations with sendmsg(). | ||
4218 | * The SCTP_INITMSG socket option uses this same data | ||
4219 | * structure. This structure is not used for | ||
4220 | * recvmsg(). | ||
4221 | * | ||
4222 | * cmsg_level cmsg_type cmsg_data[] | ||
4223 | * ------------ ------------ ---------------------- | ||
4224 | * IPPROTO_SCTP SCTP_INIT struct sctp_initmsg | ||
4225 | */ | ||
4226 | if (cmsg->cmsg_len != | ||
4227 | CMSG_LEN(sizeof(struct sctp_initmsg))) | ||
4228 | return -EINVAL; | ||
4229 | cmsgs->init = (struct sctp_initmsg *)CMSG_DATA(cmsg); | ||
4230 | break; | ||
4231 | |||
4232 | case SCTP_SNDRCV: | ||
4233 | /* SCTP Socket API Extension | ||
4234 | * 5.2.2 SCTP Header Information Structure(SCTP_SNDRCV) | ||
4235 | * | ||
4236 | * This cmsghdr structure specifies SCTP options for | ||
4237 | * sendmsg() and describes SCTP header information | ||
4238 | * about a received message through recvmsg(). | ||
4239 | * | ||
4240 | * cmsg_level cmsg_type cmsg_data[] | ||
4241 | * ------------ ------------ ---------------------- | ||
4242 | * IPPROTO_SCTP SCTP_SNDRCV struct sctp_sndrcvinfo | ||
4243 | */ | ||
4244 | if (cmsg->cmsg_len != | ||
4245 | CMSG_LEN(sizeof(struct sctp_sndrcvinfo))) | ||
4246 | return -EINVAL; | ||
4247 | |||
4248 | cmsgs->info = | ||
4249 | (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg); | ||
4250 | |||
4251 | /* Minimally, validate the sinfo_flags. */ | ||
4252 | if (cmsgs->info->sinfo_flags & | ||
4253 | ~(MSG_UNORDERED | MSG_ADDR_OVER | | ||
4254 | MSG_ABORT | MSG_EOF)) | ||
4255 | return -EINVAL; | ||
4256 | break; | ||
4257 | |||
4258 | default: | ||
4259 | return -EINVAL; | ||
4260 | } | ||
4261 | } | ||
4262 | return 0; | ||
4263 | } | ||
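To connect the layout above with actual use, here is a hedged user-space sketch that builds the single SCTP_SNDRCV ancillary object this parser accepts, sized with CMSG_SPACE()/CMSG_LEN() exactly as checked above. The helper name and stream argument are illustrative; this is not part of the patch.

/* Hypothetical user-space helper, not part of this patch. */
#include <string.h>
#include <sys/socket.h>
#include <netinet/sctp.h>

static ssize_t sctp_send_on_stream(int fd, const void *data, size_t len,
				   unsigned short stream)
{
	char cbuf[CMSG_SPACE(sizeof(struct sctp_sndrcvinfo))];
	struct iovec iov = { .iov_base = (void *)data, .iov_len = len };
	struct msghdr msg;
	struct cmsghdr *cmsg;
	struct sctp_sndrcvinfo *sinfo;

	memset(&msg, 0, sizeof(msg));
	memset(cbuf, 0, sizeof(cbuf));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cbuf;
	msg.msg_controllen = sizeof(cbuf);

	cmsg = CMSG_FIRSTHDR(&msg);
	cmsg->cmsg_level = IPPROTO_SCTP;
	cmsg->cmsg_type = SCTP_SNDRCV;
	cmsg->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));

	sinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
	sinfo->sinfo_stream = stream;	/* outbound stream for this message */

	return sendmsg(fd, &msg, 0);
}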
4264 | |||
4265 | /* | ||
4266 | * Wait for a packet. | ||
4267 | * Note: This function is the same function as in core/datagram.c | ||
4268 | * with a few modifications to make lksctp work. | ||
4269 | */ | ||
4270 | static int sctp_wait_for_packet(struct sock * sk, int *err, long *timeo_p) | ||
4271 | { | ||
4272 | int error; | ||
4273 | DEFINE_WAIT(wait); | ||
4274 | |||
4275 | prepare_to_wait_exclusive(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | ||
4276 | |||
4277 | /* Socket errors? */ | ||
4278 | error = sock_error(sk); | ||
4279 | if (error) | ||
4280 | goto out; | ||
4281 | |||
4282 | if (!skb_queue_empty(&sk->sk_receive_queue)) | ||
4283 | goto ready; | ||
4284 | |||
4285 | /* Socket shut down? */ | ||
4286 | if (sk->sk_shutdown & RCV_SHUTDOWN) | ||
4287 | goto out; | ||
4288 | |||
4289 | /* Sequenced packets can come disconnected. If so we report the | ||
4290 | * problem. | ||
4291 | */ | ||
4292 | error = -ENOTCONN; | ||
4293 | |||
4294 | /* Is there a good reason to think that we may receive some data? */ | ||
4295 | if (list_empty(&sctp_sk(sk)->ep->asocs) && !sctp_sstate(sk, LISTENING)) | ||
4296 | goto out; | ||
4297 | |||
4298 | /* Handle signals. */ | ||
4299 | if (signal_pending(current)) | ||
4300 | goto interrupted; | ||
4301 | |||
4302 | /* Let another process have a go, since we are going to sleep | ||
4303 | * anyway. Note: This may cause odd behaviors if the message | ||
4304 | * does not fit in the user's buffer, but this seems to be the | ||
4305 | * only way to honor MSG_DONTWAIT realistically. | ||
4306 | */ | ||
4307 | sctp_release_sock(sk); | ||
4308 | *timeo_p = schedule_timeout(*timeo_p); | ||
4309 | sctp_lock_sock(sk); | ||
4310 | |||
4311 | ready: | ||
4312 | finish_wait(sk->sk_sleep, &wait); | ||
4313 | return 0; | ||
4314 | |||
4315 | interrupted: | ||
4316 | error = sock_intr_errno(*timeo_p); | ||
4317 | |||
4318 | out: | ||
4319 | finish_wait(sk->sk_sleep, &wait); | ||
4320 | *err = error; | ||
4321 | return error; | ||
4322 | } | ||
4323 | |||
4324 | /* Receive a datagram. | ||
4325 | * Note: This is pretty much the same routine as in core/datagram.c | ||
4326 | * with a few changes to make lksctp work. | ||
4327 | */ | ||
4328 | static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags, | ||
4329 | int noblock, int *err) | ||
4330 | { | ||
4331 | int error; | ||
4332 | struct sk_buff *skb; | ||
4333 | long timeo; | ||
4334 | |||
4335 | /* Caller is allowed not to check sk->sk_err before calling. */ | ||
4336 | error = sock_error(sk); | ||
4337 | if (error) | ||
4338 | goto no_packet; | ||
4339 | |||
4340 | timeo = sock_rcvtimeo(sk, noblock); | ||
4341 | |||
4342 | SCTP_DEBUG_PRINTK("Timeout: timeo: %ld, MAX: %ld.\n", | ||
4343 | timeo, MAX_SCHEDULE_TIMEOUT); | ||
4344 | |||
4345 | do { | ||
4346 | /* Again only user level code calls this function, | ||
4347 | * so nothing interrupt level | ||
4348 | * will suddenly eat the receive_queue. | ||
4349 | * | ||
4350 | * Look at current nfs client by the way... | ||
4351 | * However, this function was correct in any case. 8) | ||
4352 | */ | ||
4353 | if (flags & MSG_PEEK) { | ||
4354 | unsigned long cpu_flags; | ||
4355 | |||
4356 | sctp_spin_lock_irqsave(&sk->sk_receive_queue.lock, | ||
4357 | cpu_flags); | ||
4358 | skb = skb_peek(&sk->sk_receive_queue); | ||
4359 | if (skb) | ||
4360 | atomic_inc(&skb->users); | ||
4361 | sctp_spin_unlock_irqrestore(&sk->sk_receive_queue.lock, | ||
4362 | cpu_flags); | ||
4363 | } else { | ||
4364 | skb = skb_dequeue(&sk->sk_receive_queue); | ||
4365 | } | ||
4366 | |||
4367 | if (skb) | ||
4368 | return skb; | ||
4369 | |||
4370 | if (sk->sk_shutdown & RCV_SHUTDOWN) | ||
4371 | break; | ||
4372 | |||
4373 | /* User doesn't want to wait. */ | ||
4374 | error = -EAGAIN; | ||
4375 | if (!timeo) | ||
4376 | goto no_packet; | ||
4377 | } while (sctp_wait_for_packet(sk, err, &timeo) == 0); | ||
4378 | |||
4379 | return NULL; | ||
4380 | |||
4381 | no_packet: | ||
4382 | *err = error; | ||
4383 | return NULL; | ||
4384 | } | ||
4385 | |||
4386 | /* If sndbuf has changed, wake up per association sndbuf waiters. */ | ||
4387 | static void __sctp_write_space(struct sctp_association *asoc) | ||
4388 | { | ||
4389 | struct sock *sk = asoc->base.sk; | ||
4390 | struct socket *sock = sk->sk_socket; | ||
4391 | |||
4392 | if ((sctp_wspace(asoc) > 0) && sock) { | ||
4393 | if (waitqueue_active(&asoc->wait)) | ||
4394 | wake_up_interruptible(&asoc->wait); | ||
4395 | |||
4396 | if (sctp_writeable(sk)) { | ||
4397 | if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) | ||
4398 | wake_up_interruptible(sk->sk_sleep); | ||
4399 | |||
4400 | /* Note that we try to include the Async I/O support | ||
4401 | * here by modeling from the current TCP/UDP code. | ||
4402 | * We have not tested with it yet. | ||
4403 | */ | ||
4404 | if (sock->fasync_list && | ||
4405 | !(sk->sk_shutdown & SEND_SHUTDOWN)) | ||
4406 | sock_wake_async(sock, 2, POLL_OUT); | ||
4407 | } | ||
4408 | } | ||
4409 | } | ||
4410 | |||
4411 | /* Do accounting for the sndbuf space. | ||
4412 | * Decrement the used sndbuf space of the corresponding association by the | ||
4413 | * data size which was just transmitted(freed). | ||
4414 | */ | ||
4415 | static void sctp_wfree(struct sk_buff *skb) | ||
4416 | { | ||
4417 | struct sctp_association *asoc; | ||
4418 | struct sctp_chunk *chunk; | ||
4419 | struct sock *sk; | ||
4420 | |||
4421 | /* Get the saved chunk pointer. */ | ||
4422 | chunk = *((struct sctp_chunk **)(skb->cb)); | ||
4423 | asoc = chunk->asoc; | ||
4424 | sk = asoc->base.sk; | ||
4425 | asoc->sndbuf_used -= SCTP_DATA_SNDSIZE(chunk); | ||
4426 | sk->sk_wmem_queued -= SCTP_DATA_SNDSIZE(chunk); | ||
4427 | __sctp_write_space(asoc); | ||
4428 | |||
4429 | sctp_association_put(asoc); | ||
4430 | } | ||
4431 | |||
4432 | /* Helper function to wait for space in the sndbuf. */ | ||
4433 | static int sctp_wait_for_sndbuf(struct sctp_association *asoc, long *timeo_p, | ||
4434 | size_t msg_len) | ||
4435 | { | ||
4436 | struct sock *sk = asoc->base.sk; | ||
4437 | int err = 0; | ||
4438 | long current_timeo = *timeo_p; | ||
4439 | DEFINE_WAIT(wait); | ||
4440 | |||
4441 | SCTP_DEBUG_PRINTK("wait_for_sndbuf: asoc=%p, timeo=%ld, msg_len=%zu\n", | ||
4442 | asoc, (long)(*timeo_p), msg_len); | ||
4443 | |||
4444 | /* Increment the association's refcnt. */ | ||
4445 | sctp_association_hold(asoc); | ||
4446 | |||
4447 | /* Wait on the association specific sndbuf space. */ | ||
4448 | for (;;) { | ||
4449 | prepare_to_wait_exclusive(&asoc->wait, &wait, | ||
4450 | TASK_INTERRUPTIBLE); | ||
4451 | if (!*timeo_p) | ||
4452 | goto do_nonblock; | ||
4453 | if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || | ||
4454 | asoc->base.dead) | ||
4455 | goto do_error; | ||
4456 | if (signal_pending(current)) | ||
4457 | goto do_interrupted; | ||
4458 | if (msg_len <= sctp_wspace(asoc)) | ||
4459 | break; | ||
4460 | |||
4461 | /* Let another process have a go, since we are going | ||
4462 | * to sleep anyway. | ||
4463 | */ | ||
4464 | sctp_release_sock(sk); | ||
4465 | current_timeo = schedule_timeout(current_timeo); | ||
4466 | sctp_lock_sock(sk); | ||
4467 | |||
4468 | *timeo_p = current_timeo; | ||
4469 | } | ||
4470 | |||
4471 | out: | ||
4472 | finish_wait(&asoc->wait, &wait); | ||
4473 | |||
4474 | /* Release the association's refcnt. */ | ||
4475 | sctp_association_put(asoc); | ||
4476 | |||
4477 | return err; | ||
4478 | |||
4479 | do_error: | ||
4480 | err = -EPIPE; | ||
4481 | goto out; | ||
4482 | |||
4483 | do_interrupted: | ||
4484 | err = sock_intr_errno(*timeo_p); | ||
4485 | goto out; | ||
4486 | |||
4487 | do_nonblock: | ||
4488 | err = -EAGAIN; | ||
4489 | goto out; | ||
4490 | } | ||
4491 | |||
4492 | /* If socket sndbuf has changed, wake up all per association waiters. */ | ||
4493 | void sctp_write_space(struct sock *sk) | ||
4494 | { | ||
4495 | struct sctp_association *asoc; | ||
4496 | struct list_head *pos; | ||
4497 | |||
4498 | /* Wake up the tasks in each wait queue. */ | ||
4499 | list_for_each(pos, &((sctp_sk(sk))->ep->asocs)) { | ||
4500 | asoc = list_entry(pos, struct sctp_association, asocs); | ||
4501 | __sctp_write_space(asoc); | ||
4502 | } | ||
4503 | } | ||
4504 | |||
4505 | /* Is there any sndbuf space available on the socket? | ||
4506 | * | ||
4507 | * Note that wmem_queued is the sum of the send buffers on all of the | ||
4508 | * associations on the same socket. For a UDP-style socket with | ||
4509 | * multiple associations, it is possible for it to be "unwriteable" | ||
4510 | * prematurely. I assume that this is acceptable because | ||
4511 | * a premature "unwriteable" is better than an accidental "writeable" which | ||
4512 | * would cause an unwanted block under certain circumstances. For the 1-1 | ||
4513 | * UDP-style sockets or TCP-style sockets, this code should work. | ||
4514 | * - Daisy | ||
4515 | */ | ||
4516 | static int sctp_writeable(struct sock *sk) | ||
4517 | { | ||
4518 | int amt = 0; | ||
4519 | |||
4520 | amt = sk->sk_sndbuf - sk->sk_wmem_queued; | ||
4521 | if (amt < 0) | ||
4522 | amt = 0; | ||
4523 | return amt; | ||
4524 | } | ||
4525 | |||
4526 | /* Wait for an association to go into ESTABLISHED state. If timeout is 0, | ||
4527 | * returns immediately with EINPROGRESS. | ||
4528 | */ | ||
4529 | static int sctp_wait_for_connect(struct sctp_association *asoc, long *timeo_p) | ||
4530 | { | ||
4531 | struct sock *sk = asoc->base.sk; | ||
4532 | int err = 0; | ||
4533 | long current_timeo = *timeo_p; | ||
4534 | DEFINE_WAIT(wait); | ||
4535 | |||
4536 | SCTP_DEBUG_PRINTK("%s: asoc=%p, timeo=%ld\n", __FUNCTION__, asoc, | ||
4537 | (long)(*timeo_p)); | ||
4538 | |||
4539 | /* Increment the association's refcnt. */ | ||
4540 | sctp_association_hold(asoc); | ||
4541 | |||
4542 | for (;;) { | ||
4543 | prepare_to_wait_exclusive(&asoc->wait, &wait, | ||
4544 | TASK_INTERRUPTIBLE); | ||
4545 | if (!*timeo_p) | ||
4546 | goto do_nonblock; | ||
4547 | if (sk->sk_shutdown & RCV_SHUTDOWN) | ||
4548 | break; | ||
4549 | if (sk->sk_err || asoc->state >= SCTP_STATE_SHUTDOWN_PENDING || | ||
4550 | asoc->base.dead) | ||
4551 | goto do_error; | ||
4552 | if (signal_pending(current)) | ||
4553 | goto do_interrupted; | ||
4554 | |||
4555 | if (sctp_state(asoc, ESTABLISHED)) | ||
4556 | break; | ||
4557 | |||
4558 | /* Let another process have a go, since we are going | ||
4559 | * to sleep anyway. | ||
4560 | */ | ||
4561 | sctp_release_sock(sk); | ||
4562 | current_timeo = schedule_timeout(current_timeo); | ||
4563 | sctp_lock_sock(sk); | ||
4564 | |||
4565 | *timeo_p = current_timeo; | ||
4566 | } | ||
4567 | |||
4568 | out: | ||
4569 | finish_wait(&asoc->wait, &wait); | ||
4570 | |||
4571 | /* Release the association's refcnt. */ | ||
4572 | sctp_association_put(asoc); | ||
4573 | |||
4574 | return err; | ||
4575 | |||
4576 | do_error: | ||
4577 | if (asoc->counters[SCTP_COUNTER_INIT_ERROR] + 1 >= | ||
4578 | asoc->max_init_attempts) | ||
4579 | err = -ETIMEDOUT; | ||
4580 | else | ||
4581 | err = -ECONNREFUSED; | ||
4582 | goto out; | ||
4583 | |||
4584 | do_interrupted: | ||
4585 | err = sock_intr_errno(*timeo_p); | ||
4586 | goto out; | ||
4587 | |||
4588 | do_nonblock: | ||
4589 | err = -EINPROGRESS; | ||
4590 | goto out; | ||
4591 | } | ||
4592 | |||
4593 | static int sctp_wait_for_accept(struct sock *sk, long timeo) | ||
4594 | { | ||
4595 | struct sctp_endpoint *ep; | ||
4596 | int err = 0; | ||
4597 | DEFINE_WAIT(wait); | ||
4598 | |||
4599 | ep = sctp_sk(sk)->ep; | ||
4600 | |||
4601 | |||
4602 | for (;;) { | ||
4603 | prepare_to_wait_exclusive(sk->sk_sleep, &wait, | ||
4604 | TASK_INTERRUPTIBLE); | ||
4605 | |||
4606 | if (list_empty(&ep->asocs)) { | ||
4607 | sctp_release_sock(sk); | ||
4608 | timeo = schedule_timeout(timeo); | ||
4609 | sctp_lock_sock(sk); | ||
4610 | } | ||
4611 | |||
4612 | err = -EINVAL; | ||
4613 | if (!sctp_sstate(sk, LISTENING)) | ||
4614 | break; | ||
4615 | |||
4616 | err = 0; | ||
4617 | if (!list_empty(&ep->asocs)) | ||
4618 | break; | ||
4619 | |||
4620 | err = sock_intr_errno(timeo); | ||
4621 | if (signal_pending(current)) | ||
4622 | break; | ||
4623 | |||
4624 | err = -EAGAIN; | ||
4625 | if (!timeo) | ||
4626 | break; | ||
4627 | } | ||
4628 | |||
4629 | finish_wait(sk->sk_sleep, &wait); | ||
4630 | |||
4631 | return err; | ||
4632 | } | ||
4633 | |||
4634 | void sctp_wait_for_close(struct sock *sk, long timeout) | ||
4635 | { | ||
4636 | DEFINE_WAIT(wait); | ||
4637 | |||
4638 | do { | ||
4639 | prepare_to_wait(sk->sk_sleep, &wait, TASK_INTERRUPTIBLE); | ||
4640 | if (list_empty(&sctp_sk(sk)->ep->asocs)) | ||
4641 | break; | ||
4642 | sctp_release_sock(sk); | ||
4643 | timeout = schedule_timeout(timeout); | ||
4644 | sctp_lock_sock(sk); | ||
4645 | } while (!signal_pending(current) && timeout); | ||
4646 | |||
4647 | finish_wait(sk->sk_sleep, &wait); | ||
4648 | } | ||
4649 | |||
4650 | /* Populate the fields of the newsk from the oldsk and migrate the assoc | ||
4651 | * and its messages to the newsk. | ||
4652 | */ | ||
4653 | static void sctp_sock_migrate(struct sock *oldsk, struct sock *newsk, | ||
4654 | struct sctp_association *assoc, | ||
4655 | sctp_socket_type_t type) | ||
4656 | { | ||
4657 | struct sctp_sock *oldsp = sctp_sk(oldsk); | ||
4658 | struct sctp_sock *newsp = sctp_sk(newsk); | ||
4659 | struct sctp_bind_bucket *pp; /* hash list port iterator */ | ||
4660 | struct sctp_endpoint *newep = newsp->ep; | ||
4661 | struct sk_buff *skb, *tmp; | ||
4662 | struct sctp_ulpevent *event; | ||
4663 | |||
4664 | /* Migrate socket buffer sizes and all the socket level options to the | ||
4665 | * new socket. | ||
4666 | */ | ||
4667 | newsk->sk_sndbuf = oldsk->sk_sndbuf; | ||
4668 | newsk->sk_rcvbuf = oldsk->sk_rcvbuf; | ||
4669 | /* Brute force copy old sctp opt. */ | ||
4670 | inet_sk_copy_descendant(newsk, oldsk); | ||
4671 | |||
4672 | /* Restore the ep value that was overwritten with the above structure | ||
4673 | * copy. | ||
4674 | */ | ||
4675 | newsp->ep = newep; | ||
4676 | newsp->hmac = NULL; | ||
4677 | |||
4678 | /* Hook this new socket in to the bind_hash list. */ | ||
4679 | pp = sctp_sk(oldsk)->bind_hash; | ||
4680 | sk_add_bind_node(newsk, &pp->owner); | ||
4681 | sctp_sk(newsk)->bind_hash = pp; | ||
4682 | inet_sk(newsk)->num = inet_sk(oldsk)->num; | ||
4683 | |||
4684 | /* Move any messages in the old socket's receive queue that are for the | ||
4685 | * peeled off association to the new socket's receive queue. | ||
4686 | */ | ||
4687 | sctp_skb_for_each(skb, &oldsk->sk_receive_queue, tmp) { | ||
4688 | event = sctp_skb2event(skb); | ||
4689 | if (event->asoc == assoc) { | ||
4690 | __skb_unlink(skb, skb->list); | ||
4691 | __skb_queue_tail(&newsk->sk_receive_queue, skb); | ||
4692 | } | ||
4693 | } | ||
4694 | |||
4695 | /* Clean up any messages pending delivery due to partial | ||
4696 | * delivery. Three cases: | ||
4697 | * 1) No partial deliver; no work. | ||
4698 | * 2) Peeling off partial delivery; keep pd_lobby in new pd_lobby. | ||
4699 | * 3) Peeling off non-partial delivery; move pd_lobby to receive_queue. | ||
4700 | */ | ||
4701 | skb_queue_head_init(&newsp->pd_lobby); | ||
4702 | sctp_sk(newsk)->pd_mode = assoc->ulpq.pd_mode; | ||
4703 | |||
4704 | if (sctp_sk(oldsk)->pd_mode) { | ||
4705 | struct sk_buff_head *queue; | ||
4706 | |||
4707 | /* Decide which queue to move pd_lobby skbs to. */ | ||
4708 | if (assoc->ulpq.pd_mode) | ||
4709 | queue = &newsp->pd_lobby; | ||
4710 | else | ||
4711 | queue = &newsk->sk_receive_queue; | ||
4712 | |||
4713 | /* Walk through the pd_lobby, looking for skbs that | ||
4714 | * need to be moved to the new socket. | ||
4715 | */ | ||
4716 | sctp_skb_for_each(skb, &oldsp->pd_lobby, tmp) { | ||
4717 | event = sctp_skb2event(skb); | ||
4718 | if (event->asoc == assoc) { | ||
4719 | __skb_unlink(skb, skb->list); | ||
4720 | __skb_queue_tail(queue, skb); | ||
4721 | } | ||
4722 | } | ||
4723 | |||
4724 | /* Clear up any skbs waiting for the partial | ||
4725 | * delivery to finish. | ||
4726 | */ | ||
4727 | if (assoc->ulpq.pd_mode) | ||
4728 | sctp_clear_pd(oldsk); | ||
4729 | |||
4730 | } | ||
4731 | |||
4732 | /* Set the type of socket to indicate that it is peeled off from the | ||
4733 | * original UDP-style socket or created with the accept() call on a | ||
4734 | * TCP-style socket. | ||
4735 | */ | ||
4736 | newsp->type = type; | ||
4737 | |||
4738 | /* Migrate the association to the new socket. */ | ||
4739 | sctp_assoc_migrate(assoc, newsk); | ||
4740 | |||
4741 | /* If the association on the newsk is already closed before accept() | ||
4742 | * is called, set RCV_SHUTDOWN flag. | ||
4743 | */ | ||
4744 | if (sctp_state(assoc, CLOSED) && sctp_style(newsk, TCP)) | ||
4745 | newsk->sk_shutdown |= RCV_SHUTDOWN; | ||
4746 | |||
4747 | newsk->sk_state = SCTP_SS_ESTABLISHED; | ||
4748 | } | ||
4749 | |||
4750 | /* This proto struct describes the ULP interface for SCTP. */ | ||
4751 | struct proto sctp_prot = { | ||
4752 | .name = "SCTP", | ||
4753 | .owner = THIS_MODULE, | ||
4754 | .close = sctp_close, | ||
4755 | .connect = sctp_connect, | ||
4756 | .disconnect = sctp_disconnect, | ||
4757 | .accept = sctp_accept, | ||
4758 | .ioctl = sctp_ioctl, | ||
4759 | .init = sctp_init_sock, | ||
4760 | .destroy = sctp_destroy_sock, | ||
4761 | .shutdown = sctp_shutdown, | ||
4762 | .setsockopt = sctp_setsockopt, | ||
4763 | .getsockopt = sctp_getsockopt, | ||
4764 | .sendmsg = sctp_sendmsg, | ||
4765 | .recvmsg = sctp_recvmsg, | ||
4766 | .bind = sctp_bind, | ||
4767 | .backlog_rcv = sctp_backlog_rcv, | ||
4768 | .hash = sctp_hash, | ||
4769 | .unhash = sctp_unhash, | ||
4770 | .get_port = sctp_get_port, | ||
4771 | .obj_size = sizeof(struct sctp_sock), | ||
4772 | }; | ||
4773 | |||
4774 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
4775 | struct proto sctpv6_prot = { | ||
4776 | .name = "SCTPv6", | ||
4777 | .owner = THIS_MODULE, | ||
4778 | .close = sctp_close, | ||
4779 | .connect = sctp_connect, | ||
4780 | .disconnect = sctp_disconnect, | ||
4781 | .accept = sctp_accept, | ||
4782 | .ioctl = sctp_ioctl, | ||
4783 | .init = sctp_init_sock, | ||
4784 | .destroy = sctp_destroy_sock, | ||
4785 | .shutdown = sctp_shutdown, | ||
4786 | .setsockopt = sctp_setsockopt, | ||
4787 | .getsockopt = sctp_getsockopt, | ||
4788 | .sendmsg = sctp_sendmsg, | ||
4789 | .recvmsg = sctp_recvmsg, | ||
4790 | .bind = sctp_bind, | ||
4791 | .backlog_rcv = sctp_backlog_rcv, | ||
4792 | .hash = sctp_hash, | ||
4793 | .unhash = sctp_unhash, | ||
4794 | .get_port = sctp_get_port, | ||
4795 | .obj_size = sizeof(struct sctp6_sock), | ||
4796 | }; | ||
4797 | #endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */ | ||
diff --git a/net/sctp/ssnmap.c b/net/sctp/ssnmap.c new file mode 100644 index 000000000000..e627d2b451b6 --- /dev/null +++ b/net/sctp/ssnmap.c | |||
@@ -0,0 +1,131 @@ | |||
1 | /* SCTP kernel reference Implementation | ||
2 | * Copyright (c) 2003 International Business Machines, Corp. | ||
3 | * | ||
4 | * This file is part of the SCTP kernel reference Implementation | ||
5 | * | ||
6 | * These functions manipulate the SCTP SSN tracker. | ||
7 | * | ||
8 | * The SCTP reference implementation is free software; | ||
9 | * you can redistribute it and/or modify it under the terms of | ||
10 | * the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2, or (at your option) | ||
12 | * any later version. | ||
13 | * | ||
14 | * The SCTP reference implementation is distributed in the hope that it | ||
15 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
16 | * ************************ | ||
17 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
18 | * See the GNU General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with GNU CC; see the file COPYING. If not, write to | ||
22 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
23 | * Boston, MA 02111-1307, USA. | ||
24 | * | ||
25 | * Please send any bug reports or fixes you make to the | ||
26 | * email address(es): | ||
27 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
28 | * | ||
29 | * Or submit a bug report through the following website: | ||
30 | * http://www.sf.net/projects/lksctp | ||
31 | * | ||
32 | * Written or modified by: | ||
33 | * Jon Grimm <jgrimm@us.ibm.com> | ||
34 | * | ||
35 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
36 | * be incorporated into the next SCTP release. | ||
37 | */ | ||
38 | |||
39 | #include <linux/types.h> | ||
40 | #include <net/sctp/sctp.h> | ||
41 | #include <net/sctp/sm.h> | ||
42 | |||
43 | #define MAX_KMALLOC_SIZE 131072 | ||
44 | |||
45 | static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in, | ||
46 | __u16 out); | ||
47 | |||
48 | /* Storage size needed for map includes 2 headers and then the | ||
49 | * specific needs of in or out streams. | ||
50 | */ | ||
51 | static inline size_t sctp_ssnmap_size(__u16 in, __u16 out) | ||
52 | { | ||
53 | return sizeof(struct sctp_ssnmap) + (in + out) * sizeof(__u16); | ||
54 | } | ||
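As a quick worked example of this formula (illustrative stream counts, not defaults taken from this patch): an association negotiated with 10 inbound and 10 outbound streams needs sizeof(struct sctp_ssnmap) + (10 + 10) * sizeof(__u16), i.e. the fixed header plus 40 bytes of SSN state. That is far below MAX_KMALLOC_SIZE, so sctp_ssnmap_new() below takes the kmalloc() path rather than __get_free_pages().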
55 | |||
56 | |||
57 | /* Create a new sctp_ssnmap. | ||
58 | * Allocate room to track SSNs for 'in' inbound and 'out' outbound streams. | ||
59 | */ | ||
60 | struct sctp_ssnmap *sctp_ssnmap_new(__u16 in, __u16 out, int gfp) | ||
61 | { | ||
62 | struct sctp_ssnmap *retval; | ||
63 | int size; | ||
64 | |||
65 | size = sctp_ssnmap_size(in, out); | ||
66 | if (size <= MAX_KMALLOC_SIZE) | ||
67 | retval = kmalloc(size, gfp); | ||
68 | else | ||
69 | retval = (struct sctp_ssnmap *) | ||
70 | __get_free_pages(gfp, get_order(size)); | ||
71 | if (!retval) | ||
72 | goto fail; | ||
73 | |||
74 | if (!sctp_ssnmap_init(retval, in, out)) | ||
75 | goto fail_map; | ||
76 | |||
77 | retval->malloced = 1; | ||
78 | SCTP_DBG_OBJCNT_INC(ssnmap); | ||
79 | |||
80 | return retval; | ||
81 | |||
82 | fail_map: | ||
83 | if (size <= MAX_KMALLOC_SIZE) | ||
84 | kfree(retval); | ||
85 | else | ||
86 | free_pages((unsigned long)retval, get_order(size)); | ||
87 | fail: | ||
88 | return NULL; | ||
89 | } | ||
90 | |||
91 | |||
92 | /* Initialize a block of memory as a ssnmap. */ | ||
93 | static struct sctp_ssnmap *sctp_ssnmap_init(struct sctp_ssnmap *map, __u16 in, | ||
94 | __u16 out) | ||
95 | { | ||
96 | memset(map, 0x00, sctp_ssnmap_size(in, out)); | ||
97 | |||
98 | /* Start 'in' stream just after the map header. */ | ||
99 | map->in.ssn = (__u16 *)&map[1]; | ||
100 | map->in.len = in; | ||
101 | |||
102 | /* Start 'out' stream just after 'in'. */ | ||
103 | map->out.ssn = &map->in.ssn[in]; | ||
104 | map->out.len = out; | ||
105 | |||
106 | return map; | ||
107 | } | ||
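The layout set up by sctp_ssnmap_init() is one contiguous allocation: the map header, then the 'in' SSN array, then the 'out' SSN array, which is exactly what sctp_ssnmap_size() accounts for. The stand-alone user-space sketch below mirrors that arithmetic with simplified demo_* stand-ins for the kernel structures (illustration only, not the real struct sctp_ssnmap):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel types (illustration only). */
struct demo_stream { uint16_t *ssn; uint16_t len; };
struct demo_ssnmap { struct demo_stream in, out; };

/* One header plus (in + out) 16-bit SSN slots, as in sctp_ssnmap_size(). */
static size_t demo_ssnmap_size(uint16_t in, uint16_t out)
{
        return sizeof(struct demo_ssnmap) + (in + out) * sizeof(uint16_t);
}

int main(void)
{
        uint16_t in = 10, out = 10;
        struct demo_ssnmap *map = calloc(1, demo_ssnmap_size(in, out));

        if (!map)
                return 1;

        /* 'in' SSNs start just past the header, 'out' SSNs just past 'in'. */
        map->in.ssn  = (uint16_t *)&map[1];
        map->in.len  = in;
        map->out.ssn = &map->in.ssn[in];
        map->out.len = out;

        printf("map uses %zu bytes in a single allocation\n",
               demo_ssnmap_size(in, out));
        free(map);
        return 0;
}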
108 | |||
109 | /* Clear out the ssnmap streams. */ | ||
110 | void sctp_ssnmap_clear(struct sctp_ssnmap *map) | ||
111 | { | ||
112 | size_t size; | ||
113 | |||
114 | size = (map->in.len + map->out.len) * sizeof(__u16); | ||
115 | memset(map->in.ssn, 0x00, size); | ||
116 | } | ||
117 | |||
118 | /* Dispose of a ssnmap. */ | ||
119 | void sctp_ssnmap_free(struct sctp_ssnmap *map) | ||
120 | { | ||
121 | if (map && map->malloced) { | ||
122 | int size; | ||
123 | |||
124 | size = sctp_ssnmap_size(map->in.len, map->out.len); | ||
125 | if (size <= MAX_KMALLOC_SIZE) | ||
126 | kfree(map); | ||
127 | else | ||
128 | free_pages((unsigned long)map, get_order(size)); | ||
129 | SCTP_DBG_OBJCNT_DEC(ssnmap); | ||
130 | } | ||
131 | } | ||
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c new file mode 100644 index 000000000000..89fa20c73a5c --- /dev/null +++ b/net/sctp/sysctl.c | |||
@@ -0,0 +1,251 @@ | |||
1 | /* SCTP kernel reference Implementation | ||
2 | * (C) Copyright IBM Corp. 2002, 2004 | ||
3 | * Copyright (c) 2002 Intel Corp. | ||
4 | * | ||
5 | * This file is part of the SCTP kernel reference Implementation | ||
6 | * | ||
7 | * Sysctl related interfaces for SCTP. | ||
8 | * | ||
9 | * The SCTP reference implementation is free software; | ||
10 | * you can redistribute it and/or modify it under the terms of | ||
11 | * the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2, or (at your option) | ||
13 | * any later version. | ||
14 | * | ||
15 | * The SCTP reference implementation is distributed in the hope that it | ||
16 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
17 | * ************************ | ||
18 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
19 | * See the GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with GNU CC; see the file COPYING. If not, write to | ||
23 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
24 | * Boston, MA 02111-1307, USA. | ||
25 | * | ||
26 | * Please send any bug reports or fixes you make to the | ||
27 | * email address(es): | ||
28 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
29 | * | ||
30 | * Or submit a bug report through the following website: | ||
31 | * http://www.sf.net/projects/lksctp | ||
32 | * | ||
33 | * Written or modified by: | ||
34 | * Mingqin Liu <liuming@us.ibm.com> | ||
35 | * Jon Grimm <jgrimm@us.ibm.com> | ||
36 | * Ardelle Fan <ardelle.fan@intel.com> | ||
37 | * Ryan Layer <rmlayer@us.ibm.com> | ||
38 | * Sridhar Samudrala <sri@us.ibm.com> | ||
39 | * | ||
40 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
41 | * be incorporated into the next SCTP release. | ||
42 | */ | ||
43 | |||
44 | #include <net/sctp/structs.h> | ||
45 | #include <linux/sysctl.h> | ||
46 | |||
47 | static ctl_handler sctp_sysctl_jiffies_ms; | ||
48 | static long rto_timer_min = 1; | ||
49 | static long rto_timer_max = 86400000; /* One day */ | ||
50 | |||
51 | static ctl_table sctp_table[] = { | ||
52 | { | ||
53 | .ctl_name = NET_SCTP_RTO_INITIAL, | ||
54 | .procname = "rto_initial", | ||
55 | .data = &sctp_rto_initial, | ||
56 | .maxlen = sizeof(long), | ||
57 | .mode = 0644, | ||
58 | .proc_handler = &proc_doulongvec_ms_jiffies_minmax, | ||
59 | .strategy = &sctp_sysctl_jiffies_ms, | ||
60 | .extra1 = &rto_timer_min, | ||
61 | .extra2 = &rto_timer_max | ||
62 | }, | ||
63 | { | ||
64 | .ctl_name = NET_SCTP_RTO_MIN, | ||
65 | .procname = "rto_min", | ||
66 | .data = &sctp_rto_min, | ||
67 | .maxlen = sizeof(long), | ||
68 | .mode = 0644, | ||
69 | .proc_handler = &proc_doulongvec_ms_jiffies_minmax, | ||
70 | .strategy = &sctp_sysctl_jiffies_ms, | ||
71 | .extra1 = &rto_timer_min, | ||
72 | .extra2 = &rto_timer_max | ||
73 | }, | ||
74 | { | ||
75 | .ctl_name = NET_SCTP_RTO_MAX, | ||
76 | .procname = "rto_max", | ||
77 | .data = &sctp_rto_max, | ||
78 | .maxlen = sizeof(long), | ||
79 | .mode = 0644, | ||
80 | .proc_handler = &proc_doulongvec_ms_jiffies_minmax, | ||
81 | .strategy = &sctp_sysctl_jiffies_ms, | ||
82 | .extra1 = &rto_timer_min, | ||
83 | .extra2 = &rto_timer_max | ||
84 | }, | ||
85 | { | ||
86 | .ctl_name = NET_SCTP_VALID_COOKIE_LIFE, | ||
87 | .procname = "valid_cookie_life", | ||
88 | .data = &sctp_valid_cookie_life, | ||
89 | .maxlen = sizeof(long), | ||
90 | .mode = 0644, | ||
91 | .proc_handler = &proc_doulongvec_ms_jiffies_minmax, | ||
92 | .strategy = &sctp_sysctl_jiffies_ms, | ||
93 | .extra1 = &rto_timer_min, | ||
94 | .extra2 = &rto_timer_max | ||
95 | }, | ||
96 | { | ||
97 | .ctl_name = NET_SCTP_MAX_BURST, | ||
98 | .procname = "max_burst", | ||
99 | .data = &sctp_max_burst, | ||
100 | .maxlen = sizeof(int), | ||
101 | .mode = 0644, | ||
102 | .proc_handler = &proc_dointvec | ||
103 | }, | ||
104 | { | ||
105 | .ctl_name = NET_SCTP_ASSOCIATION_MAX_RETRANS, | ||
106 | .procname = "association_max_retrans", | ||
107 | .data = &sctp_max_retrans_association, | ||
108 | .maxlen = sizeof(int), | ||
109 | .mode = 0644, | ||
110 | .proc_handler = &proc_dointvec | ||
111 | }, | ||
112 | { | ||
113 | .ctl_name = NET_SCTP_PATH_MAX_RETRANS, | ||
114 | .procname = "path_max_retrans", | ||
115 | .data = &sctp_max_retrans_path, | ||
116 | .maxlen = sizeof(int), | ||
117 | .mode = 0644, | ||
118 | .proc_handler = &proc_dointvec | ||
119 | }, | ||
120 | { | ||
121 | .ctl_name = NET_SCTP_MAX_INIT_RETRANSMITS, | ||
122 | .procname = "max_init_retransmits", | ||
123 | .data = &sctp_max_retrans_init, | ||
124 | .maxlen = sizeof(int), | ||
125 | .mode = 0644, | ||
126 | .proc_handler = &proc_dointvec | ||
127 | }, | ||
128 | { | ||
129 | .ctl_name = NET_SCTP_HB_INTERVAL, | ||
130 | .procname = "hb_interval", | ||
131 | .data = &sctp_hb_interval, | ||
132 | .maxlen = sizeof(long), | ||
133 | .mode = 0644, | ||
134 | .proc_handler = &proc_doulongvec_ms_jiffies_minmax, | ||
135 | .strategy = &sctp_sysctl_jiffies_ms, | ||
136 | .extra1 = &rto_timer_min, | ||
137 | .extra2 = &rto_timer_max | ||
138 | }, | ||
139 | { | ||
140 | .ctl_name = NET_SCTP_PRESERVE_ENABLE, | ||
141 | .procname = "cookie_preserve_enable", | ||
142 | .data = &sctp_cookie_preserve_enable, | ||
143 | .maxlen = sizeof(long), | ||
144 | .mode = 0644, | ||
145 | .proc_handler = &proc_doulongvec_ms_jiffies_minmax, | ||
146 | .strategy = &sctp_sysctl_jiffies_ms, | ||
147 | .extra1 = &rto_timer_min, | ||
148 | .extra2 = &rto_timer_max | ||
149 | }, | ||
150 | { | ||
151 | .ctl_name = NET_SCTP_RTO_ALPHA, | ||
152 | .procname = "rto_alpha_exp_divisor", | ||
153 | .data = &sctp_rto_alpha, | ||
154 | .maxlen = sizeof(int), | ||
155 | .mode = 0644, | ||
156 | .proc_handler = &proc_dointvec | ||
157 | }, | ||
158 | { | ||
159 | .ctl_name = NET_SCTP_RTO_BETA, | ||
160 | .procname = "rto_beta_exp_divisor", | ||
161 | .data = &sctp_rto_beta, | ||
162 | .maxlen = sizeof(int), | ||
163 | .mode = 0644, | ||
164 | .proc_handler = &proc_dointvec | ||
165 | }, | ||
166 | { | ||
167 | .ctl_name = NET_SCTP_ADDIP_ENABLE, | ||
168 | .procname = "addip_enable", | ||
169 | .data = &sctp_addip_enable, | ||
170 | .maxlen = sizeof(int), | ||
171 | .mode = 0644, | ||
172 | .proc_handler = &proc_dointvec | ||
173 | }, | ||
174 | { | ||
175 | .ctl_name = NET_SCTP_PRSCTP_ENABLE, | ||
176 | .procname = "prsctp_enable", | ||
177 | .data = &sctp_prsctp_enable, | ||
178 | .maxlen = sizeof(int), | ||
179 | .mode = 0644, | ||
180 | .proc_handler = &proc_dointvec | ||
181 | }, | ||
182 | { .ctl_name = 0 } | ||
183 | }; | ||
184 | |||
185 | static ctl_table sctp_net_table[] = { | ||
186 | { | ||
187 | .ctl_name = NET_SCTP, | ||
188 | .procname = "sctp", | ||
189 | .mode = 0555, | ||
190 | .child = sctp_table | ||
191 | }, | ||
192 | { .ctl_name = 0 } | ||
193 | }; | ||
194 | |||
195 | static ctl_table sctp_root_table[] = { | ||
196 | { | ||
197 | .ctl_name = CTL_NET, | ||
198 | .procname = "net", | ||
199 | .mode = 0555, | ||
200 | .child = sctp_net_table | ||
201 | }, | ||
202 | { .ctl_name = 0 } | ||
203 | }; | ||
204 | |||
205 | static struct ctl_table_header * sctp_sysctl_header; | ||
206 | |||
207 | /* Sysctl registration. */ | ||
208 | void sctp_sysctl_register(void) | ||
209 | { | ||
210 | sctp_sysctl_header = register_sysctl_table(sctp_root_table, 0); | ||
211 | } | ||
212 | |||
213 | /* Sysctl deregistration. */ | ||
214 | void sctp_sysctl_unregister(void) | ||
215 | { | ||
216 | unregister_sysctl_table(sctp_sysctl_header); | ||
217 | } | ||
218 | |||
219 | /* Strategy function to convert jiffies to milliseconds. */ | ||
220 | static int sctp_sysctl_jiffies_ms(ctl_table *table, int __user *name, int nlen, | ||
221 | void __user *oldval, size_t __user *oldlenp, | ||
222 | void __user *newval, size_t newlen, void **context) { | ||
223 | |||
224 | if (oldval) { | ||
225 | size_t olen; | ||
226 | |||
227 | if (oldlenp) { | ||
228 | if (get_user(olen, oldlenp)) | ||
229 | return -EFAULT; | ||
230 | |||
231 | if (olen != sizeof (int)) | ||
232 | return -EINVAL; | ||
233 | } | ||
234 | if (put_user((*(int *)(table->data) * 1000) / HZ, | ||
235 | (int __user *)oldval) || | ||
236 | (oldlenp && put_user(sizeof (int), oldlenp))) | ||
237 | return -EFAULT; | ||
238 | } | ||
239 | if (newval && newlen) { | ||
240 | int new; | ||
241 | |||
242 | if (newlen != sizeof (int)) | ||
243 | return -EINVAL; | ||
244 | |||
245 | if (get_user(new, (int __user *)newval)) | ||
246 | return -EFAULT; | ||
247 | |||
248 | *(int *)(table->data) = (new * HZ) / 1000; | ||
249 | } | ||
250 | return 1; | ||
251 | } | ||
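The strategy routine above converts between the milliseconds shown to userspace and the jiffies stored internally: a read reports value * 1000 / HZ and a write stores value * HZ / 1000. A minimal user-space sketch of that round trip, assuming HZ = 250 purely for illustration, shows the integer truncation a value not aligned to the tick length undergoes:

#include <stdio.h>

#define HZ 250                          /* assumed tick rate, illustration only */

/* Mirror of the conversion done on read (jiffies -> ms). */
static int jiffies_to_ms(int jiffies) { return (jiffies * 1000) / HZ; }

/* Mirror of the conversion done on write (ms -> jiffies). */
static int ms_to_jiffies(int ms) { return (ms * HZ) / 1000; }

int main(void)
{
        int ms_in = 3003;                       /* e.g. a write to rto_initial */
        int jiff = ms_to_jiffies(ms_in);        /* value stored internally */
        int ms_out = jiffies_to_ms(jiff);       /* value reported on read */

        /* With HZ = 250 one jiffy is 4 ms, so 3003 ms truncates to 3000 ms. */
        printf("wrote %d ms -> %d jiffies -> read back %d ms\n",
               ms_in, jiff, ms_out);
        return 0;
}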
diff --git a/net/sctp/transport.c b/net/sctp/transport.c new file mode 100644 index 000000000000..f30882e1e96a --- /dev/null +++ b/net/sctp/transport.c | |||
@@ -0,0 +1,514 @@ | |||
1 | /* SCTP kernel reference Implementation | ||
2 | * Copyright (c) 1999-2000 Cisco, Inc. | ||
3 | * Copyright (c) 1999-2001 Motorola, Inc. | ||
4 | * Copyright (c) 2001-2003 International Business Machines Corp. | ||
5 | * Copyright (c) 2001 Intel Corp. | ||
6 | * Copyright (c) 2001 La Monte H.P. Yarroll | ||
7 | * | ||
8 | * This file is part of the SCTP kernel reference Implementation | ||
9 | * | ||
10 | * This module provides the abstraction for an SCTP transport representing | ||
11 | * a remote transport address. For local transport addresses, we just use | ||
12 | * union sctp_addr. | ||
13 | * | ||
14 | * The SCTP reference implementation is free software; | ||
15 | * you can redistribute it and/or modify it under the terms of | ||
16 | * the GNU General Public License as published by | ||
17 | * the Free Software Foundation; either version 2, or (at your option) | ||
18 | * any later version. | ||
19 | * | ||
20 | * The SCTP reference implementation is distributed in the hope that it | ||
21 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
22 | * ************************ | ||
23 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
24 | * See the GNU General Public License for more details. | ||
25 | * | ||
26 | * You should have received a copy of the GNU General Public License | ||
27 | * along with GNU CC; see the file COPYING. If not, write to | ||
28 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
29 | * Boston, MA 02111-1307, USA. | ||
30 | * | ||
31 | * Please send any bug reports or fixes you make to the | ||
32 | * email address(es): | ||
33 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
34 | * | ||
35 | * Or submit a bug report through the following website: | ||
36 | * http://www.sf.net/projects/lksctp | ||
37 | * | ||
38 | * Written or modified by: | ||
39 | * La Monte H.P. Yarroll <piggy@acm.org> | ||
40 | * Karl Knutson <karl@athena.chicago.il.us> | ||
41 | * Jon Grimm <jgrimm@us.ibm.com> | ||
42 | * Xingang Guo <xingang.guo@intel.com> | ||
43 | * Hui Huang <hui.huang@nokia.com> | ||
44 | * Sridhar Samudrala <sri@us.ibm.com> | ||
45 | * Ardelle Fan <ardelle.fan@intel.com> | ||
46 | * | ||
47 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
48 | * be incorporated into the next SCTP release. | ||
49 | */ | ||
50 | |||
51 | #include <linux/types.h> | ||
52 | #include <net/sctp/sctp.h> | ||
53 | #include <net/sctp/sm.h> | ||
54 | |||
55 | /* 1st Level Abstractions. */ | ||
56 | |||
57 | /* Initialize a new transport from provided memory. */ | ||
58 | static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer, | ||
59 | const union sctp_addr *addr, | ||
60 | int gfp) | ||
61 | { | ||
62 | /* Copy in the address. */ | ||
63 | peer->ipaddr = *addr; | ||
64 | peer->af_specific = sctp_get_af_specific(addr->sa.sa_family); | ||
65 | peer->asoc = NULL; | ||
66 | |||
67 | peer->dst = NULL; | ||
68 | memset(&peer->saddr, 0, sizeof(union sctp_addr)); | ||
69 | |||
70 | /* From 6.3.1 RTO Calculation: | ||
71 | * | ||
72 | * C1) Until an RTT measurement has been made for a packet sent to the | ||
73 | * given destination transport address, set RTO to the protocol | ||
74 | * parameter 'RTO.Initial'. | ||
75 | */ | ||
76 | peer->rtt = 0; | ||
77 | peer->rto = sctp_rto_initial; | ||
78 | peer->rttvar = 0; | ||
79 | peer->srtt = 0; | ||
80 | peer->rto_pending = 0; | ||
81 | |||
82 | peer->last_time_heard = jiffies; | ||
83 | peer->last_time_used = jiffies; | ||
84 | peer->last_time_ecne_reduced = jiffies; | ||
85 | |||
86 | peer->active = SCTP_ACTIVE; | ||
87 | peer->hb_allowed = 0; | ||
88 | |||
89 | /* Initialize the default path max_retrans. */ | ||
90 | peer->max_retrans = sctp_max_retrans_path; | ||
91 | peer->error_count = 0; | ||
92 | |||
93 | INIT_LIST_HEAD(&peer->transmitted); | ||
94 | INIT_LIST_HEAD(&peer->send_ready); | ||
95 | INIT_LIST_HEAD(&peer->transports); | ||
96 | |||
97 | /* Set up the retransmission timer. */ | ||
98 | init_timer(&peer->T3_rtx_timer); | ||
99 | peer->T3_rtx_timer.function = sctp_generate_t3_rtx_event; | ||
100 | peer->T3_rtx_timer.data = (unsigned long)peer; | ||
101 | |||
102 | /* Set up the heartbeat timer. */ | ||
103 | init_timer(&peer->hb_timer); | ||
104 | peer->hb_interval = SCTP_DEFAULT_TIMEOUT_HEARTBEAT; | ||
105 | peer->hb_timer.function = sctp_generate_heartbeat_event; | ||
106 | peer->hb_timer.data = (unsigned long)peer; | ||
107 | |||
108 | atomic_set(&peer->refcnt, 1); | ||
109 | peer->dead = 0; | ||
110 | |||
111 | peer->malloced = 0; | ||
112 | |||
113 | /* Initialize the state information for SFR-CACC */ | ||
114 | peer->cacc.changeover_active = 0; | ||
115 | peer->cacc.cycling_changeover = 0; | ||
116 | peer->cacc.next_tsn_at_change = 0; | ||
117 | peer->cacc.cacc_saw_newack = 0; | ||
118 | |||
119 | return peer; | ||
120 | } | ||
121 | |||
122 | /* Allocate and initialize a new transport. */ | ||
123 | struct sctp_transport *sctp_transport_new(const union sctp_addr *addr, int gfp) | ||
124 | { | ||
125 | struct sctp_transport *transport; | ||
126 | |||
127 | transport = t_new(struct sctp_transport, gfp); | ||
128 | if (!transport) | ||
129 | goto fail; | ||
130 | |||
131 | if (!sctp_transport_init(transport, addr, gfp)) | ||
132 | goto fail_init; | ||
133 | |||
134 | transport->malloced = 1; | ||
135 | SCTP_DBG_OBJCNT_INC(transport); | ||
136 | |||
137 | return transport; | ||
138 | |||
139 | fail_init: | ||
140 | kfree(transport); | ||
141 | |||
142 | fail: | ||
143 | return NULL; | ||
144 | } | ||
145 | |||
146 | /* This transport is no longer needed. Free it up if possible, or | ||
147 | * delay until its last reference is dropped. | ||
148 | */ | ||
149 | void sctp_transport_free(struct sctp_transport *transport) | ||
150 | { | ||
151 | transport->dead = 1; | ||
152 | |||
153 | /* Try to delete the heartbeat timer. */ | ||
154 | if (del_timer(&transport->hb_timer)) | ||
155 | sctp_transport_put(transport); | ||
156 | |||
157 | /* Delete the T3_rtx timer if it's active. | ||
158 | * There is no point in not doing this now and letting | ||
159 | * the structure hang around in memory since we know | ||
160 | * the transport is going away. | ||
161 | */ | ||
162 | if (timer_pending(&transport->T3_rtx_timer) && | ||
163 | del_timer(&transport->T3_rtx_timer)) | ||
164 | sctp_transport_put(transport); | ||
165 | |||
166 | |||
167 | sctp_transport_put(transport); | ||
168 | } | ||
169 | |||
170 | /* Destroy the transport data structure. | ||
171 | * Assumes there are no more users of this structure. | ||
172 | */ | ||
173 | static void sctp_transport_destroy(struct sctp_transport *transport) | ||
174 | { | ||
175 | SCTP_ASSERT(transport->dead, "Transport is not dead", return); | ||
176 | |||
177 | if (transport->asoc) | ||
178 | sctp_association_put(transport->asoc); | ||
179 | |||
180 | sctp_packet_free(&transport->packet); | ||
181 | |||
182 | dst_release(transport->dst); | ||
183 | kfree(transport); | ||
184 | SCTP_DBG_OBJCNT_DEC(transport); | ||
185 | } | ||
186 | |||
187 | /* Start T3_rtx timer if it is not already running and update the heartbeat | ||
188 | * timer. This routine is called every time a DATA chunk is sent. | ||
189 | */ | ||
190 | void sctp_transport_reset_timers(struct sctp_transport *transport) | ||
191 | { | ||
192 | /* RFC 2960 6.3.2 Retransmission Timer Rules | ||
193 | * | ||
194 | * R1) Every time a DATA chunk is sent to any address (including a | ||
195 | * retransmission), if the T3-rtx timer of that address is not running | ||
196 | * start it running so that it will expire after the RTO of that | ||
197 | * address. | ||
198 | */ | ||
199 | |||
200 | if (!timer_pending(&transport->T3_rtx_timer)) | ||
201 | if (!mod_timer(&transport->T3_rtx_timer, | ||
202 | jiffies + transport->rto)) | ||
203 | sctp_transport_hold(transport); | ||
204 | |||
205 | /* When a data chunk is sent, reset the heartbeat interval. */ | ||
206 | if (!mod_timer(&transport->hb_timer, | ||
207 | sctp_transport_timeout(transport))) | ||
208 | sctp_transport_hold(transport); | ||
209 | } | ||
210 | |||
211 | /* This transport has been assigned to an association. | ||
212 | * Initialize fields from the association or from the sock itself. | ||
213 | * Register the reference count in the association. | ||
214 | */ | ||
215 | void sctp_transport_set_owner(struct sctp_transport *transport, | ||
216 | struct sctp_association *asoc) | ||
217 | { | ||
218 | transport->asoc = asoc; | ||
219 | sctp_association_hold(asoc); | ||
220 | } | ||
221 | |||
222 | /* Initialize the pmtu of a transport. */ | ||
223 | void sctp_transport_pmtu(struct sctp_transport *transport) | ||
224 | { | ||
225 | struct dst_entry *dst; | ||
226 | |||
227 | dst = transport->af_specific->get_dst(NULL, &transport->ipaddr, NULL); | ||
228 | |||
229 | if (dst) { | ||
230 | transport->pmtu = dst_mtu(dst); | ||
231 | dst_release(dst); | ||
232 | } else | ||
233 | transport->pmtu = SCTP_DEFAULT_MAXSEGMENT; | ||
234 | } | ||
235 | |||
236 | /* Caches the dst entry and source address for a transport's destination | ||
237 | * address. | ||
238 | */ | ||
239 | void sctp_transport_route(struct sctp_transport *transport, | ||
240 | union sctp_addr *saddr, struct sctp_sock *opt) | ||
241 | { | ||
242 | struct sctp_association *asoc = transport->asoc; | ||
243 | struct sctp_af *af = transport->af_specific; | ||
244 | union sctp_addr *daddr = &transport->ipaddr; | ||
245 | struct dst_entry *dst; | ||
246 | |||
247 | dst = af->get_dst(asoc, daddr, saddr); | ||
248 | |||
249 | if (saddr) | ||
250 | memcpy(&transport->saddr, saddr, sizeof(union sctp_addr)); | ||
251 | else | ||
252 | af->get_saddr(asoc, dst, daddr, &transport->saddr); | ||
253 | |||
254 | transport->dst = dst; | ||
255 | if (dst) { | ||
256 | transport->pmtu = dst_mtu(dst); | ||
257 | |||
258 | /* Initialize sk->sk_rcv_saddr, if the transport is the | ||
259 | * association's active path for getsockname(). | ||
260 | */ | ||
261 | if (asoc && (transport == asoc->peer.active_path)) | ||
262 | af->to_sk_saddr(&transport->saddr, asoc->base.sk); | ||
263 | } else | ||
264 | transport->pmtu = SCTP_DEFAULT_MAXSEGMENT; | ||
265 | } | ||
266 | |||
267 | /* Hold a reference to a transport. */ | ||
268 | void sctp_transport_hold(struct sctp_transport *transport) | ||
269 | { | ||
270 | atomic_inc(&transport->refcnt); | ||
271 | } | ||
272 | |||
273 | /* Release a reference to a transport and clean up | ||
274 | * if there are no more references. | ||
275 | */ | ||
276 | void sctp_transport_put(struct sctp_transport *transport) | ||
277 | { | ||
278 | if (atomic_dec_and_test(&transport->refcnt)) | ||
279 | sctp_transport_destroy(transport); | ||
280 | } | ||
281 | |||
282 | /* Update transport's RTO based on the newly calculated RTT. */ | ||
283 | void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt) | ||
284 | { | ||
285 | /* Check for valid transport. */ | ||
286 | SCTP_ASSERT(tp, "NULL transport", return); | ||
287 | |||
288 | /* We should not be doing any RTO updates unless rto_pending is set. */ | ||
289 | SCTP_ASSERT(tp->rto_pending, "rto_pending not set", return); | ||
290 | |||
291 | if (tp->rttvar || tp->srtt) { | ||
292 | /* 6.3.1 C3) When a new RTT measurement R' is made, set | ||
293 | * RTTVAR <- (1 - RTO.Beta) * RTTVAR + RTO.Beta * |SRTT - R'| | ||
294 | * SRTT <- (1 - RTO.Alpha) * SRTT + RTO.Alpha * R' | ||
295 | */ | ||
296 | |||
297 | /* Note: The above algorithm has been rewritten to | ||
298 | * express rto_beta and rto_alpha as inverse powers | ||
299 | * of two. | ||
300 | * For example, assuming the default value of RTO.Alpha of | ||
301 | * 1/8, rto_alpha would be expressed as 3. | ||
302 | */ | ||
303 | tp->rttvar = tp->rttvar - (tp->rttvar >> sctp_rto_beta) | ||
304 | + ((abs(tp->srtt - rtt)) >> sctp_rto_beta); | ||
305 | tp->srtt = tp->srtt - (tp->srtt >> sctp_rto_alpha) | ||
306 | + (rtt >> sctp_rto_alpha); | ||
307 | } else { | ||
308 | /* 6.3.1 C2) When the first RTT measurement R is made, set | ||
309 | * SRTT <- R, RTTVAR <- R/2. | ||
310 | */ | ||
311 | tp->srtt = rtt; | ||
312 | tp->rttvar = rtt >> 1; | ||
313 | } | ||
314 | |||
315 | /* 6.3.1 G1) Whenever RTTVAR is computed, if RTTVAR = 0, then | ||
316 | * adjust RTTVAR <- G, where G is the CLOCK GRANULARITY. | ||
317 | */ | ||
318 | if (tp->rttvar == 0) | ||
319 | tp->rttvar = SCTP_CLOCK_GRANULARITY; | ||
320 | |||
321 | /* 6.3.1 C3) After the computation, update RTO <- SRTT + 4 * RTTVAR. */ | ||
322 | tp->rto = tp->srtt + (tp->rttvar << 2); | ||
323 | |||
324 | /* 6.3.1 C6) Whenever RTO is computed, if it is less than RTO.Min | ||
325 | * seconds then it is rounded up to RTO.Min seconds. | ||
326 | */ | ||
327 | if (tp->rto < tp->asoc->rto_min) | ||
328 | tp->rto = tp->asoc->rto_min; | ||
329 | |||
330 | /* 6.3.1 C7) A maximum value may be placed on RTO provided it is | ||
331 | * at least RTO.max seconds. | ||
332 | */ | ||
333 | if (tp->rto > tp->asoc->rto_max) | ||
334 | tp->rto = tp->asoc->rto_max; | ||
335 | |||
336 | tp->rtt = rtt; | ||
337 | |||
338 | /* Reset rto_pending so that a new RTT measurement is started when a | ||
339 | * new data chunk is sent. | ||
340 | */ | ||
341 | tp->rto_pending = 0; | ||
342 | |||
343 | SCTP_DEBUG_PRINTK("%s: transport: %p, rtt: %d, srtt: %d " | ||
344 | "rttvar: %d, rto: %d\n", __FUNCTION__, | ||
345 | tp, rtt, tp->srtt, tp->rttvar, tp->rto); | ||
346 | } | ||
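sctp_transport_update_rto() follows RFC 2960 6.3.1 with RTO.Alpha = 1/8 expressed as sctp_rto_alpha = 3 and, per the RFC default of RTO.Beta = 1/4, sctp_rto_beta = 2, both applied as right shifts. The sketch below reproduces that shift arithmetic in stand-alone C so it can be checked against the textbook formula; the names are local to the example and the RTO.Min/RTO.Max clamping is left out for brevity:

#include <stdio.h>
#include <stdlib.h>

#define RTO_ALPHA_SHIFT 3       /* RTO.Alpha = 1/8 as an inverse power of two */
#define RTO_BETA_SHIFT  2       /* RTO.Beta  = 1/4 */

struct rto_state { unsigned int srtt, rttvar, rto; };

/* Feed one RTT sample (ms) into the RFC 2960 6.3.1 estimator. */
static void rto_update(struct rto_state *s, unsigned int rtt)
{
        if (s->srtt == 0 && s->rttvar == 0) {
                /* C2) First measurement: SRTT <- R, RTTVAR <- R/2. */
                s->srtt = rtt;
                s->rttvar = rtt >> 1;
        } else {
                /* C3) RTTVAR <- (1 - beta)*RTTVAR + beta*|SRTT - R'|
                 *     SRTT   <- (1 - alpha)*SRTT  + alpha*R'
                 */
                unsigned int diff = (unsigned int)abs((int)s->srtt - (int)rtt);

                s->rttvar = s->rttvar - (s->rttvar >> RTO_BETA_SHIFT)
                          + (diff >> RTO_BETA_SHIFT);
                s->srtt = s->srtt - (s->srtt >> RTO_ALPHA_SHIFT)
                        + (rtt >> RTO_ALPHA_SHIFT);
        }
        if (s->rttvar == 0)
                s->rttvar = 1;                  /* G1) clock granularity floor */
        s->rto = s->srtt + 4 * s->rttvar;       /* RTO <- SRTT + 4*RTTVAR */
        /* RTO.Min/RTO.Max clamping (C6/C7) omitted for brevity. */
}

int main(void)
{
        struct rto_state s = { 0, 0, 0 };
        unsigned int samples[] = { 100, 120, 80, 300, 100 };

        for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
                rto_update(&s, samples[i]);
                printf("rtt=%u srtt=%u rttvar=%u rto=%u\n",
                       samples[i], s.srtt, s.rttvar, s.rto);
        }
        return 0;
}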
347 | |||
348 | /* This routine updates the transport's cwnd and partial_bytes_acked | ||
349 | * parameters based on the bytes acked in the received SACK. | ||
350 | */ | ||
351 | void sctp_transport_raise_cwnd(struct sctp_transport *transport, | ||
352 | __u32 sack_ctsn, __u32 bytes_acked) | ||
353 | { | ||
354 | __u32 cwnd, ssthresh, flight_size, pba, pmtu; | ||
355 | |||
356 | cwnd = transport->cwnd; | ||
357 | flight_size = transport->flight_size; | ||
358 | |||
359 | /* The appropriate cwnd increase algorithm is performed if, and only | ||
360 | * if the cumulative TSN has advanced and the congestion window is | ||
361 | * being fully utilized. | ||
362 | */ | ||
363 | if ((transport->asoc->ctsn_ack_point >= sack_ctsn) || | ||
364 | (flight_size < cwnd)) | ||
365 | return; | ||
366 | |||
367 | ssthresh = transport->ssthresh; | ||
368 | pba = transport->partial_bytes_acked; | ||
369 | pmtu = transport->asoc->pmtu; | ||
370 | |||
371 | if (cwnd <= ssthresh) { | ||
372 | /* RFC 2960 7.2.1, sctpimpguide-05 2.14.2 When cwnd is less | ||
373 | * than or equal to ssthresh an SCTP endpoint MUST use the | ||
374 | * slow start algorithm to increase cwnd only if the current | ||
375 | * congestion window is being fully utilized and an incoming | ||
376 | * SACK advances the Cumulative TSN Ack Point. Only when these | ||
377 | * two conditions are met can the cwnd be increased otherwise | ||
378 | * the cwnd MUST not be increased. If these conditions are met | ||
379 | * then cwnd MUST be increased by at most the lesser of | ||
380 | * 1) the total size of the previously outstanding DATA | ||
381 | * chunk(s) acknowledged, and 2) the destination's path MTU. | ||
382 | */ | ||
383 | if (bytes_acked > pmtu) | ||
384 | cwnd += pmtu; | ||
385 | else | ||
386 | cwnd += bytes_acked; | ||
387 | SCTP_DEBUG_PRINTK("%s: SLOW START: transport: %p, " | ||
388 | "bytes_acked: %d, cwnd: %d, ssthresh: %d, " | ||
389 | "flight_size: %d, pba: %d\n", | ||
390 | __FUNCTION__, | ||
391 | transport, bytes_acked, cwnd, | ||
392 | ssthresh, flight_size, pba); | ||
393 | } else { | ||
394 | /* RFC 2960 7.2.2 Whenever cwnd is greater than ssthresh, | ||
395 | * upon each SACK arrival that advances the Cumulative TSN Ack | ||
396 | * Point, increase partial_bytes_acked by the total number of | ||
397 | * bytes of all new chunks acknowledged in that SACK including | ||
398 | * chunks acknowledged by the new Cumulative TSN Ack and by | ||
399 | * Gap Ack Blocks. | ||
400 | * | ||
401 | * When partial_bytes_acked is equal to or greater than cwnd | ||
402 | * and before the arrival of the SACK the sender had cwnd or | ||
403 | * more bytes of data outstanding (i.e., before arrival of the | ||
404 | * SACK, flightsize was greater than or equal to cwnd), | ||
405 | * increase cwnd by MTU, and reset partial_bytes_acked to | ||
406 | * (partial_bytes_acked - cwnd). | ||
407 | */ | ||
408 | pba += bytes_acked; | ||
409 | if (pba >= cwnd) { | ||
410 | cwnd += pmtu; | ||
411 | pba = ((cwnd < pba) ? (pba - cwnd) : 0); | ||
412 | } | ||
413 | SCTP_DEBUG_PRINTK("%s: CONGESTION AVOIDANCE: " | ||
414 | "transport: %p, bytes_acked: %d, cwnd: %d, " | ||
415 | "ssthresh: %d, flight_size: %d, pba: %d\n", | ||
416 | __FUNCTION__, | ||
417 | transport, bytes_acked, cwnd, | ||
418 | ssthresh, flight_size, pba); | ||
419 | } | ||
420 | |||
421 | transport->cwnd = cwnd; | ||
422 | transport->partial_bytes_acked = pba; | ||
423 | } | ||
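The two growth regimes handled above can be summarized as: in slow start cwnd grows by min(bytes_acked, pmtu) per qualifying SACK, while in congestion avoidance partial_bytes_acked accumulates and cwnd grows by one pmtu only after a full cwnd's worth of data has been acknowledged. The user-space reduction below traces both regimes with made-up field names; it deliberately omits the cumulative-TSN and flight-size gating that the kernel function applies first:

#include <stdio.h>

struct cc { unsigned int cwnd, ssthresh, pba, pmtu; };

/* Apply the RFC 2960 7.2.1/7.2.2 increase rules for one SACK's worth of data. */
static void raise_cwnd(struct cc *t, unsigned int bytes_acked)
{
        if (t->cwnd <= t->ssthresh) {
                /* Slow start: grow by the bytes acked, capped at one pmtu. */
                t->cwnd += bytes_acked > t->pmtu ? t->pmtu : bytes_acked;
        } else {
                /* Congestion avoidance: bank the acked bytes and add one pmtu
                 * only once a full cwnd has been covered.
                 */
                t->pba += bytes_acked;
                if (t->pba >= t->cwnd) {
                        t->cwnd += t->pmtu;
                        t->pba = t->pba > t->cwnd ? t->pba - t->cwnd : 0;
                }
        }
}

int main(void)
{
        struct cc t = { .cwnd = 3000, .ssthresh = 6000, .pba = 0, .pmtu = 1500 };

        for (int i = 1; i <= 8; i++) {
                const char *mode = t.cwnd <= t.ssthresh ? "slow start"
                                                        : "congestion avoidance";
                raise_cwnd(&t, 1500);
                printf("sack %d (%s): cwnd=%u pba=%u\n", i, mode, t.cwnd, t.pba);
        }
        return 0;
}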
424 | |||
425 | /* This routine is used to lower the transport's cwnd when congestion is | ||
426 | * detected. | ||
427 | */ | ||
428 | void sctp_transport_lower_cwnd(struct sctp_transport *transport, | ||
429 | sctp_lower_cwnd_t reason) | ||
430 | { | ||
431 | switch (reason) { | ||
432 | case SCTP_LOWER_CWND_T3_RTX: | ||
433 | /* RFC 2960 Section 7.2.3, sctpimpguide | ||
434 | * When the T3-rtx timer expires on an address, SCTP should | ||
435 | * perform slow start by: | ||
436 | * ssthresh = max(cwnd/2, 4*MTU) | ||
437 | * cwnd = 1*MTU | ||
438 | * partial_bytes_acked = 0 | ||
439 | */ | ||
440 | transport->ssthresh = max(transport->cwnd/2, | ||
441 | 4*transport->asoc->pmtu); | ||
442 | transport->cwnd = transport->asoc->pmtu; | ||
443 | break; | ||
444 | |||
445 | case SCTP_LOWER_CWND_FAST_RTX: | ||
446 | /* RFC 2960 7.2.4 Adjust the ssthresh and cwnd of the | ||
447 | * destination address(es) to which the missing DATA chunks | ||
448 | * were last sent, according to the formula described in | ||
449 | * Section 7.2.3. | ||
450 | * | ||
451 | * RFC 2960 7.2.3, sctpimpguide Upon detection of packet | ||
452 | * losses from SACK (see Section 7.2.4), An endpoint | ||
453 | * should do the following: | ||
454 | * ssthresh = max(cwnd/2, 4*MTU) | ||
455 | * cwnd = ssthresh | ||
456 | * partial_bytes_acked = 0 | ||
457 | */ | ||
458 | transport->ssthresh = max(transport->cwnd/2, | ||
459 | 4*transport->asoc->pmtu); | ||
460 | transport->cwnd = transport->ssthresh; | ||
461 | break; | ||
462 | |||
463 | case SCTP_LOWER_CWND_ECNE: | ||
464 | /* RFC 2481 Section 6.1.2. | ||
465 | * If the sender receives an ECN-Echo ACK packet | ||
466 | * then the sender knows that congestion was encountered in the | ||
467 | * network on the path from the sender to the receiver. The | ||
468 | * indication of congestion should be treated just as a | ||
469 | * congestion loss in non-ECN Capable TCP. That is, the TCP | ||
470 | * source halves the congestion window "cwnd" and reduces the | ||
471 | * slow start threshold "ssthresh". | ||
472 | * A critical condition is that TCP does not react to | ||
473 | * congestion indications more than once every window of | ||
474 | * data (or more loosely more than once every round-trip time). | ||
475 | */ | ||
476 | if ((jiffies - transport->last_time_ecne_reduced) > | ||
477 | transport->rtt) { | ||
478 | transport->ssthresh = max(transport->cwnd/2, | ||
479 | 4*transport->asoc->pmtu); | ||
480 | transport->cwnd = transport->ssthresh; | ||
481 | transport->last_time_ecne_reduced = jiffies; | ||
482 | } | ||
483 | break; | ||
484 | |||
485 | case SCTP_LOWER_CWND_INACTIVE: | ||
486 | /* RFC 2960 Section 7.2.1, sctpimpguide | ||
487 | * When the endpoint does not transmit data on a given | ||
488 | * transport address, the cwnd of the transport address | ||
489 | * should be adjusted to max(cwnd/2, 4*MTU) per RTO. | ||
490 | * NOTE: Although the draft recommends that this check needs | ||
492 | * to be done every RTO interval, we do it every heartbeat | ||
492 | * interval. | ||
493 | */ | ||
494 | if ((jiffies - transport->last_time_used) > transport->rto) | ||
495 | transport->cwnd = max(transport->cwnd/2, | ||
496 | 4*transport->asoc->pmtu); | ||
497 | break; | ||
498 | } | ||
499 | |||
500 | transport->partial_bytes_acked = 0; | ||
501 | SCTP_DEBUG_PRINTK("%s: transport: %p reason: %d cwnd: " | ||
502 | "%d ssthresh: %d\n", __FUNCTION__, | ||
503 | transport, reason, | ||
504 | transport->cwnd, transport->ssthresh); | ||
505 | } | ||
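All of the reduction cases above start from the same ssthresh = max(cwnd/2, 4*MTU) floor of RFC 2960 7.2.3 and differ only in what cwnd becomes afterwards. A small sketch of the T3-rtx and fast-retransmit/ECNE outcomes, with a hypothetical reason enum local to the example (the once-per-RTT limit on ECNE reductions and the inactive-path case are omitted):

#include <stdio.h>

enum lower_reason { LOWER_T3_RTX, LOWER_FAST_RTX, LOWER_ECNE };

struct cc { unsigned int cwnd, ssthresh, pmtu; };

static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

/* Mirror of the ssthresh/cwnd adjustments made above (simplified). */
static void lower_cwnd(struct cc *t, enum lower_reason why)
{
        t->ssthresh = max_u(t->cwnd / 2, 4 * t->pmtu);

        switch (why) {
        case LOWER_T3_RTX:
                t->cwnd = t->pmtu;      /* back to one MTU: restart slow start */
                break;
        case LOWER_FAST_RTX:
        case LOWER_ECNE:
                t->cwnd = t->ssthresh;  /* halve and continue */
                break;
        }
}

int main(void)
{
        struct cc t = { .cwnd = 24000, .ssthresh = 64000, .pmtu = 1500 };

        lower_cwnd(&t, LOWER_FAST_RTX);
        printf("after fast rtx: cwnd=%u ssthresh=%u\n", t.cwnd, t.ssthresh);
        lower_cwnd(&t, LOWER_T3_RTX);
        printf("after T3-rtx:   cwnd=%u ssthresh=%u\n", t.cwnd, t.ssthresh);
        return 0;
}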
506 | |||
507 | /* What is the next timeout value for this transport? */ | ||
508 | unsigned long sctp_transport_timeout(struct sctp_transport *t) | ||
509 | { | ||
510 | unsigned long timeout; | ||
511 | timeout = t->hb_interval + t->rto + sctp_jitter(t->rto); | ||
512 | timeout += jiffies; | ||
513 | return timeout; | ||
514 | } | ||
diff --git a/net/sctp/tsnmap.c b/net/sctp/tsnmap.c new file mode 100644 index 000000000000..ac4fae161bc7 --- /dev/null +++ b/net/sctp/tsnmap.c | |||
@@ -0,0 +1,417 @@ | |||
1 | /* SCTP kernel reference Implementation | ||
2 | * (C) Copyright IBM Corp. 2001, 2004 | ||
3 | * Copyright (c) 1999-2000 Cisco, Inc. | ||
4 | * Copyright (c) 1999-2001 Motorola, Inc. | ||
5 | * Copyright (c) 2001 Intel Corp. | ||
6 | * | ||
7 | * This file is part of the SCTP kernel reference Implementation | ||
8 | * | ||
9 | * These functions manipulate the SCTP TSN mapping array. | ||
10 | * | ||
11 | * The SCTP reference implementation is free software; | ||
12 | * you can redistribute it and/or modify it under the terms of | ||
13 | * the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2, or (at your option) | ||
15 | * any later version. | ||
16 | * | ||
17 | * The SCTP reference implementation is distributed in the hope that it | ||
18 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
19 | * ************************ | ||
20 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
21 | * See the GNU General Public License for more details. | ||
22 | * | ||
23 | * You should have received a copy of the GNU General Public License | ||
24 | * along with GNU CC; see the file COPYING. If not, write to | ||
25 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
26 | * Boston, MA 02111-1307, USA. | ||
27 | * | ||
28 | * Please send any bug reports or fixes you make to the | ||
29 | * email address(es): | ||
30 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
31 | * | ||
32 | * Or submit a bug report through the following website: | ||
33 | * http://www.sf.net/projects/lksctp | ||
34 | * | ||
35 | * Written or modified by: | ||
36 | * La Monte H.P. Yarroll <piggy@acm.org> | ||
37 | * Jon Grimm <jgrimm@us.ibm.com> | ||
38 | * Karl Knutson <karl@athena.chicago.il.us> | ||
39 | * Sridhar Samudrala <sri@us.ibm.com> | ||
40 | * | ||
41 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
42 | * be incorporated into the next SCTP release. | ||
43 | */ | ||
44 | |||
45 | #include <linux/types.h> | ||
46 | #include <net/sctp/sctp.h> | ||
47 | #include <net/sctp/sm.h> | ||
48 | |||
49 | static void sctp_tsnmap_update(struct sctp_tsnmap *map); | ||
50 | static void sctp_tsnmap_find_gap_ack(__u8 *map, __u16 off, | ||
51 | __u16 len, __u16 base, | ||
52 | int *started, __u16 *start, | ||
53 | int *ended, __u16 *end); | ||
54 | |||
55 | /* Initialize a block of memory as a tsnmap. */ | ||
56 | struct sctp_tsnmap *sctp_tsnmap_init(struct sctp_tsnmap *map, __u16 len, | ||
57 | __u32 initial_tsn) | ||
58 | { | ||
59 | map->tsn_map = map->raw_map; | ||
60 | map->overflow_map = map->tsn_map + len; | ||
61 | map->len = len; | ||
62 | |||
63 | /* Clear out a TSN ack status. */ | ||
64 | memset(map->tsn_map, 0x00, map->len + map->len); | ||
65 | |||
66 | /* Keep track of TSNs represented by tsn_map. */ | ||
67 | map->base_tsn = initial_tsn; | ||
68 | map->overflow_tsn = initial_tsn + map->len; | ||
69 | map->cumulative_tsn_ack_point = initial_tsn - 1; | ||
70 | map->max_tsn_seen = map->cumulative_tsn_ack_point; | ||
71 | map->malloced = 0; | ||
72 | map->num_dup_tsns = 0; | ||
73 | |||
74 | return map; | ||
75 | } | ||
76 | |||
77 | /* Test the tracking state of this TSN. | ||
78 | * Returns: | ||
79 | * 0 if the TSN has not yet been seen | ||
80 | * >0 if the TSN has been seen (duplicate) | ||
81 | * <0 if the TSN is invalid (too large to track) | ||
82 | */ | ||
83 | int sctp_tsnmap_check(const struct sctp_tsnmap *map, __u32 tsn) | ||
84 | { | ||
85 | __s32 gap; | ||
86 | int dup; | ||
87 | |||
88 | /* Calculate the index into the mapping arrays. */ | ||
89 | gap = tsn - map->base_tsn; | ||
90 | |||
91 | /* Verify that we can hold this TSN. */ | ||
92 | if (gap >= (/* base */ map->len + /* overflow */ map->len)) { | ||
93 | dup = -1; | ||
94 | goto out; | ||
95 | } | ||
96 | |||
97 | /* Honk if we've already seen this TSN. | ||
98 | * We have three cases: | ||
99 | * 1. The TSN is ancient or belongs to a previous tsn_map. | ||
100 | * 2. The TSN is already marked in the tsn_map. | ||
101 | * 3. The TSN is already marked in the tsn_map_overflow. | ||
102 | */ | ||
103 | if (gap < 0 || | ||
104 | (gap < map->len && map->tsn_map[gap]) || | ||
105 | (gap >= map->len && map->overflow_map[gap - map->len])) | ||
106 | dup = 1; | ||
107 | else | ||
108 | dup = 0; | ||
109 | |||
110 | out: | ||
111 | return dup; | ||
112 | } | ||
113 | |||
114 | |||
115 | /* Mark this TSN as seen. */ | ||
116 | void sctp_tsnmap_mark(struct sctp_tsnmap *map, __u32 tsn) | ||
117 | { | ||
118 | __s32 gap; | ||
119 | |||
120 | /* Vacuously mark any TSN which precedes the map base or | ||
121 | * exceeds the end of the map. | ||
122 | */ | ||
123 | if (TSN_lt(tsn, map->base_tsn)) | ||
124 | return; | ||
125 | if (!TSN_lt(tsn, map->base_tsn + map->len + map->len)) | ||
126 | return; | ||
127 | |||
128 | /* Bump the max. */ | ||
129 | if (TSN_lt(map->max_tsn_seen, tsn)) | ||
130 | map->max_tsn_seen = tsn; | ||
131 | |||
132 | /* Assert: TSN is in range. */ | ||
133 | gap = tsn - map->base_tsn; | ||
134 | |||
135 | /* Mark the TSN as received. */ | ||
136 | if (gap < map->len) | ||
137 | map->tsn_map[gap]++; | ||
138 | else | ||
139 | map->overflow_map[gap - map->len]++; | ||
140 | |||
141 | /* Go fixup any internal TSN mapping variables including | ||
142 | * cumulative_tsn_ack_point. | ||
143 | */ | ||
144 | sctp_tsnmap_update(map); | ||
145 | } | ||
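Both sctp_tsnmap_check() and sctp_tsnmap_mark() index the map by gap = tsn - base_tsn, using the main array when gap < len and the overflow array otherwise. A user-space reduction of that pair with a fixed 8 + 8 entry window and illustrative demo_* names:

#include <stdint.h>
#include <stdio.h>

#define MAP_LEN 8

struct demo_tsnmap {
        uint8_t  tsn_map[MAP_LEN];      /* base window */
        uint8_t  overflow[MAP_LEN];     /* next window */
        uint32_t base_tsn;
};

/* 0 = unseen, 1 = duplicate, -1 = too far ahead to track. */
static int demo_check(const struct demo_tsnmap *m, uint32_t tsn)
{
        int32_t gap = (int32_t)(tsn - m->base_tsn);

        if (gap >= 2 * MAP_LEN)
                return -1;
        if (gap < 0)
                return 1;       /* ancient TSN: treat as already seen */
        return gap < MAP_LEN ? m->tsn_map[gap] : m->overflow[gap - MAP_LEN];
}

static void demo_mark(struct demo_tsnmap *m, uint32_t tsn)
{
        int32_t gap = (int32_t)(tsn - m->base_tsn);

        if (gap < 0 || gap >= 2 * MAP_LEN)
                return;         /* outside the trackable window */
        if (gap < MAP_LEN)
                m->tsn_map[gap] = 1;
        else
                m->overflow[gap - MAP_LEN] = 1;
}

int main(void)
{
        struct demo_tsnmap m = { .base_tsn = 1000 };

        demo_mark(&m, 1003);
        printf("1003: %d, 1004: %d, 1020: %d\n",
               demo_check(&m, 1003), demo_check(&m, 1004),
               demo_check(&m, 1020));
        return 0;
}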
146 | |||
147 | |||
148 | /* Initialize a Gap Ack Block iterator from memory being provided. */ | ||
149 | SCTP_STATIC void sctp_tsnmap_iter_init(const struct sctp_tsnmap *map, | ||
150 | struct sctp_tsnmap_iter *iter) | ||
151 | { | ||
152 | /* Only start looking one past the Cumulative TSN Ack Point. */ | ||
153 | iter->start = map->cumulative_tsn_ack_point + 1; | ||
154 | } | ||
155 | |||
156 | /* Get the next Gap Ack Block. Returns 0 if there was not another block | ||
157 | * to get. | ||
158 | */ | ||
159 | SCTP_STATIC int sctp_tsnmap_next_gap_ack(const struct sctp_tsnmap *map, | ||
160 | struct sctp_tsnmap_iter *iter, | ||
161 | __u16 *start, __u16 *end) | ||
162 | { | ||
163 | int started, ended; | ||
164 | __u16 _start, _end, offset; | ||
165 | |||
166 | /* We haven't found a gap yet. */ | ||
167 | started = ended = 0; | ||
168 | |||
169 | /* If there are no more gap acks possible, get out fast. */ | ||
170 | if (TSN_lte(map->max_tsn_seen, iter->start)) | ||
171 | return 0; | ||
172 | |||
173 | /* Search the first mapping array. */ | ||
174 | if (iter->start - map->base_tsn < map->len) { | ||
175 | |||
176 | offset = iter->start - map->base_tsn; | ||
177 | sctp_tsnmap_find_gap_ack(map->tsn_map, offset, map->len, 0, | ||
178 | &started, &_start, &ended, &_end); | ||
179 | } | ||
180 | |||
181 | /* Do we need to check the overflow map? */ | ||
182 | if (!ended) { | ||
183 | /* Fix up where we'd like to start searching in the | ||
184 | * overflow map. | ||
185 | */ | ||
186 | if (iter->start - map->base_tsn < map->len) | ||
187 | offset = 0; | ||
188 | else | ||
189 | offset = iter->start - map->base_tsn - map->len; | ||
190 | |||
191 | /* Search the overflow map. */ | ||
192 | sctp_tsnmap_find_gap_ack(map->overflow_map, | ||
193 | offset, | ||
194 | map->len, | ||
195 | map->len, | ||
196 | &started, &_start, | ||
197 | &ended, &_end); | ||
198 | } | ||
199 | |||
200 | /* The Gap Ack Block happens to end at the end of the | ||
201 | * overflow map. | ||
202 | */ | ||
203 | if (started && !ended) { | ||
204 | ended++; | ||
205 | _end = map->len + map->len - 1; | ||
206 | } | ||
207 | |||
208 | /* If we found a Gap Ack Block, return the start and end and | ||
209 | * bump the iterator forward. | ||
210 | */ | ||
211 | if (ended) { | ||
212 | /* Fix up the start and end based on the | ||
213 | * Cumulative TSN Ack offset into the map. | ||
214 | */ | ||
215 | int gap = map->cumulative_tsn_ack_point - | ||
216 | map->base_tsn; | ||
217 | |||
218 | *start = _start - gap; | ||
219 | *end = _end - gap; | ||
220 | |||
221 | /* Move the iterator forward. */ | ||
222 | iter->start = map->cumulative_tsn_ack_point + *end + 1; | ||
223 | } | ||
224 | |||
225 | return ended; | ||
226 | } | ||
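The iterator reports each Gap Ack Block as start/end offsets relative to the Cumulative TSN Ack Point, which is the form carried in a SACK chunk. A compact stand-alone reduction that scans a single received-flag array (no overflow map; names are illustrative) and prints the blocks:

#include <stdio.h>

/* Print Gap Ack Blocks for the TSNs ctsn+1 .. ctsn+len, where received[i]
 * corresponds to TSN ctsn+1+i. Offsets are reported relative to ctsn, as
 * they are carried in a SACK chunk.
 */
static void print_gap_acks(unsigned int ctsn, const unsigned char *received,
                           unsigned int len)
{
        unsigned int i = 0;

        while (i < len) {
                unsigned int start;

                while (i < len && !received[i])
                        i++;                    /* skip the hole */
                if (i == len)
                        break;
                start = i + 1;                  /* offset of block start */
                while (i < len && received[i])
                        i++;                    /* walk the block */
                printf("gap ack block: start=%u end=%u (TSNs %u..%u)\n",
                       start, i, ctsn + start, ctsn + i);
        }
}

int main(void)
{
        /* ctsn = 100; TSNs 103-104 and 107 arrived out of order. */
        unsigned char received[] = { 0, 0, 1, 1, 0, 0, 1, 0 };

        print_gap_acks(100, received, sizeof(received));
        return 0;
}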
227 | |||
228 | /* Mark this and any lower TSN as seen. */ | ||
229 | void sctp_tsnmap_skip(struct sctp_tsnmap *map, __u32 tsn) | ||
230 | { | ||
231 | __s32 gap; | ||
232 | |||
233 | /* Vacuously mark any TSN which precedes the map base or | ||
234 | * exceeds the end of the map. | ||
235 | */ | ||
236 | if (TSN_lt(tsn, map->base_tsn)) | ||
237 | return; | ||
238 | if (!TSN_lt(tsn, map->base_tsn + map->len + map->len)) | ||
239 | return; | ||
240 | |||
241 | /* Bump the max. */ | ||
242 | if (TSN_lt(map->max_tsn_seen, tsn)) | ||
243 | map->max_tsn_seen = tsn; | ||
244 | |||
245 | /* Assert: TSN is in range. */ | ||
246 | gap = tsn - map->base_tsn + 1; | ||
247 | |||
248 | /* Mark the TSNs as received. */ | ||
249 | if (gap <= map->len) | ||
250 | memset(map->tsn_map, 0x01, gap); | ||
251 | else { | ||
252 | memset(map->tsn_map, 0x01, map->len); | ||
253 | memset(map->overflow_map, 0x01, (gap - map->len)); | ||
254 | } | ||
255 | |||
256 | /* Go fixup any internal TSN mapping variables including | ||
257 | * cumulative_tsn_ack_point. | ||
258 | */ | ||
259 | sctp_tsnmap_update(map); | ||
260 | } | ||
261 | |||
262 | /******************************************************************** | ||
263 | * 2nd Level Abstractions | ||
264 | ********************************************************************/ | ||
265 | |||
266 | /* This private helper function updates the tsnmap buffers and | ||
267 | * the Cumulative TSN Ack Point. | ||
268 | */ | ||
269 | static void sctp_tsnmap_update(struct sctp_tsnmap *map) | ||
270 | { | ||
271 | __u32 ctsn; | ||
272 | |||
273 | ctsn = map->cumulative_tsn_ack_point; | ||
274 | do { | ||
275 | ctsn++; | ||
276 | if (ctsn == map->overflow_tsn) { | ||
277 | /* Now tsn_map must have been all '1's, | ||
278 | * so we swap the map and check the overflow table | ||
279 | */ | ||
280 | __u8 *tmp = map->tsn_map; | ||
281 | memset(tmp, 0, map->len); | ||
282 | map->tsn_map = map->overflow_map; | ||
283 | map->overflow_map = tmp; | ||
284 | |||
285 | /* Update the tsn_map boundaries. */ | ||
286 | map->base_tsn += map->len; | ||
287 | map->overflow_tsn += map->len; | ||
288 | } | ||
289 | } while (map->tsn_map[ctsn - map->base_tsn]); | ||
290 | |||
291 | map->cumulative_tsn_ack_point = ctsn - 1; /* Back up one. */ | ||
292 | } | ||
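Whenever the Cumulative TSN Ack Point reaches overflow_tsn, the exhausted main array is cleared and recycled as the new overflow array while the window base advances by len. The toy trace below walks that swap with two 4-entry arrays and hypothetical demo_* names:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LEN 4

struct demo_map {
        uint8_t  a[LEN], b[LEN];        /* backing storage */
        uint8_t *tsn_map, *overflow;    /* current / next window */
        uint32_t base_tsn, overflow_tsn, ctsn_ack;
};

/* Walk forward while TSNs are marked, swapping windows at the boundary. */
static void demo_update(struct demo_map *m)
{
        uint32_t ctsn = m->ctsn_ack;

        do {
                ctsn++;
                if (ctsn == m->overflow_tsn) {
                        uint8_t *tmp = m->tsn_map;

                        memset(tmp, 0, LEN);    /* old window was all 1s */
                        m->tsn_map = m->overflow;
                        m->overflow = tmp;      /* recycle it as overflow */
                        m->base_tsn += LEN;
                        m->overflow_tsn += LEN;
                }
        } while (m->tsn_map[ctsn - m->base_tsn]);

        m->ctsn_ack = ctsn - 1;
}

int main(void)
{
        struct demo_map m = { .base_tsn = 100, .overflow_tsn = 104,
                              .ctsn_ack = 99 };

        m.tsn_map = m.a;
        m.overflow = m.b;

        /* TSNs 100..105 received: the walk must cross into the overflow map. */
        memset(m.a, 1, LEN);
        m.b[0] = m.b[1] = 1;

        demo_update(&m);
        printf("ctsn=%u base=%u overflow_tsn=%u\n",
               m.ctsn_ack, m.base_tsn, m.overflow_tsn);  /* 105, 104, 108 */
        return 0;
}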
293 | |||
294 | /* How many data chunks are we missing from our peer? | ||
295 | */ | ||
296 | __u16 sctp_tsnmap_pending(struct sctp_tsnmap *map) | ||
297 | { | ||
298 | __u32 cum_tsn = map->cumulative_tsn_ack_point; | ||
299 | __u32 max_tsn = map->max_tsn_seen; | ||
300 | __u32 base_tsn = map->base_tsn; | ||
301 | __u16 pending_data; | ||
302 | __s32 gap, start, end, i; | ||
303 | |||
304 | pending_data = max_tsn - cum_tsn; | ||
305 | gap = max_tsn - base_tsn; | ||
306 | |||
307 | if (gap <= 0 || gap >= (map->len + map->len)) | ||
308 | goto out; | ||
309 | |||
310 | start = ((cum_tsn >= base_tsn) ? (cum_tsn - base_tsn + 1) : 0); | ||
311 | end = ((gap > map->len ) ? map->len : gap + 1); | ||
312 | |||
313 | for (i = start; i < end; i++) { | ||
314 | if (map->tsn_map[i]) | ||
315 | pending_data--; | ||
316 | } | ||
317 | |||
318 | if (gap >= map->len) { | ||
319 | start = 0; | ||
320 | end = gap - map->len + 1; | ||
321 | for (i = start; i < end; i++) { | ||
322 | if (map->overflow_map[i]) | ||
323 | pending_data--; | ||
324 | } | ||
325 | } | ||
326 | |||
327 | out: | ||
328 | return pending_data; | ||
329 | } | ||
330 | |||
331 | /* This is a private helper for finding Gap Ack Blocks. It searches a | ||
332 | * single array for the start and end of a Gap Ack Block. | ||
333 | * | ||
334 | * The flags "started" and "ended" tell us whether we found the beginning | ||
335 | * or (respectively) the end of a Gap Ack Block. | ||
336 | */ | ||
337 | static void sctp_tsnmap_find_gap_ack(__u8 *map, __u16 off, | ||
338 | __u16 len, __u16 base, | ||
339 | int *started, __u16 *start, | ||
340 | int *ended, __u16 *end) | ||
341 | { | ||
342 | int i = off; | ||
343 | |||
344 | /* Look through the entire array, but break out | ||
345 | * early if we have found the end of the Gap Ack Block. | ||
346 | */ | ||
347 | |||
348 | /* Also, stop looking past the maximum TSN seen. */ | ||
349 | |||
350 | /* Look for the start. */ | ||
351 | if (!(*started)) { | ||
352 | for (; i < len; i++) { | ||
353 | if (map[i]) { | ||
354 | (*started)++; | ||
355 | *start = base + i; | ||
356 | break; | ||
357 | } | ||
358 | } | ||
359 | } | ||
360 | |||
361 | /* Look for the end. */ | ||
362 | if (*started) { | ||
363 | /* We have found the start, let's find the | ||
364 | * end. If we find the end, break out. | ||
365 | */ | ||
366 | for (; i < len; i++) { | ||
367 | if (!map[i]) { | ||
368 | (*ended)++; | ||
369 | *end = base + i - 1; | ||
370 | break; | ||
371 | } | ||
372 | } | ||
373 | } | ||
374 | } | ||
375 | |||
376 | /* Renege that we have seen a TSN. */ | ||
377 | void sctp_tsnmap_renege(struct sctp_tsnmap *map, __u32 tsn) | ||
378 | { | ||
379 | __s32 gap; | ||
380 | |||
381 | if (TSN_lt(tsn, map->base_tsn)) | ||
382 | return; | ||
383 | if (!TSN_lt(tsn, map->base_tsn + map->len + map->len)) | ||
384 | return; | ||
385 | |||
386 | /* Assert: TSN is in range. */ | ||
387 | gap = tsn - map->base_tsn; | ||
388 | |||
389 | /* Pretend we never saw the TSN. */ | ||
390 | if (gap < map->len) | ||
391 | map->tsn_map[gap] = 0; | ||
392 | else | ||
393 | map->overflow_map[gap - map->len] = 0; | ||
394 | } | ||
395 | |||
396 | /* How many gap ack blocks do we have recorded? */ | ||
397 | __u16 sctp_tsnmap_num_gabs(struct sctp_tsnmap *map) | ||
398 | { | ||
399 | struct sctp_tsnmap_iter iter; | ||
400 | int gabs = 0; | ||
401 | |||
402 | /* Refresh the gap ack information. */ | ||
403 | if (sctp_tsnmap_has_gap(map)) { | ||
404 | sctp_tsnmap_iter_init(map, &iter); | ||
405 | while (sctp_tsnmap_next_gap_ack(map, &iter, | ||
406 | &map->gabs[gabs].start, | ||
407 | &map->gabs[gabs].end)) { | ||
408 | |||
409 | map->gabs[gabs].start = htons(map->gabs[gabs].start); | ||
410 | map->gabs[gabs].end = htons(map->gabs[gabs].end); | ||
411 | gabs++; | ||
412 | if (gabs >= SCTP_MAX_GABS) | ||
413 | break; | ||
414 | } | ||
415 | } | ||
416 | return gabs; | ||
417 | } | ||
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c new file mode 100644 index 000000000000..17d0ff534735 --- /dev/null +++ b/net/sctp/ulpevent.c | |||
@@ -0,0 +1,942 @@ | |||
1 | /* SCTP kernel reference Implementation | ||
2 | * (C) Copyright IBM Corp. 2001, 2004 | ||
3 | * Copyright (c) 1999-2000 Cisco, Inc. | ||
4 | * Copyright (c) 1999-2001 Motorola, Inc. | ||
5 | * Copyright (c) 2001 Intel Corp. | ||
6 | * Copyright (c) 2001 Nokia, Inc. | ||
7 | * Copyright (c) 2001 La Monte H.P. Yarroll | ||
8 | * | ||
9 | * These functions manipulate an sctp event. The struct ulpevent is used | ||
10 | * to carry notifications and data to the ULP (sockets). | ||
11 | * The SCTP reference implementation is free software; | ||
12 | * you can redistribute it and/or modify it under the terms of | ||
13 | * the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2, or (at your option) | ||
15 | * any later version. | ||
16 | * | ||
17 | * The SCTP reference implementation is distributed in the hope that it | ||
18 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
19 | * ************************ | ||
20 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
21 | * See the GNU General Public License for more details. | ||
22 | * | ||
23 | * You should have received a copy of the GNU General Public License | ||
24 | * along with GNU CC; see the file COPYING. If not, write to | ||
25 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
26 | * Boston, MA 02111-1307, USA. | ||
27 | * | ||
28 | * Please send any bug reports or fixes you make to the | ||
29 | * email address(es): | ||
30 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
31 | * | ||
32 | * Or submit a bug report through the following website: | ||
33 | * http://www.sf.net/projects/lksctp | ||
34 | * | ||
35 | * Written or modified by: | ||
36 | * Jon Grimm <jgrimm@us.ibm.com> | ||
37 | * La Monte H.P. Yarroll <piggy@acm.org> | ||
38 | * Ardelle Fan <ardelle.fan@intel.com> | ||
39 | * Sridhar Samudrala <sri@us.ibm.com> | ||
40 | * | ||
41 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
42 | * be incorporated into the next SCTP release. | ||
43 | */ | ||
44 | |||
45 | #include <linux/types.h> | ||
46 | #include <linux/skbuff.h> | ||
47 | #include <net/sctp/structs.h> | ||
48 | #include <net/sctp/sctp.h> | ||
49 | #include <net/sctp/sm.h> | ||
50 | |||
51 | static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event, | ||
52 | struct sctp_association *asoc); | ||
53 | static void sctp_ulpevent_release_data(struct sctp_ulpevent *event); | ||
54 | |||
55 | /* Stub skb destructor. */ | ||
56 | static void sctp_stub_rfree(struct sk_buff *skb) | ||
57 | { | ||
58 | /* WARNING: This function is just a warning not to use the | ||
59 | * skb destructor. If the skb is shared, we may get the destructor | ||
60 | * callback on some processor that does not own the sock_lock. This | ||
61 | * was occurring with PACKET socket applications that were monitoring | ||
62 | * our skbs. We can't take the sock_lock, because we can't risk | ||
63 | * recursing if we do really own the sock lock. Instead, do all | ||
64 | * of our rwnd manipulation while we own the sock_lock outright. | ||
65 | */ | ||
66 | } | ||
67 | |||
68 | /* Initialize an ULP event from a given skb. */ | ||
69 | SCTP_STATIC void sctp_ulpevent_init(struct sctp_ulpevent *event, int msg_flags) | ||
70 | { | ||
71 | memset(event, 0, sizeof(struct sctp_ulpevent)); | ||
72 | event->msg_flags = msg_flags; | ||
73 | } | ||
74 | |||
75 | /* Create a new sctp_ulpevent. */ | ||
76 | SCTP_STATIC struct sctp_ulpevent *sctp_ulpevent_new(int size, int msg_flags, | ||
77 | int gfp) | ||
78 | { | ||
79 | struct sctp_ulpevent *event; | ||
80 | struct sk_buff *skb; | ||
81 | |||
82 | skb = alloc_skb(size, gfp); | ||
83 | if (!skb) | ||
84 | goto fail; | ||
85 | |||
86 | event = sctp_skb2event(skb); | ||
87 | sctp_ulpevent_init(event, msg_flags); | ||
88 | |||
89 | return event; | ||
90 | |||
91 | fail: | ||
92 | return NULL; | ||
93 | } | ||
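sctp_ulpevent_new() allocates nothing beyond the skb itself: the event is carried with the skb (sctp_skb2event()/sctp_event2skb() convert between the two views; in this implementation the event sits in the skb's control block), so events can be queued on ordinary sk_buff lists. A stand-alone sketch of that embed-the-control-structure pattern, using a plain buffer with a reserved control area instead of a real sk_buff:

#include <stdio.h>
#include <stdlib.h>

#define CB_SIZE 48      /* stand-in for the size of skb->cb */

/* Toy "skb": a control block followed by the data buffer. */
struct demo_skb {
        unsigned char cb[CB_SIZE];
        size_t len;
        unsigned char data[];
};

/* Toy event, stored inside the control block (must fit in CB_SIZE). */
struct demo_event {
        int msg_flags;
};

static struct demo_event *skb2event(struct demo_skb *skb)
{
        return (struct demo_event *)skb->cb;
}

static struct demo_skb *event_new(size_t size, int msg_flags)
{
        struct demo_skb *skb = calloc(1, sizeof(*skb) + size);

        if (!skb)
                return NULL;
        skb->len = size;
        skb2event(skb)->msg_flags = msg_flags;  /* init the event in place */
        return skb;
}

int main(void)
{
        struct demo_skb *skb = event_new(64, 0x8000 /* pretend notification */);

        if (!skb)
                return 1;
        printf("event flags %#x, payload room %zu bytes\n",
               skb2event(skb)->msg_flags, skb->len);
        free(skb);
        return 0;
}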
94 | |||
95 | /* Is this a MSG_NOTIFICATION? */ | ||
96 | int sctp_ulpevent_is_notification(const struct sctp_ulpevent *event) | ||
97 | { | ||
98 | return MSG_NOTIFICATION == (event->msg_flags & MSG_NOTIFICATION); | ||
99 | } | ||
100 | |||
101 | /* Hold the association in case the msg_name needs to be read out of | ||
102 | * the association. | ||
103 | */ | ||
104 | static inline void sctp_ulpevent_set_owner(struct sctp_ulpevent *event, | ||
105 | const struct sctp_association *asoc) | ||
106 | { | ||
107 | struct sk_buff *skb; | ||
108 | |||
109 | /* Cast away the const, as we just want to | ||
110 | * bump the reference count. | ||
111 | */ | ||
112 | sctp_association_hold((struct sctp_association *)asoc); | ||
113 | skb = sctp_event2skb(event); | ||
114 | skb->sk = asoc->base.sk; | ||
115 | event->asoc = (struct sctp_association *)asoc; | ||
116 | skb->destructor = sctp_stub_rfree; | ||
117 | } | ||
118 | |||
119 | /* A simple destructor to give up the reference to the association. */ | ||
120 | static inline void sctp_ulpevent_release_owner(struct sctp_ulpevent *event) | ||
121 | { | ||
122 | sctp_association_put(event->asoc); | ||
123 | } | ||
124 | |||
125 | /* Create and initialize an SCTP_ASSOC_CHANGE event. | ||
126 | * | ||
127 | * 5.3.1.1 SCTP_ASSOC_CHANGE | ||
128 | * | ||
129 | * Communication notifications inform the ULP that an SCTP association | ||
130 | * has either begun or ended. The identifier for a new association is | ||
131 | * provided by this notification. | ||
132 | * | ||
133 | * Note: There is no field checking here. If a field is unused it will be | ||
134 | * zero'd out. | ||
135 | */ | ||
136 | struct sctp_ulpevent *sctp_ulpevent_make_assoc_change( | ||
137 | const struct sctp_association *asoc, | ||
138 | __u16 flags, __u16 state, __u16 error, __u16 outbound, | ||
139 | __u16 inbound, int gfp) | ||
140 | { | ||
141 | struct sctp_ulpevent *event; | ||
142 | struct sctp_assoc_change *sac; | ||
143 | struct sk_buff *skb; | ||
144 | |||
145 | event = sctp_ulpevent_new(sizeof(struct sctp_assoc_change), | ||
146 | MSG_NOTIFICATION, gfp); | ||
147 | if (!event) | ||
148 | goto fail; | ||
149 | skb = sctp_event2skb(event); | ||
150 | sac = (struct sctp_assoc_change *) | ||
151 | skb_put(skb, sizeof(struct sctp_assoc_change)); | ||
152 | |||
153 | /* Socket Extensions for SCTP | ||
154 | * 5.3.1.1 SCTP_ASSOC_CHANGE | ||
155 | * | ||
156 | * sac_type: | ||
157 | * It should be SCTP_ASSOC_CHANGE. | ||
158 | */ | ||
159 | sac->sac_type = SCTP_ASSOC_CHANGE; | ||
160 | |||
161 | /* Socket Extensions for SCTP | ||
162 | * 5.3.1.1 SCTP_ASSOC_CHANGE | ||
163 | * | ||
164 | * sac_state: 32 bits (signed integer) | ||
165 | * This field holds one of a number of values that communicate the | ||
166 | * event that happened to the association. | ||
167 | */ | ||
168 | sac->sac_state = state; | ||
169 | |||
170 | /* Socket Extensions for SCTP | ||
171 | * 5.3.1.1 SCTP_ASSOC_CHANGE | ||
172 | * | ||
173 | * sac_flags: 16 bits (unsigned integer) | ||
174 | * Currently unused. | ||
175 | */ | ||
176 | sac->sac_flags = 0; | ||
177 | |||
178 | /* Socket Extensions for SCTP | ||
179 | * 5.3.1.1 SCTP_ASSOC_CHANGE | ||
180 | * | ||
181 | * sac_length: sizeof (__u32) | ||
182 | * This field is the total length of the notification data, including | ||
183 | * the notification header. | ||
184 | */ | ||
185 | sac->sac_length = sizeof(struct sctp_assoc_change); | ||
186 | |||
187 | /* Socket Extensions for SCTP | ||
188 | * 5.3.1.1 SCTP_ASSOC_CHANGE | ||
189 | * | ||
190 | * sac_error: 32 bits (signed integer) | ||
191 | * | ||
192 | * If the state was reached due to an error condition (e.g. | ||
193 | * COMMUNICATION_LOST) any relevant error information is available in | ||
194 | * this field. This corresponds to the protocol error codes defined in | ||
195 | * [SCTP]. | ||
196 | */ | ||
197 | sac->sac_error = error; | ||
198 | |||
199 | /* Socket Extensions for SCTP | ||
200 | * 5.3.1.1 SCTP_ASSOC_CHANGE | ||
201 | * | ||
202 | * sac_outbound_streams: 16 bits (unsigned integer) | ||
203 | * sac_inbound_streams: 16 bits (unsigned integer) | ||
204 | * | ||
205 | * The maximum number of streams allowed in each direction is | ||
206 | * available in sac_outbound_streams and sac_inbound_streams. | ||
207 | */ | ||
208 | sac->sac_outbound_streams = outbound; | ||
209 | sac->sac_inbound_streams = inbound; | ||
210 | |||
211 | /* Socket Extensions for SCTP | ||
212 | * 5.3.1.1 SCTP_ASSOC_CHANGE | ||
213 | * | ||
214 | * sac_assoc_id: sizeof (sctp_assoc_t) | ||
215 | * | ||
216 | * The association id field, holds the identifier for the association. | ||
217 | * All notifications for a given association have the same association | ||
218 | * identifier. For TCP style socket, this field is ignored. | ||
219 | */ | ||
220 | sctp_ulpevent_set_owner(event, asoc); | ||
221 | sac->sac_assoc_id = sctp_assoc2id(asoc); | ||
222 | |||
223 | return event; | ||
224 | |||
225 | fail: | ||
226 | return NULL; | ||
227 | } | ||
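On the receiving side this notification arrives at the front of a recvmsg() payload flagged with MSG_NOTIFICATION, laid out as the struct sctp_assoc_change described by the comments above. A hedged user-space sketch of parsing it, assuming the <netinet/sctp.h> definitions shipped with lksctp-tools (union sctp_notification, SCTP_ASSOC_CHANGE, SCTP_COMM_UP); the buffer here is fabricated rather than read from a socket:

#include <stdio.h>
#include <string.h>
#include <netinet/sctp.h>       /* union sctp_notification, SCTP_ASSOC_CHANGE */

/* Interpret a notification buffer as returned by recvmsg() when the
 * MSG_NOTIFICATION flag is set; only association changes are handled here.
 */
static void handle_notification(const void *buf, size_t len)
{
        const union sctp_notification *sn = buf;

        if (len < sizeof(sn->sn_header))
                return;

        if (sn->sn_header.sn_type == SCTP_ASSOC_CHANGE) {
                const struct sctp_assoc_change *sac = &sn->sn_assoc_change;

                printf("assoc %d: state=%u error=%u streams out/in=%u/%u\n",
                       (int)sac->sac_assoc_id, (unsigned)sac->sac_state,
                       (unsigned)sac->sac_error,
                       (unsigned)sac->sac_outbound_streams,
                       (unsigned)sac->sac_inbound_streams);
        }
}

int main(void)
{
        /* Fabricated buffer for demonstration; a real one comes from recvmsg(). */
        struct sctp_assoc_change sac = {
                .sac_type = SCTP_ASSOC_CHANGE,
                .sac_length = sizeof(sac),
                .sac_state = SCTP_COMM_UP,
                .sac_outbound_streams = 10,
                .sac_inbound_streams = 10,
        };

        handle_notification(&sac, sizeof(sac));
        return 0;
}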
228 | |||
229 | /* Create and initialize an SCTP_PEER_ADDR_CHANGE event. | ||
230 | * | ||
231 | * Socket Extensions for SCTP - draft-01 | ||
232 | * 5.3.1.2 SCTP_PEER_ADDR_CHANGE | ||
233 | * | ||
234 | * When a destination address on a multi-homed peer encounters a change, | ||
235 | * an interface details event is sent. | ||
236 | */ | ||
237 | struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change( | ||
238 | const struct sctp_association *asoc, | ||
239 | const struct sockaddr_storage *aaddr, | ||
240 | int flags, int state, int error, int gfp) | ||
241 | { | ||
242 | struct sctp_ulpevent *event; | ||
243 | struct sctp_paddr_change *spc; | ||
244 | struct sk_buff *skb; | ||
245 | |||
246 | event = sctp_ulpevent_new(sizeof(struct sctp_paddr_change), | ||
247 | MSG_NOTIFICATION, gfp); | ||
248 | if (!event) | ||
249 | goto fail; | ||
250 | |||
251 | skb = sctp_event2skb(event); | ||
252 | spc = (struct sctp_paddr_change *) | ||
253 | skb_put(skb, sizeof(struct sctp_paddr_change)); | ||
254 | |||
255 | /* Sockets API Extensions for SCTP | ||
256 | * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE | ||
257 | * | ||
258 | * spc_type: | ||
259 | * | ||
260 | * It should be SCTP_PEER_ADDR_CHANGE. | ||
261 | */ | ||
262 | spc->spc_type = SCTP_PEER_ADDR_CHANGE; | ||
263 | |||
264 | /* Sockets API Extensions for SCTP | ||
265 | * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE | ||
266 | * | ||
267 | * spc_length: sizeof (__u32) | ||
268 | * | ||
269 | * This field is the total length of the notification data, including | ||
270 | * the notification header. | ||
271 | */ | ||
272 | spc->spc_length = sizeof(struct sctp_paddr_change); | ||
273 | |||
274 | /* Sockets API Extensions for SCTP | ||
275 | * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE | ||
276 | * | ||
277 | * spc_flags: 16 bits (unsigned integer) | ||
278 | * Currently unused. | ||
279 | */ | ||
280 | spc->spc_flags = 0; | ||
281 | |||
282 | /* Sockets API Extensions for SCTP | ||
283 | * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE | ||
284 | * | ||
285 | * spc_state: 32 bits (signed integer) | ||
286 | * | ||
287 | * This field holds one of a number of values that communicate the | ||
288 | * event that happened to the address. | ||
289 | */ | ||
290 | spc->spc_state = state; | ||
291 | |||
292 | /* Sockets API Extensions for SCTP | ||
293 | * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE | ||
294 | * | ||
295 | * spc_error: 32 bits (signed integer) | ||
296 | * | ||
297 | * If the state was reached due to any error condition (e.g. | ||
298 | * ADDRESS_UNREACHABLE) any relevant error information is available in | ||
299 | * this field. | ||
300 | */ | ||
301 | spc->spc_error = error; | ||
302 | |||
303 | /* Sockets API Extensions for SCTP | ||
304 | * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE | ||
305 | * | ||
306 | * spc_assoc_id: sizeof (sctp_assoc_t) | ||
307 | * | ||
308 | * The association id field holds the identifier for the association. | ||
309 | * All notifications for a given association have the same association | ||
310 | * identifier. For TCP style socket, this field is ignored. | ||
311 | */ | ||
312 | sctp_ulpevent_set_owner(event, asoc); | ||
313 | spc->spc_assoc_id = sctp_assoc2id(asoc); | ||
314 | |||
315 | /* Sockets API Extensions for SCTP | ||
316 | * Section 5.3.1.2 SCTP_PEER_ADDR_CHANGE | ||
317 | * | ||
318 | * spc_aaddr: sizeof (struct sockaddr_storage) | ||
319 | * | ||
320 | * The affected address field, holds the remote peer's address that is | ||
321 | * encountering the change of state. | ||
322 | */ | ||
323 | memcpy(&spc->spc_aaddr, aaddr, sizeof(struct sockaddr_storage)); | ||
324 | |||
325 | /* Map ipv4 address into v4-mapped-on-v6 address. */ | ||
326 | sctp_get_pf_specific(asoc->base.sk->sk_family)->addr_v4map( | ||
327 | sctp_sk(asoc->base.sk), | ||
328 | (union sctp_addr *)&spc->spc_aaddr); | ||
329 | |||
330 | return event; | ||
331 | |||
332 | fail: | ||
333 | return NULL; | ||
334 | } | ||
335 | |||
336 | /* Create and initialize an SCTP_REMOTE_ERROR notification. | ||
337 | * | ||
338 | * Note: This assumes that the chunk->skb->data already points to the | ||
339 | * operation error payload. | ||
340 | * | ||
341 | * Socket Extensions for SCTP - draft-01 | ||
342 | * 5.3.1.3 SCTP_REMOTE_ERROR | ||
343 | * | ||
344 | * A remote peer may send an Operational Error message to its peer. | ||
345 | * This message indicates a variety of error conditions on an | ||
346 | * association. The entire error TLV as it appears on the wire is | ||
347 | * included in a SCTP_REMOTE_ERROR event. Please refer to the SCTP | ||
348 | * specification [SCTP] and any extensions for a list of possible | ||
349 | * error formats. | ||
350 | */ | ||
351 | struct sctp_ulpevent *sctp_ulpevent_make_remote_error( | ||
352 | const struct sctp_association *asoc, struct sctp_chunk *chunk, | ||
353 | __u16 flags, int gfp) | ||
354 | { | ||
355 | struct sctp_ulpevent *event; | ||
356 | struct sctp_remote_error *sre; | ||
357 | struct sk_buff *skb; | ||
358 | sctp_errhdr_t *ch; | ||
359 | __u16 cause; | ||
360 | int elen; | ||
361 | |||
362 | ch = (sctp_errhdr_t *)(chunk->skb->data); | ||
363 | cause = ch->cause; | ||
364 | elen = WORD_ROUND(ntohs(ch->length)) - sizeof(sctp_errhdr_t); | ||
365 | |||
366 | /* Pull off the ERROR header. */ | ||
367 | skb_pull(chunk->skb, sizeof(sctp_errhdr_t)); | ||
368 | |||
369 | /* Copy the skb to a new skb with room for us to prepend the | ||
370 | * notification. | ||
371 | */ | ||
372 | skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error), | ||
373 | 0, gfp); | ||
374 | |||
375 | /* Pull off the rest of the cause TLV from the chunk. */ | ||
376 | skb_pull(chunk->skb, elen); | ||
377 | if (!skb) | ||
378 | goto fail; | ||
379 | |||
380 | /* Embed the event fields inside the cloned skb. */ | ||
381 | event = sctp_skb2event(skb); | ||
382 | sctp_ulpevent_init(event, MSG_NOTIFICATION); | ||
383 | |||
384 | sre = (struct sctp_remote_error *) | ||
385 | skb_push(skb, sizeof(struct sctp_remote_error)); | ||
386 | |||
387 | /* Trim the buffer to the right length. */ | ||
388 | skb_trim(skb, sizeof(struct sctp_remote_error) + elen); | ||
389 | |||
390 | /* Socket Extensions for SCTP | ||
391 | * 5.3.1.3 SCTP_REMOTE_ERROR | ||
392 | * | ||
393 | * sre_type: | ||
394 | * It should be SCTP_REMOTE_ERROR. | ||
395 | */ | ||
396 | sre->sre_type = SCTP_REMOTE_ERROR; | ||
397 | |||
398 | /* | ||
399 | * Socket Extensions for SCTP | ||
400 | * 5.3.1.3 SCTP_REMOTE_ERROR | ||
401 | * | ||
402 | * sre_flags: 16 bits (unsigned integer) | ||
403 | * Currently unused. | ||
404 | */ | ||
405 | sre->sre_flags = 0; | ||
406 | |||
407 | /* Socket Extensions for SCTP | ||
408 | * 5.3.1.3 SCTP_REMOTE_ERROR | ||
409 | * | ||
410 | * sre_length: sizeof (__u32) | ||
411 | * | ||
412 | * This field is the total length of the notification data, | ||
413 | * including the notification header. | ||
414 | */ | ||
415 | sre->sre_length = skb->len; | ||
416 | |||
417 | /* Socket Extensions for SCTP | ||
418 | * 5.3.1.3 SCTP_REMOTE_ERROR | ||
419 | * | ||
420 | * sre_error: 16 bits (unsigned integer) | ||
421 | * This value represents one of the Operational Error causes defined in | ||
422 | * the SCTP specification, in network byte order. | ||
423 | */ | ||
424 | sre->sre_error = cause; | ||
425 | |||
426 | /* Socket Extensions for SCTP | ||
427 | * 5.3.1.3 SCTP_REMOTE_ERROR | ||
428 | * | ||
429 | * sre_assoc_id: sizeof (sctp_assoc_t) | ||
430 | * | ||
431 | * The association id field holds the identifier for the association. | ||
432 | * All notifications for a given association have the same association | ||
433 | * identifier. For TCP style socket, this field is ignored. | ||
434 | */ | ||
435 | sctp_ulpevent_set_owner(event, asoc); | ||
436 | sre->sre_assoc_id = sctp_assoc2id(asoc); | ||
437 | |||
438 | return event; | ||
439 | |||
440 | fail: | ||
441 | return NULL; | ||
442 | } | ||
443 | |||
444 | /* Create and initialize a SCTP_SEND_FAILED notification. | ||
445 | * | ||
446 | * Socket Extensions for SCTP - draft-01 | ||
447 | * 5.3.1.4 SCTP_SEND_FAILED | ||
448 | */ | ||
449 | struct sctp_ulpevent *sctp_ulpevent_make_send_failed( | ||
450 | const struct sctp_association *asoc, struct sctp_chunk *chunk, | ||
451 | __u16 flags, __u32 error, int gfp) | ||
452 | { | ||
453 | struct sctp_ulpevent *event; | ||
454 | struct sctp_send_failed *ssf; | ||
455 | struct sk_buff *skb; | ||
456 | |||
457 | /* Pull off any padding. */ | ||
458 | int len = ntohs(chunk->chunk_hdr->length); | ||
459 | |||
460 | /* Make skb with more room so we can prepend notification. */ | ||
461 | skb = skb_copy_expand(chunk->skb, | ||
462 | sizeof(struct sctp_send_failed), /* headroom */ | ||
463 | 0, /* tailroom */ | ||
464 | gfp); | ||
465 | if (!skb) | ||
466 | goto fail; | ||
467 | |||
468 | /* Pull off the common chunk header and DATA header. */ | ||
469 | skb_pull(skb, sizeof(struct sctp_data_chunk)); | ||
470 | len -= sizeof(struct sctp_data_chunk); | ||
471 | |||
472 | /* Embed the event fields inside the cloned skb. */ | ||
473 | event = sctp_skb2event(skb); | ||
474 | sctp_ulpevent_init(event, MSG_NOTIFICATION); | ||
475 | |||
476 | ssf = (struct sctp_send_failed *) | ||
477 | skb_push(skb, sizeof(struct sctp_send_failed)); | ||
478 | |||
479 | /* Socket Extensions for SCTP | ||
480 | * 5.3.1.4 SCTP_SEND_FAILED | ||
481 | * | ||
482 | * ssf_type: | ||
483 | * It should be SCTP_SEND_FAILED. | ||
484 | */ | ||
485 | ssf->ssf_type = SCTP_SEND_FAILED; | ||
486 | |||
487 | /* Socket Extensions for SCTP | ||
488 | * 5.3.1.4 SCTP_SEND_FAILED | ||
489 | * | ||
490 | * ssf_flags: 16 bits (unsigned integer) | ||
491 | * The flag value will take one of the following values | ||
492 | * | ||
493 | * SCTP_DATA_UNSENT - Indicates that the data was never put on | ||
494 | * the wire. | ||
495 | * | ||
496 | * SCTP_DATA_SENT - Indicates that the data was put on the wire. | ||
497 | * Note that this does not necessarily mean that the | ||
498 | * data was (or was not) successfully delivered. | ||
499 | */ | ||
500 | ssf->ssf_flags = flags; | ||
501 | |||
502 | /* Socket Extensions for SCTP | ||
503 | * 5.3.1.4 SCTP_SEND_FAILED | ||
504 | * | ||
505 | * ssf_length: sizeof (__u32) | ||
506 | * This field is the total length of the notification data, including | ||
507 | * the notification header. | ||
508 | */ | ||
509 | ssf->ssf_length = sizeof(struct sctp_send_failed) + len; | ||
510 | skb_trim(skb, ssf->ssf_length); | ||
511 | |||
512 | /* Socket Extensions for SCTP | ||
513 | * 5.3.1.4 SCTP_SEND_FAILED | ||
514 | * | ||
515 | * ssf_error: 16 bits (unsigned integer) | ||
516 | * This value represents the reason why the send failed, and if set, | ||
517 | * will be a SCTP protocol error code as defined in [SCTP] section | ||
518 | * 3.3.10. | ||
519 | */ | ||
520 | ssf->ssf_error = error; | ||
521 | |||
522 | /* Socket Extensions for SCTP | ||
523 | * 5.3.1.4 SCTP_SEND_FAILED | ||
524 | * | ||
525 | * ssf_info: sizeof (struct sctp_sndrcvinfo) | ||
526 | * The original send information associated with the undelivered | ||
527 | * message. | ||
528 | */ | ||
529 | memcpy(&ssf->ssf_info, &chunk->sinfo, sizeof(struct sctp_sndrcvinfo)); | ||
530 | |||
531 | /* Per TSVWG discussion with Randy, allow the application to | ||
532 | * reassemble a fragmented message. | ||
533 | */ | ||
534 | ssf->ssf_info.sinfo_flags = chunk->chunk_hdr->flags; | ||
535 | |||
536 | /* Socket Extensions for SCTP | ||
537 | * 5.3.1.4 SCTP_SEND_FAILED | ||
538 | * | ||
539 | * ssf_assoc_id: sizeof (sctp_assoc_t) | ||
540 | * The association id field, ssf_assoc_id, holds the identifier for the | ||
541 | * association. All notifications for a given association have the | ||
542 | * same association identifier. For TCP style socket, this field is | ||
543 | * ignored. | ||
544 | */ | ||
545 | sctp_ulpevent_set_owner(event, asoc); | ||
546 | ssf->ssf_assoc_id = sctp_assoc2id(asoc); | ||
547 | return event; | ||
548 | |||
549 | fail: | ||
550 | return NULL; | ||
551 | } | ||
552 | |||
553 | /* Create and initialize a SCTP_SHUTDOWN_EVENT notification. | ||
554 | * | ||
555 | * Socket Extensions for SCTP - draft-01 | ||
556 | * 5.3.1.5 SCTP_SHUTDOWN_EVENT | ||
557 | */ | ||
558 | struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event( | ||
559 | const struct sctp_association *asoc, | ||
560 | __u16 flags, int gfp) | ||
561 | { | ||
562 | struct sctp_ulpevent *event; | ||
563 | struct sctp_shutdown_event *sse; | ||
564 | struct sk_buff *skb; | ||
565 | |||
566 | event = sctp_ulpevent_new(sizeof(struct sctp_shutdown_event), | ||
567 | MSG_NOTIFICATION, gfp); | ||
568 | if (!event) | ||
569 | goto fail; | ||
570 | |||
571 | skb = sctp_event2skb(event); | ||
572 | sse = (struct sctp_shutdown_event *) | ||
573 | skb_put(skb, sizeof(struct sctp_shutdown_event)); | ||
574 | |||
575 | /* Socket Extensions for SCTP | ||
576 | * 5.3.1.5 SCTP_SHUTDOWN_EVENT | ||
577 | * | ||
578 | * sse_type | ||
579 | * It should be SCTP_SHUTDOWN_EVENT | ||
580 | */ | ||
581 | sse->sse_type = SCTP_SHUTDOWN_EVENT; | ||
582 | |||
583 | /* Socket Extensions for SCTP | ||
584 | * 5.3.1.5 SCTP_SHUTDOWN_EVENT | ||
585 | * | ||
586 | * sse_flags: 16 bits (unsigned integer) | ||
587 | * Currently unused. | ||
588 | */ | ||
589 | sse->sse_flags = 0; | ||
590 | |||
591 | /* Socket Extensions for SCTP | ||
592 | * 5.3.1.5 SCTP_SHUTDOWN_EVENT | ||
593 | * | ||
594 | * sse_length: sizeof (__u32) | ||
595 | * This field is the total length of the notification data, including | ||
596 | * the notification header. | ||
597 | */ | ||
598 | sse->sse_length = sizeof(struct sctp_shutdown_event); | ||
599 | |||
600 | /* Socket Extensions for SCTP | ||
601 | * 5.3.1.5 SCTP_SHUTDOWN_EVENT | ||
602 | * | ||
603 | * sse_assoc_id: sizeof (sctp_assoc_t) | ||
604 | * The association id field holds the identifier for the association. | ||
605 | * All notifications for a given association have the same association | ||
606 | * identifier. For TCP style socket, this field is ignored. | ||
607 | */ | ||
608 | sctp_ulpevent_set_owner(event, asoc); | ||
609 | sse->sse_assoc_id = sctp_assoc2id(asoc); | ||
610 | |||
611 | return event; | ||
612 | |||
613 | fail: | ||
614 | return NULL; | ||
615 | } | ||
616 | |||
617 | /* Create and initialize a SCTP_ADAPTION_INDICATION notification. | ||
618 | * | ||
619 | * Socket Extensions for SCTP | ||
620 | * 5.3.1.6 SCTP_ADAPTION_INDICATION | ||
621 | */ | ||
622 | struct sctp_ulpevent *sctp_ulpevent_make_adaption_indication( | ||
623 | const struct sctp_association *asoc, int gfp) | ||
624 | { | ||
625 | struct sctp_ulpevent *event; | ||
626 | struct sctp_adaption_event *sai; | ||
627 | struct sk_buff *skb; | ||
628 | |||
629 | event = sctp_ulpevent_new(sizeof(struct sctp_adaption_event), | ||
630 | MSG_NOTIFICATION, gfp); | ||
631 | if (!event) | ||
632 | goto fail; | ||
633 | |||
634 | skb = sctp_event2skb(event); | ||
635 | sai = (struct sctp_adaption_event *) | ||
636 | skb_put(skb, sizeof(struct sctp_adaption_event)); | ||
637 | |||
638 | sai->sai_type = SCTP_ADAPTION_INDICATION; | ||
639 | sai->sai_flags = 0; | ||
640 | sai->sai_length = sizeof(struct sctp_adaption_event); | ||
641 | sai->sai_adaption_ind = asoc->peer.adaption_ind; | ||
642 | sctp_ulpevent_set_owner(event, asoc); | ||
643 | sai->sai_assoc_id = sctp_assoc2id(asoc); | ||
644 | |||
645 | return event; | ||
646 | |||
647 | fail: | ||
648 | return NULL; | ||
649 | } | ||
650 | |||
651 | /* A message has been received. Package this message as a notification | ||
652 | * to pass it to the upper layers. Go ahead and calculate the sndrcvinfo | ||
653 | * even if filtered out later. | ||
654 | * | ||
655 | * Socket Extensions for SCTP | ||
656 | * 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV) | ||
657 | */ | ||
658 | struct sctp_ulpevent *sctp_ulpevent_make_rcvmsg(struct sctp_association *asoc, | ||
659 | struct sctp_chunk *chunk, | ||
660 | int gfp) | ||
661 | { | ||
662 | struct sctp_ulpevent *event = NULL; | ||
663 | struct sk_buff *skb; | ||
664 | size_t padding, len; | ||
665 | |||
666 | /* Clone the original skb, sharing the data. */ | ||
667 | skb = skb_clone(chunk->skb, gfp); | ||
668 | if (!skb) | ||
669 | goto fail; | ||
670 | |||
671 | /* First calculate the padding, so we don't inadvertently | ||
672 | * pass up the wrong length to the user. | ||
673 | * | ||
674 | * RFC 2960 - Section 3.2 Chunk Field Descriptions | ||
675 | * | ||
676 | * The total length of a chunk (including Type, Length and Value fields) | ||
677 | * MUST be a multiple of 4 bytes. If the length of the chunk is not a | ||
678 | * multiple of 4 bytes, the sender MUST pad the chunk with all zero | ||
679 | * bytes and this padding is not included in the chunk length field. | ||
680 | * The sender should never pad with more than 3 bytes. The receiver | ||
681 | * MUST ignore the padding bytes. | ||
682 | */ | ||
683 | len = ntohs(chunk->chunk_hdr->length); | ||
684 | padding = WORD_ROUND(len) - len; | ||
685 | |||
686 | /* Fix up the cloned skb with just this chunk's data. */ | ||
687 | skb_trim(skb, chunk->chunk_end - padding - skb->data); | ||
688 | |||
689 | /* Embed the event fields inside the cloned skb. */ | ||
690 | event = sctp_skb2event(skb); | ||
691 | |||
692 | /* Initialize event with flags 0. */ | ||
693 | sctp_ulpevent_init(event, 0); | ||
694 | |||
695 | sctp_ulpevent_receive_data(event, asoc); | ||
696 | |||
697 | event->stream = ntohs(chunk->subh.data_hdr->stream); | ||
698 | event->ssn = ntohs(chunk->subh.data_hdr->ssn); | ||
699 | event->ppid = chunk->subh.data_hdr->ppid; | ||
700 | if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) { | ||
701 | event->flags |= MSG_UNORDERED; | ||
702 | event->cumtsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); | ||
703 | } | ||
704 | event->tsn = ntohl(chunk->subh.data_hdr->tsn); | ||
705 | event->msg_flags |= chunk->chunk_hdr->flags; | ||
706 | event->iif = sctp_chunk_iif(chunk); | ||
707 | |||
708 | fail: | ||
709 | return event; | ||
710 | } | ||
711 | |||
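The padding arithmetic above is what keeps wire padding from reaching the user. A standalone sketch of the same 4-byte rounding, assuming WORD_ROUND(len) is the usual ((len + 3) & ~3); purely illustrative, not kernel code:

#include <assert.h>
#include <stdint.h>

/* Assumed equivalent of the kernel's WORD_ROUND(): round a chunk length up
 * to the next 4-byte boundary.
 */
static uint16_t word_round(uint16_t len)
{
	return (uint16_t)((len + 3) & ~3);
}

int main(void)
{
	/* A DATA chunk carrying 5 payload bytes: 16-byte chunk+data header
	 * plus 5 gives 21, padded on the wire to 24; the 3 pad bytes must
	 * not be passed up to the user.
	 */
	uint16_t len = 21;
	uint16_t padding = (uint16_t)(word_round(len) - len);

	assert(padding == 3);
	return 0;
}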
712 | /* Create a partial delivery related event. | ||
713 | * | ||
714 | * 5.3.1.7 SCTP_PARTIAL_DELIVERY_EVENT | ||
715 | * | ||
716 | * When a receiver is engaged in a partial delivery of a | ||
717 | * message this notification will be used to indicate | ||
718 | * various events. | ||
719 | */ | ||
720 | struct sctp_ulpevent *sctp_ulpevent_make_pdapi( | ||
721 | const struct sctp_association *asoc, __u32 indication, int gfp) | ||
722 | { | ||
723 | struct sctp_ulpevent *event; | ||
724 | struct sctp_pdapi_event *pd; | ||
725 | struct sk_buff *skb; | ||
726 | |||
727 | event = sctp_ulpevent_new(sizeof(struct sctp_pdapi_event), | ||
728 | MSG_NOTIFICATION, gfp); | ||
729 | if (!event) | ||
730 | goto fail; | ||
731 | |||
732 | skb = sctp_event2skb(event); | ||
733 | pd = (struct sctp_pdapi_event *) | ||
734 | skb_put(skb, sizeof(struct sctp_pdapi_event)); | ||
735 | |||
736 | /* pdapi_type | ||
737 | * It should be SCTP_PARTIAL_DELIVERY_EVENT | ||
738 | * | ||
739 | * pdapi_flags: 16 bits (unsigned integer) | ||
740 | * Currently unused. | ||
741 | */ | ||
742 | pd->pdapi_type = SCTP_PARTIAL_DELIVERY_EVENT; | ||
743 | pd->pdapi_flags = 0; | ||
744 | |||
745 | /* pdapi_length: 32 bits (unsigned integer) | ||
746 | * | ||
747 | * This field is the total length of the notification data, including | ||
748 | * the notification header. It will generally be sizeof (struct | ||
749 | * sctp_pdapi_event). | ||
750 | */ | ||
751 | pd->pdapi_length = sizeof(struct sctp_pdapi_event); | ||
752 | |||
753 | /* pdapi_indication: 32 bits (unsigned integer) | ||
754 | * | ||
755 | * This field holds the indication being sent to the application. | ||
756 | */ | ||
757 | pd->pdapi_indication = indication; | ||
758 | |||
759 | /* pdapi_assoc_id: sizeof (sctp_assoc_t) | ||
760 | * | ||
761 | * The association id field holds the identifier for the association. | ||
762 | */ | ||
763 | sctp_ulpevent_set_owner(event, asoc); | ||
764 | pd->pdapi_assoc_id = sctp_assoc2id(asoc); | ||
765 | |||
766 | return event; | ||
767 | fail: | ||
768 | return NULL; | ||
769 | } | ||
770 | |||
771 | /* Return the notification type, assuming this is a notification | ||
772 | * event. | ||
773 | */ | ||
774 | __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event) | ||
775 | { | ||
776 | union sctp_notification *notification; | ||
777 | struct sk_buff *skb; | ||
778 | |||
779 | skb = sctp_event2skb((struct sctp_ulpevent *)event); | ||
780 | notification = (union sctp_notification *) skb->data; | ||
781 | return notification->sn_header.sn_type; | ||
782 | } | ||
783 | |||
784 | /* Copy out the sndrcvinfo into a msghdr. */ | ||
785 | void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, | ||
786 | struct msghdr *msghdr) | ||
787 | { | ||
788 | struct sctp_sndrcvinfo sinfo; | ||
789 | |||
790 | if (sctp_ulpevent_is_notification(event)) | ||
791 | return; | ||
792 | |||
793 | /* Sockets API Extensions for SCTP | ||
794 | * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV) | ||
795 | * | ||
796 | * sinfo_stream: 16 bits (unsigned integer) | ||
797 | * | ||
798 | * For recvmsg() the SCTP stack places the message's stream number in | ||
799 | * this value. | ||
800 | */ | ||
801 | sinfo.sinfo_stream = event->stream; | ||
802 | /* sinfo_ssn: 16 bits (unsigned integer) | ||
803 | * | ||
804 | * For recvmsg() this value contains the stream sequence number that | ||
805 | * the remote endpoint placed in the DATA chunk. For fragmented | ||
806 | * messages this is the same number for all deliveries of the message | ||
807 | * (if more than one recvmsg() is needed to read the message). | ||
808 | */ | ||
809 | sinfo.sinfo_ssn = event->ssn; | ||
810 | /* sinfo_ppid: 32 bits (unsigned integer) | ||
811 | * | ||
812 | * In recvmsg() this value is | ||
813 | * the same information that was passed by the upper layer in the peer | ||
814 | * application. Please note that byte order issues are NOT accounted | ||
815 | * for and this information is passed opaquely by the SCTP stack from | ||
816 | * one end to the other. | ||
817 | */ | ||
818 | sinfo.sinfo_ppid = event->ppid; | ||
819 | /* sinfo_flags: 16 bits (unsigned integer) | ||
820 | * | ||
821 | * This field may contain any of the following flags and is composed of | ||
822 | * a bitwise OR of these values. | ||
823 | * | ||
824 | * recvmsg() flags: | ||
825 | * | ||
826 | * MSG_UNORDERED - This flag is present when the message was sent | ||
827 | * non-ordered. | ||
828 | */ | ||
829 | sinfo.sinfo_flags = event->flags; | ||
830 | /* sinfo_tsn: 32 bit (unsigned integer) | ||
831 | * | ||
832 | * For the receiving side, this field holds a TSN that was | ||
833 | * assigned to one of the SCTP Data Chunks. | ||
834 | */ | ||
835 | sinfo.sinfo_tsn = event->tsn; | ||
836 | /* sinfo_cumtsn: 32 bit (unsigned integer) | ||
837 | * | ||
838 | * This field will hold the current cumulative TSN as | ||
839 | * known by the underlying SCTP layer. Note this field is | ||
840 | * ignored when sending and only valid for a receive | ||
841 | * operation when sinfo_flags are set to MSG_UNORDERED. | ||
842 | */ | ||
843 | sinfo.sinfo_cumtsn = event->cumtsn; | ||
844 | /* sinfo_assoc_id: sizeof (sctp_assoc_t) | ||
845 | * | ||
846 | * The association handle field, sinfo_assoc_id, holds the identifier | ||
847 | * for the association announced in the COMMUNICATION_UP notification. | ||
848 | * All notifications for a given association have the same identifier. | ||
849 | * Ignored for one-to-one style sockets. | ||
850 | */ | ||
851 | sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc); | ||
852 | |||
853 | /* These fields are not used while receiving. */ | ||
854 | sinfo.sinfo_context = 0; | ||
855 | sinfo.sinfo_timetolive = 0; | ||
856 | |||
857 | put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV, | ||
858 | sizeof(struct sctp_sndrcvinfo), (void *)&sinfo); | ||
859 | } | ||
860 | |||
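On the receiving side, the cmsg written by put_cmsg() above is walked with the standard CMSG macros. A hedged userspace sketch (assumes <netinet/sctp.h>; the helper name is hypothetical):

#include <string.h>
#include <sys/socket.h>
#include <netinet/sctp.h>

/* Find the SCTP_SNDRCV ancillary data in a msghdr filled by recvmsg() and
 * copy out the sctp_sndrcvinfo. Returns 0 on success, -1 if absent.
 * Illustrative userspace code, not part of the kernel source.
 */
static int get_sndrcvinfo(struct msghdr *msg, struct sctp_sndrcvinfo *out)
{
	struct cmsghdr *cmsg;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (cmsg->cmsg_level == IPPROTO_SCTP &&
		    cmsg->cmsg_type == SCTP_SNDRCV) {
			memcpy(out, CMSG_DATA(cmsg), sizeof(*out));
			return 0;
		}
	}
	return -1;
}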
861 | /* Do accounting for bytes received and hold a reference to the association | ||
862 | * for each skb. | ||
863 | */ | ||
864 | static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event, | ||
865 | struct sctp_association *asoc) | ||
866 | { | ||
867 | struct sk_buff *skb, *frag; | ||
868 | |||
869 | skb = sctp_event2skb(event); | ||
870 | /* Set the owner and charge rwnd for bytes received. */ | ||
871 | sctp_ulpevent_set_owner(event, asoc); | ||
872 | sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb)); | ||
873 | |||
874 | if (!skb->data_len) | ||
875 | return; | ||
876 | |||
877 | /* Note: Not clearing the entire event struct as this is just a | ||
878 | * fragment of the real event. However, we still need to do rwnd | ||
879 | * accounting. | ||
880 | * In general, the skb passed from IP can have only 1 level of | ||
881 | * fragments. But we allow multiple levels of fragments. | ||
882 | */ | ||
883 | for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) { | ||
884 | sctp_ulpevent_receive_data(sctp_skb2event(frag), asoc); | ||
885 | } | ||
886 | } | ||
887 | |||
888 | /* Do accounting for bytes just read by user and release the references to | ||
889 | * the association. | ||
890 | */ | ||
891 | static void sctp_ulpevent_release_data(struct sctp_ulpevent *event) | ||
892 | { | ||
893 | struct sk_buff *skb, *frag; | ||
894 | |||
895 | /* Current stack structures assume that the rcv buffer is | ||
896 | * per socket. For UDP style sockets this is not true as | ||
897 | * multiple associations may be on a single UDP-style socket. | ||
898 | * Use the local private area of the skb to track the owning | ||
899 | * association. | ||
900 | */ | ||
901 | |||
902 | skb = sctp_event2skb(event); | ||
903 | sctp_assoc_rwnd_increase(event->asoc, skb_headlen(skb)); | ||
904 | |||
905 | if (!skb->data_len) | ||
906 | goto done; | ||
907 | |||
908 | /* Don't forget the fragments. */ | ||
909 | for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) { | ||
910 | /* NOTE: skb_shinfos are recursive. Although IP returns | ||
911 | * skb's with only 1 level of fragments, SCTP reassembly can | ||
912 | * increase the levels. | ||
913 | */ | ||
914 | sctp_ulpevent_release_data(sctp_skb2event(frag)); | ||
915 | } | ||
916 | |||
917 | done: | ||
918 | sctp_ulpevent_release_owner(event); | ||
919 | } | ||
920 | |||
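sctp_ulpevent_receive_data() charges the association's rwnd for each fragment's headlen, and sctp_ulpevent_release_data() credits it back once the user has read the data. A toy sketch of that invariant with a plain counter (purely illustrative; the real accounting is per association and handled by sctp_assoc_rwnd_decrease/increase):

#include <assert.h>
#include <stdint.h>

struct rwnd {
	uint32_t bytes;		/* currently advertised receive window */
};

/* Data queued toward the user shrinks the window... */
static void rwnd_charge(struct rwnd *w, uint32_t len)
{
	w->bytes = (len > w->bytes) ? 0 : w->bytes - len;
}

/* ...and reading it reopens the window. */
static void rwnd_credit(struct rwnd *w, uint32_t len)
{
	w->bytes += len;
}

int main(void)
{
	struct rwnd w = { .bytes = 65535 };

	rwnd_charge(&w, 1500);	/* event queued, not yet read */
	rwnd_credit(&w, 1500);	/* user read it; window reopens */
	assert(w.bytes == 65535);
	return 0;
}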
921 | /* Free a ulpevent that has an owner. It includes releasing the reference | ||
922 | * to the owner, updating the rwnd in case of a DATA event and freeing the | ||
923 | * skb. | ||
924 | * See comments in sctp_stub_rfree(). | ||
925 | */ | ||
926 | void sctp_ulpevent_free(struct sctp_ulpevent *event) | ||
927 | { | ||
928 | if (sctp_ulpevent_is_notification(event)) | ||
929 | sctp_ulpevent_release_owner(event); | ||
930 | else | ||
931 | sctp_ulpevent_release_data(event); | ||
932 | |||
933 | kfree_skb(sctp_event2skb(event)); | ||
934 | } | ||
935 | |||
936 | /* Purge the skb lists holding ulpevents. */ | ||
937 | void sctp_queue_purge_ulpevents(struct sk_buff_head *list) | ||
938 | { | ||
939 | struct sk_buff *skb; | ||
940 | while ((skb = skb_dequeue(list)) != NULL) | ||
941 | sctp_ulpevent_free(sctp_skb2event(skb)); | ||
942 | } | ||
diff --git a/net/sctp/ulpqueue.c b/net/sctp/ulpqueue.c new file mode 100644 index 000000000000..d5dd2cf7ac4a --- /dev/null +++ b/net/sctp/ulpqueue.c | |||
@@ -0,0 +1,864 @@ | |||
1 | /* SCTP kernel reference Implementation | ||
2 | * (C) Copyright IBM Corp. 2001, 2004 | ||
3 | * Copyright (c) 1999-2000 Cisco, Inc. | ||
4 | * Copyright (c) 1999-2001 Motorola, Inc. | ||
5 | * Copyright (c) 2001 Intel Corp. | ||
6 | * Copyright (c) 2001 Nokia, Inc. | ||
7 | * Copyright (c) 2001 La Monte H.P. Yarroll | ||
8 | * | ||
9 | * This abstraction carries sctp events to the ULP (sockets). | ||
10 | * | ||
11 | * The SCTP reference implementation is free software; | ||
12 | * you can redistribute it and/or modify it under the terms of | ||
13 | * the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2, or (at your option) | ||
15 | * any later version. | ||
16 | * | ||
17 | * The SCTP reference implementation is distributed in the hope that it | ||
18 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
19 | * ************************ | ||
20 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
21 | * See the GNU General Public License for more details. | ||
22 | * | ||
23 | * You should have received a copy of the GNU General Public License | ||
24 | * along with GNU CC; see the file COPYING. If not, write to | ||
25 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
26 | * Boston, MA 02111-1307, USA. | ||
27 | * | ||
28 | * Please send any bug reports or fixes you make to the | ||
29 | * email address(es): | ||
30 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
31 | * | ||
32 | * Or submit a bug report through the following website: | ||
33 | * http://www.sf.net/projects/lksctp | ||
34 | * | ||
35 | * Written or modified by: | ||
36 | * Jon Grimm <jgrimm@us.ibm.com> | ||
37 | * La Monte H.P. Yarroll <piggy@acm.org> | ||
38 | * Sridhar Samudrala <sri@us.ibm.com> | ||
39 | * | ||
40 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
41 | * be incorporated into the next SCTP release. | ||
42 | */ | ||
43 | |||
44 | #include <linux/types.h> | ||
45 | #include <linux/skbuff.h> | ||
46 | #include <net/sock.h> | ||
47 | #include <net/sctp/structs.h> | ||
48 | #include <net/sctp/sctp.h> | ||
49 | #include <net/sctp/sm.h> | ||
50 | |||
51 | /* Forward declarations for internal helpers. */ | ||
52 | static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq, | ||
53 | struct sctp_ulpevent *); | ||
54 | static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *, | ||
55 | struct sctp_ulpevent *); | ||
56 | |||
57 | /* 1st Level Abstractions */ | ||
58 | |||
59 | /* Initialize a ULP queue from a block of memory. */ | ||
60 | struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq, | ||
61 | struct sctp_association *asoc) | ||
62 | { | ||
63 | memset(ulpq, 0, sizeof(struct sctp_ulpq)); | ||
64 | |||
65 | ulpq->asoc = asoc; | ||
66 | skb_queue_head_init(&ulpq->reasm); | ||
67 | skb_queue_head_init(&ulpq->lobby); | ||
68 | ulpq->pd_mode = 0; | ||
69 | ulpq->malloced = 0; | ||
70 | |||
71 | return ulpq; | ||
72 | } | ||
73 | |||
74 | |||
75 | /* Flush the reassembly and ordering queues. */ | ||
76 | static void sctp_ulpq_flush(struct sctp_ulpq *ulpq) | ||
77 | { | ||
78 | struct sk_buff *skb; | ||
79 | struct sctp_ulpevent *event; | ||
80 | |||
81 | while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) { | ||
82 | event = sctp_skb2event(skb); | ||
83 | sctp_ulpevent_free(event); | ||
84 | } | ||
85 | |||
86 | while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) { | ||
87 | event = sctp_skb2event(skb); | ||
88 | sctp_ulpevent_free(event); | ||
89 | } | ||
90 | |||
91 | } | ||
92 | |||
93 | /* Dispose of a ulpqueue. */ | ||
94 | void sctp_ulpq_free(struct sctp_ulpq *ulpq) | ||
95 | { | ||
96 | sctp_ulpq_flush(ulpq); | ||
97 | if (ulpq->malloced) | ||
98 | kfree(ulpq); | ||
99 | } | ||
100 | |||
101 | /* Process an incoming DATA chunk. */ | ||
102 | int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, | ||
103 | int gfp) | ||
104 | { | ||
105 | struct sk_buff_head temp; | ||
106 | sctp_data_chunk_t *hdr; | ||
107 | struct sctp_ulpevent *event; | ||
108 | |||
109 | hdr = (sctp_data_chunk_t *) chunk->chunk_hdr; | ||
110 | |||
111 | /* Create an event from the incoming chunk. */ | ||
112 | event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp); | ||
113 | if (!event) | ||
114 | return -ENOMEM; | ||
115 | |||
116 | /* Do reassembly if needed. */ | ||
117 | event = sctp_ulpq_reasm(ulpq, event); | ||
118 | |||
119 | /* Do ordering if needed. */ | ||
120 | if ((event) && (event->msg_flags & MSG_EOR)){ | ||
121 | /* Create a temporary list to collect chunks on. */ | ||
122 | skb_queue_head_init(&temp); | ||
123 | __skb_queue_tail(&temp, sctp_event2skb(event)); | ||
124 | |||
125 | event = sctp_ulpq_order(ulpq, event); | ||
126 | } | ||
127 | |||
128 | /* Send event to the ULP. */ | ||
129 | if (event) | ||
130 | sctp_ulpq_tail_event(ulpq, event); | ||
131 | |||
132 | return 0; | ||
133 | } | ||
134 | |||
135 | /* Add a new event for propagation to the ULP. */ | ||
136 | /* Clear the partial delivery mode for this socket. Note: This | ||
137 | * assumes that no association is currently in partial delivery mode. | ||
138 | */ | ||
139 | int sctp_clear_pd(struct sock *sk) | ||
140 | { | ||
141 | struct sctp_sock *sp = sctp_sk(sk); | ||
142 | |||
143 | sp->pd_mode = 0; | ||
144 | if (!skb_queue_empty(&sp->pd_lobby)) { | ||
145 | struct list_head *list; | ||
146 | sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue); | ||
147 | list = (struct list_head *)&sctp_sk(sk)->pd_lobby; | ||
148 | INIT_LIST_HEAD(list); | ||
149 | return 1; | ||
150 | } | ||
151 | return 0; | ||
152 | } | ||
153 | |||
154 | /* Clear the pd_mode and restart any pending messages waiting for delivery. */ | ||
155 | static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq) | ||
156 | { | ||
157 | ulpq->pd_mode = 0; | ||
158 | return sctp_clear_pd(ulpq->asoc->base.sk); | ||
159 | } | ||
160 | |||
161 | |||
162 | /* Add a new event for propagation to the ULP. */ | ||
163 | int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event) | ||
164 | { | ||
165 | struct sock *sk = ulpq->asoc->base.sk; | ||
166 | struct sk_buff_head *queue; | ||
167 | int clear_pd = 0; | ||
168 | |||
169 | /* If the socket is just going to throw this away, do not | ||
170 | * even try to deliver it. | ||
171 | */ | ||
172 | if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN)) | ||
173 | goto out_free; | ||
174 | |||
175 | /* Check if the user wishes to receive this event. */ | ||
176 | if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe)) | ||
177 | goto out_free; | ||
178 | |||
179 | /* If we are in partial delivery mode, post to the lobby until | ||
180 | * partial delivery is cleared, unless, of course, _this_ | ||
181 | * association is the cause of the partial delivery. | ||
182 | */ | ||
183 | |||
184 | if (!sctp_sk(sk)->pd_mode) { | ||
185 | queue = &sk->sk_receive_queue; | ||
186 | } else if (ulpq->pd_mode) { | ||
187 | if (event->msg_flags & MSG_NOTIFICATION) | ||
188 | queue = &sctp_sk(sk)->pd_lobby; | ||
189 | else { | ||
190 | clear_pd = event->msg_flags & MSG_EOR; | ||
191 | queue = &sk->sk_receive_queue; | ||
192 | } | ||
193 | } else | ||
194 | queue = &sctp_sk(sk)->pd_lobby; | ||
195 | |||
196 | |||
197 | /* If we are harvesting multiple skbs they will be | ||
198 | * collected on a list. | ||
199 | */ | ||
200 | if (sctp_event2skb(event)->list) | ||
201 | sctp_skb_list_tail(sctp_event2skb(event)->list, queue); | ||
202 | else | ||
203 | __skb_queue_tail(queue, sctp_event2skb(event)); | ||
204 | |||
205 | /* Did we just complete partial delivery and need to get | ||
206 | * rolling again? Move pending data to the receive | ||
207 | * queue. | ||
208 | */ | ||
209 | if (clear_pd) | ||
210 | sctp_ulpq_clear_pd(ulpq); | ||
211 | |||
212 | if (queue == &sk->sk_receive_queue) | ||
213 | sk->sk_data_ready(sk, 0); | ||
214 | return 1; | ||
215 | |||
216 | out_free: | ||
217 | if (sctp_event2skb(event)->list) | ||
218 | sctp_queue_purge_ulpevents(sctp_event2skb(event)->list); | ||
219 | else | ||
220 | sctp_ulpevent_free(event); | ||
221 | return 0; | ||
222 | } | ||
223 | |||
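The routing above depends on two flags: the socket-wide pd_mode and this association's own pd_mode. A minimal standalone sketch of just that decision, with hypothetical names standing in for the real structures:

#include <stdbool.h>

enum delivery_queue { RECEIVE_QUEUE, PD_LOBBY };

/* Mirror of the queue selection in sctp_ulpq_tail_event(): while another
 * association holds the socket in partial delivery, everything is parked in
 * the lobby; the association doing partial delivery still delivers DATA
 * (notifications wait) and leaves PD mode once it queues a MSG_EOR event.
 */
static enum delivery_queue pick_queue(bool sock_pd_mode, bool ulpq_pd_mode,
				      bool is_notification, bool is_eor,
				      bool *clear_pd)
{
	*clear_pd = false;

	if (!sock_pd_mode)
		return RECEIVE_QUEUE;

	if (ulpq_pd_mode) {
		if (is_notification)
			return PD_LOBBY;
		*clear_pd = is_eor;
		return RECEIVE_QUEUE;
	}

	return PD_LOBBY;
}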
224 | /* 2nd Level Abstractions */ | ||
225 | |||
226 | /* Helper function to store chunks that need to be reassembled. */ | ||
227 | static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq, | ||
228 | struct sctp_ulpevent *event) | ||
229 | { | ||
230 | struct sk_buff *pos; | ||
231 | struct sctp_ulpevent *cevent; | ||
232 | __u32 tsn, ctsn; | ||
233 | |||
234 | tsn = event->tsn; | ||
235 | |||
236 | /* See if it belongs at the end. */ | ||
237 | pos = skb_peek_tail(&ulpq->reasm); | ||
238 | if (!pos) { | ||
239 | __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); | ||
240 | return; | ||
241 | } | ||
242 | |||
243 | /* Short circuit: if it sorts after the tail, just append it. */ | ||
244 | cevent = sctp_skb2event(pos); | ||
245 | ctsn = cevent->tsn; | ||
246 | if (TSN_lt(ctsn, tsn)) { | ||
247 | __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event)); | ||
248 | return; | ||
249 | } | ||
250 | |||
251 | /* Find the right place in this list. We store them by TSN. */ | ||
252 | skb_queue_walk(&ulpq->reasm, pos) { | ||
253 | cevent = sctp_skb2event(pos); | ||
254 | ctsn = cevent->tsn; | ||
255 | |||
256 | if (TSN_lt(tsn, ctsn)) | ||
257 | break; | ||
258 | } | ||
259 | |||
260 | /* Insert before pos. */ | ||
261 | __skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->reasm); | ||
262 | |||
263 | } | ||
264 | |||
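TSN_lt() used above is serial-number arithmetic: TSNs wrap at 2^32, so ordering is decided by the sign of the wrapped difference. A minimal sketch, assuming TSN_lt() follows that usual definition (illustrative only):

#include <assert.h>
#include <stdint.h>

/* 'a' precedes 'b' in TSN space if the (wrapping) difference is negative;
 * assumed equivalent of the kernel's TSN_lt().
 */
static int tsn_lt(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	assert(tsn_lt(5, 6));
	/* The comparison still works across the 2^32 wrap. */
	assert(tsn_lt(0xfffffffeu, 1));
	return 0;
}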
265 | /* Helper function to return an event corresponding to the reassembled | ||
266 | * datagram. | ||
268 | * This routine creates a reassembled skb given the first and last skbs | ||
269 | * as stored in the reassembly queue. The skbs may be non-linear if the | ||
270 | * SCTP payload was fragmented on the way and IP had to reassemble them. | ||
271 | * We add the rest of the skbs to the first skb's frag_list. | ||
271 | */ | ||
272 | static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag, struct sk_buff *l_frag) | ||
273 | { | ||
274 | struct sk_buff *pos; | ||
275 | struct sctp_ulpevent *event; | ||
276 | struct sk_buff *pnext, *last; | ||
277 | struct sk_buff *list = skb_shinfo(f_frag)->frag_list; | ||
278 | |||
279 | /* Store the pointer to the 2nd skb */ | ||
280 | if (f_frag == l_frag) | ||
281 | pos = NULL; | ||
282 | else | ||
283 | pos = f_frag->next; | ||
284 | |||
285 | /* Get the last skb in the f_frag's frag_list if present. */ | ||
286 | for (last = list; list; last = list, list = list->next); | ||
287 | |||
289 | /* Add the list of remaining fragments to the first fragment's | ||
289 | * frag_list. | ||
290 | */ | ||
291 | if (last) | ||
292 | last->next = pos; | ||
293 | else | ||
294 | skb_shinfo(f_frag)->frag_list = pos; | ||
295 | |||
296 | /* Remove the first fragment from the reassembly queue. */ | ||
297 | __skb_unlink(f_frag, f_frag->list); | ||
298 | while (pos) { | ||
299 | |||
300 | pnext = pos->next; | ||
301 | |||
302 | /* Update the len and data_len fields of the first fragment. */ | ||
303 | f_frag->len += pos->len; | ||
304 | f_frag->data_len += pos->len; | ||
305 | |||
306 | /* Remove the fragment from the reassembly queue. */ | ||
307 | __skb_unlink(pos, pos->list); | ||
308 | |||
309 | /* Break if we have reached the last fragment. */ | ||
310 | if (pos == l_frag) | ||
311 | break; | ||
312 | pos->next = pnext; | ||
313 | pos = pnext; | ||
314 | } | ||
315 | |||
316 | event = sctp_skb2event(f_frag); | ||
317 | SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS); | ||
318 | |||
319 | return event; | ||
320 | } | ||
321 | |||
322 | |||
323 | /* Helper function to check if an incoming chunk has filled up the last | ||
324 | * missing fragment in a SCTP datagram and return the corresponding event. | ||
325 | */ | ||
326 | static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq) | ||
327 | { | ||
328 | struct sk_buff *pos; | ||
329 | struct sctp_ulpevent *cevent; | ||
330 | struct sk_buff *first_frag = NULL; | ||
331 | __u32 ctsn, next_tsn; | ||
332 | struct sctp_ulpevent *retval = NULL; | ||
333 | |||
334 | /* Initialized to 0 just to avoid compiler warning message. Will | ||
335 | * never be used with this value. It is referenced only after it | ||
336 | * is set when we find the first fragment of a message. | ||
337 | */ | ||
338 | next_tsn = 0; | ||
339 | |||
340 | /* The chunks are held in the reasm queue sorted by TSN. | ||
341 | * Walk through the queue sequentially and look for a sequence of | ||
342 | * fragmented chunks that complete a datagram. | ||
343 | * 'first_frag' and next_tsn are reset when we find a chunk which | ||
344 | * is the first fragment of a datagram. Once these 2 fields are set | ||
345 | * we expect to find the remaining middle fragments and the last | ||
346 | * fragment in order. If not, first_frag is reset to NULL and we | ||
347 | * start the next pass when we find another first fragment. | ||
348 | */ | ||
349 | skb_queue_walk(&ulpq->reasm, pos) { | ||
350 | cevent = sctp_skb2event(pos); | ||
351 | ctsn = cevent->tsn; | ||
352 | |||
353 | switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { | ||
354 | case SCTP_DATA_FIRST_FRAG: | ||
355 | first_frag = pos; | ||
356 | next_tsn = ctsn + 1; | ||
357 | break; | ||
358 | |||
359 | case SCTP_DATA_MIDDLE_FRAG: | ||
360 | if ((first_frag) && (ctsn == next_tsn)) | ||
361 | next_tsn++; | ||
362 | else | ||
363 | first_frag = NULL; | ||
364 | break; | ||
365 | |||
366 | case SCTP_DATA_LAST_FRAG: | ||
367 | if (first_frag && (ctsn == next_tsn)) | ||
368 | goto found; | ||
369 | else | ||
370 | first_frag = NULL; | ||
371 | break; | ||
372 | } | ||
373 | |||
374 | } | ||
375 | done: | ||
376 | return retval; | ||
377 | found: | ||
378 | retval = sctp_make_reassembled_event(first_frag, pos); | ||
379 | if (retval) | ||
380 | retval->msg_flags |= MSG_EOR; | ||
381 | goto done; | ||
382 | } | ||
383 | |||
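The walk above looks for a FIRST fragment, MIDDLE fragments with consecutive TSNs, and a LAST fragment. A self-contained sketch of the same detection over a TSN-sorted array; the enum values are illustrative stand-ins for the SCTP_DATA_*_FRAG flags:

#include <stddef.h>
#include <stdint.h>

enum frag_type { FIRST_FRAG, MIDDLE_FRAG, LAST_FRAG };

struct frag_ev {
	uint32_t tsn;
	enum frag_type type;
};

/* Return the index of the LAST fragment completing a FIRST..LAST run with
 * consecutive TSNs, or -1 if no complete message is present. 'evs' is
 * assumed sorted by TSN, as the reasm queue is.
 */
static int find_complete(const struct frag_ev *evs, size_t n)
{
	int have_first = 0;
	uint32_t next_tsn = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		switch (evs[i].type) {
		case FIRST_FRAG:
			have_first = 1;
			next_tsn = evs[i].tsn + 1;
			break;
		case MIDDLE_FRAG:
			if (have_first && evs[i].tsn == next_tsn)
				next_tsn++;
			else
				have_first = 0;
			break;
		case LAST_FRAG:
			if (have_first && evs[i].tsn == next_tsn)
				return (int)i;
			have_first = 0;
			break;
		}
	}
	return -1;
}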
384 | /* Retrieve the next set of fragments of a partial message. */ | ||
385 | static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq) | ||
386 | { | ||
387 | struct sk_buff *pos, *last_frag, *first_frag; | ||
388 | struct sctp_ulpevent *cevent; | ||
389 | __u32 ctsn, next_tsn; | ||
390 | int is_last; | ||
391 | struct sctp_ulpevent *retval; | ||
392 | |||
393 | /* The chunks are held in the reasm queue sorted by TSN. | ||
394 | * Walk through the queue sequentially and look for the first | ||
395 | * sequence of fragmented chunks. | ||
396 | */ | ||
397 | |||
398 | if (skb_queue_empty(&ulpq->reasm)) | ||
399 | return NULL; | ||
400 | |||
401 | last_frag = first_frag = NULL; | ||
402 | retval = NULL; | ||
403 | next_tsn = 0; | ||
404 | is_last = 0; | ||
405 | |||
406 | skb_queue_walk(&ulpq->reasm, pos) { | ||
407 | cevent = sctp_skb2event(pos); | ||
408 | ctsn = cevent->tsn; | ||
409 | |||
410 | switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { | ||
411 | case SCTP_DATA_MIDDLE_FRAG: | ||
412 | if (!first_frag) { | ||
413 | first_frag = pos; | ||
414 | next_tsn = ctsn + 1; | ||
415 | last_frag = pos; | ||
416 | } else if (next_tsn == ctsn) | ||
417 | next_tsn++; | ||
418 | else | ||
419 | goto done; | ||
420 | break; | ||
421 | case SCTP_DATA_LAST_FRAG: | ||
422 | if (!first_frag) | ||
423 | first_frag = pos; | ||
424 | else if (ctsn != next_tsn) | ||
425 | goto done; | ||
426 | last_frag = pos; | ||
427 | is_last = 1; | ||
428 | goto done; | ||
429 | default: | ||
430 | return NULL; | ||
431 | } | ||
432 | } | ||
433 | |||
434 | /* We have the reassembled event. There is no need to look | ||
435 | * further. | ||
436 | */ | ||
437 | done: | ||
438 | retval = sctp_make_reassembled_event(first_frag, last_frag); | ||
439 | if (retval && is_last) | ||
440 | retval->msg_flags |= MSG_EOR; | ||
441 | |||
442 | return retval; | ||
443 | } | ||
444 | |||
445 | |||
446 | /* Helper function to reassemble chunks. Hold chunks on the reasm queue that | ||
447 | * need reassembling. | ||
448 | */ | ||
449 | static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq, | ||
450 | struct sctp_ulpevent *event) | ||
451 | { | ||
452 | struct sctp_ulpevent *retval = NULL; | ||
453 | |||
454 | /* Check if this is part of a fragmented message. */ | ||
455 | if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) { | ||
456 | event->msg_flags |= MSG_EOR; | ||
457 | return event; | ||
458 | } | ||
459 | |||
460 | sctp_ulpq_store_reasm(ulpq, event); | ||
461 | if (!ulpq->pd_mode) | ||
462 | retval = sctp_ulpq_retrieve_reassembled(ulpq); | ||
463 | else { | ||
464 | __u32 ctsn, ctsnap; | ||
465 | |||
466 | /* Do not even bother unless this is the next tsn to | ||
467 | * be delivered. | ||
468 | */ | ||
469 | ctsn = event->tsn; | ||
470 | ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map); | ||
471 | if (TSN_lte(ctsn, ctsnap)) | ||
472 | retval = sctp_ulpq_retrieve_partial(ulpq); | ||
473 | } | ||
474 | |||
475 | return retval; | ||
476 | } | ||
477 | |||
478 | /* Retrieve the first part (sequential fragments) for partial delivery. */ | ||
479 | static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq) | ||
480 | { | ||
481 | struct sk_buff *pos, *last_frag, *first_frag; | ||
482 | struct sctp_ulpevent *cevent; | ||
483 | __u32 ctsn, next_tsn; | ||
484 | struct sctp_ulpevent *retval; | ||
485 | |||
486 | /* The chunks are held in the reasm queue sorted by TSN. | ||
487 | * Walk through the queue sequentially and look for a sequence of | ||
488 | * fragmented chunks that start a datagram. | ||
489 | */ | ||
490 | |||
491 | if (skb_queue_empty(&ulpq->reasm)) | ||
492 | return NULL; | ||
493 | |||
494 | last_frag = first_frag = NULL; | ||
495 | retval = NULL; | ||
496 | next_tsn = 0; | ||
497 | |||
498 | skb_queue_walk(&ulpq->reasm, pos) { | ||
499 | cevent = sctp_skb2event(pos); | ||
500 | ctsn = cevent->tsn; | ||
501 | |||
502 | switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) { | ||
503 | case SCTP_DATA_FIRST_FRAG: | ||
504 | if (!first_frag) { | ||
505 | first_frag = pos; | ||
506 | next_tsn = ctsn + 1; | ||
507 | last_frag = pos; | ||
508 | } else | ||
509 | goto done; | ||
510 | break; | ||
511 | |||
512 | case SCTP_DATA_MIDDLE_FRAG: | ||
513 | if (!first_frag) | ||
514 | return NULL; | ||
515 | if (ctsn == next_tsn) { | ||
516 | next_tsn++; | ||
517 | last_frag = pos; | ||
518 | } else | ||
519 | goto done; | ||
520 | break; | ||
521 | default: | ||
522 | return NULL; | ||
523 | } | ||
524 | } | ||
525 | |||
526 | /* We have the reassembled event. There is no need to look | ||
527 | * further. | ||
528 | */ | ||
529 | done: | ||
530 | retval = sctp_make_reassembled_event(first_frag, last_frag); | ||
531 | return retval; | ||
532 | } | ||
533 | |||
534 | /* Helper function to gather skbs that have possibly become | ||
535 | * ordered by an incoming chunk. | ||
536 | */ | ||
537 | static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq, | ||
538 | struct sctp_ulpevent *event) | ||
539 | { | ||
540 | struct sk_buff *pos, *tmp; | ||
541 | struct sctp_ulpevent *cevent; | ||
542 | struct sctp_stream *in; | ||
543 | __u16 sid, csid; | ||
544 | __u16 ssn, cssn; | ||
545 | |||
546 | sid = event->stream; | ||
547 | ssn = event->ssn; | ||
548 | in = &ulpq->asoc->ssnmap->in; | ||
549 | |||
550 | /* We are holding the chunks by stream, by SSN. */ | ||
551 | sctp_skb_for_each(pos, &ulpq->lobby, tmp) { | ||
552 | cevent = (struct sctp_ulpevent *) pos->cb; | ||
553 | csid = cevent->stream; | ||
554 | cssn = cevent->ssn; | ||
555 | |||
556 | /* Have we gone too far? */ | ||
557 | if (csid > sid) | ||
558 | break; | ||
559 | |||
560 | /* Have we not gone far enough? */ | ||
561 | if (csid < sid) | ||
562 | continue; | ||
563 | |||
564 | if (cssn != sctp_ssn_peek(in, sid)) | ||
565 | break; | ||
566 | |||
567 | /* Found it, so mark in the ssnmap. */ | ||
568 | sctp_ssn_next(in, sid); | ||
569 | |||
570 | __skb_unlink(pos, pos->list); | ||
571 | |||
572 | /* Attach all gathered skbs to the event. */ | ||
573 | __skb_queue_tail(sctp_event2skb(event)->list, pos); | ||
574 | } | ||
575 | } | ||
576 | |||
577 | /* Helper function to store chunks needing ordering. */ | ||
578 | static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq, | ||
579 | struct sctp_ulpevent *event) | ||
580 | { | ||
581 | struct sk_buff *pos; | ||
582 | struct sctp_ulpevent *cevent; | ||
583 | __u16 sid, csid; | ||
584 | __u16 ssn, cssn; | ||
585 | |||
586 | pos = skb_peek_tail(&ulpq->lobby); | ||
587 | if (!pos) { | ||
588 | __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); | ||
589 | return; | ||
590 | } | ||
591 | |||
592 | sid = event->stream; | ||
593 | ssn = event->ssn; | ||
594 | |||
595 | cevent = (struct sctp_ulpevent *) pos->cb; | ||
596 | csid = cevent->stream; | ||
597 | cssn = cevent->ssn; | ||
598 | if (sid > csid) { | ||
599 | __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); | ||
600 | return; | ||
601 | } | ||
602 | |||
603 | if ((sid == csid) && SSN_lt(cssn, ssn)) { | ||
604 | __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event)); | ||
605 | return; | ||
606 | } | ||
607 | |||
608 | /* Find the right place in this list. We store them by | ||
609 | * stream ID and then by SSN. | ||
610 | */ | ||
611 | skb_queue_walk(&ulpq->lobby, pos) { | ||
612 | cevent = (struct sctp_ulpevent *) pos->cb; | ||
613 | csid = cevent->stream; | ||
614 | cssn = cevent->ssn; | ||
615 | |||
616 | if (csid > sid) | ||
617 | break; | ||
618 | if (csid == sid && SSN_lt(ssn, cssn)) | ||
619 | break; | ||
620 | } | ||
621 | |||
622 | |||
623 | /* Insert before pos. */ | ||
624 | __skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->lobby); | ||
625 | |||
626 | } | ||
627 | |||
628 | static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq, | ||
629 | struct sctp_ulpevent *event) | ||
630 | { | ||
631 | __u16 sid, ssn; | ||
632 | struct sctp_stream *in; | ||
633 | |||
634 | /* Check if this message needs ordering. */ | ||
635 | if (SCTP_DATA_UNORDERED & event->msg_flags) | ||
636 | return event; | ||
637 | |||
638 | /* Note: The stream ID must be verified before this routine. */ | ||
639 | sid = event->stream; | ||
640 | ssn = event->ssn; | ||
641 | in = &ulpq->asoc->ssnmap->in; | ||
642 | |||
643 | /* Is this the expected SSN for this stream ID? */ | ||
644 | if (ssn != sctp_ssn_peek(in, sid)) { | ||
645 | /* We've received something out of order, so find where it | ||
646 | * needs to be placed. We order by stream and then by SSN. | ||
647 | */ | ||
648 | sctp_ulpq_store_ordered(ulpq, event); | ||
649 | return NULL; | ||
650 | } | ||
651 | |||
652 | /* Mark that the next chunk has been found. */ | ||
653 | sctp_ssn_next(in, sid); | ||
654 | |||
655 | /* Go find any other chunks that were waiting for | ||
656 | * ordering. | ||
657 | */ | ||
658 | sctp_ulpq_retrieve_ordered(ulpq, event); | ||
659 | |||
660 | return event; | ||
661 | } | ||
662 | |||
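Ordering is per stream: an event is deliverable only when its SSN matches the stream's expected SSN; otherwise it waits in the lobby. A minimal sketch of that check with a hypothetical per-stream counter (the kernel keeps the real counters in the ssnmap):

#include <stdbool.h>
#include <stdint.h>

struct stream_state {
	uint16_t next_ssn;	/* SSN we expect to deliver next */
};

/* Return true if an event with this SSN may be delivered now; when it can,
 * advance the expected SSN so queued successors become deliverable too.
 * Illustrative sketch of the sctp_ssn_peek()/sctp_ssn_next() pattern.
 */
static bool ssn_in_order(struct stream_state *st, uint16_t ssn, bool unordered)
{
	if (unordered)
		return true;		/* unordered DATA bypasses the SSN check */

	if (ssn != st->next_ssn)
		return false;		/* hold it in the lobby */

	st->next_ssn++;			/* sctp_ssn_next() equivalent */
	return true;
}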
663 | /* Helper function to gather skbs that have possibly become | ||
664 | * ordered by forward tsn skipping their dependencies. | ||
665 | */ | ||
666 | static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq) | ||
667 | { | ||
668 | struct sk_buff *pos, *tmp; | ||
669 | struct sctp_ulpevent *cevent; | ||
670 | struct sctp_ulpevent *event = NULL; | ||
671 | struct sctp_stream *in; | ||
672 | struct sk_buff_head temp; | ||
673 | __u16 csid, cssn; | ||
674 | |||
675 | in = &ulpq->asoc->ssnmap->in; | ||
676 | |||
677 | /* We are holding the chunks by stream, by SSN. */ | ||
678 | sctp_skb_for_each(pos, &ulpq->lobby, tmp) { | ||
679 | cevent = (struct sctp_ulpevent *) pos->cb; | ||
680 | csid = cevent->stream; | ||
681 | cssn = cevent->ssn; | ||
682 | |||
683 | if (cssn != sctp_ssn_peek(in, csid)) | ||
684 | break; | ||
685 | |||
686 | /* Found it, so mark in the ssnmap. */ | ||
687 | sctp_ssn_next(in, csid); | ||
688 | |||
689 | __skb_unlink(pos, pos->list); | ||
690 | if (!event) { | ||
691 | /* Create a temporary list to collect chunks on. */ | ||
692 | event = sctp_skb2event(pos); | ||
693 | skb_queue_head_init(&temp); | ||
694 | __skb_queue_tail(&temp, sctp_event2skb(event)); | ||
695 | } else { | ||
696 | /* Attach all gathered skbs to the event. */ | ||
697 | __skb_queue_tail(sctp_event2skb(event)->list, pos); | ||
698 | } | ||
699 | } | ||
700 | |||
701 | /* Send event to the ULP. */ | ||
702 | if (event) | ||
703 | sctp_ulpq_tail_event(ulpq, event); | ||
704 | } | ||
705 | |||
706 | /* Skip over an SSN. */ | ||
707 | void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn) | ||
708 | { | ||
709 | struct sctp_stream *in; | ||
710 | |||
711 | /* Note: The stream ID must be verified before this routine. */ | ||
712 | in = &ulpq->asoc->ssnmap->in; | ||
713 | |||
714 | /* Is this an old SSN? If so ignore. */ | ||
715 | if (SSN_lt(ssn, sctp_ssn_peek(in, sid))) | ||
716 | return; | ||
717 | |||
718 | /* Mark that we are no longer expecting this SSN or lower. */ | ||
719 | sctp_ssn_skip(in, sid, ssn); | ||
720 | |||
721 | /* Go find any other chunks that were waiting for | ||
722 | * ordering and deliver them if needed. | ||
723 | */ | ||
724 | sctp_ulpq_reap_ordered(ulpq); | ||
725 | return; | ||
726 | } | ||
727 | |||
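A FORWARD TSN simply moves the stream's expected SSN past whatever the sender abandoned. A small self-contained sketch, assuming SSN_lt() is the usual 16-bit serial-number comparison (names here are illustrative):

#include <stdint.h>

/* Assumed equivalent of SSN_lt(): 16-bit serial-number comparison. */
static int ssn_lt(uint16_t a, uint16_t b)
{
	return (int16_t)(uint16_t)(a - b) < 0;
}

/* Skip the stream ahead so that 'ssn' and everything before it is no longer
 * expected; in-order delivery resumes at ssn + 1.
 */
static void ssn_skip(uint16_t *next_expected, uint16_t ssn)
{
	if (ssn_lt(ssn, *next_expected))
		return;			/* old SSN, nothing to skip */
	*next_expected = (uint16_t)(ssn + 1);
}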
728 | /* Renege 'needed' bytes from the ordering queue. */ | ||
729 | static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed) | ||
730 | { | ||
731 | __u16 freed = 0; | ||
732 | __u32 tsn; | ||
733 | struct sk_buff *skb; | ||
734 | struct sctp_ulpevent *event; | ||
735 | struct sctp_tsnmap *tsnmap; | ||
736 | |||
737 | tsnmap = &ulpq->asoc->peer.tsn_map; | ||
738 | |||
739 | while ((skb = __skb_dequeue_tail(&ulpq->lobby)) != NULL) { | ||
740 | freed += skb_headlen(skb); | ||
741 | event = sctp_skb2event(skb); | ||
742 | tsn = event->tsn; | ||
743 | |||
744 | sctp_ulpevent_free(event); | ||
745 | sctp_tsnmap_renege(tsnmap, tsn); | ||
746 | if (freed >= needed) | ||
747 | return freed; | ||
748 | } | ||
749 | |||
750 | return freed; | ||
751 | } | ||
752 | |||
753 | /* Renege 'needed' bytes from the reassembly queue. */ | ||
754 | static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed) | ||
755 | { | ||
756 | __u16 freed = 0; | ||
757 | __u32 tsn; | ||
758 | struct sk_buff *skb; | ||
759 | struct sctp_ulpevent *event; | ||
760 | struct sctp_tsnmap *tsnmap; | ||
761 | |||
762 | tsnmap = &ulpq->asoc->peer.tsn_map; | ||
763 | |||
764 | /* Walk backwards through the list, reneging the newest TSNs. */ | ||
765 | while ((skb = __skb_dequeue_tail(&ulpq->reasm)) != NULL) { | ||
766 | freed += skb_headlen(skb); | ||
767 | event = sctp_skb2event(skb); | ||
768 | tsn = event->tsn; | ||
769 | |||
770 | sctp_ulpevent_free(event); | ||
771 | sctp_tsnmap_renege(tsnmap, tsn); | ||
772 | if (freed >= needed) | ||
773 | return freed; | ||
774 | } | ||
775 | |||
776 | return freed; | ||
777 | } | ||
778 | |||
779 | /* Partially deliver the first message, as there is pressure on the rwnd. */ | ||
780 | void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq, | ||
781 | struct sctp_chunk *chunk, int gfp) | ||
782 | { | ||
783 | struct sctp_ulpevent *event; | ||
784 | struct sctp_association *asoc; | ||
785 | |||
786 | asoc = ulpq->asoc; | ||
787 | |||
788 | /* Are we already in partial delivery mode? */ | ||
789 | if (!sctp_sk(asoc->base.sk)->pd_mode) { | ||
790 | |||
791 | /* Is partial delivery possible? */ | ||
792 | event = sctp_ulpq_retrieve_first(ulpq); | ||
793 | /* Send event to the ULP. */ | ||
794 | if (event) { | ||
795 | sctp_ulpq_tail_event(ulpq, event); | ||
796 | sctp_sk(asoc->base.sk)->pd_mode = 1; | ||
797 | ulpq->pd_mode = 1; | ||
798 | return; | ||
799 | } | ||
800 | } | ||
801 | } | ||
802 | |||
803 | /* Renege some packets to make room for an incoming chunk. */ | ||
804 | void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk, | ||
805 | int gfp) | ||
806 | { | ||
807 | struct sctp_association *asoc; | ||
808 | __u16 needed, freed; | ||
809 | |||
810 | asoc = ulpq->asoc; | ||
811 | |||
812 | if (chunk) { | ||
813 | needed = ntohs(chunk->chunk_hdr->length); | ||
814 | needed -= sizeof(sctp_data_chunk_t); | ||
815 | } else | ||
816 | needed = SCTP_DEFAULT_MAXWINDOW; | ||
817 | |||
818 | freed = 0; | ||
819 | |||
820 | if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) { | ||
821 | freed = sctp_ulpq_renege_order(ulpq, needed); | ||
822 | if (freed < needed) { | ||
823 | freed += sctp_ulpq_renege_frags(ulpq, needed - freed); | ||
824 | } | ||
825 | } | ||
826 | /* If able to free enough room, accept this chunk. */ | ||
827 | if (chunk && (freed >= needed)) { | ||
828 | __u32 tsn; | ||
829 | tsn = ntohl(chunk->subh.data_hdr->tsn); | ||
830 | sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn); | ||
831 | sctp_ulpq_tail_data(ulpq, chunk, gfp); | ||
832 | |||
833 | sctp_ulpq_partial_delivery(ulpq, chunk, gfp); | ||
834 | } | ||
835 | |||
836 | return; | ||
837 | } | ||
838 | |||
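Reneging frees just enough previously accepted but undelivered data to admit the new chunk: the ordering lobby is drained first, then the reassembly queue. A rough standalone sketch of that bookkeeping (sizes and names are illustrative; the real code also removes the reneged TSNs from the peer's TSN map):

#include <stdint.h>

/* Hypothetical per-queue byte counts standing in for the lobby and reasm
 * sk_buff queues.
 */
struct renege_queues {
	uint32_t ordered_bytes;	/* lobby: waiting for in-order delivery */
	uint32_t reasm_bytes;	/* reasm: waiting for missing fragments */
};

static uint32_t renege_from(uint32_t *pool, uint32_t needed)
{
	uint32_t freed = (*pool < needed) ? *pool : needed;

	*pool -= freed;
	return freed;
}

/* Free at least 'needed' bytes if possible: ordering queue first, then the
 * reassembly queue, mirroring sctp_ulpq_renege().
 */
static uint32_t renege(struct renege_queues *q, uint32_t needed)
{
	uint32_t freed = renege_from(&q->ordered_bytes, needed);

	if (freed < needed)
		freed += renege_from(&q->reasm_bytes, needed - freed);
	return freed;
}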
839 | |||
840 | |||
841 | /* Notify the application if an association is aborted and in | ||
842 | * partial delivery mode. Send up any pending received messages. | ||
843 | */ | ||
844 | void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, int gfp) | ||
845 | { | ||
846 | struct sctp_ulpevent *ev = NULL; | ||
847 | struct sock *sk; | ||
848 | |||
849 | if (!ulpq->pd_mode) | ||
850 | return; | ||
851 | |||
852 | sk = ulpq->asoc->base.sk; | ||
853 | if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT, | ||
854 | &sctp_sk(sk)->subscribe)) | ||
855 | ev = sctp_ulpevent_make_pdapi(ulpq->asoc, | ||
856 | SCTP_PARTIAL_DELIVERY_ABORTED, | ||
857 | gfp); | ||
858 | if (ev) | ||
859 | __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev)); | ||
860 | |||
861 | /* If there is data waiting, send it up the socket now. */ | ||
862 | if (sctp_ulpq_clear_pd(ulpq) || ev) | ||
863 | sk->sk_data_ready(sk, 0); | ||
864 | } | ||