author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /net/sctp/sm_sideeffect.c
Linux-2.6.12-rc2 (tag v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'net/sctp/sm_sideeffect.c')
-rw-r--r--  net/sctp/sm_sideeffect.c | 1395
1 file changed, 1395 insertions, 0 deletions
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
new file mode 100644
index 000000000000..f65fa441952f
--- /dev/null
+++ b/net/sctp/sm_sideeffect.c
@@ -0,0 +1,1395 @@
1 | /* SCTP kernel reference Implementation | ||
2 | * (C) Copyright IBM Corp. 2001, 2004 | ||
3 | * Copyright (c) 1999 Cisco, Inc. | ||
4 | * Copyright (c) 1999-2001 Motorola, Inc. | ||
5 | * | ||
6 | * This file is part of the SCTP kernel reference Implementation | ||
7 | * | ||
8 | * These functions work with the state functions in sctp_sm_statefuns.c | ||
9 | * to implement the state operations. These functions implement the | ||
10 | * steps which require modifying existing data structures. | ||
11 | * | ||
12 | * The SCTP reference implementation is free software; | ||
13 | * you can redistribute it and/or modify it under the terms of | ||
14 | * the GNU General Public License as published by | ||
15 | * the Free Software Foundation; either version 2, or (at your option) | ||
16 | * any later version. | ||
17 | * | ||
18 | * The SCTP reference implementation is distributed in the hope that it | ||
19 | * will be useful, but WITHOUT ANY WARRANTY; without even the implied | ||
20 | * ************************ | ||
21 | * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. | ||
22 | * See the GNU General Public License for more details. | ||
23 | * | ||
24 | * You should have received a copy of the GNU General Public License | ||
25 | * along with GNU CC; see the file COPYING. If not, write to | ||
26 | * the Free Software Foundation, 59 Temple Place - Suite 330, | ||
27 | * Boston, MA 02111-1307, USA. | ||
28 | * | ||
29 | * Please send any bug reports or fixes you make to the | ||
30 | * email address(es): | ||
31 | * lksctp developers <lksctp-developers@lists.sourceforge.net> | ||
32 | * | ||
33 | * Or submit a bug report through the following website: | ||
34 | * http://www.sf.net/projects/lksctp | ||
35 | * | ||
36 | * Written or modified by: | ||
37 | * La Monte H.P. Yarroll <piggy@acm.org> | ||
38 | * Karl Knutson <karl@athena.chicago.il.us> | ||
39 | * Jon Grimm <jgrimm@austin.ibm.com> | ||
40 | * Hui Huang <hui.huang@nokia.com> | ||
41 | * Dajiang Zhang <dajiang.zhang@nokia.com> | ||
42 | * Daisy Chang <daisyc@us.ibm.com> | ||
43 | * Sridhar Samudrala <sri@us.ibm.com> | ||
44 | * Ardelle Fan <ardelle.fan@intel.com> | ||
45 | * | ||
46 | * Any bugs reported given to us we will try to fix... any fixes shared will | ||
47 | * be incorporated into the next SCTP release. | ||
48 | */ | ||
49 | |||
50 | #include <linux/skbuff.h> | ||
51 | #include <linux/types.h> | ||
52 | #include <linux/socket.h> | ||
53 | #include <linux/ip.h> | ||
54 | #include <net/sock.h> | ||
55 | #include <net/sctp/sctp.h> | ||
56 | #include <net/sctp/sm.h> | ||
57 | |||
58 | static int sctp_cmd_interpreter(sctp_event_t event_type, | ||
59 | sctp_subtype_t subtype, | ||
60 | sctp_state_t state, | ||
61 | struct sctp_endpoint *ep, | ||
62 | struct sctp_association *asoc, | ||
63 | void *event_arg, | ||
64 | sctp_disposition_t status, | ||
65 | sctp_cmd_seq_t *commands, | ||
66 | int gfp); | ||
67 | static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, | ||
68 | sctp_state_t state, | ||
69 | struct sctp_endpoint *ep, | ||
70 | struct sctp_association *asoc, | ||
71 | void *event_arg, | ||
72 | sctp_disposition_t status, | ||
73 | sctp_cmd_seq_t *commands, | ||
74 | int gfp); | ||
75 | |||
76 | /******************************************************************** | ||
77 | * Helper functions | ||
78 | ********************************************************************/ | ||
79 | |||
80 | /* A helper function for delayed processing of INET ECN CE bit. */ | ||
81 | static void sctp_do_ecn_ce_work(struct sctp_association *asoc, | ||
82 | __u32 lowest_tsn) | ||
83 | { | ||
84 | /* Save the TSN away for comparison when we receive CWR */ | ||
85 | |||
86 | asoc->last_ecne_tsn = lowest_tsn; | ||
87 | asoc->need_ecne = 1; | ||
88 | } | ||
89 | |||
90 | /* Helper function for delayed processing of SCTP ECNE chunk. */ | ||
91 | /* RFC 2960 Appendix A | ||
92 | * | ||
93 | * RFC 2481 details a specific bit for a sender to send in | ||
94 | * the header of its next outbound TCP segment to indicate to | ||
95 | * its peer that it has reduced its congestion window. This | ||
96 | * is termed the CWR bit. For SCTP the same indication is made | ||
97 | * by including the CWR chunk. This chunk contains one data | ||
98 | * element, i.e. the TSN number that was sent in the ECNE chunk. | ||
99 | * This element represents the lowest TSN number in the datagram | ||
100 | * that was originally marked with the CE bit. | ||
101 | */ | ||
102 | static struct sctp_chunk *sctp_do_ecn_ecne_work(struct sctp_association *asoc, | ||
103 | __u32 lowest_tsn, | ||
104 | struct sctp_chunk *chunk) | ||
105 | { | ||
106 | struct sctp_chunk *repl; | ||
107 | |||
108 | /* Our previously transmitted packet ran into some congestion | ||
109 | * so we should take action by reducing cwnd and ssthresh | ||
110 | * and then ACK our peer that we've done so by | ||
111 | * sending a CWR. | ||
112 | */ | ||
113 | |||
114 | /* First, try to determine if we want to actually lower | ||
115 | * our cwnd variables. Only lower them if the ECNE looks more | ||
116 | * recent than the last response. | ||
117 | */ | ||
118 | if (TSN_lt(asoc->last_cwr_tsn, lowest_tsn)) { | ||
119 | struct sctp_transport *transport; | ||
120 | |||
121 | /* Find which transport's congestion variables | ||
122 | * need to be adjusted. | ||
123 | */ | ||
124 | transport = sctp_assoc_lookup_tsn(asoc, lowest_tsn); | ||
125 | |||
126 | /* Update the congestion variables. */ | ||
127 | if (transport) | ||
128 | sctp_transport_lower_cwnd(transport, | ||
129 | SCTP_LOWER_CWND_ECNE); | ||
130 | asoc->last_cwr_tsn = lowest_tsn; | ||
131 | } | ||
132 | |||
133 | /* Always try to quiet the other end. In case of lost CWR, | ||
134 | * resend last_cwr_tsn. | ||
135 | */ | ||
136 | repl = sctp_make_cwr(asoc, asoc->last_cwr_tsn, chunk); | ||
137 | |||
138 | /* If we run out of memory, it will look like a lost CWR. We'll | ||
139 | * get back in sync eventually. | ||
140 | */ | ||
141 | return repl; | ||
142 | } | ||
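The "looks more recent than the last response" test above is TSN_lt(), which compares 32-bit TSNs with serial-number (modulo 2^32) arithmetic so that ordering survives wrap-around. A hedged, userspace-only illustration of that idea, assuming the usual signed-difference idiom (the helper below is invented for this sketch and is not the kernel macro):

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: does TSN 'a' precede TSN 'b' in modulo-2^32
 * sequence space?  The signed difference is negative exactly when
 * 'a' is behind 'b', even across the wrap at 0xffffffff -> 0.
 */
static int tsn_before(uint32_t a, uint32_t b)
{
        return (int32_t)(a - b) < 0;
}

int main(void)
{
        printf("%d\n", tsn_before(0xfffffffeu, 2u)); /* 1: older despite being numerically larger */
        printf("%d\n", tsn_before(7u, 3u));          /* 0: 7 comes after 3 */
        return 0;
}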
143 | |||
144 | /* Helper function to do delayed processing of ECN CWR chunk. */ | ||
145 | static void sctp_do_ecn_cwr_work(struct sctp_association *asoc, | ||
146 | __u32 lowest_tsn) | ||
147 | { | ||
148 | /* Turn off ECNE getting auto-prepended to every outgoing | ||
149 | * packet | ||
150 | */ | ||
151 | asoc->need_ecne = 0; | ||
152 | } | ||
153 | |||
154 | /* Generate SACK if necessary. We call this at the end of a packet. */ | ||
155 | static int sctp_gen_sack(struct sctp_association *asoc, int force, | ||
156 | sctp_cmd_seq_t *commands) | ||
157 | { | ||
158 | __u32 ctsn, max_tsn_seen; | ||
159 | struct sctp_chunk *sack; | ||
160 | int error = 0; | ||
161 | |||
162 | if (force) | ||
163 | asoc->peer.sack_needed = 1; | ||
164 | |||
165 | ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map); | ||
166 | max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map); | ||
167 | |||
168 | /* From 12.2 Parameters necessary per association (i.e. the TCB): | ||
169 | * | ||
170 | * Ack State : This flag indicates if the next received packet | ||
171 | * : is to be responded to with a SACK. ... | ||
172 | * : When DATA chunks are out of order, SACK's | ||
173 | * : are not delayed (see Section 6). | ||
174 | * | ||
175 | * [This is actually not mentioned in Section 6, but we | ||
176 | * implement it here anyway. --piggy] | ||
177 | */ | ||
178 | if (max_tsn_seen != ctsn) | ||
179 | asoc->peer.sack_needed = 1; | ||
180 | |||
181 | /* From 6.2 Acknowledgement on Reception of DATA Chunks: | ||
182 | * | ||
183 | * Section 4.2 of [RFC2581] SHOULD be followed. Specifically, | ||
184 | * an acknowledgement SHOULD be generated for at least every | ||
185 | * second packet (not every second DATA chunk) received, and | ||
186 | * SHOULD be generated within 200 ms of the arrival of any | ||
187 | * unacknowledged DATA chunk. ... | ||
188 | */ | ||
189 | if (!asoc->peer.sack_needed) { | ||
190 | /* We will need a SACK for the next packet. */ | ||
191 | asoc->peer.sack_needed = 1; | ||
192 | goto out; | ||
193 | } else { | ||
194 | if (asoc->a_rwnd > asoc->rwnd) | ||
195 | asoc->a_rwnd = asoc->rwnd; | ||
196 | sack = sctp_make_sack(asoc); | ||
197 | if (!sack) | ||
198 | goto nomem; | ||
199 | |||
200 | asoc->peer.sack_needed = 0; | ||
201 | |||
202 | error = sctp_outq_tail(&asoc->outqueue, sack); | ||
203 | |||
204 | /* Stop the SACK timer. */ | ||
205 | sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP, | ||
206 | SCTP_TO(SCTP_EVENT_TIMEOUT_SACK)); | ||
207 | } | ||
208 | out: | ||
209 | return error; | ||
210 | nomem: | ||
211 | error = -ENOMEM; | ||
212 | return error; | ||
213 | } | ||
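Stripped of the bookkeeping, the policy implemented above is: acknowledge immediately when forced or when data arrived out of order, otherwise acknowledge every second packet and let the SACK timer cover the 200 ms bound. A small userspace sketch of just that decision, assuming the caller tracks the cumulative TSN and the highest TSN seen (names below are invented; in the kernel the flag lives in asoc->peer.sack_needed):

#include <stdbool.h>
#include <stdint.h>

/* Illustrative only: should a SACK be sent for the packet just processed?
 * 'sack_needed' persists between packets so that in-order traffic is
 * acknowledged on every second packet; a gap or an explicit force means
 * the SACK must not be delayed.
 */
static bool want_sack_now(bool *sack_needed, bool force,
                          uint32_t ctsn, uint32_t max_tsn_seen)
{
        if (force || max_tsn_seen != ctsn)
                *sack_needed = true;

        if (!*sack_needed) {
                *sack_needed = true;    /* skip this packet, SACK the next */
                return false;
        }

        *sack_needed = false;           /* SACK now, start counting again */
        return true;
}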
214 | |||
215 | /* When the T3-RTX timer expires, it calls this function to create the | ||
216 | * relevant state machine event. | ||
217 | */ | ||
218 | void sctp_generate_t3_rtx_event(unsigned long peer) | ||
219 | { | ||
220 | int error; | ||
221 | struct sctp_transport *transport = (struct sctp_transport *) peer; | ||
222 | struct sctp_association *asoc = transport->asoc; | ||
223 | |||
224 | /* Check whether a task is in the sock. */ | ||
225 | |||
226 | sctp_bh_lock_sock(asoc->base.sk); | ||
227 | if (sock_owned_by_user(asoc->base.sk)) { | ||
228 | SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __FUNCTION__); | ||
229 | |||
230 | /* Try again later. */ | ||
231 | if (!mod_timer(&transport->T3_rtx_timer, jiffies + (HZ/20))) | ||
232 | sctp_transport_hold(transport); | ||
233 | goto out_unlock; | ||
234 | } | ||
235 | |||
236 | /* Is this transport really dead and just waiting around for | ||
237 | * the timer to let go of the reference? | ||
238 | */ | ||
239 | if (transport->dead) | ||
240 | goto out_unlock; | ||
241 | |||
242 | /* Run through the state machine. */ | ||
243 | error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT, | ||
244 | SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_T3_RTX), | ||
245 | asoc->state, | ||
246 | asoc->ep, asoc, | ||
247 | transport, GFP_ATOMIC); | ||
248 | |||
249 | if (error) | ||
250 | asoc->base.sk->sk_err = -error; | ||
251 | |||
252 | out_unlock: | ||
253 | sctp_bh_unlock_sock(asoc->base.sk); | ||
254 | sctp_transport_put(transport); | ||
255 | } | ||
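Two patterns in this handler recur throughout the file. If the socket is owned by a user context, the handler cannot safely run the state machine, so it simply rearms the timer about 50 ms out (HZ/20 jiffies) and retries. The reference counting is tied to timer state: mod_timer() returns 0 only when the timer was not already pending, so a hold is taken exactly when an idle timer becomes armed, and the matching put is done when the expiry handler finishes (or when del_timer() succeeds, as in the stop helpers further down). A rough sketch of that pairing with invented names, not kernel APIs:

/* Illustrative only: keep one reference per armed timer.
 * 'was_pending' plays the role of mod_timer()'s return value:
 * nonzero if the timer was already pending, zero if it was idle.
 */
struct peer {
        int refcnt;
};

static void peer_hold(struct peer *p) { p->refcnt++; }
static void peer_put(struct peer *p)  { p->refcnt--; }

static void arm_timer_with_ref(struct peer *p, int was_pending)
{
        if (!was_pending)
                peer_hold(p);   /* idle -> armed: the timer now owns a reference */
}

static void on_timer_expiry(struct peer *p)
{
        /* ... run the deferred work, possibly rearming via arm_timer_with_ref() ... */
        peer_put(p);            /* balance the hold taken when the timer was armed */
}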
256 | |||
257 | /* This is an interface for producing timeout events. It works | ||
258 | * for timeouts which use the association as their parameter. | ||
259 | */ | ||
260 | static void sctp_generate_timeout_event(struct sctp_association *asoc, | ||
261 | sctp_event_timeout_t timeout_type) | ||
262 | { | ||
263 | int error = 0; | ||
264 | |||
265 | sctp_bh_lock_sock(asoc->base.sk); | ||
266 | if (sock_owned_by_user(asoc->base.sk)) { | ||
267 | SCTP_DEBUG_PRINTK("%s:Sock is busy: timer %d\n", | ||
268 | __FUNCTION__, | ||
269 | timeout_type); | ||
270 | |||
271 | /* Try again later. */ | ||
272 | if (!mod_timer(&asoc->timers[timeout_type], jiffies + (HZ/20))) | ||
273 | sctp_association_hold(asoc); | ||
274 | goto out_unlock; | ||
275 | } | ||
276 | |||
277 | /* Is this association really dead and just waiting around for | ||
278 | * the timer to let go of the reference? | ||
279 | */ | ||
280 | if (asoc->base.dead) | ||
281 | goto out_unlock; | ||
282 | |||
283 | /* Run through the state machine. */ | ||
284 | error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT, | ||
285 | SCTP_ST_TIMEOUT(timeout_type), | ||
286 | asoc->state, asoc->ep, asoc, | ||
287 | (void *)timeout_type, GFP_ATOMIC); | ||
288 | |||
289 | if (error) | ||
290 | asoc->base.sk->sk_err = -error; | ||
291 | |||
292 | out_unlock: | ||
293 | sctp_bh_unlock_sock(asoc->base.sk); | ||
294 | sctp_association_put(asoc); | ||
295 | } | ||
296 | |||
297 | static void sctp_generate_t1_cookie_event(unsigned long data) | ||
298 | { | ||
299 | struct sctp_association *asoc = (struct sctp_association *) data; | ||
300 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_COOKIE); | ||
301 | } | ||
302 | |||
303 | static void sctp_generate_t1_init_event(unsigned long data) | ||
304 | { | ||
305 | struct sctp_association *asoc = (struct sctp_association *) data; | ||
306 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T1_INIT); | ||
307 | } | ||
308 | |||
309 | static void sctp_generate_t2_shutdown_event(unsigned long data) | ||
310 | { | ||
311 | struct sctp_association *asoc = (struct sctp_association *) data; | ||
312 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T2_SHUTDOWN); | ||
313 | } | ||
314 | |||
315 | static void sctp_generate_t4_rto_event(unsigned long data) | ||
316 | { | ||
317 | struct sctp_association *asoc = (struct sctp_association *) data; | ||
318 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_T4_RTO); | ||
319 | } | ||
320 | |||
321 | static void sctp_generate_t5_shutdown_guard_event(unsigned long data) | ||
322 | { | ||
323 | struct sctp_association *asoc = (struct sctp_association *)data; | ||
324 | sctp_generate_timeout_event(asoc, | ||
325 | SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD); | ||
326 | |||
327 | } /* sctp_generate_t5_shutdown_guard_event() */ | ||
328 | |||
329 | static void sctp_generate_autoclose_event(unsigned long data) | ||
330 | { | ||
331 | struct sctp_association *asoc = (struct sctp_association *) data; | ||
332 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_AUTOCLOSE); | ||
333 | } | ||
334 | |||
335 | /* Generate a heartbeat event. If the sock is busy, reschedule. Make | ||
336 | * sure that the transport is still valid. | ||
337 | */ | ||
338 | void sctp_generate_heartbeat_event(unsigned long data) | ||
339 | { | ||
340 | int error = 0; | ||
341 | struct sctp_transport *transport = (struct sctp_transport *) data; | ||
342 | struct sctp_association *asoc = transport->asoc; | ||
343 | |||
344 | sctp_bh_lock_sock(asoc->base.sk); | ||
345 | if (sock_owned_by_user(asoc->base.sk)) { | ||
346 | SCTP_DEBUG_PRINTK("%s:Sock is busy.\n", __FUNCTION__); | ||
347 | |||
348 | /* Try again later. */ | ||
349 | if (!mod_timer(&transport->hb_timer, jiffies + (HZ/20))) | ||
350 | sctp_transport_hold(transport); | ||
351 | goto out_unlock; | ||
352 | } | ||
353 | |||
354 | /* Is this structure just waiting around for us to actually | ||
355 | * get destroyed? | ||
356 | */ | ||
357 | if (transport->dead) | ||
358 | goto out_unlock; | ||
359 | |||
360 | error = sctp_do_sm(SCTP_EVENT_T_TIMEOUT, | ||
361 | SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_HEARTBEAT), | ||
362 | asoc->state, asoc->ep, asoc, | ||
363 | transport, GFP_ATOMIC); | ||
364 | |||
365 | if (error) | ||
366 | asoc->base.sk->sk_err = -error; | ||
367 | |||
368 | out_unlock: | ||
369 | sctp_bh_unlock_sock(asoc->base.sk); | ||
370 | sctp_transport_put(transport); | ||
371 | } | ||
372 | |||
373 | /* Inject a SACK Timeout event into the state machine. */ | ||
374 | static void sctp_generate_sack_event(unsigned long data) | ||
375 | { | ||
376 | struct sctp_association *asoc = (struct sctp_association *) data; | ||
377 | sctp_generate_timeout_event(asoc, SCTP_EVENT_TIMEOUT_SACK); | ||
378 | } | ||
379 | |||
380 | sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = { | ||
381 | NULL, | ||
382 | sctp_generate_t1_cookie_event, | ||
383 | sctp_generate_t1_init_event, | ||
384 | sctp_generate_t2_shutdown_event, | ||
385 | NULL, | ||
386 | sctp_generate_t4_rto_event, | ||
387 | sctp_generate_t5_shutdown_guard_event, | ||
388 | sctp_generate_heartbeat_event, | ||
389 | sctp_generate_sack_event, | ||
390 | sctp_generate_autoclose_event, | ||
391 | }; | ||
392 | |||
393 | |||
394 | /* RFC 2960 8.2 Path Failure Detection | ||
395 | * | ||
396 | * When its peer endpoint is multi-homed, an endpoint should keep a | ||
397 | * error counter for each of the destination transport addresses of the | ||
398 | * peer endpoint. | ||
399 | * | ||
400 | * Each time the T3-rtx timer expires on any address, or when a | ||
401 | * HEARTBEAT sent to an idle address is not acknowledged within a RTO, | ||
402 | * the error counter of that destination address will be incremented. | ||
403 | * When the value in the error counter exceeds the protocol parameter | ||
404 | * 'Path.Max.Retrans' of that destination address, the endpoint should | ||
405 | * mark the destination transport address as inactive, and a | ||
406 | * notification SHOULD be sent to the upper layer. | ||
407 | * | ||
408 | */ | ||
409 | static void sctp_do_8_2_transport_strike(struct sctp_association *asoc, | ||
410 | struct sctp_transport *transport) | ||
411 | { | ||
412 | /* The check for the association's overall error counter exceeding the | ||
413 | * threshold is done in the state function. | ||
414 | */ | ||
415 | asoc->overall_error_count++; | ||
416 | |||
417 | if (transport->active && | ||
418 | (transport->error_count++ >= transport->max_retrans)) { | ||
419 | SCTP_DEBUG_PRINTK("transport_strike: transport " | ||
420 | "IP:%d.%d.%d.%d failed.\n", | ||
421 | NIPQUAD(transport->ipaddr.v4.sin_addr)); | ||
422 | sctp_assoc_control_transport(asoc, transport, | ||
423 | SCTP_TRANSPORT_DOWN, | ||
424 | SCTP_FAILED_THRESHOLD); | ||
425 | } | ||
426 | |||
427 | /* E2) For the destination address for which the timer | ||
428 | * expires, set RTO <- RTO * 2 ("back off the timer"). The | ||
429 | * maximum value discussed in rule C7 above (RTO.max) may be | ||
430 | * used to provide an upper bound to this doubling operation. | ||
431 | */ | ||
432 | transport->rto = min((transport->rto * 2), transport->asoc->rto_max); | ||
433 | } | ||
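This function combines the RFC 2960 8.2 error-counter threshold with the rule E2 exponential back-off of the retransmission timeout, capped at RTO.max. A condensed userspace sketch of the same arithmetic (field names and the 60-second cap are illustrative defaults, not values taken from this file):

#include <stdio.h>

/* Illustrative only: record one "strike" against a destination address. */
struct path {
        unsigned int error_count;
        unsigned int max_retrans;       /* Path.Max.Retrans */
        unsigned int rto_ms;
        unsigned int rto_max_ms;        /* RTO.max; RFC 2960 suggests 60000 ms */
        int active;
};

static void strike(struct path *p)
{
        if (p->active && p->error_count++ >= p->max_retrans) {
                p->active = 0;          /* mark the destination inactive */
                printf("path failed after %u errors\n", p->error_count);
        }

        /* Rule E2: RTO <- min(RTO * 2, RTO.max) */
        p->rto_ms *= 2;
        if (p->rto_ms > p->rto_max_ms)
                p->rto_ms = p->rto_max_ms;
}

int main(void)
{
        struct path p = { 0, 5, 3000, 60000, 1 };

        for (int i = 0; i < 7; i++)
                strike(&p);
        printf("rto now %u ms, active=%d\n", p.rto_ms, p.active); /* 60000 ms, 0 */
        return 0;
}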
434 | |||
435 | /* Worker routine to handle INIT command failure. */ | ||
436 | static void sctp_cmd_init_failed(sctp_cmd_seq_t *commands, | ||
437 | struct sctp_association *asoc, | ||
438 | unsigned error) | ||
439 | { | ||
440 | struct sctp_ulpevent *event; | ||
441 | |||
442 | event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_CANT_STR_ASSOC, | ||
443 | (__u16)error, 0, 0, | ||
444 | GFP_ATOMIC); | ||
445 | |||
446 | if (event) | ||
447 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, | ||
448 | SCTP_ULPEVENT(event)); | ||
449 | |||
450 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
451 | SCTP_STATE(SCTP_STATE_CLOSED)); | ||
452 | |||
453 | /* SEND_FAILED sent later when cleaning up the association. */ | ||
454 | asoc->outqueue.error = error; | ||
455 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); | ||
456 | } | ||
457 | |||
458 | /* Worker routine to handle SCTP_CMD_ASSOC_FAILED. */ | ||
459 | static void sctp_cmd_assoc_failed(sctp_cmd_seq_t *commands, | ||
460 | struct sctp_association *asoc, | ||
461 | sctp_event_t event_type, | ||
462 | sctp_subtype_t subtype, | ||
463 | struct sctp_chunk *chunk, | ||
464 | unsigned error) | ||
465 | { | ||
466 | struct sctp_ulpevent *event; | ||
467 | |||
468 | /* Cancel any partial delivery in progress. */ | ||
469 | sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); | ||
470 | |||
471 | event = sctp_ulpevent_make_assoc_change(asoc, 0, SCTP_COMM_LOST, | ||
472 | (__u16)error, 0, 0, | ||
473 | GFP_ATOMIC); | ||
474 | if (event) | ||
475 | sctp_add_cmd_sf(commands, SCTP_CMD_EVENT_ULP, | ||
476 | SCTP_ULPEVENT(event)); | ||
477 | |||
478 | sctp_add_cmd_sf(commands, SCTP_CMD_NEW_STATE, | ||
479 | SCTP_STATE(SCTP_STATE_CLOSED)); | ||
480 | |||
481 | /* Set sk_err to ECONNRESET on a 1-1 style socket. */ | ||
482 | if (!sctp_style(asoc->base.sk, UDP)) | ||
483 | asoc->base.sk->sk_err = ECONNRESET; | ||
484 | |||
485 | /* SEND_FAILED sent later when cleaning up the association. */ | ||
486 | asoc->outqueue.error = error; | ||
487 | sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL()); | ||
488 | } | ||
489 | |||
490 | /* Process an init chunk (may be real INIT/INIT-ACK or an embedded INIT | ||
491 | * inside the cookie). In reality, this is only used for INIT-ACK processing | ||
492 | * since all other cases use "temporary" associations and can do all | ||
493 | * their work in statefuns directly. | ||
494 | */ | ||
495 | static int sctp_cmd_process_init(sctp_cmd_seq_t *commands, | ||
496 | struct sctp_association *asoc, | ||
497 | struct sctp_chunk *chunk, | ||
498 | sctp_init_chunk_t *peer_init, int gfp) | ||
499 | { | ||
500 | int error; | ||
501 | |||
502 | /* We only process the init as a sideeffect in a single | ||
503 | * case. This is when we process the INIT-ACK. If we | ||
504 | * fail during INIT processing (due to malloc problems), | ||
505 | * just return the error and stop processing the stack. | ||
506 | */ | ||
507 | if (!sctp_process_init(asoc, chunk->chunk_hdr->type, | ||
508 | sctp_source(chunk), peer_init, gfp)) | ||
509 | error = -ENOMEM; | ||
510 | else | ||
511 | error = 0; | ||
512 | |||
513 | return error; | ||
514 | } | ||
515 | |||
516 | /* Helper function to break out starting up of heartbeat timers. */ | ||
517 | static void sctp_cmd_hb_timers_start(sctp_cmd_seq_t *cmds, | ||
518 | struct sctp_association *asoc) | ||
519 | { | ||
520 | struct sctp_transport *t; | ||
521 | struct list_head *pos; | ||
522 | |||
523 | /* Start a heartbeat timer for each transport on the association. | ||
524 | * Hold a reference on the transport to make sure none of | ||
525 | * the needed data structures go away. | ||
526 | */ | ||
527 | list_for_each(pos, &asoc->peer.transport_addr_list) { | ||
528 | t = list_entry(pos, struct sctp_transport, transports); | ||
529 | |||
530 | if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) | ||
531 | sctp_transport_hold(t); | ||
532 | } | ||
533 | } | ||
534 | |||
535 | static void sctp_cmd_hb_timers_stop(sctp_cmd_seq_t *cmds, | ||
536 | struct sctp_association *asoc) | ||
537 | { | ||
538 | struct sctp_transport *t; | ||
539 | struct list_head *pos; | ||
540 | |||
541 | /* Stop all heartbeat timers. */ | ||
542 | |||
543 | list_for_each(pos, &asoc->peer.transport_addr_list) { | ||
544 | t = list_entry(pos, struct sctp_transport, transports); | ||
545 | if (del_timer(&t->hb_timer)) | ||
546 | sctp_transport_put(t); | ||
547 | } | ||
548 | } | ||
549 | |||
550 | /* Helper function to stop any pending T3-RTX timers */ | ||
551 | static void sctp_cmd_t3_rtx_timers_stop(sctp_cmd_seq_t *cmds, | ||
552 | struct sctp_association *asoc) | ||
553 | { | ||
554 | struct sctp_transport *t; | ||
555 | struct list_head *pos; | ||
556 | |||
557 | list_for_each(pos, &asoc->peer.transport_addr_list) { | ||
558 | t = list_entry(pos, struct sctp_transport, transports); | ||
559 | if (timer_pending(&t->T3_rtx_timer) && | ||
560 | del_timer(&t->T3_rtx_timer)) { | ||
561 | sctp_transport_put(t); | ||
562 | } | ||
563 | } | ||
564 | } | ||
565 | |||
566 | |||
567 | /* Helper function to update the heartbeat timer. */ | ||
568 | static void sctp_cmd_hb_timer_update(sctp_cmd_seq_t *cmds, | ||
569 | struct sctp_association *asoc, | ||
570 | struct sctp_transport *t) | ||
571 | { | ||
572 | /* Update the heartbeat timer. */ | ||
573 | if (!mod_timer(&t->hb_timer, sctp_transport_timeout(t))) | ||
574 | sctp_transport_hold(t); | ||
575 | } | ||
576 | |||
577 | /* Helper function to handle the reception of a HEARTBEAT ACK. */ | ||
578 | static void sctp_cmd_transport_on(sctp_cmd_seq_t *cmds, | ||
579 | struct sctp_association *asoc, | ||
580 | struct sctp_transport *t, | ||
581 | struct sctp_chunk *chunk) | ||
582 | { | ||
583 | sctp_sender_hb_info_t *hbinfo; | ||
584 | |||
585 | /* 8.3 Upon the receipt of the HEARTBEAT ACK, the sender of the | ||
586 | * HEARTBEAT should clear the error counter of the destination | ||
587 | * transport address to which the HEARTBEAT was sent. | ||
588 | * The association's overall error count is also cleared. | ||
589 | */ | ||
590 | t->error_count = 0; | ||
591 | t->asoc->overall_error_count = 0; | ||
592 | |||
593 | /* Mark the destination transport address as active if it is not so | ||
594 | * marked. | ||
595 | */ | ||
596 | if (!t->active) | ||
597 | sctp_assoc_control_transport(asoc, t, SCTP_TRANSPORT_UP, | ||
598 | SCTP_HEARTBEAT_SUCCESS); | ||
599 | |||
600 | /* The receiver of the HEARTBEAT ACK should also perform an | ||
601 | * RTT measurement for that destination transport address | ||
602 | * using the time value carried in the HEARTBEAT ACK chunk. | ||
603 | */ | ||
604 | hbinfo = (sctp_sender_hb_info_t *) chunk->skb->data; | ||
605 | sctp_transport_update_rto(t, (jiffies - hbinfo->sent_at)); | ||
606 | } | ||
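The RTT sample here is simply "now minus the send timestamp carried back in the HEARTBEAT ACK", and sctp_transport_update_rto() folds it into the smoothed estimates along the lines of RFC 2960 6.3.1. A compact userspace sketch of that update rule (the struct and function are invented; constants are the RFC's recommended alpha = 1/8 and beta = 1/4):

/* Illustrative only: RFC 2960 6.3.1-style RTO computation from RTT samples.
 * Times are in milliseconds.
 */
struct rto_state {
        double srtt, rttvar, rto;
        int have_sample;
};

static void rto_update(struct rto_state *s, double rtt,
                       double rto_min, double rto_max)
{
        const double alpha = 0.125, beta = 0.25;

        if (!s->have_sample) {
                /* C2: first measurement */
                s->srtt = rtt;
                s->rttvar = rtt / 2.0;
                s->have_sample = 1;
        } else {
                /* C3: subsequent measurements */
                double diff = s->srtt > rtt ? s->srtt - rtt : rtt - s->srtt;

                s->rttvar = (1 - beta) * s->rttvar + beta * diff;
                s->srtt = (1 - alpha) * s->srtt + alpha * rtt;
        }

        s->rto = s->srtt + 4.0 * s->rttvar;
        if (s->rto < rto_min)           /* C6: lower bound (RTO.min) */
                s->rto = rto_min;
        if (s->rto > rto_max)           /* C7: upper bound (RTO.max) */
                s->rto = rto_max;
}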
607 | |||
608 | /* Helper function to do a transport reset at the expiry of the heartbeat | ||
609 | * timer. | ||
610 | */ | ||
611 | static void sctp_cmd_transport_reset(sctp_cmd_seq_t *cmds, | ||
612 | struct sctp_association *asoc, | ||
613 | struct sctp_transport *t) | ||
614 | { | ||
615 | sctp_transport_lower_cwnd(t, SCTP_LOWER_CWND_INACTIVE); | ||
616 | |||
617 | /* Mark one strike against a transport. */ | ||
618 | sctp_do_8_2_transport_strike(asoc, t); | ||
619 | } | ||
620 | |||
621 | /* Helper function to process the SACK command. */ | ||
622 | static int sctp_cmd_process_sack(sctp_cmd_seq_t *cmds, | ||
623 | struct sctp_association *asoc, | ||
624 | struct sctp_sackhdr *sackh) | ||
625 | { | ||
626 | int err; | ||
627 | |||
628 | if (sctp_outq_sack(&asoc->outqueue, sackh)) { | ||
629 | /* There are no more TSNs awaiting SACK. */ | ||
630 | err = sctp_do_sm(SCTP_EVENT_T_OTHER, | ||
631 | SCTP_ST_OTHER(SCTP_EVENT_NO_PENDING_TSN), | ||
632 | asoc->state, asoc->ep, asoc, NULL, | ||
633 | GFP_ATOMIC); | ||
634 | } else { | ||
635 | /* The window may have opened, so we need | ||
636 | * to check if we have DATA to transmit. | ||
637 | */ | ||
638 | err = sctp_outq_flush(&asoc->outqueue, 0); | ||
639 | } | ||
640 | |||
641 | return err; | ||
642 | } | ||
643 | |||
644 | /* Helper function to set the timeout value for the T2-SHUTDOWN timer and to set | ||
645 | * the transport for a shutdown chunk. | ||
646 | */ | ||
647 | static void sctp_cmd_setup_t2(sctp_cmd_seq_t *cmds, | ||
648 | struct sctp_association *asoc, | ||
649 | struct sctp_chunk *chunk) | ||
650 | { | ||
651 | struct sctp_transport *t; | ||
652 | |||
653 | t = sctp_assoc_choose_shutdown_transport(asoc); | ||
654 | asoc->shutdown_last_sent_to = t; | ||
655 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T2_SHUTDOWN] = t->rto; | ||
656 | chunk->transport = t; | ||
657 | } | ||
658 | |||
659 | /* Helper function to change the state of an association. */ | ||
660 | static void sctp_cmd_new_state(sctp_cmd_seq_t *cmds, | ||
661 | struct sctp_association *asoc, | ||
662 | sctp_state_t state) | ||
663 | { | ||
664 | struct sock *sk = asoc->base.sk; | ||
665 | |||
666 | asoc->state = state; | ||
667 | |||
668 | if (sctp_style(sk, TCP)) { | ||
669 | /* Change the sk->sk_state of a TCP-style socket that has | ||
670 | * successfully completed a connect() call. | ||
671 | */ | ||
672 | if (sctp_state(asoc, ESTABLISHED) && sctp_sstate(sk, CLOSED)) | ||
673 | sk->sk_state = SCTP_SS_ESTABLISHED; | ||
674 | |||
675 | /* Set the RCV_SHUTDOWN flag when a SHUTDOWN is received. */ | ||
676 | if (sctp_state(asoc, SHUTDOWN_RECEIVED) && | ||
677 | sctp_sstate(sk, ESTABLISHED)) | ||
678 | sk->sk_shutdown |= RCV_SHUTDOWN; | ||
679 | } | ||
680 | |||
681 | if (sctp_state(asoc, ESTABLISHED) || | ||
682 | sctp_state(asoc, CLOSED) || | ||
683 | sctp_state(asoc, SHUTDOWN_RECEIVED)) { | ||
684 | /* Wake up any processes waiting in the asoc's wait queue in | ||
685 | * sctp_wait_for_connect() or sctp_wait_for_sndbuf(). | ||
686 | */ | ||
687 | if (waitqueue_active(&asoc->wait)) | ||
688 | wake_up_interruptible(&asoc->wait); | ||
689 | |||
690 | /* Wake up any processes waiting in the sk's sleep queue of | ||
691 | * a TCP-style or UDP-style peeled-off socket in | ||
692 | * sctp_wait_for_accept() or sctp_wait_for_packet(). | ||
693 | * For a UDP-style socket, the waiters are woken up by the | ||
694 | * notifications. | ||
695 | */ | ||
696 | if (!sctp_style(sk, UDP)) | ||
697 | sk->sk_state_change(sk); | ||
698 | } | ||
699 | } | ||
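For TCP-style (one-to-one) sockets the association state drives the socket state, which is what lets connect(), accept() and poll() behave the way applications expect. Reduced to its essentials, the mapping applied above looks roughly like this (enum and field names are invented for the sketch):

/* Illustrative only: shadow the association state onto a 1-1 style socket. */
enum assoc_state { A_ESTABLISHED, A_SHUTDOWN_RECEIVED, A_CLOSED, A_OTHER };
enum sock_state  { S_CLOSED, S_ESTABLISHED };

struct one_to_one_sock {
        enum sock_state state;
        int rcv_shutdown;       /* stands in for sk->sk_shutdown & RCV_SHUTDOWN */
};

static void sync_sock_state(struct one_to_one_sock *sk, enum assoc_state a)
{
        /* A completed connect(): the CLOSED socket becomes ESTABLISHED. */
        if (a == A_ESTABLISHED && sk->state == S_CLOSED)
                sk->state = S_ESTABLISHED;

        /* Peer sent SHUTDOWN: no more data will arrive on this socket. */
        if (a == A_SHUTDOWN_RECEIVED && sk->state == S_ESTABLISHED)
                sk->rcv_shutdown = 1;
}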
700 | |||
701 | /* Helper function to delete an association. */ | ||
702 | static void sctp_cmd_delete_tcb(sctp_cmd_seq_t *cmds, | ||
703 | struct sctp_association *asoc) | ||
704 | { | ||
705 | struct sock *sk = asoc->base.sk; | ||
706 | |||
707 | /* If it is a non-temporary association belonging to a TCP-style | ||
708 | * listening socket that is not closed, do not free it so that accept() | ||
709 | * can pick it up later. | ||
710 | */ | ||
711 | if (sctp_style(sk, TCP) && sctp_sstate(sk, LISTENING) && | ||
712 | (!asoc->temp) && (sk->sk_shutdown != SHUTDOWN_MASK)) | ||
713 | return; | ||
714 | |||
715 | sctp_unhash_established(asoc); | ||
716 | sctp_association_free(asoc); | ||
717 | } | ||
718 | |||
719 | /* | ||
720 | * ADDIP Section 4.1 ASCONF Chunk Procedures | ||
721 | * A4) Start a T-4 RTO timer, using the RTO value of the selected | ||
722 | * destination address (we use the active path instead of the primary path | ||
723 | * just because the primary path may be inactive). | ||
724 | */ | ||
725 | static void sctp_cmd_setup_t4(sctp_cmd_seq_t *cmds, | ||
726 | struct sctp_association *asoc, | ||
727 | struct sctp_chunk *chunk) | ||
728 | { | ||
729 | struct sctp_transport *t; | ||
730 | |||
731 | t = asoc->peer.active_path; | ||
732 | asoc->timeouts[SCTP_EVENT_TIMEOUT_T4_RTO] = t->rto; | ||
733 | chunk->transport = t; | ||
734 | } | ||
735 | |||
736 | /* Process an incoming Operation Error Chunk. */ | ||
737 | static void sctp_cmd_process_operr(sctp_cmd_seq_t *cmds, | ||
738 | struct sctp_association *asoc, | ||
739 | struct sctp_chunk *chunk) | ||
740 | { | ||
741 | struct sctp_operr_chunk *operr_chunk; | ||
742 | struct sctp_errhdr *err_hdr; | ||
743 | |||
744 | operr_chunk = (struct sctp_operr_chunk *)chunk->chunk_hdr; | ||
745 | err_hdr = &operr_chunk->err_hdr; | ||
746 | |||
747 | switch (err_hdr->cause) { | ||
748 | case SCTP_ERROR_UNKNOWN_CHUNK: | ||
749 | { | ||
750 | struct sctp_chunkhdr *unk_chunk_hdr; | ||
751 | |||
752 | unk_chunk_hdr = (struct sctp_chunkhdr *)err_hdr->variable; | ||
753 | switch (unk_chunk_hdr->type) { | ||
754 | /* ADDIP 4.1 A9) If the peer responds to an ASCONF with an | ||
755 | * ERROR chunk reporting that it did not recognize the ASCONF | ||
756 | * chunk type, the sender of the ASCONF MUST NOT send any | ||
757 | * further ASCONF chunks and MUST stop its T-4 timer. | ||
758 | */ | ||
759 | case SCTP_CID_ASCONF: | ||
760 | asoc->peer.asconf_capable = 0; | ||
761 | sctp_add_cmd_sf(cmds, SCTP_CMD_TIMER_STOP, | ||
762 | SCTP_TO(SCTP_EVENT_TIMEOUT_T4_RTO)); | ||
763 | break; | ||
764 | default: | ||
765 | break; | ||
766 | } | ||
767 | break; | ||
768 | } | ||
769 | default: | ||
770 | break; | ||
771 | } | ||
772 | } | ||
773 | |||
774 | /* Process variable FWDTSN chunk information. */ | ||
775 | static void sctp_cmd_process_fwdtsn(struct sctp_ulpq *ulpq, | ||
776 | struct sctp_chunk *chunk) | ||
777 | { | ||
778 | struct sctp_fwdtsn_skip *skip; | ||
779 | /* Walk through all the skipped SSNs */ | ||
780 | sctp_walk_fwdtsn(skip, chunk) { | ||
781 | sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn)); | ||
782 | } | ||
783 | |||
784 | return; | ||
785 | } | ||
786 | |||
787 | /* Helper function to remove the association's non-primary peer | ||
788 | * transports. | ||
789 | */ | ||
790 | static void sctp_cmd_del_non_primary(struct sctp_association *asoc) | ||
791 | { | ||
792 | struct sctp_transport *t; | ||
793 | struct list_head *pos; | ||
794 | struct list_head *temp; | ||
795 | |||
796 | list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) { | ||
797 | t = list_entry(pos, struct sctp_transport, transports); | ||
798 | if (!sctp_cmp_addr_exact(&t->ipaddr, | ||
799 | &asoc->peer.primary_addr)) { | ||
800 | sctp_assoc_del_peer(asoc, &t->ipaddr); | ||
801 | } | ||
802 | } | ||
803 | |||
804 | return; | ||
805 | } | ||
806 | |||
807 | /* These three macros allow us to pull the debugging code out of the | ||
808 | * main flow of sctp_do_sm() to keep attention focused on the real | ||
809 | * functionality there. | ||
810 | */ | ||
811 | #define DEBUG_PRE \ | ||
812 | SCTP_DEBUG_PRINTK("sctp_do_sm prefn: " \ | ||
813 | "ep %p, %s, %s, asoc %p[%s], %s\n", \ | ||
814 | ep, sctp_evttype_tbl[event_type], \ | ||
815 | (*debug_fn)(subtype), asoc, \ | ||
816 | sctp_state_tbl[state], state_fn->name) | ||
817 | |||
818 | #define DEBUG_POST \ | ||
819 | SCTP_DEBUG_PRINTK("sctp_do_sm postfn: " \ | ||
820 | "asoc %p, status: %s\n", \ | ||
821 | asoc, sctp_status_tbl[status]) | ||
822 | |||
823 | #define DEBUG_POST_SFX \ | ||
824 | SCTP_DEBUG_PRINTK("sctp_do_sm post sfx: error %d, asoc %p[%s]\n", \ | ||
825 | error, asoc, \ | ||
826 | sctp_state_tbl[(asoc && sctp_id2assoc(ep->base.sk, \ | ||
827 | sctp_assoc2id(asoc)))?asoc->state:SCTP_STATE_CLOSED]) | ||
828 | |||
829 | /* | ||
830 | * This is the master state machine processing function. | ||
831 | * | ||
832 | * If you want to understand all of lksctp, this is a | ||
833 | * good place to start. | ||
834 | */ | ||
835 | int sctp_do_sm(sctp_event_t event_type, sctp_subtype_t subtype, | ||
836 | sctp_state_t state, | ||
837 | struct sctp_endpoint *ep, | ||
838 | struct sctp_association *asoc, | ||
839 | void *event_arg, | ||
840 | int gfp) | ||
841 | { | ||
842 | sctp_cmd_seq_t commands; | ||
843 | const sctp_sm_table_entry_t *state_fn; | ||
844 | sctp_disposition_t status; | ||
845 | int error = 0; | ||
846 | typedef const char *(printfn_t)(sctp_subtype_t); | ||
847 | |||
848 | static printfn_t *table[] = { | ||
849 | NULL, sctp_cname, sctp_tname, sctp_oname, sctp_pname, | ||
850 | }; | ||
851 | printfn_t *debug_fn __attribute__ ((unused)) = table[event_type]; | ||
852 | |||
853 | /* Look up the state function, run it, and then process the | ||
854 | * side effects. These three steps are the heart of lksctp. | ||
855 | */ | ||
856 | state_fn = sctp_sm_lookup_event(event_type, state, subtype); | ||
857 | |||
858 | sctp_init_cmd_seq(&commands); | ||
859 | |||
860 | DEBUG_PRE; | ||
861 | status = (*state_fn->fn)(ep, asoc, subtype, event_arg, &commands); | ||
862 | DEBUG_POST; | ||
863 | |||
864 | error = sctp_side_effects(event_type, subtype, state, | ||
865 | ep, asoc, event_arg, status, | ||
866 | &commands, gfp); | ||
867 | DEBUG_POST_SFX; | ||
868 | |||
869 | return error; | ||
870 | } | ||
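The shape of sctp_do_sm() is the key to the file: look up a state function keyed by (event type, state, subtype), run it to obtain a disposition plus a queue of side-effect commands, then hand that queue to sctp_side_effects()/sctp_cmd_interpreter() below. A toy userspace sketch of the same three-step flow with invented types (the real lookup is sctp_sm_lookup_event()):

/* Illustrative only: lookup -> run -> interpret side effects. */
enum disposition { DISP_CONSUME, DISP_DISCARD, DISP_NOMEM };

struct cmd_seq {
        int n;                  /* queue of side-effect commands (elided) */
};

typedef enum disposition (*state_fn_t)(void *event_arg, struct cmd_seq *cmds);

static enum disposition handle_example(void *event_arg, struct cmd_seq *cmds)
{
        (void)event_arg; (void)cmds;
        return DISP_CONSUME;
}

static state_fn_t lookup(int event_type, int state, int subtype)
{
        (void)event_type; (void)state; (void)subtype;
        return handle_example;  /* the real code indexes per-event state tables */
}

static int run_side_effects(struct cmd_seq *cmds, enum disposition d)
{
        (void)cmds;
        return d == DISP_NOMEM ? -1 : 0;  /* the real code walks the command queue */
}

static int do_sm(int event_type, int state, int subtype, void *event_arg)
{
        struct cmd_seq cmds = { 0 };
        enum disposition d = lookup(event_type, state, subtype)(event_arg, &cmds);

        return run_side_effects(&cmds, d);
}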
871 | |||
872 | #undef DEBUG_PRE | ||
873 | #undef DEBUG_POST | ||
874 | |||
875 | /***************************************************************** | ||
876 | * This is the master state function side effect processing function. | ||
877 | *****************************************************************/ | ||
878 | static int sctp_side_effects(sctp_event_t event_type, sctp_subtype_t subtype, | ||
879 | sctp_state_t state, | ||
880 | struct sctp_endpoint *ep, | ||
881 | struct sctp_association *asoc, | ||
882 | void *event_arg, | ||
883 | sctp_disposition_t status, | ||
884 | sctp_cmd_seq_t *commands, | ||
885 | int gfp) | ||
886 | { | ||
887 | int error; | ||
888 | |||
889 | /* FIXME - Most of the dispositions left today would be categorized | ||
890 | * as "exceptional" dispositions. For those dispositions, it | ||
891 | * may not be proper to run through any of the commands at all. | ||
892 | * For example, the command interpreter might be run only with | ||
893 | * disposition SCTP_DISPOSITION_CONSUME. | ||
894 | */ | ||
895 | if (0 != (error = sctp_cmd_interpreter(event_type, subtype, state, | ||
896 | ep, asoc, | ||
897 | event_arg, status, | ||
898 | commands, gfp))) | ||
899 | goto bail; | ||
900 | |||
901 | switch (status) { | ||
902 | case SCTP_DISPOSITION_DISCARD: | ||
903 | SCTP_DEBUG_PRINTK("Ignored sctp protocol event - state %d, " | ||
904 | "event_type %d, event_id %d\n", | ||
905 | state, event_type, subtype.chunk); | ||
906 | break; | ||
907 | |||
908 | case SCTP_DISPOSITION_NOMEM: | ||
909 | /* We ran out of memory, so we need to discard this | ||
910 | * packet. | ||
911 | */ | ||
912 | /* BUG--we should now recover some memory, probably by | ||
913 | * reneging... | ||
914 | */ | ||
915 | error = -ENOMEM; | ||
916 | break; | ||
917 | |||
918 | case SCTP_DISPOSITION_DELETE_TCB: | ||
919 | /* This should now be a command. */ | ||
920 | break; | ||
921 | |||
922 | case SCTP_DISPOSITION_CONSUME: | ||
923 | case SCTP_DISPOSITION_ABORT: | ||
924 | /* | ||
925 | * We should no longer have much work to do here as the | ||
926 | * real work has been done as explicit commands above. | ||
927 | */ | ||
928 | break; | ||
929 | |||
930 | case SCTP_DISPOSITION_VIOLATION: | ||
931 | printk(KERN_ERR "sctp protocol violation state %d " | ||
932 | "chunkid %d\n", state, subtype.chunk); | ||
933 | break; | ||
934 | |||
935 | case SCTP_DISPOSITION_NOT_IMPL: | ||
936 | printk(KERN_WARNING "sctp unimplemented feature in state %d, " | ||
937 | "event_type %d, event_id %d\n", | ||
938 | state, event_type, subtype.chunk); | ||
939 | break; | ||
940 | |||
941 | case SCTP_DISPOSITION_BUG: | ||
942 | printk(KERN_ERR "sctp bug in state %d, " | ||
943 | "event_type %d, event_id %d\n", | ||
944 | state, event_type, subtype.chunk); | ||
945 | BUG(); | ||
946 | break; | ||
947 | |||
948 | default: | ||
949 | printk(KERN_ERR "sctp impossible disposition %d " | ||
950 | "in state %d, event_type %d, event_id %d\n", | ||
951 | status, state, event_type, subtype.chunk); | ||
952 | BUG(); | ||
953 | break; | ||
954 | }; | ||
955 | |||
956 | bail: | ||
957 | return error; | ||
958 | } | ||
959 | |||
960 | /******************************************************************** | ||
961 | * 2nd Level Abstractions | ||
962 | ********************************************************************/ | ||
963 | |||
964 | /* This is the side-effect interpreter. */ | ||
965 | static int sctp_cmd_interpreter(sctp_event_t event_type, | ||
966 | sctp_subtype_t subtype, | ||
967 | sctp_state_t state, | ||
968 | struct sctp_endpoint *ep, | ||
969 | struct sctp_association *asoc, | ||
970 | void *event_arg, | ||
971 | sctp_disposition_t status, | ||
972 | sctp_cmd_seq_t *commands, | ||
973 | int gfp) | ||
974 | { | ||
975 | int error = 0; | ||
976 | int force; | ||
977 | sctp_cmd_t *cmd; | ||
978 | struct sctp_chunk *new_obj; | ||
979 | struct sctp_chunk *chunk = NULL; | ||
980 | struct sctp_packet *packet; | ||
981 | struct list_head *pos; | ||
982 | struct timer_list *timer; | ||
983 | unsigned long timeout; | ||
984 | struct sctp_transport *t; | ||
985 | struct sctp_sackhdr sackh; | ||
986 | int local_cork = 0; | ||
987 | |||
988 | if (SCTP_EVENT_T_TIMEOUT != event_type) | ||
989 | chunk = (struct sctp_chunk *) event_arg; | ||
990 | |||
991 | /* Note: This whole file is a huge candidate for rework. | ||
992 | * For example, each command could have its own handler, so | ||
993 | * the loop would look like: | ||
994 | * while (cmds) | ||
995 | * cmd->handle(x, y, z) | ||
996 | * --jgrimm | ||
997 | */ | ||
998 | while (NULL != (cmd = sctp_next_cmd(commands))) { | ||
999 | switch (cmd->verb) { | ||
1000 | case SCTP_CMD_NOP: | ||
1001 | /* Do nothing. */ | ||
1002 | break; | ||
1003 | |||
1004 | case SCTP_CMD_NEW_ASOC: | ||
1005 | /* Register a new association. */ | ||
1006 | if (local_cork) { | ||
1007 | sctp_outq_uncork(&asoc->outqueue); | ||
1008 | local_cork = 0; | ||
1009 | } | ||
1010 | asoc = cmd->obj.ptr; | ||
1011 | /* Register with the endpoint. */ | ||
1012 | sctp_endpoint_add_asoc(ep, asoc); | ||
1013 | sctp_hash_established(asoc); | ||
1014 | break; | ||
1015 | |||
1016 | case SCTP_CMD_UPDATE_ASSOC: | ||
1017 | sctp_assoc_update(asoc, cmd->obj.ptr); | ||
1018 | break; | ||
1019 | |||
1020 | case SCTP_CMD_PURGE_OUTQUEUE: | ||
1021 | sctp_outq_teardown(&asoc->outqueue); | ||
1022 | break; | ||
1023 | |||
1024 | case SCTP_CMD_DELETE_TCB: | ||
1025 | if (local_cork) { | ||
1026 | sctp_outq_uncork(&asoc->outqueue); | ||
1027 | local_cork = 0; | ||
1028 | } | ||
1029 | /* Delete the current association. */ | ||
1030 | sctp_cmd_delete_tcb(commands, asoc); | ||
1031 | asoc = NULL; | ||
1032 | break; | ||
1033 | |||
1034 | case SCTP_CMD_NEW_STATE: | ||
1035 | /* Enter a new state. */ | ||
1036 | sctp_cmd_new_state(commands, asoc, cmd->obj.state); | ||
1037 | break; | ||
1038 | |||
1039 | case SCTP_CMD_REPORT_TSN: | ||
1040 | /* Record the arrival of a TSN. */ | ||
1041 | sctp_tsnmap_mark(&asoc->peer.tsn_map, cmd->obj.u32); | ||
1042 | break; | ||
1043 | |||
1044 | case SCTP_CMD_REPORT_FWDTSN: | ||
1045 | /* Move the Cumulative TSN Ack ahead. */ | ||
1046 | sctp_tsnmap_skip(&asoc->peer.tsn_map, cmd->obj.u32); | ||
1047 | |||
1048 | /* Abort any in progress partial delivery. */ | ||
1049 | sctp_ulpq_abort_pd(&asoc->ulpq, GFP_ATOMIC); | ||
1050 | break; | ||
1051 | |||
1052 | case SCTP_CMD_PROCESS_FWDTSN: | ||
1053 | sctp_cmd_process_fwdtsn(&asoc->ulpq, cmd->obj.ptr); | ||
1054 | break; | ||
1055 | |||
1056 | case SCTP_CMD_GEN_SACK: | ||
1057 | /* Generate a Selective ACK. | ||
1058 | * The argument tells us whether to just count | ||
1059 | * the packet and MAYBE generate a SACK, or | ||
1060 | * force a SACK out. | ||
1061 | */ | ||
1062 | force = cmd->obj.i32; | ||
1063 | error = sctp_gen_sack(asoc, force, commands); | ||
1064 | break; | ||
1065 | |||
1066 | case SCTP_CMD_PROCESS_SACK: | ||
1067 | /* Process an inbound SACK. */ | ||
1068 | error = sctp_cmd_process_sack(commands, asoc, | ||
1069 | cmd->obj.ptr); | ||
1070 | break; | ||
1071 | |||
1072 | case SCTP_CMD_GEN_INIT_ACK: | ||
1073 | /* Generate an INIT ACK chunk. */ | ||
1074 | new_obj = sctp_make_init_ack(asoc, chunk, GFP_ATOMIC, | ||
1075 | 0); | ||
1076 | if (!new_obj) | ||
1077 | goto nomem; | ||
1078 | |||
1079 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | ||
1080 | SCTP_CHUNK(new_obj)); | ||
1081 | break; | ||
1082 | |||
1083 | case SCTP_CMD_PEER_INIT: | ||
1084 | /* Process a unified INIT from the peer. | ||
1085 | * Note: Only used during INIT-ACK processing. If | ||
1086 | * there is an error just return to the outer | ||
1087 | * layer which will bail. | ||
1088 | */ | ||
1089 | error = sctp_cmd_process_init(commands, asoc, chunk, | ||
1090 | cmd->obj.ptr, gfp); | ||
1091 | break; | ||
1092 | |||
1093 | case SCTP_CMD_GEN_COOKIE_ECHO: | ||
1094 | /* Generate a COOKIE ECHO chunk. */ | ||
1095 | new_obj = sctp_make_cookie_echo(asoc, chunk); | ||
1096 | if (!new_obj) { | ||
1097 | if (cmd->obj.ptr) | ||
1098 | sctp_chunk_free(cmd->obj.ptr); | ||
1099 | goto nomem; | ||
1100 | } | ||
1101 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | ||
1102 | SCTP_CHUNK(new_obj)); | ||
1103 | |||
1104 | /* If there is an ERROR chunk to be sent along with | ||
1105 | * the COOKIE_ECHO, send it, too. | ||
1106 | */ | ||
1107 | if (cmd->obj.ptr) | ||
1108 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | ||
1109 | SCTP_CHUNK(cmd->obj.ptr)); | ||
1110 | |||
1111 | /* FIXME - Eventually come up with a cleaner way to | ||
1112 | * enable COOKIE-ECHO + DATA bundling during | ||
1113 | * multihoming stale cookie scenarios, the following | ||
1114 | * command plays with asoc->peer.retran_path to | ||
1115 | * avoid the problem of sending the COOKIE-ECHO and | ||
1116 | * DATA in different paths, which could result | ||
1117 | * in the association being ABORTed if the DATA chunk | ||
1118 | * is processed first by the server. Checking the | ||
1119 | * init error counter simply causes this command | ||
1120 | * to be executed only during failed attempts of | ||
1121 | * association establishment. | ||
1122 | */ | ||
1123 | if ((asoc->peer.retran_path != | ||
1124 | asoc->peer.primary_path) && | ||
1125 | (asoc->counters[SCTP_COUNTER_INIT_ERROR] > 0)) { | ||
1126 | sctp_add_cmd_sf(commands, | ||
1127 | SCTP_CMD_FORCE_PRIM_RETRAN, | ||
1128 | SCTP_NULL()); | ||
1129 | } | ||
1130 | |||
1131 | break; | ||
1132 | |||
1133 | case SCTP_CMD_GEN_SHUTDOWN: | ||
1134 | /* Generate SHUTDOWN when in SHUTDOWN_SENT state. | ||
1135 | * Reset error counts. | ||
1136 | */ | ||
1137 | asoc->overall_error_count = 0; | ||
1138 | |||
1139 | /* Generate a SHUTDOWN chunk. */ | ||
1140 | new_obj = sctp_make_shutdown(asoc, chunk); | ||
1141 | if (!new_obj) | ||
1142 | goto nomem; | ||
1143 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | ||
1144 | SCTP_CHUNK(new_obj)); | ||
1145 | break; | ||
1146 | |||
1147 | case SCTP_CMD_CHUNK_ULP: | ||
1148 | /* Send a chunk to the sockets layer. */ | ||
1149 | SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n", | ||
1150 | "chunk_up:", cmd->obj.ptr, | ||
1151 | "ulpq:", &asoc->ulpq); | ||
1152 | sctp_ulpq_tail_data(&asoc->ulpq, cmd->obj.ptr, | ||
1153 | GFP_ATOMIC); | ||
1154 | break; | ||
1155 | |||
1156 | case SCTP_CMD_EVENT_ULP: | ||
1157 | /* Send a notification to the sockets layer. */ | ||
1158 | SCTP_DEBUG_PRINTK("sm_sideff: %s %p, %s %p.\n", | ||
1159 | "event_up:",cmd->obj.ptr, | ||
1160 | "ulpq:",&asoc->ulpq); | ||
1161 | sctp_ulpq_tail_event(&asoc->ulpq, cmd->obj.ptr); | ||
1162 | break; | ||
1163 | |||
1164 | case SCTP_CMD_REPLY: | ||
1165 | /* If the caller has not already corked, do cork. */ | ||
1166 | if (!asoc->outqueue.cork) { | ||
1167 | sctp_outq_cork(&asoc->outqueue); | ||
1168 | local_cork = 1; | ||
1169 | } | ||
1170 | /* Send a chunk to our peer. */ | ||
1171 | error = sctp_outq_tail(&asoc->outqueue, cmd->obj.ptr); | ||
1172 | break; | ||
1173 | |||
1174 | case SCTP_CMD_SEND_PKT: | ||
1175 | /* Send a full packet to our peer. */ | ||
1176 | packet = cmd->obj.ptr; | ||
1177 | sctp_packet_transmit(packet); | ||
1178 | sctp_ootb_pkt_free(packet); | ||
1179 | break; | ||
1180 | |||
1181 | case SCTP_CMD_RETRAN: | ||
1182 | /* Mark a transport for retransmission. */ | ||
1183 | sctp_retransmit(&asoc->outqueue, cmd->obj.transport, | ||
1184 | SCTP_RTXR_T3_RTX); | ||
1185 | break; | ||
1186 | |||
1187 | case SCTP_CMD_TRANSMIT: | ||
1188 | /* Kick start transmission. */ | ||
1189 | error = sctp_outq_uncork(&asoc->outqueue); | ||
1190 | local_cork = 0; | ||
1191 | break; | ||
1192 | |||
1193 | case SCTP_CMD_ECN_CE: | ||
1194 | /* Do delayed CE processing. */ | ||
1195 | sctp_do_ecn_ce_work(asoc, cmd->obj.u32); | ||
1196 | break; | ||
1197 | |||
1198 | case SCTP_CMD_ECN_ECNE: | ||
1199 | /* Do delayed ECNE processing. */ | ||
1200 | new_obj = sctp_do_ecn_ecne_work(asoc, cmd->obj.u32, | ||
1201 | chunk); | ||
1202 | if (new_obj) | ||
1203 | sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, | ||
1204 | SCTP_CHUNK(new_obj)); | ||
1205 | break; | ||
1206 | |||
1207 | case SCTP_CMD_ECN_CWR: | ||
1208 | /* Do delayed CWR processing. */ | ||
1209 | sctp_do_ecn_cwr_work(asoc, cmd->obj.u32); | ||
1210 | break; | ||
1211 | |||
1212 | case SCTP_CMD_SETUP_T2: | ||
1213 | sctp_cmd_setup_t2(commands, asoc, cmd->obj.ptr); | ||
1214 | break; | ||
1215 | |||
1216 | case SCTP_CMD_TIMER_START: | ||
1217 | timer = &asoc->timers[cmd->obj.to]; | ||
1218 | timeout = asoc->timeouts[cmd->obj.to]; | ||
1219 | if (!timeout) | ||
1220 | BUG(); | ||
1221 | |||
1222 | timer->expires = jiffies + timeout; | ||
1223 | sctp_association_hold(asoc); | ||
1224 | add_timer(timer); | ||
1225 | break; | ||
1226 | |||
1227 | case SCTP_CMD_TIMER_RESTART: | ||
1228 | timer = &asoc->timers[cmd->obj.to]; | ||
1229 | timeout = asoc->timeouts[cmd->obj.to]; | ||
1230 | if (!mod_timer(timer, jiffies + timeout)) | ||
1231 | sctp_association_hold(asoc); | ||
1232 | break; | ||
1233 | |||
1234 | case SCTP_CMD_TIMER_STOP: | ||
1235 | timer = &asoc->timers[cmd->obj.to]; | ||
1236 | if (timer_pending(timer) && del_timer(timer)) | ||
1237 | sctp_association_put(asoc); | ||
1238 | break; | ||
1239 | |||
1240 | case SCTP_CMD_INIT_RESTART: | ||
1241 | /* Do the needed accounting and updates | ||
1242 | * associated with restarting an initialization | ||
1243 | * timer. | ||
1244 | */ | ||
1245 | asoc->counters[SCTP_COUNTER_INIT_ERROR]++; | ||
1246 | asoc->timeouts[cmd->obj.to] *= 2; | ||
1247 | if (asoc->timeouts[cmd->obj.to] > | ||
1248 | asoc->max_init_timeo) { | ||
1249 | asoc->timeouts[cmd->obj.to] = | ||
1250 | asoc->max_init_timeo; | ||
1251 | } | ||
1252 | |||
1253 | /* If we've sent any data bundled with | ||
1254 | * COOKIE-ECHO we need to resend. | ||
1255 | */ | ||
1256 | list_for_each(pos, &asoc->peer.transport_addr_list) { | ||
1257 | t = list_entry(pos, struct sctp_transport, | ||
1258 | transports); | ||
1259 | sctp_retransmit_mark(&asoc->outqueue, t, 0); | ||
1260 | } | ||
1261 | |||
1262 | sctp_add_cmd_sf(commands, | ||
1263 | SCTP_CMD_TIMER_RESTART, | ||
1264 | SCTP_TO(cmd->obj.to)); | ||
1265 | break; | ||
1266 | |||
1267 | case SCTP_CMD_INIT_FAILED: | ||
1268 | sctp_cmd_init_failed(commands, asoc, cmd->obj.u32); | ||
1269 | break; | ||
1270 | |||
1271 | case SCTP_CMD_ASSOC_FAILED: | ||
1272 | sctp_cmd_assoc_failed(commands, asoc, event_type, | ||
1273 | subtype, chunk, cmd->obj.u32); | ||
1274 | break; | ||
1275 | |||
1276 | case SCTP_CMD_COUNTER_INC: | ||
1277 | asoc->counters[cmd->obj.counter]++; | ||
1278 | break; | ||
1279 | |||
1280 | case SCTP_CMD_COUNTER_RESET: | ||
1281 | asoc->counters[cmd->obj.counter] = 0; | ||
1282 | break; | ||
1283 | |||
1284 | case SCTP_CMD_REPORT_DUP: | ||
1285 | sctp_tsnmap_mark_dup(&asoc->peer.tsn_map, | ||
1286 | cmd->obj.u32); | ||
1287 | break; | ||
1288 | |||
1289 | case SCTP_CMD_REPORT_BAD_TAG: | ||
1290 | SCTP_DEBUG_PRINTK("vtag mismatch!\n"); | ||
1291 | break; | ||
1292 | |||
1293 | case SCTP_CMD_STRIKE: | ||
1294 | /* Mark one strike against a transport. */ | ||
1295 | sctp_do_8_2_transport_strike(asoc, cmd->obj.transport); | ||
1296 | break; | ||
1297 | |||
1298 | case SCTP_CMD_TRANSPORT_RESET: | ||
1299 | t = cmd->obj.transport; | ||
1300 | sctp_cmd_transport_reset(commands, asoc, t); | ||
1301 | break; | ||
1302 | |||
1303 | case SCTP_CMD_TRANSPORT_ON: | ||
1304 | t = cmd->obj.transport; | ||
1305 | sctp_cmd_transport_on(commands, asoc, t, chunk); | ||
1306 | break; | ||
1307 | |||
1308 | case SCTP_CMD_HB_TIMERS_START: | ||
1309 | sctp_cmd_hb_timers_start(commands, asoc); | ||
1310 | break; | ||
1311 | |||
1312 | case SCTP_CMD_HB_TIMER_UPDATE: | ||
1313 | t = cmd->obj.transport; | ||
1314 | sctp_cmd_hb_timer_update(commands, asoc, t); | ||
1315 | break; | ||
1316 | |||
1317 | case SCTP_CMD_HB_TIMERS_STOP: | ||
1318 | sctp_cmd_hb_timers_stop(commands, asoc); | ||
1319 | break; | ||
1320 | |||
1321 | case SCTP_CMD_REPORT_ERROR: | ||
1322 | error = cmd->obj.error; | ||
1323 | break; | ||
1324 | |||
1325 | case SCTP_CMD_PROCESS_CTSN: | ||
1326 | /* Dummy up a SACK for processing. */ | ||
1327 | sackh.cum_tsn_ack = cmd->obj.u32; | ||
1328 | sackh.a_rwnd = 0; | ||
1329 | sackh.num_gap_ack_blocks = 0; | ||
1330 | sackh.num_dup_tsns = 0; | ||
1331 | sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, | ||
1332 | SCTP_SACKH(&sackh)); | ||
1333 | break; | ||
1334 | |||
1335 | case SCTP_CMD_DISCARD_PACKET: | ||
1336 | /* We need to discard the whole packet. */ | ||
1337 | chunk->pdiscard = 1; | ||
1338 | break; | ||
1339 | |||
1340 | case SCTP_CMD_RTO_PENDING: | ||
1341 | t = cmd->obj.transport; | ||
1342 | t->rto_pending = 1; | ||
1343 | break; | ||
1344 | |||
1345 | case SCTP_CMD_PART_DELIVER: | ||
1346 | sctp_ulpq_partial_delivery(&asoc->ulpq, cmd->obj.ptr, | ||
1347 | GFP_ATOMIC); | ||
1348 | break; | ||
1349 | |||
1350 | case SCTP_CMD_RENEGE: | ||
1351 | sctp_ulpq_renege(&asoc->ulpq, cmd->obj.ptr, | ||
1352 | GFP_ATOMIC); | ||
1353 | break; | ||
1354 | |||
1355 | case SCTP_CMD_SETUP_T4: | ||
1356 | sctp_cmd_setup_t4(commands, asoc, cmd->obj.ptr); | ||
1357 | break; | ||
1358 | |||
1359 | case SCTP_CMD_PROCESS_OPERR: | ||
1360 | sctp_cmd_process_operr(commands, asoc, chunk); | ||
1361 | break; | ||
1362 | case SCTP_CMD_CLEAR_INIT_TAG: | ||
1363 | asoc->peer.i.init_tag = 0; | ||
1364 | break; | ||
1365 | case SCTP_CMD_DEL_NON_PRIMARY: | ||
1366 | sctp_cmd_del_non_primary(asoc); | ||
1367 | break; | ||
1368 | case SCTP_CMD_T3_RTX_TIMERS_STOP: | ||
1369 | sctp_cmd_t3_rtx_timers_stop(commands, asoc); | ||
1370 | break; | ||
1371 | case SCTP_CMD_FORCE_PRIM_RETRAN: | ||
1372 | t = asoc->peer.retran_path; | ||
1373 | asoc->peer.retran_path = asoc->peer.primary_path; | ||
1374 | error = sctp_outq_uncork(&asoc->outqueue); | ||
1375 | local_cork = 0; | ||
1376 | asoc->peer.retran_path = t; | ||
1377 | break; | ||
1378 | default: | ||
1379 | printk(KERN_WARNING "Impossible command: %u, %p\n", | ||
1380 | cmd->verb, cmd->obj.ptr); | ||
1381 | break; | ||
1382 | }; | ||
1383 | if (error) | ||
1384 | break; | ||
1385 | } | ||
1386 | |||
1387 | out: | ||
1388 | if (local_cork) | ||
1389 | sctp_outq_uncork(&asoc->outqueue); | ||
1390 | return error; | ||
1391 | nomem: | ||
1392 | error = -ENOMEM; | ||
1393 | goto out; | ||
1394 | } | ||
1395 | |||
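As the --jgrimm note at the top of the interpreter suggests, the large switch above could be reworked so that each command verb has its own handler and the loop dispatches through a function-pointer table. A hedged sketch of what that could look like (verbs, types and handlers below are invented; this is not how the file is structured):

#include <stdio.h>

/* Illustrative only: table-driven dispatch of side-effect commands. */
enum verb { CMD_NOP, CMD_REPORT_TSN, CMD_TIMER_STOP, CMD_MAX };

struct cmd {
        enum verb verb;
        unsigned int u32;
};

typedef int (*cmd_handler_t)(struct cmd *cmd);

static int do_nop(struct cmd *c)        { (void)c; return 0; }
static int do_report_tsn(struct cmd *c) { printf("TSN %u\n", c->u32); return 0; }
static int do_timer_stop(struct cmd *c) { (void)c; return 0; }

static cmd_handler_t handlers[CMD_MAX] = {
        [CMD_NOP]        = do_nop,
        [CMD_REPORT_TSN] = do_report_tsn,
        [CMD_TIMER_STOP] = do_timer_stop,
};

/* "while (cmds) cmd->handle(x, y, z)", as the note puts it. */
static int run_commands(struct cmd *cmds, int n)
{
        int error = 0;

        for (int i = 0; i < n && !error; i++)
                error = handlers[cmds[i].verb](&cmds[i]);
        return error;
}

int main(void)
{
        struct cmd cmds[] = { { CMD_NOP, 0 }, { CMD_REPORT_TSN, 42 } };

        return run_commands(cmds, 2);
}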